From 8dd868373fe0e1451ecd24f92a2883e1f747d227 Mon Sep 17 00:00:00 2001 From: Jannik Date: Fri, 18 Aug 2023 16:51:36 +0200 Subject: [PATCH 1/8] starting multi-group --- R/group_mxsem_by.R | 49 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 R/group_mxsem_by.R diff --git a/R/group_mxsem_by.R b/R/group_mxsem_by.R new file mode 100644 index 0000000..56a8232 --- /dev/null +++ b/R/group_mxsem_by.R @@ -0,0 +1,49 @@ + +group_mxsem_by <- function(mxModel, + grouping_variables, + parameters = c(mxsem::unicode_directed(), + mxsem::unicode_undirected())){ + + if(!is(mxModel, "MxRAMModel")) + stop("mxModel must be of class MxRAMModel.") + if(mxModel$data$type != "raw") + stop("The data of mxModel must be of type 'raw'") + + parameters <- OpenMx::omxGetParameters(mxModel) + + splitted_data <- mxModel$data$observed |> + dplyr::group_by(dplyr::vars(dplyr::one_of(grouping_variables))) |> + dplyr::group_split() + + names(splitted_data) <- paste0("group_", seq_len(n_groups)) + + n_groups <- length(splitted_data) + + mg_model <- OpenMx::mxModel() + + for(gr in seq_len(n_groups)){ + current_model <- OpenMx::mxModel(mxModel, + OpenMx::mxData(splitted_data[[gr]], type = "raw") + ) + current_parameter_labels <- names(parameters) + for(p in parameters){ + current_parameter_labels[grepl(p, current_parameter_labels)] <- paste0( + current_parameter_labels[grepl(p, current_parameters)], + "_group_", m + ) + } + + current_model <- OpenMx::omxSetParameters(model = current_model, + labels = names(parameters), + values = parameters, + newlabels = current_parameter_labels) + mg_model <- OpenMx::mxModel( + mg_model, + current_model + ) + } + + attr(mg_model, which = "groups") <- splitted_data + + return(mg_model) +} From 6e13cd4937ea2f4e15f19788b20a8de1fd8abf38 Mon Sep 17 00:00:00 2001 From: "Jannik H. Orzek" Date: Fri, 18 Aug 2023 23:12:10 +0200 Subject: [PATCH 2/8] working multi-group models --- DESCRIPTION | 3 +- NAMESPACE | 4 + R/group_mxsem_by.R | 233 ++++++++++++++++++++++++++-- man/get_groups.Rd | 37 +++++ man/group_mxsem_by.Rd | 63 ++++++++ man/print.multi_group_parameters.Rd | 16 ++ man/summarize_multi_group_model.Rd | 37 +++++ 7 files changed, 375 insertions(+), 18 deletions(-) create mode 100644 man/get_groups.Rd create mode 100644 man/group_mxsem_by.Rd create mode 100644 man/print.multi_group_parameters.Rd create mode 100644 man/summarize_multi_group_model.Rd diff --git a/DESCRIPTION b/DESCRIPTION index 651c1e9..170d155 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -17,7 +17,8 @@ Depends: OpenMx Imports: Rcpp (>= 1.0.10), stats, - methods + methods, + dplyr LinkingTo: Rcpp RoxygenNote: 7.2.3 Encoding: UTF-8 diff --git a/NAMESPACE b/NAMESPACE index 64f5bbc..d99470a 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,10 +1,14 @@ # Generated by roxygen2: do not edit by hand +S3method(print,multi_group_parameters) +export(get_groups) +export(group_mxsem_by) export(mxsem) export(parameters) export(set_starting_values) export(simulate_latent_growth_curve) export(simulate_moderated_nonlinear_factor_analysis) +export(summarize_multi_group_model) export(unicode_directed) export(unicode_undirected) import(OpenMx) diff --git a/R/group_mxsem_by.R b/R/group_mxsem_by.R index 56a8232..c4ecfb3 100644 --- a/R/group_mxsem_by.R +++ b/R/group_mxsem_by.R @@ -1,41 +1,116 @@ - +#' group_mxsem_by +#' +#' creates a multi-group model from an OpenMx model. +#' +#' group_mxsem_by creates a multi-group model by splitting the data found +#' in an mxModel object using dplyr's group_by function. 
The general idea is +#' as follows: +#' +#' 1. The function extracts the data from mxModel +#' 2. The data is split using the group_by function of dplyr with the variables +#' in grouping_variables +#' 3. a separate model is set up for each group. All parameters that match +#' those specified in the parameters argument are group specific +#' @param mxModel mxModel with the entire data +#' @param grouping_variables Variables used to split the data in groups +#' @param parameters the parameters that should be group specific. By default +#' all parameters are group specific. +#' @param use_grepl if set to TRUE, grepl is used to check which parameters are +#' group-specific. For instance, if parameters = "a" and use_grepl = TRUE, all parameters +#' whose label contains the letter "a" will be group specific. If use_grep = FALSE +#' only the parameter that has the label "a" is group specific. +#' @return mxModel with multiple groups. Use get_groups to extract the groups +#' @export +#' @examples +#' # THE FOLLOWING EXAMPLE IS ADAPTED FROM +#' # https://openmx.ssri.psu.edu/docs/OpenMx/latest/_static/Rdoc/mxModel.html +#' library(mxsem) +#' +#' model <- 'spatial =~ visual + cubes + paper +#' verbal =~ general + paragrap + sentence +#' math =~ numeric + series + arithmet' +#' +#' mxModel <- mxsem(model = model, +#' data = OpenMx::HS.ability.data) +#' +#' # we want separate models for all combinations of grades and schools: +#' mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, +#' grouping_variables = c("grade", "school")) |> +#' mxRun() +#' +#' # let's summarize the results: +#' summarize_multi_group_model(mg_model) group_mxsem_by <- function(mxModel, grouping_variables, - parameters = c(mxsem::unicode_directed(), - mxsem::unicode_undirected())){ + parameters = c(".*"), + use_grepl = TRUE){ if(!is(mxModel, "MxRAMModel")) stop("mxModel must be of class MxRAMModel.") if(mxModel$data$type != "raw") stop("The data of mxModel must be of type 'raw'") - parameters <- OpenMx::omxGetParameters(mxModel) - splitted_data <- mxModel$data$observed |> - dplyr::group_by(dplyr::vars(dplyr::one_of(grouping_variables))) |> + dplyr::group_by(dplyr::across(dplyr::all_of(grouping_variables))) |> dplyr::group_split() + n_groups <- length(splitted_data) + names(splitted_data) <- paste0("group_", seq_len(n_groups)) - n_groups <- length(splitted_data) + group_specific_parameters <- c() + parameter_labels <- names(OpenMx::omxGetParameters(mxModel)) + for(p in parameters){ + if(use_grepl){ + group_specific_parameters <- c(group_specific_parameters, + parameter_labels[grepl(p, parameter_labels)]) + }else{ + group_specific_parameters <- c(group_specific_parameters, + parameter_labels[parameter_labels == p] + ) + } + } + common_parameters <- parameter_labels[!parameter_labels %in% group_specific_parameters] + + message(paste0("The following parameters will be the same across groups: ", + paste0(common_parameters, collapse = ", "))) + message(paste0("The following parameters will be group specific: ", + paste0(group_specific_parameters, collapse = ", "))) - mg_model <- OpenMx::mxModel() + mg_model <- OpenMx::mxModel(name = mxModel$name) + + fitfunction <- c() for(gr in seq_len(n_groups)){ - current_model <- OpenMx::mxModel(mxModel, - OpenMx::mxData(splitted_data[[gr]], type = "raw") + + fitfunction <- c(fitfunction, + paste0(mxModel$name, "_group_", gr, ".fitfunction")) + + current_model <- OpenMx::mxModel( + name = paste0(mxModel$name, "_group_", gr), + mxModel, + OpenMx::mxData(splitted_data[[gr]], type = "raw") ) - 
current_parameter_labels <- names(parameters) + + current_parameter_labels <- names(OpenMx::omxGetParameters(mxModel)) + for(p in parameters){ - current_parameter_labels[grepl(p, current_parameter_labels)] <- paste0( - current_parameter_labels[grepl(p, current_parameters)], - "_group_", m - ) + if(use_grepl){ + current_parameter_labels[grepl(p, current_parameter_labels)] <- paste0( + current_parameter_labels[grepl(p, current_parameter_labels)], + "_group_", gr + ) + }else{ + current_parameter_labels[current_parameter_labels == p] <- paste0( + current_parameter_labels[current_parameter_labels == p], + "_group_", gr + ) + } } current_model <- OpenMx::omxSetParameters(model = current_model, - labels = names(parameters), - values = parameters, + labels = names(OpenMx::omxGetParameters(mxModel)), + values = OpenMx::omxGetParameters(mxModel), newlabels = current_parameter_labels) mg_model <- OpenMx::mxModel( mg_model, @@ -43,7 +118,131 @@ group_mxsem_by <- function(mxModel, ) } + mg_model <- OpenMx::mxModel( + mg_model, + # define fitfunction as the sum of the fitfunctions of the group models + OpenMx::mxAlgebraFromString(paste0(fitfunction, collapse = " + "), + name = "mg_fitfunction"), + OpenMx::mxFitFunctionAlgebra(algebra = "mg_fitfunction") + ) + attr(mg_model, which = "groups") <- splitted_data + attr(mg_model, which = "grouping_variables") <- grouping_variables + attr(mg_model, which = "common_parameters") <- common_parameters + attr(mg_model, which = "group_specific_parameters") <- group_specific_parameters return(mg_model) } + +#' get_groups +#' +#' returns a list of groups for a multi group model +#' @param multi_group_model multi group model created with group_mxsem_by +#' @return list with data for each group +#' @export +#' @examples +#' # THE FOLLOWING EXAMPLE IS ADAPTED FROM +#' # https://openmx.ssri.psu.edu/docs/OpenMx/latest/_static/Rdoc/mxModel.html +#' library(mxsem) +#' +#' model <- 'spatial =~ visual + cubes + paper +#' verbal =~ general + paragrap + sentence +#' math =~ numeric + series + arithmet' +#' +#' mxModel <- mxsem(model = model, +#' data = OpenMx::HS.ability.data) +#' +#' # we want separate models for all combinations of grades and schools: +#' mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, +#' grouping_variables = c("grade", "school")) |> +#' mxRun() +#' +#' # let's get the groups: +#' get_groups(mg_model) +get_groups <- function(multi_group_model){ + return(attr(multi_group_model, "groups")) +} + + +#' summarize_multi_group_model +#' +#' summarize the results of a multi group model created with group_mxsem_by +#' @param multi_group_model multi group model created with group_mxsem_by +#' @return list with goup specific parameters and common parameters +#' @export +#' @examples +#' # THE FOLLOWING EXAMPLE IS ADAPTED FROM +#' # https://openmx.ssri.psu.edu/docs/OpenMx/latest/_static/Rdoc/mxModel.html +#' library(mxsem) +#' +#' model <- 'spatial =~ visual + cubes + paper +#' verbal =~ general + paragrap + sentence +#' math =~ numeric + series + arithmet' +#' +#' mxModel <- mxsem(model = model, +#' data = OpenMx::HS.ability.data) +#' +#' # we want separate models for all combinations of grades and schools: +#' mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, +#' grouping_variables = c("grade", "school")) |> +#' mxRun() +#' +#' # let's summarize the results: +#' summarize_multi_group_model(mg_model) +summarize_multi_group_model <- function(multi_group_model){ + + groups <- attr(multi_group_model, which = "groups") + grouping_variables <- 
attr(multi_group_model, which = "grouping_variables") + common_parameters <- attr(multi_group_model, "common_parameters") + parameters <- OpenMx::omxGetParameters(model = multi_group_model) + summmarized <- summary(multi_group_model) + + parameter_table <- vector("list", length(groups) + 1) + names(parameter_table) <- c("common parameters", + names(groups)) + + parameter_table[["common parameters"]] <- summmarized$parameters[summmarized$parameters$name %in% common_parameters,] + + for(gr in seq_len(length(groups))){ + + group_name <- names(groups)[gr] + parameter_table[[group_name]] <- list( + parameters = summmarized$parameters[grepl(pattern = paste0(group_name, "$"), + x = names(parameters)),], + group_setting = groups[[gr]][,grouping_variables] |> + unique() + ) + } + + class(parameter_table) <- "multi_group_parameters" + return(parameter_table) +} + + +#' print the multi_group_parameters +#' @param x object from summarize_multi_group_model +#' @param ... not used +#' @method print multi_group_parameters +#' @export +print.multi_group_parameters <- function(x, ...){ + console_width <- getOption("width") + cat("\n") + cat("\n") + cat(paste0(rep("-", console_width), collapse = "")) + cat("\n") + cat("Common Parameters:") + cat("\n") + print(x$`common parameters`) + + for(gr in 2:length(x)){ + cat(paste0(rep("-", console_width), collapse = "")) + cat("\n") + cat(paste0(names(x[gr])[1], ":")) + cat("\n") + print(x[[gr]][["group_setting"]]) + cat("\n") + print(x[[gr]][["parameters"]]) + } + cat("\n") + cat(paste0(rep("-", console_width), collapse = "")) +} diff --git a/man/get_groups.Rd b/man/get_groups.Rd new file mode 100644 index 0000000..32cdf3e --- /dev/null +++ b/man/get_groups.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/group_mxsem_by.R +\name{get_groups} +\alias{get_groups} +\title{get_groups} +\usage{ +get_groups(multi_group_model) +} +\arguments{ +\item{multi_group_model}{multi group model created with group_mxsem_by} +} +\value{ +list with data for each group +} +\description{ +returns a list of groups for a multi group model +} +\examples{ +# THE FOLLOWING EXAMPLE IS ADAPTED FROM +# https://openmx.ssri.psu.edu/docs/OpenMx/latest/_static/Rdoc/mxModel.html +library(mxsem) + +model <- 'spatial =~ visual + cubes + paper + verbal =~ general + paragrap + sentence + math =~ numeric + series + arithmet' + +mxModel <- mxsem(model = model, + data = OpenMx::HS.ability.data) + +# we want separate models for all combinations of grades and schools: +mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, + grouping_variables = c("grade", "school")) |> + mxRun() + +# let's get the groups: +get_groups(mg_model) +} diff --git a/man/group_mxsem_by.Rd b/man/group_mxsem_by.Rd new file mode 100644 index 0000000..6198fc3 --- /dev/null +++ b/man/group_mxsem_by.Rd @@ -0,0 +1,63 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/group_mxsem_by.R +\name{group_mxsem_by} +\alias{group_mxsem_by} +\title{group_mxsem_by} +\usage{ +group_mxsem_by( + mxModel, + grouping_variables, + parameters = c(".*"), + use_grepl = TRUE +) +} +\arguments{ +\item{mxModel}{mxModel with the entire data} + +\item{grouping_variables}{Variables used to split the data in groups} + +\item{parameters}{the parameters that should be group specific. By default +all parameters are group specific.} + +\item{use_grepl}{if set to TRUE, grepl is used to check which parameters are +group-specific. 
For instance, if parameters = "a" and use_grepl = TRUE, all parameters
+whose label contains the letter "a" will be group specific. If use_grepl = FALSE
+only the parameter that has the label "a" is group specific.}
+}
+\value{
+mxModel with multiple groups. Use get_groups to extract the groups
+}
+\description{
+creates a multi-group model from an OpenMx model.
+}
+\details{
+group_mxsem_by creates a multi-group model by splitting the data found
+in an mxModel object using dplyr's group_by function. The general idea is
+as follows:
+
+1. The function extracts the data from mxModel
+2. The data is split using the group_by function of dplyr with the variables
+in grouping_variables
+3. A separate model is set up for each group. All parameters that match
+those specified in the parameters argument are group specific
+}
+\examples{
+# THE FOLLOWING EXAMPLE IS ADAPTED FROM
+# https://openmx.ssri.psu.edu/docs/OpenMx/latest/_static/Rdoc/mxModel.html
+library(mxsem)
+
+model <- 'spatial =~ visual + cubes + paper
+          verbal =~ general + paragrap + sentence
+          math =~ numeric + series + arithmet'
+
+mxModel <- mxsem(model = model,
+                data = OpenMx::HS.ability.data)
+
+# we want separate models for all combinations of grades and schools:
+mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel,
+                                  grouping_variables = c("grade", "school")) |>
+  mxRun()
+
+# let's summarize the results:
+summarize_multi_group_model(mg_model)
+}
diff --git a/man/print.multi_group_parameters.Rd b/man/print.multi_group_parameters.Rd
new file mode 100644
index 0000000..21a5f08
--- /dev/null
+++ b/man/print.multi_group_parameters.Rd
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/group_mxsem_by.R
+\name{print.multi_group_parameters}
+\alias{print.multi_group_parameters}
+\title{print the multi_group_parameters}
+\usage{
+\method{print}{multi_group_parameters}(x, ...)
+}
+\arguments{
+\item{x}{object from summarize_multi_group_model}
+
+\item{...}{not used}
+}
+\description{
+print the multi_group_parameters
+}
diff --git a/man/summarize_multi_group_model.Rd b/man/summarize_multi_group_model.Rd
new file mode 100644
index 0000000..f7d10be
--- /dev/null
+++ b/man/summarize_multi_group_model.Rd
@@ -0,0 +1,37 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/group_mxsem_by.R
+\name{summarize_multi_group_model}
+\alias{summarize_multi_group_model}
+\title{summarize_multi_group_model}
+\usage{
+summarize_multi_group_model(multi_group_model)
+}
+\arguments{
+\item{multi_group_model}{multi group model created with group_mxsem_by}
+}
+\value{
+list with group specific parameters and common parameters
+}
+\description{
+summarize the results of a multi group model created with group_mxsem_by
+}
+\examples{
+# THE FOLLOWING EXAMPLE IS ADAPTED FROM
+# https://openmx.ssri.psu.edu/docs/OpenMx/latest/_static/Rdoc/mxModel.html
+library(mxsem)
+
+model <- 'spatial =~ visual + cubes + paper
+          verbal =~ general + paragrap + sentence
+          math =~ numeric + series + arithmet'
+
+mxModel <- mxsem(model = model,
+                data = OpenMx::HS.ability.data)
+
+# we want separate models for all combinations of grades and schools:
+mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel,
+                                  grouping_variables = c("grade", "school")) |>
+  mxRun()
+
+# let's summarize the results:
+summarize_multi_group_model(mg_model)
+}
From ca485d8c716320523a47321d72749443c5a1e756 Mon Sep 17 00:00:00 2001
From: "Jannik H.
Orzek" Date: Fri, 18 Aug 2023 23:31:33 +0200 Subject: [PATCH 3/8] adding test, updating documentation and workflow --- .github/workflows/test_package_ubuntu.yml | 5 +-- R/group_mxsem_by.R | 37 +++++++++++------------ man/get_groups.Rd | 13 ++++---- man/group_mxsem_by.Rd | 12 +++----- man/summarize_multi_group_model.Rd | 12 +++----- tests/testthat/test-multi-group.R | 24 +++++++++++++++ 6 files changed, 61 insertions(+), 42 deletions(-) create mode 100644 tests/testthat/test-multi-group.R diff --git a/.github/workflows/test_package_ubuntu.yml b/.github/workflows/test_package_ubuntu.yml index 464c259..0450eca 100644 --- a/.github/workflows/test_package_ubuntu.yml +++ b/.github/workflows/test_package_ubuntu.yml @@ -30,11 +30,11 @@ jobs: # install two helper packages we need sudo apt install --no-install-recommends software-properties-common dirmngr # add the signing key (by Michael Rutter) for these repos - # To verify key, run gpg --show-keys /etc/apt/trusted.gpg.d/cran_ubuntu_key.asc + # To verify key, run gpg --show-keys /etc/apt/trusted.gpg.d/cran_ubuntu_key.asc # Fingerprint: E298A3A825C0D65DFD57CBB651716619E084DAB9 wget -qO- https://cloud.r-project.org/bin/linux/ubuntu/marutter_pubkey.asc | sudo tee -a /etc/apt/trusted.gpg.d/cran_ubuntu_key.asc # add the R 4.0 repo from CRAN -- adjust 'focal' to 'groovy' or 'bionic' as needed - sudo add-apt-repository "deb https://cloud.r-project.org/bin/linux/ubuntu $(lsb_release -cs)-cran40/" + sudo add-apt-repository "deb https://cloud.r-project.org/bin/linux/ubuntu $(lsb_release -cs)-cran40/" - name: install compiled packages run: | sudo add-apt-repository ppa:c2d4u.team/c2d4u4.0+ @@ -46,6 +46,7 @@ jobs: sudo apt install --no-install-recommends r-cran-rcpp sudo apt install --no-install-recommends r-cran-testthat sudo apt install --no-install-recommends r-cran-rcmdcheck + sudo apt install --no-install-recommends r-cran-dplyr - name: Check run: rcmdcheck::rcmdcheck(path = ".", args = "--no-manual", error_on = "warning") shell: Rscript {0} diff --git a/R/group_mxsem_by.R b/R/group_mxsem_by.R index c4ecfb3..24a19a3 100644 --- a/R/group_mxsem_by.R +++ b/R/group_mxsem_by.R @@ -30,13 +30,11 @@ #' verbal =~ general + paragrap + sentence #' math =~ numeric + series + arithmet' #' -#' mxModel <- mxsem(model = model, -#' data = OpenMx::HS.ability.data) -#' -#' # we want separate models for all combinations of grades and schools: -#' mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, -#' grouping_variables = c("grade", "school")) |> -#' mxRun() +#' mg_model <- mxsem(model = model, +#' data = OpenMx::HS.ability.data) |> +#' # we want separate models for all combinations of grades and schools: +#' group_mxsem_by(grouping_variables = "school") |> +#' mxTryHard() #' #' # let's summarize the results: #' summarize_multi_group_model(mg_model) @@ -149,13 +147,14 @@ group_mxsem_by <- function(mxModel, #' verbal =~ general + paragrap + sentence #' math =~ numeric + series + arithmet' #' -#' mxModel <- mxsem(model = model, -#' data = OpenMx::HS.ability.data) +#' mg_model <- mxsem(model = model, +#' data = OpenMx::HS.ability.data) |> +#' # we want separate models for all combinations of grades and schools: +#' group_mxsem_by(grouping_variables = "school") |> +#' mxTryHard() #' -#' # we want separate models for all combinations of grades and schools: -#' mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, -#' grouping_variables = c("grade", "school")) |> -#' mxRun() +#' # let's summarize the results: +#' summarize_multi_group_model(mg_model) #' #' # let's get the 
groups: #' get_groups(mg_model) @@ -179,13 +178,11 @@ get_groups <- function(multi_group_model){ #' verbal =~ general + paragrap + sentence #' math =~ numeric + series + arithmet' #' -#' mxModel <- mxsem(model = model, -#' data = OpenMx::HS.ability.data) -#' -#' # we want separate models for all combinations of grades and schools: -#' mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, -#' grouping_variables = c("grade", "school")) |> -#' mxRun() +#' mg_model <- mxsem(model = model, +#' data = OpenMx::HS.ability.data) |> +#' # we want separate models for all combinations of grades and schools: +#' group_mxsem_by(grouping_variables = "school") |> +#' mxTryHard() #' #' # let's summarize the results: #' summarize_multi_group_model(mg_model) diff --git a/man/get_groups.Rd b/man/get_groups.Rd index 32cdf3e..e32511c 100644 --- a/man/get_groups.Rd +++ b/man/get_groups.Rd @@ -24,13 +24,14 @@ model <- 'spatial =~ visual + cubes + paper verbal =~ general + paragrap + sentence math =~ numeric + series + arithmet' -mxModel <- mxsem(model = model, - data = OpenMx::HS.ability.data) +mg_model <- mxsem(model = model, + data = OpenMx::HS.ability.data) |> + # we want separate models for all combinations of grades and schools: + group_mxsem_by(grouping_variables = "school") |> + mxTryHard() -# we want separate models for all combinations of grades and schools: -mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, - grouping_variables = c("grade", "school")) |> - mxRun() +# let's summarize the results: +summarize_multi_group_model(mg_model) # let's get the groups: get_groups(mg_model) diff --git a/man/group_mxsem_by.Rd b/man/group_mxsem_by.Rd index 6198fc3..0ccfa98 100644 --- a/man/group_mxsem_by.Rd +++ b/man/group_mxsem_by.Rd @@ -50,13 +50,11 @@ model <- 'spatial =~ visual + cubes + paper verbal =~ general + paragrap + sentence math =~ numeric + series + arithmet' -mxModel <- mxsem(model = model, - data = OpenMx::HS.ability.data) - -# we want separate models for all combinations of grades and schools: -mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, - grouping_variables = c("grade", "school")) |> - mxRun() +mg_model <- mxsem(model = model, + data = OpenMx::HS.ability.data) |> + # we want separate models for all combinations of grades and schools: + group_mxsem_by(grouping_variables = "school") |> + mxTryHard() # let's summarize the results: summarize_multi_group_model(mg_model) diff --git a/man/summarize_multi_group_model.Rd b/man/summarize_multi_group_model.Rd index f7d10be..91410f1 100644 --- a/man/summarize_multi_group_model.Rd +++ b/man/summarize_multi_group_model.Rd @@ -24,13 +24,11 @@ model <- 'spatial =~ visual + cubes + paper verbal =~ general + paragrap + sentence math =~ numeric + series + arithmet' -mxModel <- mxsem(model = model, - data = OpenMx::HS.ability.data) - -# we want separate models for all combinations of grades and schools: -mg_model <- mxsem:::group_mxsem_by(mxModel = mxModel, - grouping_variables = c("grade", "school")) |> - mxRun() +mg_model <- mxsem(model = model, + data = OpenMx::HS.ability.data) |> + # we want separate models for all combinations of grades and schools: + group_mxsem_by(grouping_variables = "school") |> + mxTryHard() # let's summarize the results: summarize_multi_group_model(mg_model) diff --git a/tests/testthat/test-multi-group.R b/tests/testthat/test-multi-group.R new file mode 100644 index 0000000..9a6fe87 --- /dev/null +++ b/tests/testthat/test-multi-group.R @@ -0,0 +1,24 @@ +test_that("multiplication works", { + library(lavaan) + library(mxsem) + 
set.seed(123) + HS.model <- 'visual =~ x1 + x2 + x3 + textual =~ x4 + x5 + x6 + speed =~ x7 + x8 + x9' + + fit <- cfa(HS.model, + data = HolzingerSwineford1939, + group = "school") + + mg_model <- mxsem(model = HS.model, + data = HolzingerSwineford1939) |> + # we want separate models for all combinations of grades and schools: + group_mxsem_by(grouping_variables = c("school")) |> + mxRun() + + testthat::expect_true(abs( + mg_model$fitfunction$result[[1]] - + -2*logLik(fit) + ) < .1 + ) +}) From 2c9296e243b2ba4b71cd410d85eb8945be0dbfe5 Mon Sep 17 00:00:00 2001 From: "Jannik H. Orzek" Date: Sat, 19 Aug 2023 19:18:59 +0200 Subject: [PATCH 4/8] added more tests for multi-group models --- DESCRIPTION | 2 +- tests/testthat/test-multi-group.R | 55 +++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 3 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 170d155..ee50b48 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: mxsem Type: Package Title: Specify 'OpenMx' Models with a 'lavaan'-Style Syntax -Version: 0.0.5 +Version: 0.0.6 Date: 2023-07-12 Authors@R: c(person(given = "Jannik H.", family = "Orzek", role = c("aut", "cre", "cph"), diff --git a/tests/testthat/test-multi-group.R b/tests/testthat/test-multi-group.R index 9a6fe87..5077a0a 100644 --- a/tests/testthat/test-multi-group.R +++ b/tests/testthat/test-multi-group.R @@ -19,6 +19,57 @@ test_that("multiplication works", { testthat::expect_true(abs( mg_model$fitfunction$result[[1]] - -2*logLik(fit) - ) < .1 - ) + ) < .1) + + HS.model <- 'visual =~ x1 + x2 + x3 + textual =~ x4 + x5 + x6 + speed =~ x7 + x8 + x9 + # mxsem differs from lavaan in that it will not + # automatically set intercepts freed for all but one group + # once other parameters are set to equality + visual ~ 0*1 + textual ~ 0*1 + speed ~ 0*1 + ' + + fit <- cfa(HS.model, + data = HolzingerSwineford1939, + group = "school", + group.equal = c("intercepts", "loadings")) + + mg_model <- mxsem(model = HS.model, + data = HolzingerSwineford1939) |> + # we want separate models for all combinations of grades and schools: + group_mxsem_by(grouping_variables = c("school"), + parameters = mxsem::unicode_undirected()) |> + mxTryHard() + + testthat::expect_true(abs( + mg_model$fitfunction$result[[1]] - + -2*logLik(fit) + ) < .1) + + + HS.model <- 'visual =~ x1 + x2 + x3 + textual =~ x4 + x5 + x6 + speed =~ x7 + x8 + x9 + ' + + fit <- cfa(HS.model, + data = HolzingerSwineford1939, + group = "school", + group.equal = c("loadings")) + + mg_model <- mxsem(model = HS.model, + data = HolzingerSwineford1939) |> + # we want separate models for all combinations of grades and schools: + group_mxsem_by(grouping_variables = c("school"), + parameters = c(mxsem::unicode_undirected(), + "one")) |> + mxTryHard() + + testthat::expect_true(abs( + mg_model$fitfunction$result[[1]] - + -2*logLik(fit) + ) < .1) }) From 296b3a4dc171c802385fcb9da2c342f1ca367dd8 Mon Sep 17 00:00:00 2001 From: Jannik Date: Thu, 24 Aug 2023 23:09:05 +0200 Subject: [PATCH 5/8] renaming group model function --- NAMESPACE | 2 +- R/mxsem.R | 6 +++++- R/{group_mxsem_by.R => mxsem_group_by.R} | 18 +++++++++--------- man/get_groups.Rd | 6 +++--- man/{group_mxsem_by.Rd => mxsem_group_by.Rd} | 14 +++++++------- man/print.multi_group_parameters.Rd | 2 +- man/summarize_multi_group_model.Rd | 8 ++++---- tests/testthat/test-Political-Democracy.R | 1 + tests/testthat/test-multi-group.R | 6 +++--- 9 files changed, 34 insertions(+), 29 deletions(-) rename R/{group_mxsem_by.R => mxsem_group_by.R} (94%) rename 
man/{group_mxsem_by.Rd => mxsem_group_by.Rd} (87%) diff --git a/NAMESPACE b/NAMESPACE index d99470a..582e9f6 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -2,8 +2,8 @@ S3method(print,multi_group_parameters) export(get_groups) -export(group_mxsem_by) export(mxsem) +export(mxsem_group_by) export(parameters) export(set_starting_values) export(simulate_latent_growth_curve) diff --git a/R/mxsem.R b/R/mxsem.R index b1d75c4..868081f 100644 --- a/R/mxsem.R +++ b/R/mxsem.R @@ -314,7 +314,11 @@ mxsem <- function(model, mx_data <- data }else{ if(!add_intercepts){ - mx_data <- OpenMx::mxData(observed = stats::cov(data), + mx_data <- OpenMx::mxData(observed = stats::cov(as.matrix(data[,parameter_table$variables$manifests])), + means = apply(as.matrix(data[,parameter_table$variables$manifests]), + 2, + mean, + na.rm = TRUE), type = "cov", numObs = nrow(data)) }else{ diff --git a/R/group_mxsem_by.R b/R/mxsem_group_by.R similarity index 94% rename from R/group_mxsem_by.R rename to R/mxsem_group_by.R index 24a19a3..f45aa85 100644 --- a/R/group_mxsem_by.R +++ b/R/mxsem_group_by.R @@ -1,8 +1,8 @@ -#' group_mxsem_by +#' mxsem_group_by #' #' creates a multi-group model from an OpenMx model. #' -#' group_mxsem_by creates a multi-group model by splitting the data found +#' mxsem_group_by creates a multi-group model by splitting the data found #' in an mxModel object using dplyr's group_by function. The general idea is #' as follows: #' @@ -33,12 +33,12 @@ #' mg_model <- mxsem(model = model, #' data = OpenMx::HS.ability.data) |> #' # we want separate models for all combinations of grades and schools: -#' group_mxsem_by(grouping_variables = "school") |> +#' mxsem_group_by(grouping_variables = "school") |> #' mxTryHard() #' #' # let's summarize the results: #' summarize_multi_group_model(mg_model) -group_mxsem_by <- function(mxModel, +mxsem_group_by <- function(mxModel, grouping_variables, parameters = c(".*"), use_grepl = TRUE){ @@ -135,7 +135,7 @@ group_mxsem_by <- function(mxModel, #' get_groups #' #' returns a list of groups for a multi group model -#' @param multi_group_model multi group model created with group_mxsem_by +#' @param multi_group_model multi group model created with mxsem_group_by #' @return list with data for each group #' @export #' @examples @@ -150,7 +150,7 @@ group_mxsem_by <- function(mxModel, #' mg_model <- mxsem(model = model, #' data = OpenMx::HS.ability.data) |> #' # we want separate models for all combinations of grades and schools: -#' group_mxsem_by(grouping_variables = "school") |> +#' mxsem_group_by(grouping_variables = "school") |> #' mxTryHard() #' #' # let's summarize the results: @@ -165,8 +165,8 @@ get_groups <- function(multi_group_model){ #' summarize_multi_group_model #' -#' summarize the results of a multi group model created with group_mxsem_by -#' @param multi_group_model multi group model created with group_mxsem_by +#' summarize the results of a multi group model created with mxsem_group_by +#' @param multi_group_model multi group model created with mxsem_group_by #' @return list with goup specific parameters and common parameters #' @export #' @examples @@ -181,7 +181,7 @@ get_groups <- function(multi_group_model){ #' mg_model <- mxsem(model = model, #' data = OpenMx::HS.ability.data) |> #' # we want separate models for all combinations of grades and schools: -#' group_mxsem_by(grouping_variables = "school") |> +#' mxsem_group_by(grouping_variables = "school") |> #' mxTryHard() #' #' # let's summarize the results: diff --git a/man/get_groups.Rd b/man/get_groups.Rd index 
e32511c..f45a984 100644 --- a/man/get_groups.Rd +++ b/man/get_groups.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/group_mxsem_by.R +% Please edit documentation in R/mxsem_group_by.R \name{get_groups} \alias{get_groups} \title{get_groups} @@ -7,7 +7,7 @@ get_groups(multi_group_model) } \arguments{ -\item{multi_group_model}{multi group model created with group_mxsem_by} +\item{multi_group_model}{multi group model created with mxsem_group_by} } \value{ list with data for each group @@ -27,7 +27,7 @@ model <- 'spatial =~ visual + cubes + paper mg_model <- mxsem(model = model, data = OpenMx::HS.ability.data) |> # we want separate models for all combinations of grades and schools: - group_mxsem_by(grouping_variables = "school") |> + mxsem_group_by(grouping_variables = "school") |> mxTryHard() # let's summarize the results: diff --git a/man/group_mxsem_by.Rd b/man/mxsem_group_by.Rd similarity index 87% rename from man/group_mxsem_by.Rd rename to man/mxsem_group_by.Rd index 0ccfa98..010d065 100644 --- a/man/group_mxsem_by.Rd +++ b/man/mxsem_group_by.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/group_mxsem_by.R -\name{group_mxsem_by} -\alias{group_mxsem_by} -\title{group_mxsem_by} +% Please edit documentation in R/mxsem_group_by.R +\name{mxsem_group_by} +\alias{mxsem_group_by} +\title{mxsem_group_by} \usage{ -group_mxsem_by( +mxsem_group_by( mxModel, grouping_variables, parameters = c(".*"), @@ -31,7 +31,7 @@ mxModel with multiple groups. Use get_groups to extract the groups creates a multi-group model from an OpenMx model. } \details{ -group_mxsem_by creates a multi-group model by splitting the data found +mxsem_group_by creates a multi-group model by splitting the data found in an mxModel object using dplyr's group_by function. 
The general idea is as follows: @@ -53,7 +53,7 @@ model <- 'spatial =~ visual + cubes + paper mg_model <- mxsem(model = model, data = OpenMx::HS.ability.data) |> # we want separate models for all combinations of grades and schools: - group_mxsem_by(grouping_variables = "school") |> + mxsem_group_by(grouping_variables = "school") |> mxTryHard() # let's summarize the results: diff --git a/man/print.multi_group_parameters.Rd b/man/print.multi_group_parameters.Rd index 21a5f08..f5655f8 100644 --- a/man/print.multi_group_parameters.Rd +++ b/man/print.multi_group_parameters.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/group_mxsem_by.R +% Please edit documentation in R/mxsem_group_by.R \name{print.multi_group_parameters} \alias{print.multi_group_parameters} \title{print the multi_group_parameters} diff --git a/man/summarize_multi_group_model.Rd b/man/summarize_multi_group_model.Rd index 91410f1..23a93e6 100644 --- a/man/summarize_multi_group_model.Rd +++ b/man/summarize_multi_group_model.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/group_mxsem_by.R +% Please edit documentation in R/mxsem_group_by.R \name{summarize_multi_group_model} \alias{summarize_multi_group_model} \title{summarize_multi_group_model} @@ -7,13 +7,13 @@ summarize_multi_group_model(multi_group_model) } \arguments{ -\item{multi_group_model}{multi group model created with group_mxsem_by} +\item{multi_group_model}{multi group model created with mxsem_group_by} } \value{ list with goup specific parameters and common parameters } \description{ -summarize the results of a multi group model created with group_mxsem_by +summarize the results of a multi group model created with mxsem_group_by } \examples{ # THE FOLLOWING EXAMPLE IS ADAPTED FROM @@ -27,7 +27,7 @@ model <- 'spatial =~ visual + cubes + paper mg_model <- mxsem(model = model, data = OpenMx::HS.ability.data) |> # we want separate models for all combinations of grades and schools: - group_mxsem_by(grouping_variables = "school") |> + mxsem_group_by(grouping_variables = "school") |> mxTryHard() # let's summarize the results: diff --git a/tests/testthat/test-Political-Democracy.R b/tests/testthat/test-Political-Democracy.R index db57f5a..71467a3 100644 --- a/tests/testthat/test-Political-Democracy.R +++ b/tests/testthat/test-Political-Democracy.R @@ -1,6 +1,7 @@ test_that("Political Democracy works", { set.seed(123) library(lavaan) + library(mxsem) model <- ' # latent variable definitions diff --git a/tests/testthat/test-multi-group.R b/tests/testthat/test-multi-group.R index 5077a0a..4bbc9ef 100644 --- a/tests/testthat/test-multi-group.R +++ b/tests/testthat/test-multi-group.R @@ -13,7 +13,7 @@ test_that("multiplication works", { mg_model <- mxsem(model = HS.model, data = HolzingerSwineford1939) |> # we want separate models for all combinations of grades and schools: - group_mxsem_by(grouping_variables = c("school")) |> + mxsem_group_by(grouping_variables = c("school")) |> mxRun() testthat::expect_true(abs( @@ -40,7 +40,7 @@ test_that("multiplication works", { mg_model <- mxsem(model = HS.model, data = HolzingerSwineford1939) |> # we want separate models for all combinations of grades and schools: - group_mxsem_by(grouping_variables = c("school"), + mxsem_group_by(grouping_variables = c("school"), parameters = mxsem::unicode_undirected()) |> mxTryHard() @@ -63,7 +63,7 @@ test_that("multiplication works", { mg_model <- mxsem(model = HS.model, data = HolzingerSwineford1939) |> # 
we want separate models for all combinations of grades and schools: - group_mxsem_by(grouping_variables = c("school"), + mxsem_group_by(grouping_variables = c("school"), parameters = c(mxsem::unicode_undirected(), "one")) |> mxTryHard() From f49b437825396593f389ab953975d82b96535153 Mon Sep 17 00:00:00 2001 From: Jannik Date: Thu, 24 Aug 2023 23:19:59 +0200 Subject: [PATCH 6/8] removing means --- R/mxsem.R | 4 ---- 1 file changed, 4 deletions(-) diff --git a/R/mxsem.R b/R/mxsem.R index 868081f..c6ee73f 100644 --- a/R/mxsem.R +++ b/R/mxsem.R @@ -315,10 +315,6 @@ mxsem <- function(model, }else{ if(!add_intercepts){ mx_data <- OpenMx::mxData(observed = stats::cov(as.matrix(data[,parameter_table$variables$manifests])), - means = apply(as.matrix(data[,parameter_table$variables$manifests]), - 2, - mean, - na.rm = TRUE), type = "cov", numObs = nrow(data)) }else{ From e9e6b9c41d4b52fca51d68b46d05c575f8121e8d Mon Sep 17 00:00:00 2001 From: Jannik Date: Fri, 25 Aug 2023 14:49:33 +0200 Subject: [PATCH 7/8] functions to plot individual parameters when using definition variables in algebras (e.g., mnlfa) --- DESCRIPTION | 3 +- NAMESPACE | 3 + R/get_individual_algebra_results.R | 99 ++++ man/get_individual_algebra_results.Rd | 53 ++ tests/testthat/test-get_individual_algebras.R | 57 +++ tests/testthat/test-mnlfa.R | 18 + .../Moderated-Nonlinear-Factor-Analysis.Rmd | 58 ++- .../Moderated-Nonlinear-Factor-Analysis.md | 462 ++++++++++++++++++ ...oderated-Nonlinear-Factor-Analysis.mxsemmd | 58 ++- vignettes/figures/mnlfa_lS_1.png | Bin 0 -> 6789 bytes 10 files changed, 795 insertions(+), 16 deletions(-) create mode 100644 R/get_individual_algebra_results.R create mode 100644 man/get_individual_algebra_results.Rd create mode 100644 tests/testthat/test-get_individual_algebras.R create mode 100644 vignettes/Moderated-Nonlinear-Factor-Analysis.md create mode 100644 vignettes/figures/mnlfa_lS_1.png diff --git a/DESCRIPTION b/DESCRIPTION index ee50b48..f0063ed 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -18,7 +18,8 @@ Imports: Rcpp (>= 1.0.10), stats, methods, - dplyr + dplyr, + utils LinkingTo: Rcpp RoxygenNote: 7.2.3 Encoding: UTF-8 diff --git a/NAMESPACE b/NAMESPACE index 582e9f6..fbe2b62 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -2,6 +2,7 @@ S3method(print,multi_group_parameters) export(get_groups) +export(get_individual_algebra_results) export(mxsem) export(mxsem_group_by) export(parameters) @@ -17,4 +18,6 @@ importFrom(methods,is) importFrom(stats,cov) importFrom(stats,rnorm) importFrom(stats,runif) +importFrom(utils,setTxtProgressBar) +importFrom(utils,txtProgressBar) useDynLib(mxsem, .registration = TRUE) diff --git a/R/get_individual_algebra_results.R b/R/get_individual_algebra_results.R new file mode 100644 index 0000000..dfa946b --- /dev/null +++ b/R/get_individual_algebra_results.R @@ -0,0 +1,99 @@ +#' get_individual_algebra_results +#' +#' evaluates algebras for each subject in the data set. This function is +#' useful if you have algebras with definition variables (e.g., in mnlfa). +#' @param mxModel mxModel with algebras +#' @param algebra_names optional: Only compute individual algebras for a subset +#' of the parameters +#' @param progress_bar should a progress bar be shown? +#' @returns a list of data frames. The list contains data frames for each of the algebras. 
+#' The data frames contain the individual specific algebra results as well as all +#' definition variables used to predict said algebra +#' @export +#' @importFrom utils txtProgressBar +#' @importFrom utils setTxtProgressBar +#' @examples +#' library(mxsem) +#' +#' set.seed(123) +#' dataset <- simulate_moderated_nonlinear_factor_analysis(N = 50) +#' +#' model <- " +#' xi =~ x1 + x2 + x3 +#' eta =~ y1 + y2 + y3 +#' eta ~ {a := a0 + data.k*a1}*xi +#' " +#' fit <- mxsem(model = model, +#' data = dataset) |> +#' mxTryHard() +#' +#' algebra_results <- get_individual_algebra_results(mxModel = fit, +#' progress_bar = FALSE) +#' +#' # the following plot will only show two data points because there is only +#' # two values for the definition variable k (0 or 1). +#' +#' plot(x = algebra_results[["a"]]$k, +#' y = algebra_results[["a"]]$algebra_result) +get_individual_algebra_results <- function(mxModel, + algebra_names = NULL, + progress_bar = TRUE){ + n_subjects <- mxModel$data$numObs + if(is.null(algebra_names)){ + algebra_names <- names(mxModel$algebras) + }else{ + if(any(! algebra_names %in% names(mxModel$algebras))) + stop("Could not find the following algebra(s) in your model: ", + paste0(algebra_names[! algebra_names %in% names(mxModel$algebras)], collapse = ", "), + ". The algebras are: ", + paste0(names(mxModel$algebras), collapse = ", "), ".") + } + + if(is.null(algebra_names) | (length(algebra_names) == 0)) + stop("Could not find any algebras in your OpenMx model.") + + + algebra_results <- vector("list", length(algebra_names)) + names(algebra_results) <- algebra_names + + if(progress_bar) + pb <- utils::txtProgressBar(min = 0, + max = length(algebra_names) * n_subjects, + initial = 0, + style = 3) + + it <- 0 + + for(algebra_name in algebra_names){ + + # find definition variables used in this algebra + algebra_elements <- extract_algebra_elements(mxAlgebra_formula = mxModel$algebras[[algebra_name]]$formula) + definition_variables <- algebra_elements[grepl("^data\\.", x = algebra_elements)] |> + gsub(pattern = "data\\.", replacement = "", x = _) + + algebra_result <- data.frame(person = 1:n_subjects, + mxModel$data$observed[,definition_variables, drop = FALSE], + algebra_result = NA) + + for(i in 1:n_subjects){ + it <- it + 1 + if(progress_bar) + utils::setTxtProgressBar(pb = pb, + value = it) + + algebra_result_i <- mxEvalByName(name = algebra_name, + model = mxModel, + compute = TRUE, + defvar.row = i) + if((nrow(algebra_result_i) != 1) | + (ncol(algebra_result_i) != 1)) + stop("This function cannot handle algebras with non-scalar outcomes.") + + algebra_result$algebra_result[i] <- algebra_result_i[1,1] + } + + algebra_results[[algebra_name]] <- algebra_result + } + + return(algebra_results) +} diff --git a/man/get_individual_algebra_results.Rd b/man/get_individual_algebra_results.Rd new file mode 100644 index 0000000..a570e64 --- /dev/null +++ b/man/get_individual_algebra_results.Rd @@ -0,0 +1,53 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/get_individual_algebra_results.R +\name{get_individual_algebra_results} +\alias{get_individual_algebra_results} +\title{get_individual_algebra_results} +\usage{ +get_individual_algebra_results( + mxModel, + algebra_names = NULL, + progress_bar = TRUE +) +} +\arguments{ +\item{mxModel}{mxModel with algebras} + +\item{algebra_names}{optional: Only compute individual algebras for a subset +of the parameters} + +\item{progress_bar}{should a progress bar be shown?} +} +\value{ +a list of data frames. 
The list contains data frames for each of the algebras. +The data frames contain the individual specific algebra results as well as all +definition variables used to predict said algebra +} +\description{ +evaluates algebras for each subject in the data set. This function is +useful if you have algebras with definition variables (e.g., in mnlfa). +} +\examples{ +library(mxsem) + +set.seed(123) +dataset <- simulate_moderated_nonlinear_factor_analysis(N = 50) + +model <- " + xi =~ x1 + x2 + x3 + eta =~ y1 + y2 + y3 + eta ~ {a := a0 + data.k*a1}*xi + " +fit <- mxsem(model = model, + data = dataset) |> + mxTryHard() + +algebra_results <- get_individual_algebra_results(mxModel = fit, + progress_bar = FALSE) + +# the following plot will only show two data points because there is only +# two values for the definition variable k (0 or 1). + +plot(x = algebra_results[["a"]]$k, + y = algebra_results[["a"]]$algebra_result) +} diff --git a/tests/testthat/test-get_individual_algebras.R b/tests/testthat/test-get_individual_algebras.R new file mode 100644 index 0000000..fdee313 --- /dev/null +++ b/tests/testthat/test-get_individual_algebras.R @@ -0,0 +1,57 @@ +test_that("testing individual algebras", { + set.seed(123) + dataset <- simulate_moderated_nonlinear_factor_analysis(N = 50) + + model <- " + xi =~ x1 + x2 + x3 + eta =~ y1 + y2 + y3 + eta ~ a*xi + + !a0 + !a1 + a := a0 + data.k*a1 + " + + mod <- mxsem(model = model, + data = dataset) |> + mxTryHard() + + ind_alg <- get_individual_algebra_results(mxModel = mod) + + testthat::expect_true(length(ind_alg) == 1) + testthat::expect_true(names(ind_alg) == c("a")) + testthat::expect_true(all(colnames(ind_alg$a) == c("person", "k", "algebra_result"))) + + testthat::expect_error(get_individual_algebra_results(mxModel = mod, + algebra_names = "b")) + + model <- " + xi =~ x1 + x2 + x3 + eta =~ y1 + y2 + y3 + eta ~ {a := a0 + data.k*a1}*xi + " + + mod2 <- mxsem(model = model, + data = dataset) |> + mxTryHard() + + ind_alg <- get_individual_algebra_results(mxModel = mod2) + + testthat::expect_true(length(ind_alg) == 1) + testthat::expect_true(names(ind_alg) == c("a")) + testthat::expect_true(all(colnames(ind_alg$a) == c("person", "k", "algebra_result"))) + + testthat::expect_error(get_individual_algebra_results(mxModel = mod2, + algebra_names = "b")) + + model <- " + xi =~ x1 + x2 + x3 + eta =~ y1 + y2 + y3 + eta ~ xi + " + mod3 <- mxsem(model = model, + data = dataset) |> + mxTryHard() + + testthat::expect_error(get_individual_algebra_results(mxModel = mod3)) +}) diff --git a/tests/testthat/test-mnlfa.R b/tests/testthat/test-mnlfa.R index 651332c..cbe060a 100644 --- a/tests/testthat/test-mnlfa.R +++ b/tests/testthat/test-mnlfa.R @@ -22,4 +22,22 @@ test_that("mnlfa works", { testthat::expect_true(abs(omxGetParameters(mod)["a0"] - .7) < .1) testthat::expect_true(abs(omxGetParameters(mod)["a1"] - -.2) < .1) + model <- " + xi =~ x1 + x2 + x3 + eta =~ y1 + y2 + y3 + eta ~ {a0 + data.k*a1}*xi + " + + mod2 <- mxsem(model = model, + data = dataset) |> + mxTryHard() + + omxGetParameters(mod2) + + testthat::expect_true(abs(omxGetParameters(mod2)["a0"] - .7) < .1) + testthat::expect_true(abs(omxGetParameters(mod2)["a1"] - -.2) < .1) + + testthat::expect_true(abs(omxGetParameters(mod)["a0"] - omxGetParameters(mod2)["a0"]) < .001) + testthat::expect_true(abs(omxGetParameters(mod)["a1"] - omxGetParameters(mod2)["a1"]) < .001) + }) diff --git a/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd b/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd index b357812..764f959 100644 
--- a/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd +++ b/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd @@ -41,9 +41,6 @@ are taken directly from that script: # at https://osf.io/527zr library(mokken) -#> Loading required package: poLCA -#> Loading required package: scatterplot3d -#> Loading required package: MASS ## Load data data("DS14", package="mokken") @@ -161,7 +158,7 @@ With **mxsem**, this can be implemented as follows: ```r -mlnfa_syntax <- " +mnlfa_syntax <- " ==== MNLFA ==== SI =~ {lSi_1 := lSi0_1 + lSi1_1*data.Age + lSi2_1*data.Male }*Si1 + @@ -232,7 +229,7 @@ Finally, we pass the syntax to the `mxsem()`-function to create an **OpenMx** mo ```r library(mxsem) -mlnfa_model <- mxsem(model = mlnfa_syntax, +mnlfa_model <- mxsem(model = mnlfa_syntax, data = DS14, # we scaled the latent variables manually, # so we will set all automatic scalings to FALSE: @@ -248,8 +245,8 @@ The model can now be fitted using `mxRun()` or `mxTryHard()`. ```r -mlnfa_model <- mxRun(mlnfa_model) -summary(mlnfa_model) +mnlfa_model <- mxRun(mnlfa_model) +summary(mnlfa_model) ```
@@ -406,8 +403,8 @@ summary(mlnfa_model) #> RMSEA: 0 [95% CI (NA, NA)] #> Prob(RMSEA <= 0.05): NA #> To get additional fit indices, see help(mxRefModels) -#> timestamp: 2023-08-02 17:35:53 -#> Wall clock time: 226.1492 secs +#> timestamp: 2023-08-25 14:20:14 +#> Wall clock time: 357.9646 secs #> optimizer: SLSQP #> OpenMx version number: 2.21.8 #> Need help? See help(mxSummary) @@ -418,4 +415,47 @@ Checking the regression coefficients `lSi1_1`, `lSi1_2`, ... will tell us if there is a linear change across age or if individuals with `Male = 0` differ from individuals with `Male = 1`. +## Plotting Individual Parameters + +MNLFA predicts individual parameter values (e.g., `lSi_1`) using definition +variables. To get a better picture of the individual parameters, **mxsem** +provides the `get_individual_algebra_results` function. This function will compute +for each algebra the individual parameters. Depending on the sample size and +the number of algebras in the model, this may take some time. Therefore, we will +only extract the individual parameter values for `lSi_1` as an example. + + +```r +lSi_1 <- get_individual_algebra_results(mxModel = mnlfa_model, + algebra_names = "lSi_1", + progress_bar = FALSE) +head(lSi_1$lSi_1) +#> person Age Male algebra_result +#> 1 1 0.03136864 1 0.8699212 +#> 2 2 -0.44266587 1 0.9246368 +#> 3 3 -0.82189349 1 0.9684094 +#> 4 4 -0.63227968 1 0.9465231 +#> 5 5 0.03136864 1 0.8699212 +#> 6 6 -0.06343826 0 0.7408356 +``` + +The function will return a list with data frames for all requested algebras. Here, +the list has only one element: `lSi_1`. The data frame will have fields for the +person, the definition variables used in the algebra (`Age` and `Male` in this case) +and the person specific parameter (`algebra_result`). We can plot these results +as follows: + + +```r +library(ggplot2) +ggplot(data = lSi_1$lSi_1, + aes(x = Age, + y = algebra_result, + color = factor(Male))) + + ylab("Individual Parameter Value for lSi_1") + + geom_point() +``` + +![plot of chunk unnamed-chunk-25](figure/unnamed-chunk-25-1.png) + ## References diff --git a/vignettes/Moderated-Nonlinear-Factor-Analysis.md b/vignettes/Moderated-Nonlinear-Factor-Analysis.md new file mode 100644 index 0000000..5593ad6 --- /dev/null +++ b/vignettes/Moderated-Nonlinear-Factor-Analysis.md @@ -0,0 +1,462 @@ +--- +title: "Moderated-Nonlinear-Factor-Analysis" +output: rmarkdown::html_vignette +bibliography: mxsem.bib +csl: apa.csl +vignette: > + %\VignetteIndexEntry{Moderated-Nonlinear-Factor-Analysis} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} + %\DeclareUnicodeCharacter{2194}{$\leftrightarrow$} + %\DeclareUnicodeCharacter{2192}{$\rightarrow$} +--- + + + +Moderated nonlinear factor analysis (MNLFA) has been proposed by @bauerMoreGeneralModel2017 +to assess measurement invariance. The general idea is that parameters within +the model (that is, loadings, regressions, intercepts, and (co-)variances) may +differ across individuals. These differences are predicted using covariates such +as age and gender. For instance, @kolbeAssessingMeasurementInvariance2022 demonstrate +MNLFA using the following model: + +![](figures/mnlfa.png) + +The loading `lSi_1` may differ linearly by age and gender [@kolbeAssessingMeasurementInvariance2022]. +That is `lSi_1 = lSi0_1 + lSi1_1*Age + lSi2_1*Male`. +If `lSi1_1` or `lSi2_1` are non-zero, this suggests measurement non-invariance. + +MNLFA is a powerful procedure, but specifying the models can be tedious. 
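+To get a feeling for what this moderation means, here is a small sketch with
+made-up coefficient values (these are illustration values, not estimates from
+the DS14 data) that computes the individual-specific loading for a few
+hypothetical persons:
+
+```r
+# made-up illustration values, NOT estimates:
+lSi0_1 <- 0.80   # baseline loading
+lSi1_1 <- -0.10  # moderation by (standardized) age
+lSi2_1 <- 0.15   # moderation by gender (Male = 0 or 1)
+
+# individual-specific loading: lSi_1 = lSi0_1 + lSi1_1*Age + lSi2_1*Male
+lSi_1 <- function(Age, Male) lSi0_1 + lSi1_1*Age + lSi2_1*Male
+
+lSi_1(Age =  0, Male = 0) # 0.80
+lSi_1(Age =  1, Male = 1) # 0.85
+lSi_1(Age = -1, Male = 0) # 0.90
+```
+
+If `lSi1_1` and `lSi2_1` were both zero, every person would get the same loading;
+non-zero values mean the loading differs across persons, which is the
+non-invariance that MNLFA is designed to detect.
+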
+@kolbeAssessingMeasurementInvariance2022 provide an in-depth introduction using +**OpenMx** using the DS14 data set by @denolletPredictiveValueSocial2013 +included in the **mokken** [@MokkenScaleAnalysis2007] package. + +The script for the tutorial by @kolbeAssessingMeasurementInvariance2022 can +be found on osf at https://osf.io/527zr. The following pre-processing steps +are taken directly from that script: + + +```r +# code copied from Laura Kolbe, Terrence D. Jorgensen, Suzanne Jak, and Dylan Molenaar +# at https://osf.io/527zr + +library(mokken) + +## Load data +data("DS14", package="mokken") + +## Save as data frame +DS14 <- data.frame(DS14) + +## Recode negatively worded items +DS14$Si1 <- 4 - DS14$Si1. +DS14$Si3 <- 4 - DS14$Si3. + +## Standardize age +DS14$Age <- (DS14$Age - mean(DS14$Age))/sd(DS14$Age) +# mean-centering age is another option, but caused convergence difficulties with +# this dataset + +## Change order of variables +DS14 <- DS14[,c("Male","Age","Si1","Si3","Si6","Si8","Si10","Si11","Si14", + "Na2","Na4","Na5","Na7","Na9","Na12", "Na13")] + +## Check data +head(DS14) +#> Male Age Si1 Si3 Si6 Si8 Si10 Si11 Si14 Na2 Na4 Na5 Na7 Na9 Na12 Na13 +#> 1 1 0.03136864 2 2 2 3 2 2 4 3 2 2 3 2 4 2 +#> 2 1 -0.44266587 2 1 2 2 2 4 2 0 0 0 0 0 3 0 +#> 3 1 -0.82189349 3 3 2 2 2 2 1 3 1 1 2 2 1 1 +#> 4 1 -0.63227968 2 2 1 2 3 1 1 3 0 0 0 1 1 0 +#> 5 1 0.03136864 2 3 2 3 3 2 3 2 2 2 3 2 3 1 +#> 6 0 -0.06343826 0 0 0 0 0 0 0 3 1 2 1 2 4 2 +``` + +## Model specification + +We want to specify the configural model outlined in @kolbeAssessingMeasurementInvariance2022. +In this model, all loadings, intercepts, variances and co-variances are predicted +with the covariates `Age` and `Male`. The only parameter that is constant across +all subjects are the latent variances of `Si` and `Na`, which are fixed to 1 for +scaling. + +For demonstrative purposes, we will first set up a confirmatory factor analysis +without the `Age` and `Male` covariates. Using **mxsem**, the syntax could look +as follows: + + + +```r +cfa_syntax <- " + +# loadings +Si =~ lSi_1*Si1 + + lSi_3*Si3 + + lSi_6*Si6 + + lSi_8*Si8 + + lSi_10*Si10 + + lSi_11*Si11 + + lSi_14*Si14 + +Na =~ lNa_2*Na2 + + lNa_4*Na4 + + lNa_5*Na5 + + lNa_7*Na7 + + lNa_9*Na9 + + lNa_12*Na12 + + lNa_13*Na13 + +# latent variances and covariances +Si ~~ 1*Si +Na ~~ 1*Na + cov*Si + +# manifest variances +Si1 ~~ vSi_1*Si1 +Si3 ~~ vSi_3*Si3 +Si6 ~~ vSi_6*Si6 +Si8 ~~ vSi_8*Si8 +Si10 ~~ vSi_10*Si10 +Si11 ~~ vSi_11*Si11 +Si14 ~~ vSi_14*Si14 + +Na2 ~~ vNa_2*Na2 +Na4 ~~ vNa_4*Na4 +Na5 ~~ vNa_5*Na5 +Na7 ~~ vNa_7*Na7 +Na9 ~~ vNa_9*Na9 +Na12 ~~ vNa_12*Na12 +Na13 ~~ vNa_13*Na13 + +# intercepts +Si1 ~ iSi_1*1 +Si3 ~ iSi_3*1 +Si6 ~ iSi_6*1 +Si8 ~ iSi_8*1 +Si10 ~ iSi_10*1 +Si11 ~ iSi_11*1 +Si14 ~ iSi_14*1 + +Na2 ~ iNa_2*1 +Na4 ~ iNa_4*1 +Na5 ~ iNa_5*1 +Na7 ~ iNa_7*1 +Na9 ~ iNa_9*1 +Na12 ~ iNa_12*1 +Na13 ~ iNa_13*1 +" +``` + +We want to predict each and every parameter in this model using `Age` and `Male`. + +* `lSi_1 = lSi0_1 + lSi1_1*Age + lSi2_1*Male` +* `lSi_3 = lSi0_3 + lSi1_3*Age + lSi2_3*Male` +* ... +* `iNa_4 = iNa0_4 + iNa1_4*Age + iNa2_4*Male` +* ... 
+ +With **mxsem**, this can be implemented as follows: + + +```r +mnlfa_syntax <- " +==== MNLFA ==== + +SI =~ {lSi_1 := lSi0_1 + lSi1_1*data.Age + lSi2_1*data.Male }*Si1 + + {lSi_3 := lSi0_3 + lSi1_3*data.Age + lSi2_3*data.Male }*Si3 + + {lSi_6 := lSi0_6 + lSi1_6*data.Age + lSi2_6*data.Male }*Si6 + + {lSi_8 := lSi0_8 + lSi1_8*data.Age + lSi2_8*data.Male }*Si8 + + {lSi_10 := lSi0_10 + lSi1_10*data.Age + lSi2_10*data.Male}*Si10 + + {lSi_11 := lSi0_11 + lSi1_11*data.Age + lSi2_11*data.Male}*Si11 + + {lSi_14 := lSi0_14 + lSi1_14*data.Age + lSi2_14*data.Male}*Si14 + +NA =~ {lNa_2 := lNa0_2 + lNa1_2*data.Age + lNa2_2*data.Male }*Na2 + + {lNa_4 := lNa0_4 + lNa1_4*data.Age + lNa2_4*data.Male }*Na4 + + {lNa_5 := lNa0_5 + lNa1_5*data.Age + lNa2_5*data.Male }*Na5 + + {lNa_7 := lNa0_7 + lNa1_7*data.Age + lNa2_7*data.Male }*Na7 + + {lNa_9 := lNa0_9 + lNa1_9*data.Age + lNa2_9*data.Male }*Na9 + + {lNa_12 := lNa0_12 + lNa1_12*data.Age + lNa2_12*data.Male}*Na12 + + {lNa_13 := lNa0_13 + lNa1_13*data.Age + lNa2_13*data.Male}*Na13 + +SI ~~ 1*SI +NA ~~ 1*NA + {cov := cov0 + cov1*data.Age + cov2*data.Male }*SI + +Si1 ~~ {vSi_1 := exp(vSi0_1 + vSi1_1*data.Age + vSi2_1*data.Male )}*Si1 +Si3 ~~ {vSi_3 := exp(vSi0_3 + vSi1_3*data.Age + vSi2_3*data.Male )}*Si3 +Si6 ~~ {vSi_6 := exp(vSi0_6 + vSi1_6*data.Age + vSi2_6*data.Male )}*Si6 +Si8 ~~ {vSi_8 := exp(vSi0_8 + vSi1_8*data.Age + vSi2_8*data.Male )}*Si8 +Si10 ~~ {vSi_10 := exp(vSi0_10 + vSi1_10*data.Age + vSi2_10*data.Male)}*Si10 +Si11 ~~ {vSi_11 := exp(vSi0_11 + vSi1_11*data.Age + vSi2_11*data.Male)}*Si11 +Si14 ~~ {vSi_14 := exp(vSi0_14 + vSi1_14*data.Age + vSi2_14*data.Male)}*Si14 + +Na2 ~~ {vNa_2 := exp(vNa0_2 + vNa1_2*data.Age + vNa2_2*data.Male )}*Na2 +Na4 ~~ {vNa_4 := exp(vNa0_4 + vNa1_4*data.Age + vNa2_4*data.Male )}*Na4 +Na5 ~~ {vNa_5 := exp(vNa0_5 + vNa1_5*data.Age + vNa2_5*data.Male )}*Na5 +Na7 ~~ {vNa_7 := exp(vNa0_7 + vNa1_7*data.Age + vNa2_7*data.Male )}*Na7 +Na9 ~~ {vNa_9 := exp(vNa0_9 + vNa1_9*data.Age + vNa2_9*data.Male )}*Na9 +Na12 ~~ {vNa_12 := exp(vNa0_12 + vNa1_12*data.Age + vNa2_12*data.Male)}*Na12 +Na13 ~~ {vNa_13 := exp(vNa0_13 + vNa1_13*data.Age + vNa2_13*data.Male)}*Na13 + +Si1 ~ {iSi_1 := iSi0_1 + iSi1_1*data.Age + iSi2_1*data.Male }*1 +Si3 ~ {iSi_3 := iSi0_3 + iSi1_3*data.Age + iSi2_3*data.Male }*1 +Si6 ~ {iSi_6 := iSi0_6 + iSi1_6*data.Age + iSi2_6*data.Male }*1 +Si8 ~ {iSi_8 := iSi0_8 + iSi1_8*data.Age + iSi2_8*data.Male }*1 +Si10 ~ {iSi_10 := iSi0_10 + iSi1_10*data.Age + iSi2_10*data.Male}*1 +Si11 ~ {iSi_11 := iSi0_11 + iSi1_11*data.Age + iSi2_11*data.Male}*1 +Si14 ~ {iSi_14 := iSi0_14 + iSi1_14*data.Age + iSi2_14*data.Male}*1 + +Na2 ~ {iNa_2 := iNa0_2 + iNa1_2*data.Age + iNa2_2*data.Male }*1 +Na4 ~ {iNa_4 := iNa0_4 + iNa1_4*data.Age + iNa2_4*data.Male }*1 +Na5 ~ {iNa_5 := iNa0_5 + iNa1_5*data.Age + iNa2_5*data.Male }*1 +Na7 ~ {iNa_7 := iNa0_7 + iNa1_7*data.Age + iNa2_7*data.Male }*1 +Na9 ~ {iNa_9 := iNa0_9 + iNa1_9*data.Age + iNa2_9*data.Male }*1 +Na12 ~ {iNa_12 := iNa0_12 + iNa1_12*data.Age + iNa2_12*data.Male}*1 +Na13 ~ {iNa_13 := iNa0_13 + iNa1_13*data.Age + iNa2_13*data.Male}*1 +" +``` + +Note how each parameter is redefined exactly as outlined above. However, there +are some important details: + +1. The variance parameters (e.g., `vNa_2`), have been transformed with an `exp`-function. +This exponential function ensures that variances are always positive. +2. All transformations must be embraced in curly braces. This ensures that **mxsem** +sees them as algebras and knows how to specify them in **OpenMx**. +3. 
The covariates `Age` and `Male` must be specified with the `data.`-prefix to +let **OpenMx** know that the values can be found in the data set. + +Finally, we pass the syntax to the `mxsem()`-function to create an **OpenMx** model: + + +```r +library(mxsem) +mnlfa_model <- mxsem(model = mnlfa_syntax, + data = DS14, + # we scaled the latent variables manually, + # so we will set all automatic scalings to FALSE: + scale_loadings = FALSE, + scale_latent_variances = FALSE) +``` + + +## Fitting the model + +The model can now be fitted using `mxRun()` or `mxTryHard()`. + + + +```r +mnlfa_model <- mxRun(mnlfa_model) +summary(mnlfa_model) +``` + +
+Show summary + +``` +#> Summary of MNLFA +#> +#> free parameters: +#> name matrix row col Estimate Std.Error A +#> 1 lSi0_1 new_parameters 1 1 0.7335132445 0.13060112 +#> 2 lSi1_1 new_parameters 1 2 -0.1154255003 0.05220878 ! +#> 3 lSi2_1 new_parameters 1 3 0.1400286477 0.13891191 +#> 4 lSi0_3 new_parameters 1 4 0.3015782589 0.16646223 +#> 5 lSi1_3 new_parameters 1 5 -0.0879323934 0.05924633 +#> 6 lSi2_3 new_parameters 1 6 0.4591409253 0.17546688 +#> 7 lSi0_6 new_parameters 1 7 0.9194296857 0.15081098 +#> 8 lSi1_6 new_parameters 1 8 -0.0706498279 0.04992590 +#> 9 lSi2_6 new_parameters 1 9 -0.0979268798 0.15832176 +#> 10 lSi0_8 new_parameters 1 10 1.0701237959 0.12941517 +#> 11 lSi1_8 new_parameters 1 11 0.0561234341 0.04934508 +#> 12 lSi2_8 new_parameters 1 12 -0.0853303769 0.13807941 +#> 13 lSi0_10 new_parameters 1 13 1.1310725242 0.14919881 +#> 14 lSi1_10 new_parameters 1 14 -0.0484480541 0.05807462 +#> 15 lSi2_10 new_parameters 1 15 -0.1777563685 0.15904289 +#> 16 lSi0_11 new_parameters 1 16 0.7556453371 0.13499027 +#> 17 lSi1_11 new_parameters 1 17 -0.0525072490 0.05106096 +#> 18 lSi2_11 new_parameters 1 18 -0.0430455454 0.14376514 ! +#> 19 lSi0_14 new_parameters 1 19 0.6896194650 0.11970774 +#> 20 lSi1_14 new_parameters 1 20 0.1111536773 0.04813670 +#> 21 lSi2_14 new_parameters 1 21 0.1527644246 0.12859387 +#> 22 lNa0_2 new_parameters 1 22 0.7798765627 0.15343426 +#> 23 lNa1_2 new_parameters 1 23 -0.0757615377 0.05655119 +#> 24 lNa2_2 new_parameters 1 24 -0.0933488885 0.16478465 +#> 25 lNa0_4 new_parameters 1 25 1.0847614435 0.12762972 +#> 26 lNa1_4 new_parameters 1 26 -0.0943803303 0.04226564 +#> 27 lNa2_4 new_parameters 1 27 -0.2833533421 0.13491677 +#> 28 lNa0_5 new_parameters 1 28 0.5882977973 0.13857927 +#> 29 lNa1_5 new_parameters 1 29 -0.0689103011 0.05229366 +#> 30 lNa2_5 new_parameters 1 30 0.1443499696 0.14917073 +#> 31 lNa0_7 new_parameters 1 31 1.1002294251 0.12109433 +#> 32 lNa1_7 new_parameters 1 32 -0.0872053238 0.04421264 +#> 33 lNa2_7 new_parameters 1 33 -0.1714468809 0.12950415 +#> 34 lNa0_9 new_parameters 1 34 0.7460504287 0.12956267 +#> 35 lNa1_9 new_parameters 1 35 -0.0247286721 0.04319557 +#> 36 lNa2_9 new_parameters 1 36 -0.0758240476 0.13751169 +#> 37 lNa0_12 new_parameters 1 37 0.8353126483 0.14356848 +#> 38 lNa1_12 new_parameters 1 38 -0.0472960459 0.05227475 +#> 39 lNa2_12 new_parameters 1 39 0.0720072547 0.15349421 +#> 40 lNa0_13 new_parameters 1 40 1.3331780253 0.12235756 +#> 41 lNa1_13 new_parameters 1 41 -0.0808880672 0.03986492 +#> 42 lNa2_13 new_parameters 1 42 -0.4766565320 0.12845540 +#> 43 cov0 new_parameters 1 43 0.5021309060 0.10219834 +#> 44 cov1 new_parameters 1 44 0.0184185069 0.04156539 +#> 45 cov2 new_parameters 1 45 -0.0494766095 0.11083039 +#> 46 vSi0_1 new_parameters 1 46 -0.3207921796 0.19589811 +#> 47 vSi1_1 new_parameters 1 47 -0.0242559674 0.07040331 +#> 48 vSi2_1 new_parameters 1 48 -0.1741242778 0.21081463 +#> 49 vSi0_3 new_parameters 1 49 0.4646166539 0.17531839 +#> 50 vSi1_3 new_parameters 1 50 0.0600874940 0.06277607 +#> 51 vSi2_3 new_parameters 1 51 -0.4775700164 0.18936363 +#> 52 vSi0_6 new_parameters 1 52 -0.0244158740 0.19800424 +#> 53 vSi1_6 new_parameters 1 53 0.0410837294 0.07265704 +#> 54 vSi2_6 new_parameters 1 54 -0.4232699060 0.21334264 +#> 55 vSi0_8 new_parameters 1 55 -0.7723689124 0.27083920 +#> 56 vSi1_8 new_parameters 1 56 -0.1785979089 0.09286324 +#> 57 vSi2_8 new_parameters 1 57 0.0830338249 0.28418042 +#> 58 vSi0_10 new_parameters 1 58 -0.3311375404 0.23166890 +#> 59 vSi1_10 new_parameters 1 59 0.0456637752 
0.06901235 +#> 60 vSi2_10 new_parameters 1 60 0.1333978832 0.24361137 +#> 61 vSi0_11 new_parameters 1 61 -0.1991709855 0.19272819 +#> 62 vSi1_11 new_parameters 1 62 0.0764535727 0.07264274 +#> 63 vSi2_11 new_parameters 1 63 -0.0606475819 0.20549172 +#> 64 vSi0_14 new_parameters 1 64 -0.4409584695 0.19422986 +#> 65 vSi1_14 new_parameters 1 65 -0.0797038128 0.07834221 +#> 66 vSi2_14 new_parameters 1 66 -0.1140400508 0.20943490 +#> 67 vNa0_2 new_parameters 1 67 0.0992622457 0.17976004 +#> 68 vNa1_2 new_parameters 1 68 0.0624813466 0.06506342 +#> 69 vNa2_2 new_parameters 1 69 0.0822169630 0.19285295 +#> 70 vNa0_4 new_parameters 1 70 -0.7374873408 0.19583900 +#> 71 vNa1_4 new_parameters 1 71 0.0972335776 0.08012644 +#> 72 vNa2_4 new_parameters 1 72 -0.0004451743 0.21161936 +#> 73 vNa0_5 new_parameters 1 73 0.0329558400 0.17544820 +#> 74 vNa1_5 new_parameters 1 74 0.0691105829 0.06831095 +#> 75 vNa2_5 new_parameters 1 75 -0.0541634913 0.18919526 +#> 76 vNa0_7 new_parameters 1 76 -1.0160611911 0.20599631 +#> 77 vNa1_7 new_parameters 1 77 0.0793410915 0.07895067 +#> 78 vNa2_7 new_parameters 1 78 0.2789485322 0.22248133 +#> 79 vNa0_9 new_parameters 1 79 -0.2587533822 0.17809218 +#> 80 vNa1_9 new_parameters 1 80 0.1619891803 0.07163809 +#> 81 vNa2_9 new_parameters 1 81 -0.2160735762 0.19236805 +#> 82 vNa0_12 new_parameters 1 82 -0.0164029086 0.17808276 +#> 83 vNa1_12 new_parameters 1 83 -0.0209998013 0.07047469 +#> 84 vNa2_12 new_parameters 1 84 -0.1532819407 0.19328146 +#> 85 vNa0_13 new_parameters 1 85 -2.0960703884 0.49156987 +#> 86 vNa1_13 new_parameters 1 86 -0.1933345991 0.09005339 +#> 87 vNa2_13 new_parameters 1 87 1.1069583557 0.49904058 ! +#> 88 iSi0_1 new_parameters 1 88 0.9952882525 0.13528668 +#> 89 iSi1_1 new_parameters 1 89 -0.1033698433 0.04992865 +#> 90 iSi2_1 new_parameters 1 90 0.3313792856 0.14484117 +#> 91 iSi0_3 new_parameters 1 91 1.7153301646 0.15882460 +#> 92 iSi1_3 new_parameters 1 92 -0.0212118752 0.05378318 +#> 93 iSi2_3 new_parameters 1 93 0.1107780341 0.16877197 +#> 94 iSi0_6 new_parameters 1 94 1.3003161963 0.16271966 +#> 95 iSi1_6 new_parameters 1 95 -0.0662085245 0.04956657 +#> 96 iSi2_6 new_parameters 1 96 -0.0985231721 0.17061476 +#> 97 iSi0_8 new_parameters 1 97 1.2459902423 0.15393450 +#> 98 iSi1_8 new_parameters 1 98 -0.0384614553 0.05219379 +#> 99 iSi2_8 new_parameters 1 99 0.0272620614 0.16347631 +#> 100 iSi0_10 new_parameters 1 100 1.2622340993 0.17056599 +#> 101 iSi1_10 new_parameters 1 101 -0.0522932413 0.05641430 +#> 102 iSi2_10 new_parameters 1 102 0.2239994097 0.18052436 +#> 103 iSi0_11 new_parameters 1 103 1.3729266832 0.14260815 +#> 104 iSi1_11 new_parameters 1 104 0.0040182246 0.04933784 +#> 105 iSi2_11 new_parameters 1 105 0.2206742569 0.15153766 +#> 106 iSi0_14 new_parameters 1 106 1.2037219914 0.12932136 +#> 107 iSi1_14 new_parameters 1 107 0.0151275082 0.04849783 +#> 108 iSi2_14 new_parameters 1 108 -0.0330127730 0.13950369 +#> 109 iNa0_2 new_parameters 1 109 2.3325064697 0.16085196 +#> 110 iNa1_2 new_parameters 1 110 -0.0846593809 0.05641898 +#> 111 iNa2_2 new_parameters 1 111 -0.5228701161 0.17148283 +#> 112 iNa0_4 new_parameters 1 112 1.2264649067 0.15514007 +#> 113 iNa1_4 new_parameters 1 113 -0.1588700260 0.04640533 +#> 114 iNa2_4 new_parameters 1 114 -0.3757878268 0.16223727 +#> 115 iNa0_5 new_parameters 1 115 1.7177420725 0.14265940 +#> 116 iNa1_5 new_parameters 1 116 -0.1539340798 0.05334204 +#> 117 iNa2_5 new_parameters 1 117 -0.0524636997 0.15332152 +#> 118 iNa0_7 new_parameters 1 118 1.2867668347 0.15096350 +#> 119 iNa1_7 new_parameters 
1 119 -0.1371967477 0.04987060 +#> 120 iNa2_7 new_parameters 1 120 -0.3688512178 0.15962680 +#> 121 iNa0_9 new_parameters 1 121 1.1235341403 0.13970085 +#> 122 iNa1_9 new_parameters 1 122 -0.0732935609 0.04481884 +#> 123 iNa2_9 new_parameters 1 123 -0.2102908580 0.14738273 +#> 124 iNa0_12 new_parameters 1 124 2.5622357744 0.15703952 +#> 125 iNa1_12 new_parameters 1 125 -0.2614877993 0.05577675 +#> 126 iNa2_12 new_parameters 1 126 -0.8435587998 0.16758186 +#> 127 iNa0_13 new_parameters 1 127 1.3901921259 0.16530647 +#> 128 iNa1_13 new_parameters 1 128 -0.1506084332 0.04603240 +#> 129 iNa2_13 new_parameters 1 129 -0.5924378568 0.17160760 +#> +#> Model Statistics: +#> | Parameters | Degrees of Freedom | Fit (-2lnL units) +#> Model: 129 7435 20762.43 +#> Saturated: 119 7445 NA +#> Independence: 28 7536 NA +#> Number of observations/statistics: 541/7564 +#> +#> Information Criteria: +#> | df Penalty | Parameters Penalty | Sample-Size Adjusted +#> AIC: 5892.426 21020.43 21102.03 +#> BIC: -26029.146 21574.28 21164.78 +#> CFI: NA +#> TLI: 1 (also known as NNFI) +#> RMSEA: 0 [95% CI (NA, NA)] +#> Prob(RMSEA <= 0.05): NA +#> To get additional fit indices, see help(mxRefModels) +#> timestamp: 2023-08-25 14:36:18 +#> Wall clock time: 292.0618 secs +#> optimizer: SLSQP +#> OpenMx version number: 2.21.8 +#> Need help? See help(mxSummary) +``` +
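+
+To inspect the moderation effects without scrolling through the full summary,
+the estimates can also be pulled directly from the fitted model. The following
+minimal sketch (not part of the original tutorial) collects the
+loading-moderation coefficients, i.e. the effects of `Age` (`lSi1_*`, `lNa1_*`)
+and `Male` (`lSi2_*`, `lNa2_*`):
+
+```r
+# extract all free parameters and keep only the loading-moderation coefficients
+est <- omxGetParameters(mnlfa_model)
+loading_moderation <- est[grepl("^l(Si|Na)[12]_", names(est))]
+round(sort(loading_moderation), 3)
+```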
+ +Checking the regression coefficients `lSi1_1`, `lSi1_2`, ... will tell us if +there is a linear change across age or if individuals with `Male = 0` differ +from individuals with `Male = 1`. + +## Plotting Individual Parameters + +MNLFA predicts individual parameter values (e.g., `lSi_1`) using definition +variables. To get a better picture of the individual parameters, **mxsem** +provides the `get_individual_algebra_results` function. This function will compute +for each algebra the individual parameters. Depending on the sample size and +the number of algebras in the model, this may take some time. Therefore, we will +only extract the individual parameter values for `lSi_1` as an example. + + +```r +lSi_1 <- get_individual_algebra_results(mxModel = mnlfa_model, + algebra_names = "lSi_1", + progress_bar = FALSE) +head(lSi_1$lSi_1) +#> person Age Male algebra_result +#> 1 1 0.03136864 1 0.8699212 +#> 2 2 -0.44266587 1 0.9246368 +#> 3 3 -0.82189349 1 0.9684094 +#> 4 4 -0.63227968 1 0.9465231 +#> 5 5 0.03136864 1 0.8699212 +#> 6 6 -0.06343826 0 0.7408356 +``` + +The function will return a list with data frames for all requested algebras. Here, +the list has only one element: `lSi_1`. The data frame will have fields for the +person, the definition variables used in the algebra (`Age` and `Male` in this case) +and the person specific parameter (`algebra_result`). We can plot these results +as follows: + + +```r +library(ggplot2) +ggplot(data = lSi_1$lSi_1, + aes(x = Age, + y = algebra_result, + color = factor(Male))) + + ylab("Individual Parameter Value for lSi_1") + + geom_point() +``` + + +![](figures/mnlfa_lS_1.png) + +## References diff --git a/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd b/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd index 1b512e0..cc83566 100644 --- a/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd +++ b/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd @@ -18,6 +18,7 @@ knitr::opts_chunk$set( ) library(mxsem) library(lavaan) +library(ggplot2) set.seed(123) # to knit this vignette, use # knitr::knit(input = "Moderated-Nonlinear-Factor-Analysis.mxsemmd", @@ -222,7 +223,7 @@ We want to predict each and every parameter in this model using `Age` and `Male` With **mxsem**, this can be implemented as follows: ```{r} -mlnfa_syntax <- " +mnlfa_syntax <- " ==== MNLFA ==== SI =~ {lSi_1 := lSi0_1 + lSi1_1*data.Age + lSi2_1*data.Male }*Si1 + @@ -292,7 +293,7 @@ Finally, we pass the syntax to the `mxsem()`-function to create an **OpenMx** mo ```{r} library(mxsem) -mlnfa_model <- mxsem(model = mlnfa_syntax, +mnlfa_model <- mxsem(model = mnlfa_syntax, data = DS14, # we scaled the latent variables manually, # so we will set all automatic scalings to FALSE: @@ -306,17 +307,17 @@ mlnfa_model <- mxsem(model = mlnfa_syntax, The model can now be fitted using `mxRun()` or `mxTryHard()`. ```{r, include = FALSE} -mlnfa_model <- mxRun(mlnfa_model) +mnlfa_model <- mxRun(mnlfa_model) ``` ```{r, eval = FALSE} -mlnfa_model <- mxRun(mlnfa_model) -summary(mlnfa_model) +mnlfa_model <- mxRun(mnlfa_model) +summary(mnlfa_model) ```
Show summary ```{r, echo = FALSE} -summary(mlnfa_model) +summary(mnlfa_model) ```
@@ -324,4 +325,49 @@ Checking the regression coefficients `lSi1_1`, `lSi1_2`, ... will tell us if there is a linear change across age or if individuals with `Male = 0` differ from individuals with `Male = 1`. +## Plotting Individual Parameters + +MNLFA predicts individual parameter values (e.g., `lSi_1`) using definition +variables. To get a better picture of the individual parameters, **mxsem** +provides the `get_individual_algebra_results` function. This function will compute +for each algebra the individual parameters. Depending on the sample size and +the number of algebras in the model, this may take some time. Therefore, we will +only extract the individual parameter values for `lSi_1` as an example. + +```{r} +lSi_1 <- get_individual_algebra_results(mxModel = mnlfa_model, + algebra_names = "lSi_1", + progress_bar = FALSE) +head(lSi_1$lSi_1) +``` + +The function will return a list with data frames for all requested algebras. Here, +the list has only one element: `lSi_1`. The data frame will have fields for the +person, the definition variables used in the algebra (`Age` and `Male` in this case) +and the person specific parameter (`algebra_result`). We can plot these results +as follows: + +```{r, eval = FALSE} +library(ggplot2) +ggplot(data = lSi_1$lSi_1, + aes(x = Age, + y = algebra_result, + color = factor(Male))) + + ylab("Individual Parameter Value for lSi_1") + + geom_point() +``` + +```{r, include = FALSE} +library(ggplot2) +png(filename = "figures/mnlfa_lS_1.png", width = 5, height = 5, units = "cm", res = 200) +ggplot(data = lSi_1$lSi_1, + aes(x = Age, + y = algebra_result, + color = factor(Male))) + + ylab("Individual Parameter Value for lSi_1") + + geom_point() +dev.off() +``` +![](figures/mnlfa_lS_1.png) + ## References diff --git a/vignettes/figures/mnlfa_lS_1.png b/vignettes/figures/mnlfa_lS_1.png new file mode 100644 index 0000000000000000000000000000000000000000..fa00058d40577919390842454c8726d0f722fd82 GIT binary patch literal 6789 zcmYjWcU%+A(+@=uk#q$3D|AT1!F20?^i=v9h@CJIVR zFjAx`U63NurGqHuJ)Yk`?__R2+1;7h%g)W*d^fo3CI%;2_*q~u*vV^#x@IsK9q#C2 zW`ITv88nli&W-Cx3q7a{gQ>t^E-)1pmCo{h|5rkR7X#g3uH?LVn`fzq;W)e7ks@7e%z%VV%q)C`;d-j zVwquLCL zTA3%REYY)!T2@A#E1RR1&C%*zVD&C4^>~O~Ai4i&{78HpYH9T}8Z{9`P4uMB(WtaJ zDvdUdR~g5I5=NvWhKijnB<>k%I&3%6lXc@>5q)Z#H zKa!d&J2EymNBeID$SR~qqtUpgznMe(3imd&_JzUDwjEt`o_!BBU@*R~*L1Zl9=%*8 zt~vq+K`l3B5s6i@PgzV@q_H~61BC5$?gt2w08ycfOg;nVtCDsbf?qa`1*Dc~v;pEU z@y$K?MnZ=D~Z@o(I z8{wMQ@6n;+OCt*}F3sNRO!{Ja4iNHm8jZR+*FW7O^=IJ^R;V;z3)HvvPabAZNswwJ ztS~&}X<@t;Vl9%uDrty-0fEyNsfJ^K$SF+Tr4}&gcAO|v2!d0nq~oka-8S*5liT-L8{0CNr%#3Q=1Vk)>Dgn3h?S0`^*2jqb=FfkREy9@tK{t(|c}V+)wsB_12z{l^$GzQhGWWv^59t)pifxTvZ4qqznz;KIbw}4K<6dh_>@Pf?A=feY$&kvP9B%z1 zM+P_Dr1-eY3{~-O=)DGw{<_!J{5I9l|;?99=iaPpnV!Bn9XpD}GZ@_S26{kfXBcs}y-?@N* z9s1~MZGgksk)-M-a$8FC(TWgk*5OT`vdDoLxUCGlzNV>cENO+D}(n zXmmj@Lyiqb&V3EQn))DeXYXGylH@^^?5Fb?ETLt%F%i>u9g2V1Ih!Z#-6+xmPL$kx zQ%}A-$kp0G>1V{=7E-mtAQ5!3a_G78rV|002)g7a7JuK%9*+U}64I~x=j6e)<-p0= zk_Nr2;G|=g3~3hj>S3LTRSr@feMdcaCYH*lc_2_vbm#vB*vRpqWcZb5O=bJZtK-*d z4&mLvF({lMbr(Lv}im$CMrVF~c0gf^Xls#OCsNmFB?Q%?DDEXehpvt?DojzS=wfQ|&`f&y=H|-%svX2-ZFZ1XD5Knx(>f z@AmJKh{^}%@unMJ<&j&Rp{x?8l>ilJZ zsdmEAd;)!Fs0FQDPO7Qd@t+iYr>}>#yWOuCCYAMpCdA@-Ny{Eup$B%3xJG z3T{&(#U=itnX-a34f;p6*^i%gRgF?KEq~h3GN2zn*2iUviiu2m9Pp`YQ>3vm8Rv60 zEr?8}dB)7(;&CiMhkXH#v)M*>^-K_QJ#}vdEI(jNmz`wj8L0xv`SD5iXStYAQ-esc zr{O71UFL@hw2P)nqz0JqMtt`OGo+5iB{xaL)h2)ZyVb3sQ1&4*G1i=O-m?+E!>C+5 zy=H5+S%o(F(8QZHPxIQ$3O6+7iF-~AYP;u9efG?Fa;s4ZJmV_0TZ88D(f$#6(LJiU zmRKfxOIdMB_Lp+~ z6muA6B5FIep6j4b86Csh<^BT?Eh@XAnDj(h8; 
z)Ej&GPQr}z%D2?lp&OohY?0z}KGd`h>4}pgc$8Cw{AIlgc*gY)udbY?1Iiyc)O;Gi zKH=TvF`7Ns#RbS2bN7)sm(fm=(=$ZQf%FlP?BK;suUjXtX566ww#yfgd;vIzh&Q*| zU6_nc0gft6XfZ|8Tt5fOpy9SbrOMg`)W~z+Zv(iSZ0{vo^T<2}GP0%zcl8r`21^S% zq-~Jz;AU#1HXN<~A+MaR_?K5C(affCCAZFPr*BHL8~d59ch0OP90$job_p9X&XD!}xyf6D5NKnv+3)#;mZ<6uR& zPsgm>1CiYgxuBMuUEQmwH7hW6BN{nRw|@(=qk=IOwt52B~Aw}i!e&FU9M?>BoS z`BUSsyVbUPo&B>v-6GSZ8-D(TaQQCO+7G#=qFnr@{eIJaNg z1jbe8eHdQ5uuMappE2r}$qZoRa0r!ow0?7|wx@TWcAF24(0!2EPP%JLaz7FL!PvFlPC#^K3Gw`z`)5`7#v=6Co-|RGFu2io(bJi(9Wtduk!IvE;tP&YStS#kNZu z&9*h4>Fc8%$KP~E(~$d>laj@|u1K$Wj4F{ExE79F>9#9v)-(uhBYj{%HC#@) z)ftb&9R?nZBqGDXmyg^PzrE=o-uUWM{B_eu)GdUG;C;K&A%ea0pWp4%PrhcAEPP4; zS&S}}bYmDZL5W^;4*aCdhe4Nux)}^TOrtlklIL~|*IQnc6~cS0Xq*B7Mwqs5a1vJV{zqHw$UnI{?UZ}=76_Qv<1nu9M?x)|ww9#i} z-*F51ZPeRmYSfR_`(5&=b|hUIh{LpLUjaT3R`=^_XYK1XjW8cgN1@eaKhJyxFgn)< z{PS#|kFd>t=bR)$uR1J4{djb`(gd@T`rS-#Vt+hnL7q%9ejseZ@xKbg+Z;Kv(}vRm z&i%g%qY8%7La1qmgCbAB#7XSC+l{Ec)qwC>$Nia_<9%++1)}fZsweF8!Ts1YZ{c3c zby-f2+ICJ3nI?D;a2c38v*#MbxBmF%v2il?rQd-H`tOUGk-;9rWuhG&U1a8W#SK|g z?9#~*tL+Yj+^3XjvKaAYvg6fE4n?0*nb1&j@m+nopJInTsV2g}Bf8Y@06ZuY ze|`BB!)tDuTIU`;R5z_TmMOi-`P}+jp7Zg^I}vK8g{xma2RRTNPTpYwEDwMRH&xkF z<#c`)TnW1*+u`gS5o?Bp>-}>L=Hmw=@gi=pJ$G&tfhrlx>cJn7S76Q1aIXyGCzbYs z>LC!9(6m|Z0otf(_R9mPfdzgoL~h^z+7*7Sn@RMj1xR6G$vGb$30Z<%So2@o?gsOK zJbU3}H10H$#tjCP1s$~i?rrv_Iq(Ex9peVE56c+O(2!dh5L-XXi83@g7*S3a9WcGJ z<|o^+uqI3{qM0zNtGT%2!>~Mf!ZYSymXr;tA zz6x1OusH9ss;TfbZOyBo$xz^E`!eefrYkjgiNl{>u|t;T;GFG2I^Vs~=c@_?rBm9y zw}K1nh+05nUhOIN`f||X+M-h>AG@iFxYwfNOxxgu{x}kOHd0=}NeA(gr977GOh0Cw zI?0&(5bPX+>Fi)L^z3dLp;mI?nX;p;55&88(qforgI#h~Yu%pfSsqQW4iFw66|AeS zBXo!GQDRC)k;5Vubl+^l#Zv}Kr2mp48j~)Sv0Iyua~rqvg^i~`bdVV*m zMcrKW5iJz!`~$F0qLPFT;$a=~rWKm99u-ho``gN&k$ckDF;EL(`85v{aEEQ^!L!Vg zp^u&HE{w*SiYbXHa6xor~9{t*EmWM5^-FO zz5z80B$mc{-!%$xb4TPp&?QtpMW;z?o;<<}(BlbDKahYBmD9;PsV!I&z#Exq^wJnu z9HQjG*`SnrF@NTozgomm74KM`v9qSjytJY~r=L(lev6O|e30q+%O*zPO*?}06g<&6 z4Ex+F1w6*+{EXzE5ZBHMbKtg^u_MGBcWMjy?kL}QY5VIhq^T zBAy5yDtXIaqacT3RL%cg-XHrloq$tE!D4tjpn92N?%yu$nkIxa}mzNk|kyPeYn$b4$;zcwb+7gtT6e}!uT7$E?rs1v(AdF zgfL7BT#AV3)4f{T&$ufp<{{I6jk^L7UI^qKgEm6@{37jH<=4s%^~7gTQW$fl{{t*SC}(D>mGFFG`+w{>d;O-O&3yo4cixClCrCaOpk3Z!${M~mn z=aJ^33Poy9tkq+JL$xFpvzyPaUi`=^k&X{(Tng>6E;4aP7US%1aU^eHh?nnlOA}K! z60e8OW#pK+cR$0rSTYbww233>pM!L?PBqURgJR-_JF?+Exu#VO+WcJ8vu6hT?eU{w z;6JrP`2YG}qvl0F{?z}FUXx@WANTa{Ra-Q4%YhGG6WWaeLuXnW7dNh|&@TkhwHMzL z{YYV;Rjd}>az{=kG<@{x51VL^px!C1%PbHFo(LW@Y3GXq2r}!kK7C0Xm0T#k{d8*CFXsN#er#ef z++9`g<3}=|EeXCZLmEut9O9P^3-W)!tEag#YR`Oh+%Q5{%S`Q7ORm|xvU+a`EH$8T zGXL)}(E>cq?>vEERkEeB-}%IPk>ZAlO9Z(MhTEmPhErGU?AG|!KR+5Mw6*&15cii? 
z+6Lb-i?y!y`8L1?LMbX?ca=XhT>D>T0Fz%K$*ecG`oroyQxE}e$5{#X|) z%`0ip_Ov4_kAv!AU>g~FBW+*uMbFN^pTuV&{twEUGqUfz zEThr`c^<$`&^Xii6gA5u0(lNJX=$NZ%ajz1}Z#qOF4p@#m| zy8Jv-&7rHwj$JZu1%5_fDso42x>3$fcGK4rcdb7rT-=uay){vbe z(!N1ll|o)*m!c$GYM*&9EH-W~60H(M7f{WGw@WE#QmDq3!+adS+s_=|Ao|1B6GBdJH2%$zaU7nzrNzloSnW!q3o|x!5m7P&B z@@dOhX)HNn^biOC#H)>p_qkjaX$|@hVK)3OI$6q|c&j@-aozQT$#ax&wnLFjI7-*8 zBe6Q^AE)G5Rw{Y-;!quHbGh)Z51~y7woR8h{X6#ZP6XfwrDk+LDv!5-7k18rX9}G7 zUCTz7;`&b3f>_yxV3+0vM9&tJ0VcZde?yqD9W^`T~PwPDEMaN@2uMg+N z`H5Z!C{fb#{^pq+@?=8=r)$r3hja$Rxi{qoIxp4;JKgtt>YDOJl7+lzPFdqy#ICTN z6W8P-9g4O~N68sAbw+<3#^z$EJb%PT6EkKm`Y7(#C6EJWa=dP^7)7S3iyS7D2IUHi zREkF1mJ@)-IhyI$!9=j{x_@VT}YIA+=D~^Ke zf_sjSYV)sJU$}3Po19!NqBxPsBTi#Az3R@wsV(YZYtmMtQ-By?x*V+rtebA+X&YbO zepg3N+9=UsF&fT`LT(4@k&V#teVDAgV;=gL7Iy3hJT8 zY2Hx-rYo|eF*4@ZR9HsNH=MJNU0{4lC#T?}iPFrigC4phihrP8Ej(DClFZdo2oMZe o)g%%N&25bIDXMD46O1(WJcpV`BF7&7_kYqgJrmtZojVc#2M8C|6951J literal 0 HcmV?d00001 From 099271cb492abaa01df1d071833a7a6ad44d7ba3 Mon Sep 17 00:00:00 2001 From: Jannik Date: Fri, 25 Aug 2023 15:03:57 +0200 Subject: [PATCH 8/8] replacing figure in mnlfa --- R/mxsem_group_by.R | 7 + man/mxsem_group_by.Rd | 5 + .../Moderated-Nonlinear-Factor-Analysis.Rmd | 7 +- .../Moderated-Nonlinear-Factor-Analysis.md | 462 ------------------ ...oderated-Nonlinear-Factor-Analysis.mxsemmd | 2 +- vignettes/figures/mnlfa_lS_1.png | Bin 6789 -> 14749 bytes 6 files changed, 17 insertions(+), 466 deletions(-) delete mode 100644 vignettes/Moderated-Nonlinear-Factor-Analysis.md diff --git a/R/mxsem_group_by.R b/R/mxsem_group_by.R index f45aa85..41b0b59 100644 --- a/R/mxsem_group_by.R +++ b/R/mxsem_group_by.R @@ -11,6 +11,13 @@ #' in grouping_variables #' 3. a separate model is set up for each group. All parameters that match #' those specified in the parameters argument are group specific +#' +#' **Warning**: The multi-group model may differ from **lavaan**! For instance, +#' **lavaan** will automatically set the latent variances for all but the first +#' group free if the loadings are fixed to equality. Such automatic procedures +#' are not yet implemented in **mxsem**. +#' +#' #' @param mxModel mxModel with the entire data #' @param grouping_variables Variables used to split the data in groups #' @param parameters the parameters that should be group specific. By default diff --git a/man/mxsem_group_by.Rd b/man/mxsem_group_by.Rd index 010d065..32a8e46 100644 --- a/man/mxsem_group_by.Rd +++ b/man/mxsem_group_by.Rd @@ -40,6 +40,11 @@ as follows: in grouping_variables 3. a separate model is set up for each group. All parameters that match those specified in the parameters argument are group specific + +**Warning**: The multi-group model may differ from **lavaan**! For instance, +**lavaan** will automatically set the latent variances for all but the first +group free if the loadings are fixed to equality. Such automatic procedures +are not yet implemented in **mxsem**. 
} \examples{ # THE FOLLOWING EXAMPLE IS ADAPTED FROM diff --git a/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd b/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd index 764f959..5593ad6 100644 --- a/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd +++ b/vignettes/Moderated-Nonlinear-Factor-Analysis.Rmd @@ -403,8 +403,8 @@ summary(mnlfa_model) #> RMSEA: 0 [95% CI (NA, NA)] #> Prob(RMSEA <= 0.05): NA #> To get additional fit indices, see help(mxRefModels) -#> timestamp: 2023-08-25 14:20:14 -#> Wall clock time: 357.9646 secs +#> timestamp: 2023-08-25 14:36:18 +#> Wall clock time: 292.0618 secs #> optimizer: SLSQP #> OpenMx version number: 2.21.8 #> Need help? See help(mxSummary) @@ -456,6 +456,7 @@ ggplot(data = lSi_1$lSi_1, geom_point() ``` -![plot of chunk unnamed-chunk-25](figure/unnamed-chunk-25-1.png) + +![](figures/mnlfa_lS_1.png) ## References diff --git a/vignettes/Moderated-Nonlinear-Factor-Analysis.md b/vignettes/Moderated-Nonlinear-Factor-Analysis.md deleted file mode 100644 index 5593ad6..0000000 --- a/vignettes/Moderated-Nonlinear-Factor-Analysis.md +++ /dev/null @@ -1,462 +0,0 @@ ---- -title: "Moderated-Nonlinear-Factor-Analysis" -output: rmarkdown::html_vignette -bibliography: mxsem.bib -csl: apa.csl -vignette: > - %\VignetteIndexEntry{Moderated-Nonlinear-Factor-Analysis} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} - %\DeclareUnicodeCharacter{2194}{$\leftrightarrow$} - %\DeclareUnicodeCharacter{2192}{$\rightarrow$} ---- - - - -Moderated nonlinear factor analysis (MNLFA) has been proposed by @bauerMoreGeneralModel2017 -to assess measurement invariance. The general idea is that parameters within -the model (that is, loadings, regressions, intercepts, and (co-)variances) may -differ across individuals. These differences are predicted using covariates such -as age and gender. For instance, @kolbeAssessingMeasurementInvariance2022 demonstrate -MNLFA using the following model: - -![](figures/mnlfa.png) - -The loading `lSi_1` may differ linearly by age and gender [@kolbeAssessingMeasurementInvariance2022]. -That is `lSi_1 = lSi0_1 + lSi1_1*Age + lSi2_1*Male`. -If `lSi1_1` or `lSi2_1` are non-zero, this suggests measurement non-invariance. - -MNLFA is a powerful procedure, but specifying the models can be tedious. -@kolbeAssessingMeasurementInvariance2022 provide an in-depth introduction using -**OpenMx** using the DS14 data set by @denolletPredictiveValueSocial2013 -included in the **mokken** [@MokkenScaleAnalysis2007] package. - -The script for the tutorial by @kolbeAssessingMeasurementInvariance2022 can -be found on osf at https://osf.io/527zr. The following pre-processing steps -are taken directly from that script: - - -```r -# code copied from Laura Kolbe, Terrence D. Jorgensen, Suzanne Jak, and Dylan Molenaar -# at https://osf.io/527zr - -library(mokken) - -## Load data -data("DS14", package="mokken") - -## Save as data frame -DS14 <- data.frame(DS14) - -## Recode negatively worded items -DS14$Si1 <- 4 - DS14$Si1. -DS14$Si3 <- 4 - DS14$Si3. 
- -## Standardize age -DS14$Age <- (DS14$Age - mean(DS14$Age))/sd(DS14$Age) -# mean-centering age is another option, but caused convergence difficulties with -# this dataset - -## Change order of variables -DS14 <- DS14[,c("Male","Age","Si1","Si3","Si6","Si8","Si10","Si11","Si14", - "Na2","Na4","Na5","Na7","Na9","Na12", "Na13")] - -## Check data -head(DS14) -#> Male Age Si1 Si3 Si6 Si8 Si10 Si11 Si14 Na2 Na4 Na5 Na7 Na9 Na12 Na13 -#> 1 1 0.03136864 2 2 2 3 2 2 4 3 2 2 3 2 4 2 -#> 2 1 -0.44266587 2 1 2 2 2 4 2 0 0 0 0 0 3 0 -#> 3 1 -0.82189349 3 3 2 2 2 2 1 3 1 1 2 2 1 1 -#> 4 1 -0.63227968 2 2 1 2 3 1 1 3 0 0 0 1 1 0 -#> 5 1 0.03136864 2 3 2 3 3 2 3 2 2 2 3 2 3 1 -#> 6 0 -0.06343826 0 0 0 0 0 0 0 3 1 2 1 2 4 2 -``` - -## Model specification - -We want to specify the configural model outlined in @kolbeAssessingMeasurementInvariance2022. -In this model, all loadings, intercepts, variances and co-variances are predicted -with the covariates `Age` and `Male`. The only parameter that is constant across -all subjects are the latent variances of `Si` and `Na`, which are fixed to 1 for -scaling. - -For demonstrative purposes, we will first set up a confirmatory factor analysis -without the `Age` and `Male` covariates. Using **mxsem**, the syntax could look -as follows: - - - -```r -cfa_syntax <- " - -# loadings -Si =~ lSi_1*Si1 + - lSi_3*Si3 + - lSi_6*Si6 + - lSi_8*Si8 + - lSi_10*Si10 + - lSi_11*Si11 + - lSi_14*Si14 - -Na =~ lNa_2*Na2 + - lNa_4*Na4 + - lNa_5*Na5 + - lNa_7*Na7 + - lNa_9*Na9 + - lNa_12*Na12 + - lNa_13*Na13 - -# latent variances and covariances -Si ~~ 1*Si -Na ~~ 1*Na + cov*Si - -# manifest variances -Si1 ~~ vSi_1*Si1 -Si3 ~~ vSi_3*Si3 -Si6 ~~ vSi_6*Si6 -Si8 ~~ vSi_8*Si8 -Si10 ~~ vSi_10*Si10 -Si11 ~~ vSi_11*Si11 -Si14 ~~ vSi_14*Si14 - -Na2 ~~ vNa_2*Na2 -Na4 ~~ vNa_4*Na4 -Na5 ~~ vNa_5*Na5 -Na7 ~~ vNa_7*Na7 -Na9 ~~ vNa_9*Na9 -Na12 ~~ vNa_12*Na12 -Na13 ~~ vNa_13*Na13 - -# intercepts -Si1 ~ iSi_1*1 -Si3 ~ iSi_3*1 -Si6 ~ iSi_6*1 -Si8 ~ iSi_8*1 -Si10 ~ iSi_10*1 -Si11 ~ iSi_11*1 -Si14 ~ iSi_14*1 - -Na2 ~ iNa_2*1 -Na4 ~ iNa_4*1 -Na5 ~ iNa_5*1 -Na7 ~ iNa_7*1 -Na9 ~ iNa_9*1 -Na12 ~ iNa_12*1 -Na13 ~ iNa_13*1 -" -``` - -We want to predict each and every parameter in this model using `Age` and `Male`. - -* `lSi_1 = lSi0_1 + lSi1_1*Age + lSi2_1*Male` -* `lSi_3 = lSi0_3 + lSi1_3*Age + lSi2_3*Male` -* ... -* `iNa_4 = iNa0_4 + iNa1_4*Age + iNa2_4*Male` -* ... 
- -With **mxsem**, this can be implemented as follows: - - -```r -mnlfa_syntax <- " -==== MNLFA ==== - -SI =~ {lSi_1 := lSi0_1 + lSi1_1*data.Age + lSi2_1*data.Male }*Si1 + - {lSi_3 := lSi0_3 + lSi1_3*data.Age + lSi2_3*data.Male }*Si3 + - {lSi_6 := lSi0_6 + lSi1_6*data.Age + lSi2_6*data.Male }*Si6 + - {lSi_8 := lSi0_8 + lSi1_8*data.Age + lSi2_8*data.Male }*Si8 + - {lSi_10 := lSi0_10 + lSi1_10*data.Age + lSi2_10*data.Male}*Si10 + - {lSi_11 := lSi0_11 + lSi1_11*data.Age + lSi2_11*data.Male}*Si11 + - {lSi_14 := lSi0_14 + lSi1_14*data.Age + lSi2_14*data.Male}*Si14 - -NA =~ {lNa_2 := lNa0_2 + lNa1_2*data.Age + lNa2_2*data.Male }*Na2 + - {lNa_4 := lNa0_4 + lNa1_4*data.Age + lNa2_4*data.Male }*Na4 + - {lNa_5 := lNa0_5 + lNa1_5*data.Age + lNa2_5*data.Male }*Na5 + - {lNa_7 := lNa0_7 + lNa1_7*data.Age + lNa2_7*data.Male }*Na7 + - {lNa_9 := lNa0_9 + lNa1_9*data.Age + lNa2_9*data.Male }*Na9 + - {lNa_12 := lNa0_12 + lNa1_12*data.Age + lNa2_12*data.Male}*Na12 + - {lNa_13 := lNa0_13 + lNa1_13*data.Age + lNa2_13*data.Male}*Na13 - -SI ~~ 1*SI -NA ~~ 1*NA + {cov := cov0 + cov1*data.Age + cov2*data.Male }*SI - -Si1 ~~ {vSi_1 := exp(vSi0_1 + vSi1_1*data.Age + vSi2_1*data.Male )}*Si1 -Si3 ~~ {vSi_3 := exp(vSi0_3 + vSi1_3*data.Age + vSi2_3*data.Male )}*Si3 -Si6 ~~ {vSi_6 := exp(vSi0_6 + vSi1_6*data.Age + vSi2_6*data.Male )}*Si6 -Si8 ~~ {vSi_8 := exp(vSi0_8 + vSi1_8*data.Age + vSi2_8*data.Male )}*Si8 -Si10 ~~ {vSi_10 := exp(vSi0_10 + vSi1_10*data.Age + vSi2_10*data.Male)}*Si10 -Si11 ~~ {vSi_11 := exp(vSi0_11 + vSi1_11*data.Age + vSi2_11*data.Male)}*Si11 -Si14 ~~ {vSi_14 := exp(vSi0_14 + vSi1_14*data.Age + vSi2_14*data.Male)}*Si14 - -Na2 ~~ {vNa_2 := exp(vNa0_2 + vNa1_2*data.Age + vNa2_2*data.Male )}*Na2 -Na4 ~~ {vNa_4 := exp(vNa0_4 + vNa1_4*data.Age + vNa2_4*data.Male )}*Na4 -Na5 ~~ {vNa_5 := exp(vNa0_5 + vNa1_5*data.Age + vNa2_5*data.Male )}*Na5 -Na7 ~~ {vNa_7 := exp(vNa0_7 + vNa1_7*data.Age + vNa2_7*data.Male )}*Na7 -Na9 ~~ {vNa_9 := exp(vNa0_9 + vNa1_9*data.Age + vNa2_9*data.Male )}*Na9 -Na12 ~~ {vNa_12 := exp(vNa0_12 + vNa1_12*data.Age + vNa2_12*data.Male)}*Na12 -Na13 ~~ {vNa_13 := exp(vNa0_13 + vNa1_13*data.Age + vNa2_13*data.Male)}*Na13 - -Si1 ~ {iSi_1 := iSi0_1 + iSi1_1*data.Age + iSi2_1*data.Male }*1 -Si3 ~ {iSi_3 := iSi0_3 + iSi1_3*data.Age + iSi2_3*data.Male }*1 -Si6 ~ {iSi_6 := iSi0_6 + iSi1_6*data.Age + iSi2_6*data.Male }*1 -Si8 ~ {iSi_8 := iSi0_8 + iSi1_8*data.Age + iSi2_8*data.Male }*1 -Si10 ~ {iSi_10 := iSi0_10 + iSi1_10*data.Age + iSi2_10*data.Male}*1 -Si11 ~ {iSi_11 := iSi0_11 + iSi1_11*data.Age + iSi2_11*data.Male}*1 -Si14 ~ {iSi_14 := iSi0_14 + iSi1_14*data.Age + iSi2_14*data.Male}*1 - -Na2 ~ {iNa_2 := iNa0_2 + iNa1_2*data.Age + iNa2_2*data.Male }*1 -Na4 ~ {iNa_4 := iNa0_4 + iNa1_4*data.Age + iNa2_4*data.Male }*1 -Na5 ~ {iNa_5 := iNa0_5 + iNa1_5*data.Age + iNa2_5*data.Male }*1 -Na7 ~ {iNa_7 := iNa0_7 + iNa1_7*data.Age + iNa2_7*data.Male }*1 -Na9 ~ {iNa_9 := iNa0_9 + iNa1_9*data.Age + iNa2_9*data.Male }*1 -Na12 ~ {iNa_12 := iNa0_12 + iNa1_12*data.Age + iNa2_12*data.Male}*1 -Na13 ~ {iNa_13 := iNa0_13 + iNa1_13*data.Age + iNa2_13*data.Male}*1 -" -``` - -Note how each parameter is redefined exactly as outlined above. However, there -are some important details: - -1. The variance parameters (e.g., `vNa_2`), have been transformed with an `exp`-function. -This exponential function ensures that variances are always positive. -2. All transformations must be embraced in curly braces. This ensures that **mxsem** -sees them as algebras and knows how to specify them in **OpenMx**. -3. 
The covariates `Age` and `Male` must be specified with the `data.`-prefix to -let **OpenMx** know that the values can be found in the data set. - -Finally, we pass the syntax to the `mxsem()`-function to create an **OpenMx** model: - - -```r -library(mxsem) -mnlfa_model <- mxsem(model = mnlfa_syntax, - data = DS14, - # we scaled the latent variables manually, - # so we will set all automatic scalings to FALSE: - scale_loadings = FALSE, - scale_latent_variances = FALSE) -``` - - -## Fitting the model - -The model can now be fitted using `mxRun()` or `mxTryHard()`. - - - -```r -mnlfa_model <- mxRun(mnlfa_model) -summary(mnlfa_model) -``` - -
-Show summary - -``` -#> Summary of MNLFA -#> -#> free parameters: -#> name matrix row col Estimate Std.Error A -#> 1 lSi0_1 new_parameters 1 1 0.7335132445 0.13060112 -#> 2 lSi1_1 new_parameters 1 2 -0.1154255003 0.05220878 ! -#> 3 lSi2_1 new_parameters 1 3 0.1400286477 0.13891191 -#> 4 lSi0_3 new_parameters 1 4 0.3015782589 0.16646223 -#> 5 lSi1_3 new_parameters 1 5 -0.0879323934 0.05924633 -#> 6 lSi2_3 new_parameters 1 6 0.4591409253 0.17546688 -#> 7 lSi0_6 new_parameters 1 7 0.9194296857 0.15081098 -#> 8 lSi1_6 new_parameters 1 8 -0.0706498279 0.04992590 -#> 9 lSi2_6 new_parameters 1 9 -0.0979268798 0.15832176 -#> 10 lSi0_8 new_parameters 1 10 1.0701237959 0.12941517 -#> 11 lSi1_8 new_parameters 1 11 0.0561234341 0.04934508 -#> 12 lSi2_8 new_parameters 1 12 -0.0853303769 0.13807941 -#> 13 lSi0_10 new_parameters 1 13 1.1310725242 0.14919881 -#> 14 lSi1_10 new_parameters 1 14 -0.0484480541 0.05807462 -#> 15 lSi2_10 new_parameters 1 15 -0.1777563685 0.15904289 -#> 16 lSi0_11 new_parameters 1 16 0.7556453371 0.13499027 -#> 17 lSi1_11 new_parameters 1 17 -0.0525072490 0.05106096 -#> 18 lSi2_11 new_parameters 1 18 -0.0430455454 0.14376514 ! -#> 19 lSi0_14 new_parameters 1 19 0.6896194650 0.11970774 -#> 20 lSi1_14 new_parameters 1 20 0.1111536773 0.04813670 -#> 21 lSi2_14 new_parameters 1 21 0.1527644246 0.12859387 -#> 22 lNa0_2 new_parameters 1 22 0.7798765627 0.15343426 -#> 23 lNa1_2 new_parameters 1 23 -0.0757615377 0.05655119 -#> 24 lNa2_2 new_parameters 1 24 -0.0933488885 0.16478465 -#> 25 lNa0_4 new_parameters 1 25 1.0847614435 0.12762972 -#> 26 lNa1_4 new_parameters 1 26 -0.0943803303 0.04226564 -#> 27 lNa2_4 new_parameters 1 27 -0.2833533421 0.13491677 -#> 28 lNa0_5 new_parameters 1 28 0.5882977973 0.13857927 -#> 29 lNa1_5 new_parameters 1 29 -0.0689103011 0.05229366 -#> 30 lNa2_5 new_parameters 1 30 0.1443499696 0.14917073 -#> 31 lNa0_7 new_parameters 1 31 1.1002294251 0.12109433 -#> 32 lNa1_7 new_parameters 1 32 -0.0872053238 0.04421264 -#> 33 lNa2_7 new_parameters 1 33 -0.1714468809 0.12950415 -#> 34 lNa0_9 new_parameters 1 34 0.7460504287 0.12956267 -#> 35 lNa1_9 new_parameters 1 35 -0.0247286721 0.04319557 -#> 36 lNa2_9 new_parameters 1 36 -0.0758240476 0.13751169 -#> 37 lNa0_12 new_parameters 1 37 0.8353126483 0.14356848 -#> 38 lNa1_12 new_parameters 1 38 -0.0472960459 0.05227475 -#> 39 lNa2_12 new_parameters 1 39 0.0720072547 0.15349421 -#> 40 lNa0_13 new_parameters 1 40 1.3331780253 0.12235756 -#> 41 lNa1_13 new_parameters 1 41 -0.0808880672 0.03986492 -#> 42 lNa2_13 new_parameters 1 42 -0.4766565320 0.12845540 -#> 43 cov0 new_parameters 1 43 0.5021309060 0.10219834 -#> 44 cov1 new_parameters 1 44 0.0184185069 0.04156539 -#> 45 cov2 new_parameters 1 45 -0.0494766095 0.11083039 -#> 46 vSi0_1 new_parameters 1 46 -0.3207921796 0.19589811 -#> 47 vSi1_1 new_parameters 1 47 -0.0242559674 0.07040331 -#> 48 vSi2_1 new_parameters 1 48 -0.1741242778 0.21081463 -#> 49 vSi0_3 new_parameters 1 49 0.4646166539 0.17531839 -#> 50 vSi1_3 new_parameters 1 50 0.0600874940 0.06277607 -#> 51 vSi2_3 new_parameters 1 51 -0.4775700164 0.18936363 -#> 52 vSi0_6 new_parameters 1 52 -0.0244158740 0.19800424 -#> 53 vSi1_6 new_parameters 1 53 0.0410837294 0.07265704 -#> 54 vSi2_6 new_parameters 1 54 -0.4232699060 0.21334264 -#> 55 vSi0_8 new_parameters 1 55 -0.7723689124 0.27083920 -#> 56 vSi1_8 new_parameters 1 56 -0.1785979089 0.09286324 -#> 57 vSi2_8 new_parameters 1 57 0.0830338249 0.28418042 -#> 58 vSi0_10 new_parameters 1 58 -0.3311375404 0.23166890 -#> 59 vSi1_10 new_parameters 1 59 0.0456637752 
0.06901235 -#> 60 vSi2_10 new_parameters 1 60 0.1333978832 0.24361137 -#> 61 vSi0_11 new_parameters 1 61 -0.1991709855 0.19272819 -#> 62 vSi1_11 new_parameters 1 62 0.0764535727 0.07264274 -#> 63 vSi2_11 new_parameters 1 63 -0.0606475819 0.20549172 -#> 64 vSi0_14 new_parameters 1 64 -0.4409584695 0.19422986 -#> 65 vSi1_14 new_parameters 1 65 -0.0797038128 0.07834221 -#> 66 vSi2_14 new_parameters 1 66 -0.1140400508 0.20943490 -#> 67 vNa0_2 new_parameters 1 67 0.0992622457 0.17976004 -#> 68 vNa1_2 new_parameters 1 68 0.0624813466 0.06506342 -#> 69 vNa2_2 new_parameters 1 69 0.0822169630 0.19285295 -#> 70 vNa0_4 new_parameters 1 70 -0.7374873408 0.19583900 -#> 71 vNa1_4 new_parameters 1 71 0.0972335776 0.08012644 -#> 72 vNa2_4 new_parameters 1 72 -0.0004451743 0.21161936 -#> 73 vNa0_5 new_parameters 1 73 0.0329558400 0.17544820 -#> 74 vNa1_5 new_parameters 1 74 0.0691105829 0.06831095 -#> 75 vNa2_5 new_parameters 1 75 -0.0541634913 0.18919526 -#> 76 vNa0_7 new_parameters 1 76 -1.0160611911 0.20599631 -#> 77 vNa1_7 new_parameters 1 77 0.0793410915 0.07895067 -#> 78 vNa2_7 new_parameters 1 78 0.2789485322 0.22248133 -#> 79 vNa0_9 new_parameters 1 79 -0.2587533822 0.17809218 -#> 80 vNa1_9 new_parameters 1 80 0.1619891803 0.07163809 -#> 81 vNa2_9 new_parameters 1 81 -0.2160735762 0.19236805 -#> 82 vNa0_12 new_parameters 1 82 -0.0164029086 0.17808276 -#> 83 vNa1_12 new_parameters 1 83 -0.0209998013 0.07047469 -#> 84 vNa2_12 new_parameters 1 84 -0.1532819407 0.19328146 -#> 85 vNa0_13 new_parameters 1 85 -2.0960703884 0.49156987 -#> 86 vNa1_13 new_parameters 1 86 -0.1933345991 0.09005339 -#> 87 vNa2_13 new_parameters 1 87 1.1069583557 0.49904058 ! -#> 88 iSi0_1 new_parameters 1 88 0.9952882525 0.13528668 -#> 89 iSi1_1 new_parameters 1 89 -0.1033698433 0.04992865 -#> 90 iSi2_1 new_parameters 1 90 0.3313792856 0.14484117 -#> 91 iSi0_3 new_parameters 1 91 1.7153301646 0.15882460 -#> 92 iSi1_3 new_parameters 1 92 -0.0212118752 0.05378318 -#> 93 iSi2_3 new_parameters 1 93 0.1107780341 0.16877197 -#> 94 iSi0_6 new_parameters 1 94 1.3003161963 0.16271966 -#> 95 iSi1_6 new_parameters 1 95 -0.0662085245 0.04956657 -#> 96 iSi2_6 new_parameters 1 96 -0.0985231721 0.17061476 -#> 97 iSi0_8 new_parameters 1 97 1.2459902423 0.15393450 -#> 98 iSi1_8 new_parameters 1 98 -0.0384614553 0.05219379 -#> 99 iSi2_8 new_parameters 1 99 0.0272620614 0.16347631 -#> 100 iSi0_10 new_parameters 1 100 1.2622340993 0.17056599 -#> 101 iSi1_10 new_parameters 1 101 -0.0522932413 0.05641430 -#> 102 iSi2_10 new_parameters 1 102 0.2239994097 0.18052436 -#> 103 iSi0_11 new_parameters 1 103 1.3729266832 0.14260815 -#> 104 iSi1_11 new_parameters 1 104 0.0040182246 0.04933784 -#> 105 iSi2_11 new_parameters 1 105 0.2206742569 0.15153766 -#> 106 iSi0_14 new_parameters 1 106 1.2037219914 0.12932136 -#> 107 iSi1_14 new_parameters 1 107 0.0151275082 0.04849783 -#> 108 iSi2_14 new_parameters 1 108 -0.0330127730 0.13950369 -#> 109 iNa0_2 new_parameters 1 109 2.3325064697 0.16085196 -#> 110 iNa1_2 new_parameters 1 110 -0.0846593809 0.05641898 -#> 111 iNa2_2 new_parameters 1 111 -0.5228701161 0.17148283 -#> 112 iNa0_4 new_parameters 1 112 1.2264649067 0.15514007 -#> 113 iNa1_4 new_parameters 1 113 -0.1588700260 0.04640533 -#> 114 iNa2_4 new_parameters 1 114 -0.3757878268 0.16223727 -#> 115 iNa0_5 new_parameters 1 115 1.7177420725 0.14265940 -#> 116 iNa1_5 new_parameters 1 116 -0.1539340798 0.05334204 -#> 117 iNa2_5 new_parameters 1 117 -0.0524636997 0.15332152 -#> 118 iNa0_7 new_parameters 1 118 1.2867668347 0.15096350 -#> 119 iNa1_7 new_parameters 
1 119 -0.1371967477 0.04987060 -#> 120 iNa2_7 new_parameters 1 120 -0.3688512178 0.15962680 -#> 121 iNa0_9 new_parameters 1 121 1.1235341403 0.13970085 -#> 122 iNa1_9 new_parameters 1 122 -0.0732935609 0.04481884 -#> 123 iNa2_9 new_parameters 1 123 -0.2102908580 0.14738273 -#> 124 iNa0_12 new_parameters 1 124 2.5622357744 0.15703952 -#> 125 iNa1_12 new_parameters 1 125 -0.2614877993 0.05577675 -#> 126 iNa2_12 new_parameters 1 126 -0.8435587998 0.16758186 -#> 127 iNa0_13 new_parameters 1 127 1.3901921259 0.16530647 -#> 128 iNa1_13 new_parameters 1 128 -0.1506084332 0.04603240 -#> 129 iNa2_13 new_parameters 1 129 -0.5924378568 0.17160760 -#> -#> Model Statistics: -#> | Parameters | Degrees of Freedom | Fit (-2lnL units) -#> Model: 129 7435 20762.43 -#> Saturated: 119 7445 NA -#> Independence: 28 7536 NA -#> Number of observations/statistics: 541/7564 -#> -#> Information Criteria: -#> | df Penalty | Parameters Penalty | Sample-Size Adjusted -#> AIC: 5892.426 21020.43 21102.03 -#> BIC: -26029.146 21574.28 21164.78 -#> CFI: NA -#> TLI: 1 (also known as NNFI) -#> RMSEA: 0 [95% CI (NA, NA)] -#> Prob(RMSEA <= 0.05): NA -#> To get additional fit indices, see help(mxRefModels) -#> timestamp: 2023-08-25 14:36:18 -#> Wall clock time: 292.0618 secs -#> optimizer: SLSQP -#> OpenMx version number: 2.21.8 -#> Need help? See help(mxSummary) -``` -
- -Checking the regression coefficients `lSi1_1`, `lSi1_2`, ... will tell us if -there is a linear change across age or if individuals with `Male = 0` differ -from individuals with `Male = 1`. - -## Plotting Individual Parameters - -MNLFA predicts individual parameter values (e.g., `lSi_1`) using definition -variables. To get a better picture of the individual parameters, **mxsem** -provides the `get_individual_algebra_results` function. This function will compute -for each algebra the individual parameters. Depending on the sample size and -the number of algebras in the model, this may take some time. Therefore, we will -only extract the individual parameter values for `lSi_1` as an example. - - -```r -lSi_1 <- get_individual_algebra_results(mxModel = mnlfa_model, - algebra_names = "lSi_1", - progress_bar = FALSE) -head(lSi_1$lSi_1) -#> person Age Male algebra_result -#> 1 1 0.03136864 1 0.8699212 -#> 2 2 -0.44266587 1 0.9246368 -#> 3 3 -0.82189349 1 0.9684094 -#> 4 4 -0.63227968 1 0.9465231 -#> 5 5 0.03136864 1 0.8699212 -#> 6 6 -0.06343826 0 0.7408356 -``` - -The function will return a list with data frames for all requested algebras. Here, -the list has only one element: `lSi_1`. The data frame will have fields for the -person, the definition variables used in the algebra (`Age` and `Male` in this case) -and the person specific parameter (`algebra_result`). We can plot these results -as follows: - - -```r -library(ggplot2) -ggplot(data = lSi_1$lSi_1, - aes(x = Age, - y = algebra_result, - color = factor(Male))) + - ylab("Individual Parameter Value for lSi_1") + - geom_point() -``` - - -![](figures/mnlfa_lS_1.png) - -## References diff --git a/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd b/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd index cc83566..88b24e5 100644 --- a/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd +++ b/vignettes/Moderated-Nonlinear-Factor-Analysis.mxsemmd @@ -359,7 +359,7 @@ ggplot(data = lSi_1$lSi_1, ```{r, include = FALSE} library(ggplot2) -png(filename = "figures/mnlfa_lS_1.png", width = 5, height = 5, units = "cm", res = 200) +png(filename = "figures/mnlfa_lS_1.png", width = 10, height = 10, units = "cm", res = 200) ggplot(data = lSi_1$lSi_1, aes(x = Age, y = algebra_result, diff --git a/vignettes/figures/mnlfa_lS_1.png b/vignettes/figures/mnlfa_lS_1.png index fa00058d40577919390842454c8726d0f722fd82..21293a5b3e3ba666429254f38dad265496399c6e 100644 GIT binary patch literal 14749 zcmdUWc|6qL|L<5zlvJZqLaDsdMkR)^l%}Hc7D`$bWyv0@63D7`@GNloY#51p0DS5o!6Uq7CtE~ zv{?v+LJ4bY;q*}`J~Z-w1s^CG_Rljxp?Gj-&zw6Bexgt+D3lpWMFph-{)V8;z>$qA zrXD(U2wXslk&l^*88|{z%(7KNLR3P)Uz!f`vDMKD(y;|cvQAK?8E8la9H1@=sg8U? 
[The remainder of the GIT binary patch for vignettes/figures/mnlfa_lS_1.png (literal 14749, delta 6590) is omitted here.]