Testing Reliability with Random Slopes Extracted from Linear Mixed-Effects Modeling (LMM)

Testing Case 2

Zhiyi Wu

This dataset is a subset of the Simon Task data from Huensch (2022), publicly available on OSF (osf.io/fxzvj/). The original study is available at doi.org/10.1017/S0272263124000238.

# Set the working directory to the location of this R Markdown file
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))

# Import dataset
d = read.csv("S-data.csv")

# Load necessary libraries for data manipulation
library(tidyr)
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ purrr     1.0.2
## ✔ forcats   1.0.0     ✔ readr     2.1.5
## ✔ ggplot2   3.5.1     ✔ stringr   1.5.1
## ✔ lubridate 1.9.3     ✔ tibble    3.2.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
# Check the descriptive statistics
library(rstatix)
## 
## Attaching package: 'rstatix'
## 
## The following object is masked from 'package:stats':
## 
##     filter
# Get the descriptives (means & standard deviations of RT) for the two conditions
d %>% group_by(Congruency) %>% get_summary_stats(RT, type = "mean_sd")
## # A tibble: 2 × 5
##   Congruency  variable     n  mean    sd
##   <chr>       <fct>    <dbl> <dbl> <dbl>
## 1 Congruent   RT        1608  479.  147.
## 2 Incongruent RT        1525  518.  180.
# Mean RT in the Congruent condition is shorter than in the Incongruent condition

Step 1: Split Data into Two Halves

# Split the data into two halves by item number
de = d %>% filter(ItemNo %% 2 == 0)  # even-numbered items
do = d %>% filter(ItemNo %% 2 == 1)  # odd-numbered items
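
As a quick sanity check (not part of the original script), you can confirm that the two halves contain similar numbers of trials per condition:

# Count trials per condition in each half
de %>% count(Congruency)
do %>% count(Congruency)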

Step 2: Fit Models to the Two Halves

# blme provides blmer(), a Bayesian-regularized version of lmer() that helps
# avoid degenerate (singular) random-effects estimates; it loads lme4 automatically
library(optimx)
library(blme)
## Loading required package: lme4
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
# Fit the same maximal model to each half: negative inverse-transformed RT (-1/RT)
# predicted by Congruency, with by-subject and by-item random intercepts and slopes
m_de = blmer(-1/RT ~ Congruency + (1 + Congruency | Subject) + 
               (1 + Congruency | ItemNo), 
             data = de, 
             control = lmerControl(optimizer = "nloptwrap",
                                   optCtrl = list(algorithm = 
                                                    "NLOPT_LN_NELDERMEAD", 
                                                  maxeval = 2e5)))

m_do = blmer(-1/RT ~ Congruency + (1 + Congruency | Subject) + 
               (1 + Congruency | ItemNo),
             data = do, 
             control = lmerControl(optimizer = "nloptwrap",
                                   optCtrl = list(algorithm = 
                                                    "NLOPT_LN_NELDERMEAD", 
                                                  maxeval = 2e5)))
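
Before extracting the random effects, it is worth confirming that neither model produced a singular fit. A minimal check using lme4's isSingular(), not part of the original script:

# TRUE would indicate a degenerate (singular) random-effects estimate
isSingular(m_de)
isSingular(m_do)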

Step 3: Extract Random Slopes

### Get the random slopes for all individuals
# Even-numbered half
person_rand_e = data.frame(ranef(m_de)$Subject)
person_rand_e$Subject = row.names(person_rand_e)

# Odd-numbered half
person_rand_o = data.frame(ranef(m_do)$Subject)
person_rand_o$Subject = row.names(person_rand_o)
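
Each of these data frames has one row per participant, with an intercept column, a CongruencyIncongruent column (the random Congruency slope under default treatment coding), and the Subject ID added above. A quick look, not in the original script, confirms the column names used later:

# Inspect the extracted random effects for the even-numbered half
head(person_rand_e)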

Step 4: Combine Even-Numbered and Odd-Numbered Sets

# Combine the two halves into a single data frame, matched by Subject
person = inner_join(person_rand_e, person_rand_o, 
                    by = "Subject", 
                    suffix = c("_even", "_odd"))
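
The resulting person data frame has one row per participant, with the even-half columns carrying the _even suffix and the odd-half columns the _odd suffix (e.g., CongruencyIncongruent_even and CongruencyIncongruent_odd, used in Step 5). A quick check, not in the original script:

# Confirm the merged columns and the number of participants retained
names(person)
nrow(person)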

Step 5: Correlation between Even-Numbered and Odd-Numbered Sets

#check the correlation between even- and odd-numbered sets
library(psych)
## 
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
## 
##     %+%, alpha
library(car)
## Loading required package: carData
## 
## Attaching package: 'car'
## The following object is masked from 'package:psych':
## 
##     logit
## The following object is masked from 'package:dplyr':
## 
##     recode
## The following object is masked from 'package:purrr':
## 
##     some
### Pearson correlation
print(corr.test(person$CongruencyIncongruent_even, 
                person$CongruencyIncongruent_odd), 
      short = FALSE) # .72
## Call:corr.test(x = person$CongruencyIncongruent_even, y = person$CongruencyIncongruent_odd)
## Correlation matrix 
## [1] 0.72
## Sample Size 
## [1] 58
## These are the unadjusted probability values.
##   The probability values  adjusted for multiple tests are in the p.adj object. 
## [1] 0
## 
##  Confidence intervals based upon normal theory.  To get bootstrapped values, try cor.ci
##       raw.lower raw.r raw.upper raw.p lower.adj upper.adj
## NA-NA      0.56  0.72      0.82     0      0.56      0.82
### Robust (percentage bend) correlation
WRS2::pbcor(person$CongruencyIncongruent_even, 
            person$CongruencyIncongruent_odd, 
            ci = T) #.725
## Call:
## WRS2::pbcor(x = person$CongruencyIncongruent_even, y = person$CongruencyIncongruent_odd, 
##     ci = T)
## 
## Robust correlation coefficient: 0.7249
## Test statistic: 7.8741
## p-value: 0 
## 
## Bootstrap CI: [0.5636; 0.8321]
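
As an optional visual check (not part of the original analysis), you can plot the even- and odd-half slopes against each other with ggplot2, which is already loaded via the tidyverse:

# Scatterplot of even- vs. odd-half Congruency slopes, one point per participant
ggplot(person, aes(x = CongruencyIncongruent_even, 
                   y = CongruencyIncongruent_odd)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE) +
  labs(x = "Congruency slope (even-numbered items)", 
       y = "Congruency slope (odd-numbered items)")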

In practice, once you’ve established good reliability, you would use the random slopes from your full model (using all items) as your individual difference measure in further analyses.
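
A minimal sketch of that final step, reusing the model specification from Step 2 on the full dataset d (the object names m_full and person_full are illustrative):

# Fit the same model to the full dataset (all items)
m_full = blmer(-1/RT ~ Congruency + (1 + Congruency | Subject) + 
                 (1 + Congruency | ItemNo), 
               data = d, 
               control = lmerControl(optimizer = "nloptwrap",
                                     optCtrl = list(algorithm = 
                                                      "NLOPT_LN_NELDERMEAD", 
                                                    maxeval = 2e5)))

# Each participant's Congruency slope serves as the individual difference measure
person_full = data.frame(ranef(m_full)$Subject)
person_full$Subject = row.names(person_full)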