Study 1

Read data files

Note: Anonymized datasets were created by removing potentially identifying information and variables that are not relevant to the confirmatory analyses.
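
A minimal sketch of that preprocessing step, with purely hypothetical column and file names (the actual identifying variables and the raw export are not shown in this document):

# Hypothetical illustration only; the real column and file names differ
raw <- read.csv2("study_1_raw_export.csv", fileEncoding = "UTF-8")
drop_cols <- c("ip_address", "free_text_comment")   # placeholders for identifying variables
write.csv2(raw[, setdiff(names(raw), drop_cols)],
           "study_1_df_long_anonymized.csv", row.names = FALSE, fileEncoding = "UTF-8")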

options(width=120)

# Anonymized datasets are semicolon-separated with decimal commas, hence read.csv2()
df_resilience <- read.csv2(file = "20210901_study_1_resilience_df_long.csv", fileEncoding = "UTF-8", stringsAsFactors = TRUE)
df_cama <- read.csv2(file = "20210901_study_1_cama_df_long.csv", fileEncoding = "UTF-8", stringsAsFactors = TRUE)
df <- read.csv2(file = "20210901_study_1_df_long.csv", fileEncoding = "UTF-8", stringsAsFactors = TRUE)
data <- read.csv2(file = "20210702_study_1_df_wide.csv", fileEncoding = "UTF-8", stringsAsFactors = TRUE)


data$s_iv_technical_terms <- factor(data$s_iv_technical_terms, levels = c("Expl None","Replace", "Expl Glossar"))
df_resilience$s_iv_technical_terms <- factor(df_resilience$s_iv_technical_terms, levels = c("Expl None","Replace", "Expl Glossar"))
df$s_iv_technical_terms <- factor(df$s_iv_technical_terms, levels = c("Expl None","Replace", "Expl Glossar"))

df_cama$pls <- factor(df_cama$pls, levels = c("Dunst","Christodoulou"))
df_resilience$pls <- factor(df_resilience$pls, levels = c("Rasmussen","Groth"))
df$pls <- factor(df$pls, levels = c("Dunst","Christodoulou", "Rasmussen","Groth"))

Descriptive Statistics Within Level

CAMA

require(psych)
## Loading required package: psych
names(df_cama)
##  [1] "id"                       "s_study_arm"              "s_iv_quality_of_evidence" "s_iv_operationalization" 
##  [5] "s_iv_technical_terms"     "s_int_psy"                "s_knowledge_evidence"     "pls"                     
##  [9] "accessibility"            "understanding"            "empowerment"              "pref_quality_of_evidence"
## [13] "knowledge_evidence"       "knowledge"
df1 <- df_cama[,c("accessibility", "understanding", "empowerment", "pref_quality_of_evidence", "knowledge_evidence","knowledge", "s_iv_quality_of_evidence", "s_iv_operationalization")]

describe(df1)
##                           vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 2262 6.08 1.89      7    6.33 1.48   1   8     7 -0.86    -0.09 0.04
## understanding                2 2258 6.08 1.87      7    6.32 1.48   1   8     7 -0.85    -0.07 0.04
## empowerment                  3 2260 4.86 2.01      5    4.91 1.48   1   8     7 -0.17    -0.80 0.04
## pref_quality_of_evidence     4 2235 4.69 1.87      5    4.68 1.48   1   8     7  0.03    -0.58 0.04
## knowledge_evidence           5 2230 0.39 0.49      0    0.37 0.00   0   1     1  0.43    -1.81 0.01
## knowledge                    6 2230 0.65 0.48      1    0.68 0.00   0   1     1 -0.62    -1.62 0.01
## s_iv_quality_of_evidence*    7 2274 1.50 0.50      1    1.50 0.00   1   2     1  0.02    -2.00 0.01
## s_iv_operationalization*     8 2274 1.50 0.50      1    1.49 0.00   1   2     1  0.02    -2.00 0.01
df5 <- data.frame(psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_operationalization == "Op Yes",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_operationalization == "Op Yes",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_operationalization == "Op Yes",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_operationalization == "Op Yes",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_operationalization == "Op No",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_operationalization == "Op No",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_operationalization == "Op No",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_operationalization == "Op No",], na.rm = T)$sd)



rownames(df5) <- colnames(df1)



colnames(df5) <- c("Mean_Qu_Yes_Op_Yes", "SD_Qu_Yes_Op_Yes","Mean_Qu_No_Op_Yes", "SD_Qu_No_Op_Yes", "Mean_Qu_Yes_Op_No", "SD_Qu_Yes_Op_No", "Mean_Qu_No_Op_No", "SD_Qu_No_Op_No")

df5 <- df5[1:(nrow(df5)-2),]

df5
##                          Mean_Qu_Yes_Op_Yes SD_Qu_Yes_Op_Yes Mean_Qu_No_Op_Yes SD_Qu_No_Op_Yes Mean_Qu_Yes_Op_No
## accessibility                     5.9198543        1.9211052         5.8947368       2.0672712         6.1448517
## understanding                     5.9253188        1.8805274         5.8929825       2.0246599         6.1083916
## empowerment                       4.7868852        1.9724682         4.6830123       2.0722160         4.8706294
## pref_quality_of_evidence          4.8704380        1.8419255         4.5666667       1.9274936         4.8064516
## knowledge_evidence                0.4493554        0.4978872         0.3132743       0.4642359         0.4575972
## knowledge                         0.6593002        0.4743816         0.6566372       0.4752521         0.6219081
##                          SD_Qu_Yes_Op_No Mean_Qu_No_Op_No SD_Qu_No_Op_No
## accessibility                  1.7446107        6.3578947      1.7987447
## understanding                  1.7015907        6.3827160      1.8059338
## empowerment                    1.9550225        5.0950704      2.0188120
## pref_quality_of_evidence       1.8992093        4.5366726      1.7995411
## knowledge_evidence             0.4986394        0.3561151      0.4792810
## knowledge                      0.4853397        0.6510791      0.4770581
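
The block above calls psych::describe() twice per design cell. As a sketch only (this is not the code that produced the table above), the same cell-wise mean/SD table could be built more compactly by looping over the four cells:

# Sketch: loop over the 2 x 2 design cells instead of repeating describe() eight times
cells <- expand.grid(qu = c("Qu Yes", "Qu No"), op = c("Op Yes", "Op No"),
                     stringsAsFactors = FALSE)
cell_stats <- lapply(seq_len(nrow(cells)), function(i) {
  d <- psych::describe(df1[df1$s_iv_quality_of_evidence == cells$qu[i] &
                           df1$s_iv_operationalization == cells$op[i], ])
  setNames(data.frame(d$mean, d$sd),
           paste0(c("Mean_", "SD_"),
                  gsub(" ", "_", cells$qu[i]), "_", gsub(" ", "_", cells$op[i])))
})
df5_alt <- do.call(cbind, cell_stats)               # same columns as df5 above
rownames(df5_alt) <- colnames(df1)
df5_alt <- df5_alt[1:(nrow(df5_alt) - 2), ]         # drop the two grouping factors
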
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_operationalization == "Op Yes",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 549 5.92 1.92      6    6.12 2.97   1   8     7 -0.64    -0.58 0.08
## understanding                2 549 5.93 1.88      6    6.12 2.97   1   8     7 -0.65    -0.55 0.08
## empowerment                  3 549 4.79 1.97      5    4.81 1.48   1   8     7 -0.11    -0.82 0.08
## pref_quality_of_evidence     4 548 4.87 1.84      5    4.88 1.48   1   8     7  0.00    -0.64 0.08
## knowledge_evidence           5 543 0.45 0.50      0    0.44 0.00   0   1     1  0.20    -1.96 0.02
## knowledge                    6 543 0.66 0.47      1    0.70 0.00   0   1     1 -0.67    -1.55 0.02
## s_iv_quality_of_evidence*    7 552 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
## s_iv_operationalization*     8 552 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_operationalization == "Op No",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 573 6.14 1.74      6    6.35 1.48   1   8     7 -0.80    -0.16 0.07
## understanding                2 572 6.11 1.70      6    6.28 1.48   1   8     7 -0.75    -0.08 0.07
## empowerment                  3 572 4.87 1.96      5    4.91 1.48   1   8     7 -0.17    -0.77 0.08
## pref_quality_of_evidence     4 558 4.81 1.90      5    4.83 1.48   1   8     7 -0.13    -0.56 0.08
## knowledge_evidence           5 566 0.46 0.50      0    0.45 0.00   0   1     1  0.17    -1.97 0.02
## knowledge                    6 566 0.62 0.49      1    0.65 0.00   0   1     1 -0.50    -1.75 0.02
## s_iv_quality_of_evidence*    7 576 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
## s_iv_operationalization*     8 576 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_operationalization == "Op Yes",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 570 5.89 2.07      6    6.18 2.97   1   8     7 -0.88    -0.18 0.09
## understanding                2 570 5.89 2.02      6    6.15 2.97   1   8     7 -0.83    -0.23 0.08
## empowerment                  3 571 4.68 2.07      5    4.72 1.48   1   8     7 -0.12    -0.87 0.09
## pref_quality_of_evidence     4 570 4.57 1.93      4    4.53 1.48   1   8     7  0.14    -0.63 0.08
## knowledge_evidence           5 565 0.31 0.46      0    0.27 0.00   0   1     1  0.80    -1.36 0.02
## knowledge                    6 565 0.66 0.48      1    0.70 0.00   0   1     1 -0.66    -1.57 0.02
## s_iv_quality_of_evidence*    7 575 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
## s_iv_operationalization*     8 575 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_operationalization == "Op No",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 570 6.36 1.80      7    6.62 1.48   1   8     7 -1.05     0.39 0.08
## understanding                2 567 6.38 1.81      7    6.66 1.48   1   8     7 -1.10     0.54 0.08
## empowerment                  3 568 5.10 2.02      5    5.20 1.48   1   8     7 -0.28    -0.73 0.08
## pref_quality_of_evidence     4 559 4.54 1.80      4    4.50 1.48   1   8     7  0.12    -0.45 0.08
## knowledge_evidence           5 556 0.36 0.48      0    0.32 0.00   0   1     1  0.60    -1.64 0.02
## knowledge                    6 556 0.65 0.48      1    0.69 0.00   0   1     1 -0.63    -1.60 0.02
## s_iv_quality_of_evidence*    7 571 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
## s_iv_operationalization*     8 571 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
names(df_cama)
##  [1] "id"                       "s_study_arm"              "s_iv_quality_of_evidence" "s_iv_operationalization" 
##  [5] "s_iv_technical_terms"     "s_int_psy"                "s_knowledge_evidence"     "pls"                     
##  [9] "accessibility"            "understanding"            "empowerment"              "pref_quality_of_evidence"
## [13] "knowledge_evidence"       "knowledge"
df1 <- df_cama[,c("accessibility", "understanding", "empowerment", "pref_quality_of_evidence","s_iv_operationalization")]

describe(df1)
##                          vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility               1 2262 6.08 1.89      7    6.33 1.48   1   8     7 -0.86    -0.09 0.04
## understanding               2 2258 6.08 1.87      7    6.32 1.48   1   8     7 -0.85    -0.07 0.04
## empowerment                 3 2260 4.86 2.01      5    4.91 1.48   1   8     7 -0.17    -0.80 0.04
## pref_quality_of_evidence    4 2235 4.69 1.87      5    4.68 1.48   1   8     7  0.03    -0.58 0.04
## s_iv_operationalization*    5 2274 1.50 0.50      1    1.49 0.00   1   2     1  0.02    -2.00 0.01
df5 <- data.frame(psych::describe(df1[df1$s_iv_operationalization == "Op Yes",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_operationalization == "Op Yes",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_operationalization == "Op No",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_operationalization == "Op No",], na.rm = T)$sd)

psych::describe(df1[df1$s_iv_operationalization == "Op Yes",], na.rm = T)
##                          vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility               1 1119 5.91 2.00      6    6.15 2.97   1   8     7 -0.78    -0.32 0.06
## understanding               2 1119 5.91 1.95      6    6.14 2.97   1   8     7 -0.76    -0.34 0.06
## empowerment                 3 1120 4.73 2.02      5    4.76 1.48   1   8     7 -0.12    -0.84 0.06
## pref_quality_of_evidence    4 1118 4.72 1.89      5    4.70 1.48   1   8     7  0.07    -0.64 0.06
## s_iv_operationalization*    5 1127 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_operationalization == "Op No",], na.rm = T)
##                          vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility               1 1143 6.25 1.77      7    6.49 1.48   1   8     7 -0.92     0.10 0.05
## understanding               2 1139 6.24 1.76      7    6.47 1.48   1   8     7 -0.92     0.20 0.05
## empowerment                 3 1140 4.98 1.99      5    5.05 1.48   1   8     7 -0.22    -0.75 0.06
## pref_quality_of_evidence    4 1117 4.67 1.85      5    4.66 1.48   1   8     7  0.00    -0.53 0.06
## s_iv_operationalization*    5 1147 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
rownames(df5) <- colnames(df1)



colnames(df5) <- c("Mean_Op_Yes", "SD_Op_Yes", "Mean_Op_No", "SD_Op_No")

df5 <- df5[1:(nrow(df5)-1),]

df5
##                          Mean_Op_Yes SD_Op_Yes Mean_Op_No SD_Op_No
## accessibility               5.907060  1.996046   6.251094 1.774241
## understanding               5.908847  1.954469   6.244952 1.758899
## empowerment                 4.733929  2.023700   4.982456 1.989359
## pref_quality_of_evidence    4.715564  1.891305   4.671441 1.854090
# Correlations

cor(df_cama$understanding, df_cama$accessibility, use = "pairwise.complete.obs")
## [1] 0.8221927
cor(df_cama$understanding, df_cama$empowerment, use = "pairwise.complete.obs")
## [1] 0.6402011
cor(df_cama$accessibility, df_cama$empowerment, use = "pairwise.complete.obs")
## [1] 0.5586959
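
The three pairwise correlations above could also be obtained in a single call, together with pairwise sample sizes and p-values, via psych::corr.test (an optional cross-check, not part of the original output):

# Optional cross-check: full correlation matrix with pairwise n and p-values
psych::corr.test(df_cama[, c("accessibility", "understanding", "empowerment")],
                 use = "pairwise")
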
df1 <- df[,c("accessibility", "understanding", "empowerment", "pref_quality_of_evidence","s_iv_quality_of_evidence")]

describe(df1)
##                           vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 4326 5.57 2.07      6    5.77 2.97   1   8     7 -0.55    -0.72 0.03
## understanding                2 4323 5.60 2.02      6    5.80 2.97   1   8     7 -0.58    -0.60 0.03
## empowerment                  3 4324 4.49 2.07      5    4.50 2.97   1   8     7 -0.04    -0.90 0.03
## pref_quality_of_evidence     4 4276 4.74 1.80      5    4.74 1.48   1   8     7  0.02    -0.48 0.03
## s_iv_quality_of_evidence*    5 4350 1.49 0.50      1    1.48 0.00   1   2     1  0.06    -2.00 0.01
df5 <- data.frame(psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No",], na.rm = T)$sd)

psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes",], na.rm = T)
##                           vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 2100 5.53 2.04      6    5.70 2.97   1   8     7 -0.50    -0.73 0.04
## understanding                2 2101 5.54 1.97      6    5.70 2.97   1   8     7 -0.51    -0.63 0.04
## empowerment                  3 2099 4.47 2.04      4    4.48 2.97   1   8     7 -0.03    -0.87 0.04
## pref_quality_of_evidence     4 2078 4.85 1.82      5    4.86 1.48   1   8     7 -0.06    -0.56 0.04
## s_iv_quality_of_evidence*    5 2112 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No",], na.rm = T)
##                           vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 2226 5.61 2.11      6    5.83 2.97   1   8     7 -0.60    -0.70 0.04
## understanding                2 2222 5.67 2.06      6    5.89 2.97   1   8     7 -0.65    -0.56 0.04
## empowerment                  3 2225 4.51 2.11      5    4.51 2.97   1   8     7 -0.04    -0.93 0.04
## pref_quality_of_evidence     4 2198 4.64 1.77      5    4.62 1.48   1   8     7  0.09    -0.38 0.04
## s_iv_quality_of_evidence*    5 2238 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
rownames(df5) <- colnames(df1)



colnames(df5) <- c("Mean_Qu_Yes", "SD_Qu_Yes", "Mean_Qu_No", "SD_Qu_No")

df5 <- df5[1:(nrow(df5)-1),]

df5
##                          Mean_Qu_Yes SD_Qu_Yes Mean_Qu_No SD_Qu_No
## accessibility               5.527143  2.035588   5.613657 2.106570
## understanding               5.536411  1.971278   5.666067 2.057943
## empowerment                 4.472130  2.036502   4.508764 2.106226
## pref_quality_of_evidence    4.850337  1.817697   4.637398 1.769103

Resilience

names(df_resilience)
##  [1] "id"                       "s_study_arm"              "s_iv_quality_of_evidence" "s_iv_operationalization" 
##  [5] "s_iv_technical_terms"     "s_int_psy"                "s_knowledge_evidence"     "pls"                     
##  [9] "accessibility"            "understanding"            "empowerment"              "pref_quality_of_evidence"
## [13] "knowledge_evidence"       "knowledge"
df1 <- df_resilience[,c("accessibility", "understanding", "empowerment", "pref_quality_of_evidence", "knowledge_evidence","knowledge", "s_iv_quality_of_evidence", "s_iv_technical_terms")]

require(psych)

describe(df1)
##                           vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 2064 5.01 2.12      5    5.10 2.97   1   8     7 -0.25    -0.99 0.05
## understanding                2 2065 5.08 2.05      5    5.18 2.97   1   8     7 -0.33    -0.84 0.05
## empowerment                  3 2064 4.09 2.07      4    4.03 2.97   1   8     7  0.13    -0.91 0.05
## pref_quality_of_evidence     4 2041 4.79 1.71      5    4.79 1.48   1   8     7  0.01    -0.38 0.04
## knowledge_evidence           5 1990 0.42 0.49      0    0.40 0.00   0   1     1  0.33    -1.89 0.01
## knowledge                    6 1990 0.65 0.48      1    0.69 0.00   0   1     1 -0.63    -1.61 0.01
## s_iv_quality_of_evidence*    7 2076 1.47 0.50      1    1.47 0.00   1   2     1  0.10    -1.99 0.01
## s_iv_technical_terms*        8 2076 1.97 0.80      2    1.97 1.48   1   3     2  0.05    -1.46 0.02
df5 <- data.frame(psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Replace",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Replace",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Expl None",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Expl None",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Replace",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Replace",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Expl None",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Expl None",], na.rm = T)$sd)



rownames(df5) <- colnames(df1)



colnames(df5) <- c("Mean_Qu_Yes_Tech_Glo", "SD_Qu_Yes_Tech_Glo","Mean_Qu_Yes_Tech_Repl", "SD_Qu_Yes_Tech_Repl", "Mean_Qu_Yes_Tech_None", "SD_Qu_Yes_Tech_None", "Mean_Qu_No_Tech_Glo", "SD_Qu_No_Tech_Glo","Mean_Qu_No_Tech_Repl", "SD_Qu_No_Tech_Repl", "Mean_Qu_No_Tech_None", "SD_Qu_No_Tech_None")

df5 <- df5[1:(nrow(df5)-2),]

df5
##                          Mean_Qu_Yes_Tech_Glo SD_Qu_Yes_Tech_Glo Mean_Qu_Yes_Tech_Repl SD_Qu_Yes_Tech_Repl
## accessibility                       4.9036545          2.0576734             5.2168675           2.1062313
## understanding                       5.0897010          1.9668741             5.2462462           1.9801787
## empowerment                         4.2033333          2.0237690             4.2349398           2.0680819
## pref_quality_of_evidence            5.0872483          1.6714392             4.8851964           1.7885473
## knowledge_evidence                  0.4265734          0.4954461             0.4923077           0.5007117
## knowledge                           0.6503497          0.4776954             0.6584615           0.4749569
##                          Mean_Qu_Yes_Tech_None SD_Qu_Yes_Tech_None Mean_Qu_No_Tech_Glo SD_Qu_No_Tech_Glo
## accessibility                        4.7188406           2.1004758           5.3274854         2.0358385
## understanding                        4.6416185           2.0652829           5.5877193         1.9198519
## empowerment                          3.7745665           2.0089771           4.3918129         2.0171530
## pref_quality_of_evidence             4.6501458           1.7770610           5.0029940         1.6004851
## knowledge_evidence                   0.4700599           0.4998516           0.4000000         0.4906534
## knowledge                            0.6197605           0.4861739           0.5938462         0.4918712
##                          Mean_Qu_No_Tech_Repl SD_Qu_No_Tech_Repl Mean_Qu_No_Tech_None SD_Qu_No_Tech_None
## accessibility                       5.2208122          2.1064480            4.6657143          2.1939540
## understanding                       5.3274112          2.0407826            4.5902579          2.1188853
## empowerment                         4.3832487          2.0878477            3.5285714          2.0363560
## pref_quality_of_evidence            4.7046632          1.6279472            4.4899713          1.7113674
## knowledge_evidence                  0.3884514          0.4880390            0.3421829          0.4751418
## knowledge                           0.7139108          0.4525259            0.6519174          0.4770665
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 301 4.90 2.06      5    4.97 2.97   1   8     7 -0.21    -0.94 0.12
## understanding                2 301 5.09 1.97      5    5.17 1.48   1   8     7 -0.29    -0.79 0.11
## empowerment                  3 300 4.20 2.02      4    4.19 2.97   1   8     7  0.01    -0.91 0.12
## pref_quality_of_evidence     4 298 5.09 1.67      5    5.07 1.48   1   8     7  0.06    -0.69 0.10
## knowledge_evidence           5 286 0.43 0.50      0    0.41 0.00   0   1     1  0.30    -1.92 0.03
## knowledge                    6 286 0.65 0.48      1    0.69 0.00   0   1     1 -0.63    -1.61 0.03
## s_iv_quality_of_evidence*    7 301 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
## s_iv_technical_terms*        8 301 3.00 0.00      3    3.00 0.00   3   3     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Replace",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 332 5.22 2.11    5.5    5.36 2.22   1   8     7 -0.43    -0.83 0.12
## understanding                2 333 5.25 1.98    6.0    5.37 1.48   1   8     7 -0.42    -0.73 0.11
## empowerment                  3 332 4.23 2.07    4.0    4.22 2.97   1   8     7  0.04    -0.99 0.11
## pref_quality_of_evidence     4 331 4.89 1.79    5.0    4.91 1.48   1   8     7 -0.11    -0.55 0.10
## knowledge_evidence           5 325 0.49 0.50    0.0    0.49 0.00   0   1     1  0.03    -2.01 0.03
## knowledge                    6 325 0.66 0.47    1.0    0.70 0.00   0   1     1 -0.67    -1.56 0.03
## s_iv_quality_of_evidence*    7 335 2.00 0.00    2.0    2.00 0.00   2   2     0   NaN      NaN 0.00
## s_iv_technical_terms*        8 335 2.00 0.00    2.0    2.00 0.00   2   2     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu Yes" & df1$s_iv_technical_terms == "Expl None",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 345 4.72 2.10      5    4.76 2.97   1   8     7 -0.06    -0.98 0.11
## understanding                2 346 4.64 2.07      5    4.67 2.97   1   8     7 -0.10    -0.89 0.11
## empowerment                  3 346 3.77 2.01      4    3.66 1.48   1   8     7  0.28    -0.74 0.11
## pref_quality_of_evidence     4 343 4.65 1.78      5    4.66 1.48   1   8     7 -0.01    -0.53 0.10
## knowledge_evidence           5 334 0.47 0.50      0    0.46 0.00   0   1     1  0.12    -1.99 0.03
## knowledge                    6 334 0.62 0.49      1    0.65 0.00   0   1     1 -0.49    -1.76 0.03
## s_iv_quality_of_evidence*    7 348 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
## s_iv_technical_terms*        8 348 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 342 5.33 2.04      5    5.47 2.97   1   8     7 -0.38    -0.78 0.11
## understanding                2 342 5.59 1.92      6    5.76 1.48   1   8     7 -0.58    -0.40 0.10
## empowerment                  3 342 4.39 2.02      4    4.41 2.97   1   8     7 -0.05    -0.88 0.11
## pref_quality_of_evidence     4 334 5.00 1.60      5    4.99 1.48   1   8     7  0.02    -0.19 0.09
## knowledge_evidence           5 325 0.40 0.49      0    0.38 0.00   0   1     1  0.41    -1.84 0.03
## knowledge                    6 325 0.59 0.49      1    0.62 0.00   0   1     1 -0.38    -1.86 0.03
## s_iv_quality_of_evidence*    7 343 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
## s_iv_technical_terms*        8 343 3.00 0.00      3    3.00 0.00   3   3     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Replace",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 394 5.22 2.11      6    5.34 2.97   1   8     7 -0.34    -1.02 0.11
## understanding                2 394 5.33 2.04      6    5.47 2.97   1   8     7 -0.45    -0.76 0.10
## empowerment                  3 394 4.38 2.09      4    4.36 2.97   1   8     7  0.02    -0.91 0.11
## pref_quality_of_evidence     4 386 4.70 1.63      5    4.67 1.48   1   8     7  0.13    -0.27 0.08
## knowledge_evidence           5 381 0.39 0.49      0    0.36 0.00   0   1     1  0.46    -1.80 0.03
## knowledge                    6 381 0.71 0.45      1    0.77 0.00   0   1     1 -0.94    -1.11 0.02
## s_iv_quality_of_evidence*    7 395 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
## s_iv_technical_terms*        8 395 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_quality_of_evidence == "Qu No" & df1$s_iv_technical_terms == "Expl None",], na.rm = T)
##                           vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility                1 350 4.67 2.19      5    4.70 2.97   1   8     7 -0.08    -1.19 0.12
## understanding                2 349 4.59 2.12      5    4.62 2.97   1   8     7 -0.09    -1.10 0.11
## empowerment                  3 350 3.53 2.04      3    3.35 2.97   1   8     7  0.47    -0.70 0.11
## pref_quality_of_evidence     4 349 4.49 1.71      4    4.48 1.48   1   8     7  0.07    -0.18 0.09
## knowledge_evidence           5 339 0.34 0.48      0    0.30 0.00   0   1     1  0.66    -1.57 0.03
## knowledge                    6 339 0.65 0.48      1    0.69 0.00   0   1     1 -0.64    -1.60 0.03
## s_iv_quality_of_evidence*    7 354 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
## s_iv_technical_terms*        8 354 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
# Correlations

cor(df_resilience$understanding, df_resilience$accessibility, use = "pairwise.complete.obs")
## [1] 0.8323697
cor(df_resilience$understanding, df_resilience$empowerment, use = "pairwise.complete.obs")
## [1] 0.7123068
cor(df_resilience$accessibility, df_resilience$empowerment, use = "pairwise.complete.obs")
## [1] 0.6439495
df1 <- df_resilience[,c("accessibility", "understanding", "empowerment", "pref_quality_of_evidence", "s_iv_technical_terms")]

require(psych)

describe(df1)
##                          vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility               1 2064 5.01 2.12      5    5.10 2.97   1   8     7 -0.25    -0.99 0.05
## understanding               2 2065 5.08 2.05      5    5.18 2.97   1   8     7 -0.33    -0.84 0.05
## empowerment                 3 2064 4.09 2.07      4    4.03 2.97   1   8     7  0.13    -0.91 0.05
## pref_quality_of_evidence    4 2041 4.79 1.71      5    4.79 1.48   1   8     7  0.01    -0.38 0.04
## s_iv_technical_terms*       5 2076 1.97 0.80      2    1.97 1.48   1   3     2  0.05    -1.46 0.02
df5 <- data.frame(psych::describe(df1[df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_technical_terms == "Replace",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_technical_terms == "Replace",], na.rm = T)$sd,
             psych::describe(df1[df1$s_iv_technical_terms == "Expl None",], na.rm = T)$mean,
             psych::describe(df1[df1$s_iv_technical_terms == "Expl None",], na.rm = T)$sd)


psych::describe(df1[df1$s_iv_technical_terms == "Expl Glossar",], na.rm = T)
##                          vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility               1 643 5.13 2.06      5    5.24 2.97   1   8     7 -0.30    -0.86 0.08
## understanding               2 643 5.35 1.96      6    5.50 1.48   1   8     7 -0.44    -0.63 0.08
## empowerment                 3 642 4.30 2.02      4    4.30 2.97   1   8     7 -0.02    -0.89 0.08
## pref_quality_of_evidence    4 632 5.04 1.63      5    5.02 1.48   1   8     7  0.04    -0.43 0.06
## s_iv_technical_terms*       5 644 3.00 0.00      3    3.00 0.00   3   3     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_technical_terms == "Replace",], na.rm = T)
##                          vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility               1 726 5.22 2.10      6    5.35 2.97   1   8     7 -0.38    -0.93 0.08
## understanding               2 727 5.29 2.01      6    5.42 2.97   1   8     7 -0.44    -0.75 0.07
## empowerment                 3 726 4.32 2.08      4    4.29 2.97   1   8     7  0.03    -0.94 0.08
## pref_quality_of_evidence    4 717 4.79 1.71      5    4.78 1.48   1   8     7  0.02    -0.42 0.06
## s_iv_technical_terms*       5 730 2.00 0.00      2    2.00 0.00   2   2     0   NaN      NaN 0.00
psych::describe(df1[df1$s_iv_technical_terms == "Expl None",], na.rm = T)
##                          vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## accessibility               1 695 4.69 2.15      5    4.73 2.97   1   8     7 -0.07    -1.09 0.08
## understanding               2 695 4.62 2.09      5    4.64 2.97   1   8     7 -0.10    -0.99 0.08
## empowerment                 3 696 3.65 2.03      4    3.51 2.97   1   8     7  0.37    -0.73 0.08
## pref_quality_of_evidence    4 692 4.57 1.74      4    4.57 1.48   1   8     7  0.04    -0.36 0.07
## s_iv_technical_terms*       5 702 1.00 0.00      1    1.00 0.00   1   1     0   NaN      NaN 0.00
rownames(df5) <- colnames(df1)



colnames(df5) <- c("Mean_Tech_Glo", "SD_Tech_Glo","Mean_Tech_Repl", "SD_Tech_Repl", "Mean_Tech_None", "SD_Tech_None")

df5 <- df5[1:(nrow(df5)-1),]

df5
##                          Mean_Tech_Glo SD_Tech_Glo Mean_Tech_Repl SD_Tech_Repl Mean_Tech_None SD_Tech_None
## accessibility                 5.129082    2.055418       5.219008     2.104897       4.692086     2.146677
## understanding                 5.354588    1.956360       5.290234     2.012277       4.615827     2.091022
## empowerment                   4.303738    2.020863       4.315427     2.078716       3.650862     2.025080
## pref_quality_of_evidence      5.042722    1.633566       4.788006     1.705142       4.569364     1.744817

Confirmatory Analyses

Load Packages

require(lme4)
## Loading required package: lme4
## Loading required package: Matrix
require(lmerTest)
## Loading required package: lmerTest
## 
## Attaching package: 'lmerTest'
## The following object is masked from 'package:lme4':
## 
##     lmer
## The following object is masked from 'package:stats':
## 
##     step
require(sjstats)
## Loading required package: sjstats
## Registered S3 methods overwritten by 'parameters':
##   method                           from      
##   as.double.parameters_kurtosis    datawizard
##   as.double.parameters_skewness    datawizard
##   as.double.parameters_smoothness  datawizard
##   as.numeric.parameters_kurtosis   datawizard
##   as.numeric.parameters_skewness   datawizard
##   as.numeric.parameters_smoothness datawizard
##   print.parameters_distribution    datawizard
##   print.parameters_kurtosis        datawizard
##   print.parameters_skewness        datawizard
##   summary.parameters_kurtosis      datawizard
##   summary.parameters_skewness      datawizard
## 
## Attaching package: 'sjstats'
## The following object is masked from 'package:psych':
## 
##     phi
require(MuMIn)
## Loading required package: MuMIn
library(multcomp)
## Loading required package: mvtnorm
## Loading required package: survival
## Loading required package: TH.data
## Loading required package: MASS
## 
## Attaching package: 'TH.data'
## The following object is masked from 'package:MASS':
## 
##     geyser
library(r2glmm)
library(performance)
## 
## Attaching package: 'performance'
## The following objects are masked from 'package:sjstats':
## 
##     icc, r2
require(effectsize)
## Loading required package: effectsize
## 
## Attaching package: 'effectsize'
## The following objects are masked from 'package:sjstats':
## 
##     cohens_f, phi
## The following object is masked from 'package:psych':
## 
##     phi

H1 and H2: Technical Terms

A Accessibility

m1 <- lmer(accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience
## 
## REML criterion at convergence: 8442.6
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.02541 -0.48177  0.03719  0.52881  2.87533 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.675    1.636   
##  Residual             1.673    1.293   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         5.01046    0.11752 1228.90462  42.637  < 2e-16 ***
## s_iv_technical_termsReplace         0.54451    0.13803 1091.60442   3.945 8.49e-05 ***
## s_iv_technical_termsExpl Glossar    0.43651    0.14213 1093.93513   3.071  0.00218 ** 
## s_iv_quality_of_evidenceQu Yes     -0.09228    0.11457 1093.37180  -0.806  0.42070    
## plsGroth                           -0.55018    0.05815 1011.55853  -9.462  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y
## s_v_tchnc_R -0.615                     
## s_v_tchn_EG -0.594  0.497              
## s_v_qlt__QY -0.484  0.032  0.023       
## plsGroth    -0.247 -0.002 -0.002  0.003
test <- glht(m1, linfct = mcp(s_iv_technical_terms = "Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience)
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)    
## Replace - Expl None == 0        0.5445     0.1380   3.945 7.98e-05 ***
## Expl Glossar - Expl None == 0   0.4365     0.1421   3.071  0.00213 ** 
## Expl Glossar - Replace == 0    -0.1080     0.1405  -0.769  0.44218    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience)
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)    
## Replace - Expl None == 0        0.5445     0.1380   3.945 0.000239 ***
## Expl Glossar - Expl None == 0   0.4365     0.1421   3.071 0.003197 ** 
## Expl Glossar - Replace == 0    -0.1080     0.1405  -0.769 0.442179    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
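
The same pairwise contrasts could alternatively be obtained with the emmeans package; this is only an optional cross-check sketch, the reported comparisons come from multcomp::glht above:

# Optional cross-check (not part of the original analysis): estimated marginal
# means and unadjusted pairwise contrasts for the technical-terms factor
library(emmeans)
emmeans(m1, pairwise ~ s_iv_technical_terms, adjust = "none")
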
r.squaredGLMM(m1)
## Warning: 'r.squaredGLMM' now calculates a revised statistic. See the help page.
##             R2m      R2c
## [1,] 0.02999658 0.626808
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.030    0.047    0.018
## 5                         plsGroth 0.017    0.030    0.008
## 2      s_iv_technical_termsReplace 0.012    0.022    0.004
## 3 s_iv_technical_termsExpl Glossar 0.007    0.016    0.002
## 4   s_iv_quality_of_evidenceQu Yes 0.000    0.004    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq |  meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_technical_terms     |   28.776 |  14.388 |     2 | 1093.597 |     8.600 |  < .001 |      | 0.013 |         0.014 |         |                 |           |    0.118 |      
## s_iv_quality_of_evidence |    1.085 |   1.085 |     1 | 1093.372 |     0.649 |   0.421 |      | 0.000 |         0.001 |         |                 |           |    0.023 |      
## pls                      |  149.764 | 149.764 |     1 | 1011.559 |    89.521 |  < .001 |      | 0.066 |         0.067 |         |                 |           |    0.268 |      
## Residuals                | 2080.261 |   1.008 |       |          |           |         | 2064 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_technical_terms"     "s_iv_quality_of_evidence" "pls"                      "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "medium"     NA          
## (Rules: field2013)
# Interest in psychological research

m1 <- lmer(accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience[!is.na(df_resilience$s_int_psy),])



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 8442.6
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.02541 -0.48177  0.03719  0.52881  2.87533 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.675    1.636   
##  Residual             1.673    1.293   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         5.01046    0.11752 1228.90462  42.637  < 2e-16 ***
## s_iv_technical_termsReplace         0.54451    0.13803 1091.60442   3.945 8.49e-05 ***
## s_iv_technical_termsExpl Glossar    0.43651    0.14213 1093.93513   3.071  0.00218 ** 
## s_iv_quality_of_evidenceQu Yes     -0.09228    0.11457 1093.37180  -0.806  0.42070    
## plsGroth                           -0.55018    0.05815 1011.55853  -9.462  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y
## s_v_tchnc_R -0.615                     
## s_v_tchn_EG -0.594  0.497              
## s_v_qlt__QY -0.484  0.032  0.023       
## plsGroth    -0.247 -0.002 -0.002  0.003
r.squaredGLMM(m1)
##             R2m      R2c
## [1,] 0.02999658 0.626808
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.030    0.047    0.018
## 5                         plsGroth 0.017    0.030    0.008
## 2      s_iv_technical_termsReplace 0.012    0.022    0.004
## 3 s_iv_technical_termsExpl Glossar 0.007    0.016    0.002
## 4   s_iv_quality_of_evidenceQu Yes 0.000    0.004    0.000
m6 <- lmer(accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy, data = df_resilience[!is.na(df_resilience$s_int_psy),])

summary(m6)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id) + s_int_psy
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 8310.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2278 -0.4836  0.0575  0.5139  2.7345 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.250    1.500   
##  Residual             1.677    1.295   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         5.01966    0.11080 1240.12256  45.303  < 2e-16 ***
## s_iv_technical_termsReplace         0.54343    0.12961 1085.34403   4.193 2.98e-05 ***
## s_iv_technical_termsExpl Glossar    0.45064    0.13348 1087.80582   3.376 0.000761 ***
## s_iv_quality_of_evidenceQu Yes     -0.10679    0.10759 1087.13307  -0.993 0.321159    
## plsGroth                           -0.54654    0.05814 1012.22283  -9.401  < 2e-16 ***
## s_int_psy                           0.64344    0.05346 1099.02550  12.035  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y plsGrt
## s_v_tchnc_R -0.613                            
## s_v_tchn_EG -0.591  0.497                     
## s_v_qlt__QY -0.483  0.032  0.023              
## plsGroth    -0.262 -0.002 -0.002  0.003       
## s_int_psy    0.006  0.000  0.009 -0.010  0.007
r.squaredGLMM(m6)
##            R2m       R2c
## [1,] 0.1230622 0.6255119
r2beta(m6, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.123    0.151    0.100
## 6                        s_int_psy 0.095    0.120    0.073
## 5                         plsGroth 0.019    0.032    0.009
## 2      s_iv_technical_termsReplace 0.013    0.024    0.005
## 3 s_iv_technical_termsExpl Glossar 0.008    0.018    0.002
## 4   s_iv_quality_of_evidenceQu Yes 0.001    0.005    0.000
m7 <- lmer(accessibility ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience[!is.na(df_resilience$s_int_psy),])

summary(m7)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 8314.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2039 -0.4859  0.0552  0.5125  2.7195 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.253    1.501   
##  Residual             1.677    1.295   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                              Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                   5.02033    0.11086 1237.82813  45.287  < 2e-16 ***
## s_iv_technical_termsReplace                   0.54288    0.12967 1083.60834   4.187 3.06e-05 ***
## s_iv_technical_termsExpl Glossar              0.44795    0.13356 1085.63521   3.354 0.000824 ***
## s_int_psy                                     0.71876    0.09135 1087.96312   7.868 8.63e-15 ***
## s_iv_quality_of_evidenceQu Yes               -0.10832    0.10767 1085.51069  -1.006 0.314656    
## plsGroth                                     -0.54548    0.05814 1012.26696  -9.383  < 2e-16 ***
## s_iv_technical_termsReplace:s_int_psy        -0.08362    0.12748 1091.89295  -0.656 0.511998    
## s_iv_technical_termsExpl Glossar:s_int_psy   -0.15271    0.13432 1100.36284  -1.137 0.255820    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s_v__EG s_nt_p s____Y plsGrt s___R:
## s_v_tchnc_R -0.613                                           
## s_v_tchn_EG -0.591  0.497                                    
## s_int_psy    0.012 -0.005 -0.005                             
## s_v_qlt__QY -0.483  0.032  0.023  -0.023                     
## plsGroth    -0.262 -0.002 -0.002   0.015  0.003              
## s_v_tc_R:__ -0.013  0.007  0.003  -0.717  0.025 -0.009       
## s_v_t_EG:__ -0.004  0.003  0.018  -0.680  0.010 -0.016  0.487
r.squaredGLMM(m7)
##            R2m      R2c
## [1,] 0.1238579 0.626189
r2beta(m7, method = "nsj")
##                                       Effect   Rsq upper.CL lower.CL
## 1                                      Model 0.124    0.153    0.101
## 4                                  s_int_psy 0.043    0.062    0.028
## 6                                   plsGroth 0.019    0.032    0.009
## 2                s_iv_technical_termsReplace 0.013    0.024    0.005
## 3           s_iv_technical_termsExpl Glossar 0.008    0.018    0.002
## 8 s_iv_technical_termsExpl Glossar:s_int_psy 0.001    0.005    0.000
## 5             s_iv_quality_of_evidenceQu Yes 0.001    0.005    0.000
## 7      s_iv_technical_termsReplace:s_int_psy 0.000    0.004    0.000
anova(m1,m6, m7)
## refitting model(s) with ML (instead of REML)
## Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## Models:
## m1: accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id)
## m6: accessibility ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy
## m7: accessibility ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance    Chisq Df Pr(>Chisq)    
## m1    7 8441.9 8481.3 -4214.0   8427.9                           
## m6    8 8307.6 8352.6 -4145.8   8291.6 136.3477  1     <2e-16 ***
## m7   10 8310.3 8366.6 -4145.1   8290.3   1.3127  2     0.5187    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
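
The likelihood-ratio statistics in the table above follow directly from the ML deviances, e.g. for m6 vs. m1:

# LRT for m6 vs. m1, reproduced from the deviance column of the anova() table
8427.9 - 8291.6                                 # Chisq = 136.3, Df = 8 - 7 = 1
pchisq(136.3477, df = 1, lower.tail = FALSE)    # p < 2e-16
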
# ICC Subject

m0 <- lmer(accessibility ~ (1 | id), data = df_resilience)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ (1 | id)
##    Data: df_resilience
## 
## REML criterion at convergence: 8535.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.69336 -0.55614  0.08356  0.56335  2.49701 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.658    1.630   
##  Residual             1.819    1.349   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 5.019e+00  5.766e-02 1.095e+03   87.05   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.594
##   Conditional ICC: 0.594
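
The adjusted ICC reported here is simply the id-intercept variance divided by the total variance of the intercept-only model m0:

# ICC from the m0 variance components shown above
2.658 / (2.658 + 1.819)                  # approx. 0.594
vc <- as.data.frame(VarCorr(m0))         # programmatic equivalent
vc$vcov[vc$grp == "id"] / sum(vc$vcov)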

B Understanding

m1 <- lmer(understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience
## 
## REML criterion at convergence: 8277.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2210 -0.4793  0.0598  0.5198  3.0099 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.476    1.574   
##  Residual             1.536    1.239   
## Number of obs: 2065, groups:  id, 1101
## 
## Fixed effects:
##                                   Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)                         4.9493     0.1130 1230.8055  43.806  < 2e-16 ***
## s_iv_technical_termsReplace         0.6827     0.1326 1091.9235   5.147 3.14e-07 ***
## s_iv_technical_termsExpl Glossar    0.7511     0.1366 1094.7145   5.498 4.77e-08 ***
## s_iv_quality_of_evidenceQu Yes     -0.1359     0.1101 1093.7269  -1.235    0.217    
## plsGroth                           -0.5442     0.0557 1012.3304  -9.771  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y
## s_v_tchnc_R -0.616                     
## s_v_tchn_EG -0.594  0.497              
## s_v_qlt__QY -0.485  0.032  0.024       
## plsGroth    -0.248 -0.001 -0.001  0.003
r.squaredGLMM(m1)
##            R2m       R2c
## [1,] 0.0462724 0.6349152
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.046    0.067    0.032
## 3 s_iv_technical_termsExpl Glossar 0.022    0.036    0.011
## 2      s_iv_technical_termsReplace 0.020    0.033    0.010
## 5                         plsGroth 0.018    0.031    0.009
## 4   s_iv_quality_of_evidenceQu Yes 0.001    0.006    0.000
test <- glht(m1, linfct = mcp(s_iv_technical_terms = "Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience)
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)    
## Replace - Expl None == 0        0.6827     0.1326   5.147 2.65e-07 ***
## Expl Glossar - Expl None == 0   0.7511     0.1366   5.498 3.83e-08 ***
## Expl Glossar - Replace == 0     0.0684     0.1351   0.506    0.613    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience)
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)    
## Replace - Expl None == 0        0.6827     0.1326   5.147 3.98e-07 ***
## Expl Glossar - Expl None == 0   0.7511     0.1366   5.498 1.15e-07 ***
## Expl Glossar - Replace == 0     0.0684     0.1351   0.506    0.613    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq |  meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_technical_terms     |   58.314 |  29.157 |     2 | 1094.057 |    18.984 |  < .001 |      | 0.028 |         0.030 |         |                 |           |    0.175 |      
## s_iv_quality_of_evidence |    2.342 |   2.342 |     1 | 1093.727 |     1.525 |   0.217 |      | 0.001 |         0.001 |         |                 |           |    0.035 |      
## pls                      |  146.629 | 146.629 |     1 | 1012.330 |    95.468 |  < .001 |      | 0.069 |         0.071 |         |                 |           |    0.277 |      
## Residuals                | 1908.572 |   0.924 |       |          |           |         | 2065 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_technical_terms"     "s_iv_quality_of_evidence" "pls"                      "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "medium"     NA          
## (Rules: field2013)
# Interest in psychological research

m1 <- lmer(understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience[!is.na(df_resilience$s_int_psy),])



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 8277.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2210 -0.4793  0.0598  0.5198  3.0099 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.476    1.574   
##  Residual             1.536    1.239   
## Number of obs: 2065, groups:  id, 1101
## 
## Fixed effects:
##                                   Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)                         4.9493     0.1130 1230.8055  43.806  < 2e-16 ***
## s_iv_technical_termsReplace         0.6827     0.1326 1091.9235   5.147 3.14e-07 ***
## s_iv_technical_termsExpl Glossar    0.7511     0.1366 1094.7145   5.498 4.77e-08 ***
## s_iv_quality_of_evidenceQu Yes     -0.1359     0.1101 1093.7269  -1.235    0.217    
## plsGroth                           -0.5442     0.0557 1012.3304  -9.771  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y
## s_v_tchnc_R -0.616                     
## s_v_tchn_EG -0.594  0.497              
## s_v_qlt__QY -0.485  0.032  0.024       
## plsGroth    -0.248 -0.001 -0.001  0.003
r.squaredGLMM(m1)
##            R2m       R2c
## [1,] 0.0462724 0.6349152
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.046    0.067    0.032
## 3 s_iv_technical_termsExpl Glossar 0.022    0.036    0.011
## 2      s_iv_technical_termsReplace 0.020    0.033    0.010
## 5                         plsGroth 0.018    0.031    0.009
## 4   s_iv_quality_of_evidenceQu Yes 0.001    0.006    0.000
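For reference, the marginal and conditional R² that r.squaredGLMM() reports for a Gaussian mixed model can be reproduced by hand from the variance components (a minimal sketch following the Nakagawa & Schielzeth decomposition; it uses only lme4 accessors already in use above):

vc    <- as.data.frame(VarCorr(m1))                       # variance components of the current model
var_f <- var(as.numeric(model.matrix(m1) %*% fixef(m1)))  # variance of the fixed-effect predictions
var_r <- vc$vcov[vc$grp == "id"]                          # random-intercept variance
var_e <- vc$vcov[vc$grp == "Residual"]                    # residual variance
c(R2m = var_f / (var_f + var_r + var_e),                  # marginal R2 (fixed effects only)
  R2c = (var_f + var_r) / (var_f + var_r + var_e))        # conditional R2 (fixed + random)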
m6 <- lmer(understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy, data = df_resilience[!is.na(df_resilience$s_int_psy),])

summary(m6)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id) + s_int_psy
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 8090.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2386 -0.4818  0.0590  0.5118  3.1263 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.944    1.394   
##  Residual             1.538    1.240   
## Number of obs: 2065, groups:  id, 1101
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         4.95949    0.10411 1250.05216  47.635  < 2e-16 ***
## s_iv_technical_termsReplace         0.68157    0.12153 1086.45097   5.608 2.59e-08 ***
## s_iv_technical_termsExpl Glossar    0.76595    0.12517 1089.51371   6.119 1.31e-09 ***
## s_iv_quality_of_evidenceQu Yes     -0.15405    0.10088 1088.35118  -1.527    0.127    
## plsGroth                           -0.53865    0.05563 1016.09681  -9.683  < 2e-16 ***
## s_int_psy                           0.72298    0.05012 1099.54000  14.425  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y plsGrt
## s_v_tchnc_R -0.612                            
## s_v_tchn_EG -0.590  0.497                     
## s_v_qlt__QY -0.482  0.032  0.024              
## plsGroth    -0.268 -0.001 -0.001  0.003       
## s_int_psy    0.006  0.000  0.009 -0.010  0.007
r.squaredGLMM(m6)
##            R2m       R2c
## [1,] 0.1715256 0.6340409
r2beta(m6, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.172    0.202    0.145
## 6                        s_int_psy 0.131    0.158    0.105
## 3 s_iv_technical_termsExpl Glossar 0.027    0.042    0.015
## 2      s_iv_technical_termsReplace 0.022    0.037    0.012
## 5                         plsGroth 0.020    0.034    0.010
## 4   s_iv_quality_of_evidenceQu Yes 0.002    0.007    0.000
m7 <- lmer(understanding ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience[!is.na(df_resilience$s_int_psy),])

summary(m7)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 8094.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2463 -0.4891  0.0612  0.5049  3.0876 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.947    1.395   
##  Residual             1.538    1.240   
## Number of obs: 2065, groups:  id, 1101
## 
## Fixed effects:
##                                              Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                 4.959e+00  1.042e-01  1.248e+03  47.609  < 2e-16 ***
## s_iv_technical_termsReplace                 6.817e-01  1.216e-01  1.085e+03   5.607 2.61e-08 ***
## s_iv_technical_termsExpl Glossar            7.632e-01  1.252e-01  1.087e+03   6.094 1.53e-09 ***
## s_int_psy                                   7.590e-01  8.561e-02  1.088e+03   8.866  < 2e-16 ***
## s_iv_quality_of_evidenceQu Yes             -1.537e-01  1.010e-01  1.087e+03  -1.522    0.128    
## plsGroth                                   -5.378e-01  5.563e-02  1.016e+03  -9.668  < 2e-16 ***
## s_iv_technical_termsReplace:s_int_psy       5.865e-03  1.195e-01  1.092e+03   0.049    0.961    
## s_iv_technical_termsExpl Glossar:s_int_psy -1.294e-01  1.259e-01  1.102e+03  -1.028    0.304    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s_v__EG s_nt_p s____Y plsGrt s___R:
## s_v_tchnc_R -0.612                                           
## s_v_tchn_EG -0.590  0.497                                    
## s_int_psy    0.011 -0.004 -0.004                             
## s_v_qlt__QY -0.482  0.033  0.024  -0.023                     
## plsGroth    -0.268 -0.001 -0.001   0.015  0.003              
## s_v_tc_R:__ -0.013  0.006  0.003  -0.717  0.025 -0.008       
## s_v_t_EG:__ -0.004  0.003  0.018  -0.680  0.011 -0.015  0.487
r.squaredGLMM(m7)
##            R2m       R2c
## [1,] 0.1722773 0.6347116
r2beta(m7, method = "nsj")
##                                       Effect   Rsq upper.CL lower.CL
## 1                                      Model 0.172    0.203    0.147
## 4                                  s_int_psy 0.054    0.074    0.037
## 3           s_iv_technical_termsExpl Glossar 0.026    0.041    0.014
## 2                s_iv_technical_termsReplace 0.022    0.037    0.012
## 6                                   plsGroth 0.020    0.034    0.010
## 5             s_iv_quality_of_evidenceQu Yes 0.002    0.007    0.000
## 8 s_iv_technical_termsExpl Glossar:s_int_psy 0.001    0.005    0.000
## 7      s_iv_technical_termsReplace:s_int_psy 0.000    0.002    0.000
anova(m1,m6, m7)
## refitting model(s) with ML (instead of REML)
## Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## Models:
## m1: understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id)
## m6: understanding ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy
## m7: understanding ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance   Chisq Df Pr(>Chisq)    
## m1    7 8276.0 8315.4 -4131.0   8262.0                          
## m6    8 8086.9 8132.0 -4035.5   8070.9 191.046  1     <2e-16 ***
## m7   10 8089.5 8145.8 -4034.7   8069.5   1.459  2     0.4822    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
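The chi-square statistics above are simply the drops in ML deviance between nested models; the m1 vs m6 test, for example, can be reconstructed from the printed (rounded) deviances:

pchisq(8262.0 - 8070.9, df = 1, lower.tail = FALSE)  # LR test of adding s_int_psy, ~191.0 on 1 df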
#ICC Subject

m0 <- lmer(understanding ~ (1 | id), data = df_resilience)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ (1 | id)
##    Data: df_resilience
## 
## REML criterion at convergence: 8396.4
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.81113 -0.49898  0.07828  0.56346  2.58388 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.512    1.585   
##  Residual             1.683    1.297   
## Number of obs: 2065, groups:  id, 1101
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 5.087e+00  5.589e-02 1.096e+03   91.02   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.599
##   Conditional ICC: 0.599
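The adjusted ICC is the share of between-person (intercept) variance in the total variance of the null model, which can be checked directly from the variance components printed above:

2.512 / (2.512 + 1.683)  # intercept variance / (intercept + residual variance) ≈ 0.599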

C Knowledge

table(df_resilience$knowledge, df_resilience$s_iv_technical_terms)/sum(table(df_resilience$knowledge, df_resilience$s_iv_technical_terms)) * 100
##    
##     Expl None  Replace Expl Glossar
##   0  12.31156 11.05528     11.65829
##   1  21.50754 24.42211     19.04523
table(df_resilience$knowledge[df_resilience$s_iv_technical_terms == "Replace"])/sum(table(df_resilience$knowledge[df_resilience$s_iv_technical_terms == "Replace"])) * 100
## 
##        0        1 
## 31.16147 68.83853
table(df_resilience$knowledge[df_resilience$s_iv_technical_terms == "Expl None"])/sum(table(df_resilience$knowledge[df_resilience$s_iv_technical_terms == "Expl None"])) * 100
## 
##        0        1 
## 36.40416 63.59584
table(df_resilience$knowledge[df_resilience$s_iv_technical_terms == "Expl Glossar"])/sum(table(df_resilience$knowledge[df_resilience$s_iv_technical_terms == "Expl Glossar"])) * 100
## 
##        0        1 
## 37.97054 62.02946
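The same percentages can be obtained more compactly with prop.table(); the sketch below mirrors the table()/sum() calls above (overall cells and per wording condition):

round(prop.table(table(df_resilience$knowledge, df_resilience$s_iv_technical_terms)) * 100, 2)              # overall cell percentages
round(prop.table(table(df_resilience$knowledge, df_resilience$s_iv_technical_terms), margin = 2) * 100, 2)  # column percentages per condition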
m1 <- glmer(knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id), family = binomial("logit"), data = df_resilience)


summary(m1)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience
## 
##      AIC      BIC   logLik deviance df.resid 
##   2439.4   2473.0  -1213.7   2427.4     1984 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.8769 -0.8946  0.3655  0.5982  1.1178 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.617    1.272   
## Number of obs: 1990, groups:  id, 1026
## 
## Fixed effects:
##                                  Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                       1.41519    0.16374   8.643   <2e-16 ***
## s_iv_technical_termsReplace       0.32753    0.16791   1.951   0.0511 .  
## s_iv_technical_termsExpl Glossar -0.10669    0.17057  -0.625   0.5317    
## s_iv_quality_of_evidenceQu Yes   -0.08229    0.13841  -0.595   0.5521    
## plsGroth                         -1.17890    0.12431  -9.483   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y
## s_v_tchnc_R -0.473                     
## s_v_tchn_EG -0.526  0.483              
## s_v_qlt__QY -0.437  0.024  0.027       
## plsGroth    -0.511 -0.055  0.020  0.016
test <- glht(m1, linfct = mcp(s_iv_technical_terms = "Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: glmer(formula = knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience, family = binomial("logit"))
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)  
## Replace - Expl None == 0        0.3275     0.1679   1.951   0.0511 .
## Expl Glossar - Expl None == 0  -0.1067     0.1706  -0.625   0.5317  
## Expl Glossar - Replace == 0    -0.4342     0.1721  -2.523   0.0116 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: glmer(formula = knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience, family = binomial("logit"))
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)  
## Replace - Expl None == 0        0.3275     0.1679   1.951   0.0767 .
## Expl Glossar - Expl None == 0  -0.1067     0.1706  -0.625   0.5317  
## Expl Glossar - Replace == 0    -0.4342     0.1721  -2.523   0.0349 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
r.squaredGLMM(m1)
## Warning: The null model is correct only if all variables used by the original model remain unchanged.
##                    R2m       R2c
## theoretical 0.07259137 0.3782418
## delta       0.05996717 0.3124626
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.049    0.071    0.034
## 5                         plsGroth 0.045    0.065    0.029
## 2      s_iv_technical_termsReplace 0.002    0.008    0.000
## 3 s_iv_technical_termsExpl Glossar 0.000    0.004    0.000
## 4   s_iv_quality_of_evidenceQu Yes 0.000    0.003    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: npar
## term                     | npar |    sumsq |  meansq | statistic |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_technical_terms     |    2 |    6.301 |   3.150 |     3.150 |      | 0.004 |         0.004 |         |                 |           |    0.062 |      
## s_iv_quality_of_evidence |    1 |    0.349 |   0.349 |     0.349 |      | 0.000 |         0.000 |         |                 |           |    0.015 |      
## pls                      |    1 |  122.559 | 122.559 |   122.559 |      | 0.070 |         0.070 |         |                 |           |    0.275 |      
## Residuals                |      | 1625.649 |   0.124 |           | 1990 |       |               |         |                 |           |          |
exp(fixef(m1))
##                      (Intercept)      s_iv_technical_termsReplace s_iv_technical_termsExpl Glossar 
##                        4.1172612                        1.3875345                        0.8988072 
##   s_iv_quality_of_evidenceQu Yes                         plsGroth 
##                        0.9210017                        0.3076168
exp(summary(test, test = adjusted("none"))$test$coefficients)
##      Replace - Expl None Expl Glossar - Expl None   Expl Glossar - Replace 
##                1.3875345                0.8988072                0.6477729
interpret_oddsratio(exp(summary(test, test = adjusted("none"))$test$coefficients), rules = "cohen1988")
##      Replace - Expl None Expl Glossar - Expl None   Expl Glossar - Replace 
##             "very small"             "very small"                  "small" 
## (Rules: cohen1988)
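For completeness, approximate 95% Wald confidence intervals for these odds ratios can be sketched from the fixed-effect estimates and their standard errors (an asymptotic normal approximation, not profile or bootstrap intervals):

est <- fixef(m1)
se  <- sqrt(diag(vcov(m1)))
round(exp(cbind(OR = est, lwr = est - 1.96 * se, upr = est + 1.96 * se)), 3)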
# Interest in psychological research

m6 <- glmer(knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy, family = binomial("logit"), data = df_resilience)

summary(m6)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id) + s_int_psy
##    Data: df_resilience
## 
##      AIC      BIC   logLik deviance df.resid 
##   2382.8   2422.0  -1184.4   2368.8     1983 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.3459 -0.7626  0.3522  0.5679  1.4836 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.471    1.213   
## Number of obs: 1990, groups:  id, 1026
## 
## Fixed effects:
##                                  Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                       1.42311    0.16264   8.750  < 2e-16 ***
## s_iv_technical_termsReplace       0.31830    0.16581   1.920   0.0549 .  
## s_iv_technical_termsExpl Glossar -0.09473    0.16854  -0.562   0.5741    
## s_iv_quality_of_evidenceQu Yes   -0.08209    0.13675  -0.600   0.5483    
## plsGroth                         -1.19426    0.12499  -9.555  < 2e-16 ***
## s_int_psy                         0.52404    0.07329   7.150 8.66e-13 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y plsGrt
## s_v_tchnc_R -0.473                            
## s_v_tchn_EG -0.520  0.485                     
## s_v_qlt__QY -0.434  0.023  0.024              
## plsGroth    -0.518 -0.053  0.016  0.017       
## s_int_psy    0.178  0.037 -0.004 -0.013 -0.199
m7 <- glmer(knowledge ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id), family = binomial("logit"), data = df_resilience, glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))

summary(m7)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2383.9   2434.2  -1182.9   2365.9     1981 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.2619 -0.7512  0.3448  0.5637  1.6052 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.458    1.208   
## Number of obs: 1990, groups:  id, 1026
## 
## Fixed effects:
##                                            Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                                 1.42411    0.16247   8.765  < 2e-16 ***
## s_iv_technical_termsReplace                 0.31000    0.16539   1.874   0.0609 .  
## s_iv_technical_termsExpl Glossar           -0.08676    0.16895  -0.514   0.6076    
## s_int_psy                                   0.55152    0.11856   4.652 3.29e-06 ***
## s_iv_quality_of_evidenceQu Yes             -0.08315    0.13656  -0.609   0.5426    
## plsGroth                                   -1.19525    0.12494  -9.567  < 2e-16 ***
## s_iv_technical_termsReplace:s_int_psy      -0.17410    0.16297  -1.068   0.2854    
## s_iv_technical_termsExpl Glossar:s_int_psy  0.11651    0.17255   0.675   0.4995    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s_v__EG s_nt_p s____Y plsGrt s___R:
## s_v_tchnc_R -0.474                                           
## s_v_tchn_EG -0.518  0.485                                    
## s_int_psy    0.137 -0.010 -0.037                             
## s_v_qlt__QY -0.434  0.024  0.025  -0.025                     
## plsGroth    -0.518 -0.053  0.014  -0.118  0.016              
## s_v_tc_R:__ -0.049  0.036  0.024  -0.702  0.022  0.019       
## s_v_t_EG:__ -0.009  0.025  0.057  -0.647  0.017 -0.031  0.475
anova(m1,m6,m7)
## Data: df_resilience
## Models:
## m1: knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id)
## m6: knowledge ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy
## m7: knowledge ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance   Chisq Df Pr(>Chisq)    
## m1    6 2439.4 2473.0 -1213.7   2427.4                          
## m6    7 2382.8 2422.0 -1184.4   2368.8 58.5999  1  1.932e-14 ***
## m7    9 2383.9 2434.2 -1182.9   2365.9  2.9631  2     0.2273    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#ICC Subject


m0 <- glmer(knowledge ~ (1 | id), family = binomial("logit"), data = df_resilience)

summary(m0)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ (1 | id)
##    Data: df_resilience
## 
##      AIC      BIC   logLik deviance df.resid 
##   2546.6   2557.8  -1271.3   2542.6     1988 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.2928 -0.9289  0.5462  0.5462  0.7735 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 0.9791   0.9895  
## Number of obs: 1990, groups:  id, 1026
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.75961    0.06828   11.12   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.229
##   Conditional ICC: 0.229
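For the binary outcome, the adjusted ICC is defined on the latent logit scale, using pi^2/3 as the level-1 residual variance; it can be checked from the intercept variance printed above:

0.9791 / (0.9791 + pi^2 / 3)  # ≈ 0.229, matching performance::icc(m0)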

D Empowerment

m1 <- lmer(empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience
## 
## REML criterion at convergence: 8196.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2070 -0.4795 -0.0377  0.4650  3.3815 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.824    1.681   
##  Residual             1.315    1.147   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         3.85345    0.11608 1201.13824  33.197  < 2e-16 ***
## s_iv_technical_termsReplace         0.67952    0.13715 1089.44717   4.955 8.39e-07 ***
## s_iv_technical_termsExpl Glossar    0.66392    0.14122 1091.93598   4.701 2.91e-06 ***
## s_iv_quality_of_evidenceQu Yes     -0.01451    0.11384 1091.40493  -0.127    0.899    
## plsGroth                           -0.43105    0.05168 1000.27711  -8.340 2.44e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y
## s_v_tchnc_R -0.619                     
## s_v_tchn_EG -0.598  0.497              
## s_v_qlt__QY -0.488  0.032  0.024       
## plsGroth    -0.223 -0.001 -0.001  0.004
test <- glht(m1, linfct = mcp(s_iv_technical_terms = "Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience)
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)    
## Replace - Expl None == 0        0.6795     0.1371   4.955 7.24e-07 ***
## Expl Glossar - Expl None == 0   0.6639     0.1412   4.701 2.58e-06 ***
## Expl Glossar - Replace == 0    -0.0156     0.1396  -0.112    0.911    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence + 
##     pls + (1 | id), data = df_resilience)
## 
## Linear Hypotheses:
##                               Estimate Std. Error z value Pr(>|z|)    
## Replace - Expl None == 0        0.6795     0.1371   4.955 2.17e-06 ***
## Expl Glossar - Expl None == 0   0.6639     0.1412   4.701 3.88e-06 ***
## Expl Glossar - Replace == 0    -0.0156     0.1396  -0.112    0.911    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
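The same pairwise comparisons could be cross-checked with the emmeans package (a sketch under the assumption that emmeans is installed; it is not part of the original pipeline):

library(emmeans)                             # assumption: emmeans is available
emm <- emmeans(m1, ~ s_iv_technical_terms)   # estimated marginal means per wording condition
pairs(emm, adjust = "none")                  # unadjusted pairwise contrasts (cf. glht above)
pairs(emm, adjust = "BH")                    # Benjamini-Hochberg adjusted contrasts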
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.03441642 0.6931759
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.034    0.053    0.022
## 2      s_iv_technical_termsReplace 0.019    0.032    0.009
## 3 s_iv_technical_termsExpl Glossar 0.017    0.030    0.008
## 5                         plsGroth 0.011    0.022    0.004
## 4   s_iv_quality_of_evidenceQu Yes 0.000    0.002    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_technical_terms     |   41.052 | 20.526 |     2 | 1091.600 |    15.603 |  < .001 |      | 0.024 |         0.026 |         |                 |           |    0.162 |      
## s_iv_quality_of_evidence |    0.021 |  0.021 |     1 | 1091.405 |     0.016 |   0.899 |      | 0.000 |         0.000 |         |                 |           |    0.004 |      
## pls                      |   91.502 | 91.502 |     1 | 1000.277 |    69.558 |  < .001 |      | 0.054 |         0.055 |         |                 |           |    0.242 |      
## Residuals                | 1561.427 |  0.757 |       |          |           |         | 2064 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_technical_terms"     "s_iv_quality_of_evidence" "pls"                      "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "small"      NA          
## (Rules: field2013)
# Interest in psychological research

m1 <- lmer(empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience[!is.na(df_resilience$s_int_psy),])



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 8196.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2070 -0.4795 -0.0377  0.4650  3.3815 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.824    1.681   
##  Residual             1.315    1.147   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         3.85345    0.11608 1201.13824  33.197  < 2e-16 ***
## s_iv_technical_termsReplace         0.67952    0.13715 1089.44717   4.955 8.39e-07 ***
## s_iv_technical_termsExpl Glossar    0.66392    0.14122 1091.93598   4.701 2.91e-06 ***
## s_iv_quality_of_evidenceQu Yes     -0.01451    0.11384 1091.40493  -0.127    0.899    
## plsGroth                           -0.43105    0.05168 1000.27711  -8.340 2.44e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y
## s_v_tchnc_R -0.619                     
## s_v_tchn_EG -0.598  0.497              
## s_v_qlt__QY -0.488  0.032  0.024       
## plsGroth    -0.223 -0.001 -0.001  0.004
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.03441642 0.6931759
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.034    0.053    0.022
## 2      s_iv_technical_termsReplace 0.019    0.032    0.009
## 3 s_iv_technical_termsExpl Glossar 0.017    0.030    0.008
## 5                         plsGroth 0.011    0.022    0.004
## 4   s_iv_quality_of_evidenceQu Yes 0.000    0.002    0.000
m6 <- lmer(empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy, data = df_resilience[!is.na(df_resilience$s_int_psy),])

summary(m6)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence +      pls + (1 | id) + s_int_psy
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 7965
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4346 -0.4920 -0.0359  0.4758  3.2519 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.141    1.463   
##  Residual             1.315    1.147   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         3.86694    0.10487 1223.68011  36.872  < 2e-16 ***
## s_iv_technical_termsReplace         0.67889    0.12319 1087.15313   5.511 4.45e-08 ***
## s_iv_technical_termsExpl Glossar    0.68246    0.12687 1089.99230   5.379 9.16e-08 ***
## s_iv_quality_of_evidenceQu Yes     -0.03348    0.10227 1089.31094  -0.327    0.743    
## plsGroth                           -0.42649    0.05157 1007.90377  -8.271 4.18e-16 ***
## s_int_psy                           0.82291    0.05078 1098.91902  16.205  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s___EG s____Y plsGrt
## s_v_tchnc_R -0.616                            
## s_v_tchn_EG -0.594  0.497                     
## s_v_qlt__QY -0.485  0.032  0.024              
## plsGroth    -0.247 -0.001 -0.001  0.004       
## s_int_psy    0.006  0.000  0.010 -0.011  0.007
r.squaredGLMM(m6)
##           R2m       R2c
## [1,] 0.193389 0.6930716
r2beta(m6, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.193    0.224    0.166
## 6                        s_int_psy 0.164    0.193    0.137
## 2      s_iv_technical_termsReplace 0.022    0.037    0.012
## 3 s_iv_technical_termsExpl Glossar 0.021    0.035    0.011
## 5                         plsGroth 0.013    0.024    0.005
## 4   s_iv_quality_of_evidenceQu Yes 0.000    0.003    0.000
m7 <- lmer(empowerment ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id), data = df_resilience[!is.na(df_resilience$s_int_psy),])

summary(m7)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## 
## REML criterion at convergence: 7969.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4521 -0.4924 -0.0341  0.4801  3.2571 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.144    1.464   
##  Residual             1.315    1.147   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                              Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                   3.86579    0.10494 1221.07672  36.839  < 2e-16 ***
## s_iv_technical_termsReplace                   0.67957    0.12326 1085.04471   5.513 4.40e-08 ***
## s_iv_technical_termsExpl Glossar              0.68135    0.12697 1087.47655   5.366 9.82e-08 ***
## s_int_psy                                     0.79702    0.08679 1087.63076   9.184  < 2e-16 ***
## s_iv_quality_of_evidenceQu Yes               -0.03155    0.10236 1087.32755  -0.308    0.758    
## plsGroth                                     -0.42641    0.05157 1007.62663  -8.268 4.26e-16 ***
## s_iv_technical_termsReplace:s_int_psy         0.08621    0.12110 1091.30634   0.712    0.477    
## s_iv_technical_termsExpl Glossar:s_int_psy   -0.01809    0.12760 1100.02009  -0.142    0.887    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v__R s_v__EG s_nt_p s____Y plsGrt s___R:
## s_v_tchnc_R -0.616                                           
## s_v_tchn_EG -0.594  0.497                                    
## s_int_psy    0.013 -0.005 -0.004                             
## s_v_qlt__QY -0.485  0.033  0.024  -0.023                     
## plsGroth    -0.246 -0.001 -0.001   0.014  0.004              
## s_v_tc_R:__ -0.014  0.007  0.003  -0.717  0.025 -0.007       
## s_v_t_EG:__ -0.005  0.003  0.019  -0.680  0.011 -0.014  0.487
r.squaredGLMM(m7)
##            R2m       R2c
## [1,] 0.1937032 0.6934813
r2beta(m7, method = "nsj")
##                                       Effect   Rsq upper.CL lower.CL
## 1                                      Model 0.194    0.225    0.167
## 4                                  s_int_psy 0.060    0.081    0.042
## 2                s_iv_technical_termsReplace 0.022    0.037    0.012
## 3           s_iv_technical_termsExpl Glossar 0.021    0.035    0.011
## 6                                   plsGroth 0.013    0.024    0.005
## 7      s_iv_technical_termsReplace:s_int_psy 0.000    0.004    0.000
## 5             s_iv_quality_of_evidenceQu Yes 0.000    0.003    0.000
## 8 s_iv_technical_termsExpl Glossar:s_int_psy 0.000    0.003    0.000
anova(m1,m6, m7)
## refitting model(s) with ML (instead of REML)
## Data: df_resilience[!is.na(df_resilience$s_int_psy), ]
## Models:
## m1: empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id)
## m6: empowerment ~ s_iv_technical_terms + s_iv_quality_of_evidence + pls + (1 | id) + s_int_psy
## m7: empowerment ~ s_iv_technical_terms * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance   Chisq Df Pr(>Chisq)    
## m1    7 8195.5 8234.9 -4090.7   8181.5                          
## m6    8 7961.0 8006.0 -3972.5   7945.0 236.492  1     <2e-16 ***
## m7   10 7964.1 8020.5 -3972.1   7944.1   0.825  2      0.662    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#ICC Subject

m0 <- lmer(empowerment ~ (1 | id), data = df_resilience)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ (1 | id)
##    Data: df_resilience
## 
## REML criterion at convergence: 8283.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.88191 -0.51027 -0.01293  0.49165  3.02182 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.873    1.695   
##  Residual             1.406    1.186   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 4.078e+00  5.759e-02 1.093e+03   70.81   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.671
##   Conditional ICC: 0.671

H3 Quality of Evidence

A Understanding of the quality of evidence - decision task

m1 <- lmer(pref_quality_of_evidence ~ s_iv_quality_of_evidence + pls + (1 | id), data = df)

summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: pref_quality_of_evidence ~ s_iv_quality_of_evidence + pls + (1 |      id)
##    Data: df
## 
## REML criterion at convergence: 16496.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.99612 -0.43383 -0.01972  0.47015  2.89585 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.676    1.295   
##  Residual             1.534    1.238   
## Number of obs: 4276, groups:  id, 2264
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       4.68528    0.06266 3136.44400  74.778  < 2e-16 ***
## s_iv_quality_of_evidenceQu Yes    0.21911    0.06663 2235.73258   3.288  0.00102 ** 
## plsChristodoulou                 -0.22001    0.05310 2091.21912  -4.143 3.56e-05 ***
## plsRasmussen                      0.07706    0.07708 3528.17926   1.000  0.31748    
## plsGroth                         -0.09622    0.07692 3517.02349  -1.251  0.21108    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s____Y plsChr plsRsm
## s_v_qlt__QY -0.527                     
## plsChristdl -0.425  0.001              
## plsRasmussn -0.595  0.015  0.345       
## plsGroth    -0.598  0.018  0.345  0.737
r.squaredGLMM(m1)
##              R2m       R2c
## [1,] 0.007509718 0.5258153
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.008    0.014    0.004
## 2 s_iv_quality_of_evidenceQu Yes 0.004    0.008    0.001
## 3               plsChristodoulou 0.002    0.006    0.000
## 5                       plsGroth 0.000    0.002    0.000
## 4                   plsRasmussen 0.000    0.002    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_quality_of_evidence |   16.585 | 16.585 |     1 | 2235.733 |    10.813 |   0.001 |      | 0.004 |         0.004 |         |                 |           |    0.063 |      
## pls                      |   44.546 | 14.849 |     3 | 2782.515 |     9.681 |  < .001 |      | 0.010 |         0.010 |         |                 |           |    0.103 |      
## Residuals                | 4234.647 |  0.990 |       |          |           |         | 4276 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_quality_of_evidence" "pls"                      "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "very small" "small"      NA          
## (Rules: field2013)
#ICC Subject

m0 <- lmer(pref_quality_of_evidence ~ (1 | id), data = df)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: pref_quality_of_evidence ~ (1 | id)
##    Data: df
## 
## REML criterion at convergence: 16521
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.86807 -0.45962  0.06856  0.48959  2.75165 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.681    1.296   
##  Residual             1.552    1.246   
## Number of obs: 4276, groups:  id, 2264
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 4.730e+00  3.339e-02 2.237e+03   141.7   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.520
##   Conditional ICC: 0.520

B Knowledge of the quality of evidence - pls-specific

table(df$knowledge_evidence, df$s_iv_quality_of_evidence)/sum(table(df$knowledge_evidence, df$s_iv_quality_of_evidence)) * 100
##    
##        Qu No   Qu Yes
##   0 33.10427 26.35071
##   1 18.22275 22.32227
table(df$knowledge_evidence[df$s_iv_quality_of_evidence == "Qu No"])/sum(table(df$knowledge_evidence[df$s_iv_quality_of_evidence == "Qu No"])) * 100
## 
##        0        1 
## 64.49677 35.50323
table(df$knowledge_evidence[df$s_iv_quality_of_evidence == "Qu Yes"])/sum(table(df$knowledge_evidence[df$s_iv_quality_of_evidence == "Qu Yes"])) * 100
## 
##        0        1 
## 54.13827 45.86173
m1 <- glmer(knowledge_evidence ~ s_iv_quality_of_evidence + pls + (1 | id), family = binomial("logit"), data = df, glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))

summary(m1)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge_evidence ~ s_iv_quality_of_evidence + pls + (1 | id)
##    Data: df
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   5560.6   5598.7  -2774.3   5548.6     4214 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -0.9657 -0.6518 -0.5368  0.8519  1.3561 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.164    1.079   
## Number of obs: 4220, groups:  id, 2173
## 
## Fixed effects:
##                                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                    -0.75417    0.09067  -8.318  < 2e-16 ***
## s_iv_quality_of_evidenceQu Yes  0.54752    0.08679   6.309 2.81e-10 ***
## plsChristodoulou               -0.15647    0.09860  -1.587    0.113    
## plsRasmussen                    0.08643    0.11120   0.777    0.437    
## plsGroth                        0.05270    0.11131   0.473    0.636    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s____Y plsChr plsRsm
## s_v_qlt__QY -0.516                     
## plsChristdl -0.524 -0.012              
## plsRasmussn -0.594  0.023  0.437       
## plsGroth    -0.591  0.020  0.437  0.566
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: npar
## term                     | npar |    sumsq | meansq | statistic |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## ---------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_quality_of_evidence |    1 |   44.824 | 44.824 |    44.824 |      | 0.011 |         0.011 |         |                 |           |    0.105 |      
## pls                      |    3 |    6.295 |  2.098 |     2.098 |      | 0.002 |         0.002 |         |                 |           |    0.039 |      
## Residuals                |      | 4046.421 |  0.150 |           | 4220 |       |               |         |                 |           |          |
#r.squaredGLMM(m1)
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.012    0.020    0.007
## 2 s_iv_quality_of_evidenceQu Yes 0.011    0.018    0.006
## 3               plsChristodoulou 0.001    0.003    0.000
## 4                   plsRasmussen 0.000    0.002    0.000
## 5                       plsGroth 0.000    0.001    0.000
exp(fixef(m1))
##                    (Intercept) s_iv_quality_of_evidenceQu Yes               plsChristodoulou 
##                      0.4703989                      1.7289524                      0.8551547 
##                   plsRasmussen                       plsGroth 
##                      1.0902734                      1.0541081
interpret_oddsratio(exp(fixef(m1)), rules = "cohen1988")
##                    (Intercept) s_iv_quality_of_evidenceQu Yes               plsChristodoulou 
##                        "small"                        "small"                   "very small" 
##                   plsRasmussen                       plsGroth 
##                   "very small"                   "very small" 
## (Rules: cohen1988)
#ICC Subject


m0 <- glmer(knowledge_evidence ~ (1 | id), family = binomial("logit"), data = df)

summary(m0)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge_evidence ~ (1 | id)
##    Data: df
## 
##      AIC      BIC   logLik deviance df.resid 
##   5598.8   5611.5  -2797.4   5594.8     4218 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -0.8567 -0.5787 -0.5787  0.8012  1.1672 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.197    1.094   
## Number of obs: 4220, groups:  id, 2173
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  -0.4930     0.0445  -11.08   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.267
##   Conditional ICC: 0.267

B Knowledge of the quality of evidence - general

data$s_int_psy <- scale(data$s_int_psy)
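Note that scale() returns a one-column matrix; if a plain numeric vector is preferred, the matrix attributes can be dropped (an optional sketch, not part of the original code):

data$s_int_psy <- as.numeric(scale(data$s_int_psy))  # z-standardized interest as a plain numeric vector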

table(data$s_knowledge_evidence, data$s_iv_quality_of_evidence)
##    
##     Qu No Qu Yes
##   0   389    313
##   1   659    683
table(data$s_knowledge_evidence, data$s_iv_quality_of_evidence)/sum(table(data$s_knowledge_evidence, data$s_iv_quality_of_evidence)) * 100
##    
##        Qu No   Qu Yes
##   0 19.03131 15.31311
##   1 32.24070 33.41487
table(data$s_knowledge_evidence[data$s_iv_quality_of_evidence == "Qu No"])/sum(table(data$s_knowledge_evidence[data$s_iv_quality_of_evidence == "Qu No"])) * 100
## 
##        0        1 
## 37.11832 62.88168
table(data$s_knowledge_evidence[data$s_iv_quality_of_evidence == "Qu Yes"])/sum(table(data$s_knowledge_evidence[data$s_iv_quality_of_evidence == "Qu Yes"])) * 100
## 
##       0       1 
## 31.4257 68.5743
table(data$s_knowledge_evidence)
## 
##    0    1 
##  702 1342
sum(table(data$s_knowledge_evidence))
## [1] 2044
m1 <- glm(s_knowledge_evidence ~ s_iv_quality_of_evidence, data = data, family = "binomial")

summary(m1)
## 
## Call:
## glm(formula = s_knowledge_evidence ~ s_iv_quality_of_evidence, 
##     family = "binomial", data = data)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -1.5215  -1.4079   0.8686   0.9632   0.9632  
## 
## Coefficients:
##                                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                     0.52714    0.06394   8.245   <2e-16 ***
## s_iv_quality_of_evidenceQu Yes  0.25315    0.09353   2.707   0.0068 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 2629.8  on 2043  degrees of freedom
## Residual deviance: 2622.4  on 2042  degrees of freedom
##   (244 observations deleted due to missingness)
## AIC: 2626.4
## 
## Number of Fisher Scoring iterations: 4
exp(coef(m1))
##                    (Intercept) s_iv_quality_of_evidenceQu Yes 
##                       1.694087                       1.288073
interpret_oddsratio(exp(coef(m1)), rules = "cohen1988")
##                    (Intercept) s_iv_quality_of_evidenceQu Yes 
##                        "small"                   "very small" 
## (Rules: cohen1988)
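Approximate 95% Wald confidence intervals for these odds ratios can be added with confint.default() (asymptotic normal intervals, not profile-likelihood CIs):

round(exp(cbind(OR = coef(m1), confint.default(m1))), 3)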
m4 <- glm(s_knowledge_evidence ~ s_iv_quality_of_evidence + s_int_psy, data = data, family = "binomial")

summary(m4)
## 
## Call:
## glm(formula = s_knowledge_evidence ~ s_iv_quality_of_evidence + 
##     s_int_psy, family = "binomial", data = data)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -1.7179  -1.3454   0.8146   0.9167   1.1936  
## 
## Coefficients:
##                                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                     0.52060    0.06458   8.061 7.55e-16 ***
## s_iv_quality_of_evidenceQu Yes  0.26312    0.09450   2.784  0.00536 ** 
## s_int_psy                       0.30270    0.04803   6.302 2.93e-10 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 2629.8  on 2043  degrees of freedom
## Residual deviance: 2582.3  on 2041  degrees of freedom
##   (244 observations deleted due to missingness)
## AIC: 2588.3
## 
## Number of Fisher Scoring iterations: 4
m5 <- glm(s_knowledge_evidence ~ s_iv_quality_of_evidence * s_int_psy, data = data, family = "binomial")

summary(m5)
## 
## Call:
## glm(formula = s_knowledge_evidence ~ s_iv_quality_of_evidence * 
##     s_int_psy, family = "binomial", data = data)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -1.7223  -1.3466   0.8133   0.9176   1.1887  
## 
## Coefficients:
##                                          Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                               0.52055    0.06455   8.064  7.4e-16 ***
## s_iv_quality_of_evidenceQu Yes            0.26359    0.09457   2.787  0.00532 ** 
## s_int_psy                                 0.29638    0.06711   4.416  1.0e-05 ***
## s_iv_quality_of_evidenceQu Yes:s_int_psy  0.01294    0.09608   0.135  0.89283    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 2629.8  on 2043  degrees of freedom
## Residual deviance: 2582.2  on 2040  degrees of freedom
##   (244 observations deleted due to missingness)
## AIC: 2590.2
## 
## Number of Fisher Scoring iterations: 4
anova(m1,m4,m5)
## Analysis of Deviance Table
## 
## Model 1: s_knowledge_evidence ~ s_iv_quality_of_evidence
## Model 2: s_knowledge_evidence ~ s_iv_quality_of_evidence + s_int_psy
## Model 3: s_knowledge_evidence ~ s_iv_quality_of_evidence * s_int_psy
##   Resid. Df Resid. Dev Df Deviance
## 1      2042     2622.4            
## 2      2041     2582.3  1   40.169
## 3      2040     2582.2  1    0.018
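The deviance table above omits p-values; a likelihood-ratio test can be requested explicitly via anova.glm (a sketch equivalent to the comparison above):

anova(m1, m4, m5, test = "Chisq")  # adds Pr(>Chi) for the nested GLM comparisons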

Accessibility

m1 <- lmer(accessibility ~ s_iv_quality_of_evidence + pls + (1 | id), data = df)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_quality_of_evidence + pls + (1 | id)
##    Data: df
## 
## REML criterion at convergence: 17051.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3950 -0.4229  0.1020  0.3894  3.2173 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.678    1.637   
##  Residual             1.310    1.145   
## Number of obs: 4326, groups:  id, 2284
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.17517    0.07007 2860.15780  88.128   <2e-16 ***
## s_iv_quality_of_evidenceQu Yes   -0.10028    0.07713 2264.75789  -1.300   0.1937    
## plsChristodoulou                 -0.11271    0.04889 2090.82160  -2.305   0.0212 *  
## plsRasmussen                     -0.83337    0.08493 3158.64246  -9.813   <2e-16 ***
## plsGroth                         -1.38174    0.08493 3158.81290 -16.269   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s____Y plsChr plsRsm
## s_v_qlt__QY -0.544                     
## plsChristdl -0.348 -0.001              
## plsRasmussn -0.591  0.019  0.287       
## plsGroth    -0.591  0.020  0.287  0.816
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.07311593 0.6955514
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.073    0.089    0.060
## 5                       plsGroth 0.056    0.070    0.044
## 4                   plsRasmussen 0.021    0.031    0.014
## 2 s_iv_quality_of_evidenceQu Yes 0.001    0.003    0.000
## 3               plsChristodoulou 0.000    0.003    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq |  meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_quality_of_evidence |    2.214 |   2.214 |     1 | 2264.758 |     1.690 |   0.194 |      | 0.001 |         0.001 |         |                 |           |    0.026 |      
## pls                      |  398.364 | 132.788 |     3 | 2528.406 |   101.365 |  < .001 |      | 0.108 |         0.108 |         |                 |           |    0.347 |      
## Residuals                | 3301.401 |   0.763 |       |          |           |         | 4326 |       |               |         |                 |           |          |

Understanding

m1 <- lmer(understanding ~ s_iv_quality_of_evidence + pls + (1 | id), data = df)

summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_quality_of_evidence + pls + (1 | id)
##    Data: df
## 
## REML criterion at convergence: 16962.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3970 -0.4492  0.0883  0.4375  3.1136 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.431    1.559   
##  Residual             1.359    1.166   
## Number of obs: 4323, groups:  id, 2285
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.23796    0.06820 2922.53712  91.459  < 2e-16 ***
## s_iv_quality_of_evidenceQu Yes   -0.13625    0.07457 2265.71594  -1.827   0.0678 .  
## plsChristodoulou                 -0.20543    0.04985 2097.09535  -4.121 3.92e-05 ***
## plsRasmussen                     -0.81360    0.08289 3242.66775  -9.816  < 2e-16 ***
## plsGroth                         -1.35754    0.08287 3241.77724 -16.381  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s____Y plsChr plsRsm
## s_v_qlt__QY -0.541                     
## plsChristdl -0.364 -0.001              
## plsRasmussn -0.592  0.018  0.300       
## plsGroth    -0.593  0.020  0.300  0.800
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.06998235 0.6665696
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.070    0.086    0.057
## 5                       plsGroth 0.057    0.071    0.045
## 4                   plsRasmussen 0.021    0.031    0.014
## 3               plsChristodoulou 0.001    0.005    0.000
## 2 s_iv_quality_of_evidenceQu Yes 0.001    0.004    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq |  meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_quality_of_evidence |    4.537 |   4.537 |     1 | 2265.716 |     3.339 |   0.068 |      | 0.001 |         0.001 |         |                 |           |    0.036 |      
## pls                      |  405.456 | 135.152 |     3 | 2581.664 |    99.461 |  < .001 |      | 0.104 |         0.104 |         |                 |           |    0.341 |      
## Residuals                | 3490.875 |   0.808 |       |          |           |         | 4323 |       |               |         |                 |           |          |

Knowledge

m1 <- glmer(knowledge ~ s_iv_quality_of_evidence + pls + (1 | id), family = binomial("logit"), data = df)

summary(m1)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_quality_of_evidence + pls + (1 | id)
##    Data: df
## 
##      AIC      BIC   logLik deviance df.resid 
##   5232.9   5271.0  -2610.5   5220.9     4214 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.7385 -0.9814  0.4019  0.6739  1.0190 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.102    1.05    
## Number of obs: 4220, groups:  id, 2173
## 
## Fixed effects:
##                                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                     1.34612    0.10027  13.425   <2e-16 ***
## s_iv_quality_of_evidenceQu Yes -0.07517    0.08665  -0.868    0.386    
## plsChristodoulou               -1.05180    0.10615  -9.909   <2e-16 ***
## plsRasmussen                    0.04058    0.11944   0.340    0.734    
## plsGroth                       -1.05234    0.11784  -8.930   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s____Y plsChr plsRsm
## s_v_qlt__QY -0.443                     
## plsChristdl -0.647  0.009              
## plsRasmussn -0.559  0.016  0.445       
## plsGroth    -0.670  0.024  0.529  0.549
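As in the earlier knowledge models, the fixed effects can be expressed as odds ratios by exponentiation (a minimal sketch mirroring the calls used above):

exp(fixef(m1))  # odds ratios for the H3 knowledge model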

Empowerment

m1 <- lmer(empowerment ~ s_iv_quality_of_evidence + pls + (1 | id), data = df)

summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_quality_of_evidence + pls + (1 | id)
##    Data: df
## 
## REML criterion at convergence: 17087.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2348 -0.4920 -0.0097  0.4726  3.3710 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.868    1.693   
##  Residual             1.268    1.126   
## Number of obs: 4324, groups:  id, 2285
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       4.99787    0.07145 2818.10409  69.952  < 2e-16 ***
## s_iv_quality_of_evidenceQu Yes   -0.04076    0.07900 2266.46337  -0.516    0.606    
## plsChristodoulou                 -0.27488    0.04818 2086.11696  -5.706 1.32e-08 ***
## plsRasmussen                     -0.68588    0.08639 3098.19956  -7.939 2.82e-15 ***
## plsGroth                         -1.11652    0.08642 3100.48310 -12.920  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s____Y plsChr plsRsm
## s_v_qlt__QY -0.547                     
## plsChristdl -0.335 -0.001              
## plsRasmussn -0.590  0.019  0.278       
## plsGroth    -0.590  0.020  0.278  0.827
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.04118597 0.7060639
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.041    0.054    0.031
## 5                       plsGroth 0.036    0.048    0.026
## 4                   plsRasmussen 0.014    0.022    0.008
## 3               plsChristodoulou 0.002    0.006    0.000
## 2 s_iv_quality_of_evidenceQu Yes 0.000    0.002    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_quality_of_evidence |    0.337 |  0.337 |     1 | 2266.463 |     0.266 |   0.606 |      | 0.000 |         0.000 |         |                 |           |    0.010 |      
## pls                      |  250.962 | 83.654 |     3 | 2489.532 |    65.981 |  < .001 |      | 0.074 |         0.074 |         |                 |           |    0.282 |      
## Residuals                | 3145.492 |  0.727 |       |          |           |         | 4324 |       |               |         |                 |           |          |

H4 Operationalization

A Accessibility

m1 <- lmer(accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama
## 
## REML criterion at convergence: 8512.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8731 -0.3673  0.1713  0.3470  3.3567 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.6104   1.6157  
##  Residual             0.9826   0.9912  
## Number of obs: 2262, groups:  id, 1183
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.34136    0.09145 1298.54431  69.343  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.33786    0.10304 1169.17411  -3.279  0.00107 ** 
## s_iv_quality_of_evidenceQu Yes   -0.09955    0.10304 1169.12932  -0.966  0.33420    
## plsChristodoulou                 -0.11306    0.04240 1095.61897  -2.667  0.00777 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.564              
## s_v_qlt__QY -0.564  0.012       
## plsChristdl -0.230 -0.001 -0.002
r.squaredGLMM(m1)
##              R2m       R2c
## [1,] 0.009399288 0.7290993
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.009    0.020    0.004
## 2  s_iv_operationalizationOp Yes 0.008    0.017    0.002
## 4               plsChristodoulou 0.001    0.005    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.001    0.005    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_operationalization  |   10.564 | 10.564 |     1 | 1169.174 |    10.751 |   0.001 |      | 0.008 |         0.008 |         |                 |           |    0.092 |      
## s_iv_quality_of_evidence |    0.917 |  0.917 |     1 | 1169.129 |     0.933 |   0.334 |      | 0.001 |         0.001 |         |                 |           |    0.027 |      
## pls                      |    6.987 |  6.987 |     1 | 1095.619 |     7.111 |   0.008 |      | 0.005 |         0.006 |         |                 |           |    0.075 |      
## Residuals                | 1254.603 |  0.555 |       |          |           |         | 2262 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_operationalization"  "s_iv_quality_of_evidence" "pls"                      "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "very small" "very small" "very small" NA          
## (Rules: field2013)
# Interest in psychological research


m1 <- lmer(accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8512.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8731 -0.3673  0.1713  0.3470  3.3567 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.6104   1.6157  
##  Residual             0.9826   0.9912  
## Number of obs: 2262, groups:  id, 1183
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.34136    0.09145 1298.54431  69.343  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.33786    0.10304 1169.17411  -3.279  0.00107 ** 
## s_iv_quality_of_evidenceQu Yes   -0.09955    0.10304 1169.12932  -0.966  0.33420    
## plsChristodoulou                 -0.11306    0.04240 1095.61897  -2.667  0.00777 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.564              
## s_v_qlt__QY -0.564  0.012       
## plsChristdl -0.230 -0.001 -0.002
r.squaredGLMM(m1)
##              R2m       R2c
## [1,] 0.009399288 0.7290993
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.009    0.020    0.004
## 2  s_iv_operationalizationOp Yes 0.008    0.017    0.002
## 4               plsChristodoulou 0.001    0.005    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.001    0.005    0.000
m6 <- lmer(accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])

summary(m6)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + s_int_psy + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8412.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.0270 -0.3715  0.1240  0.3718  3.3889 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.3498   1.5329  
##  Residual             0.9819   0.9909  
## Number of obs: 2262, groups:  id, 1183
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.33927    0.08777 1309.94782  72.230  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.30770    0.09870 1169.59311  -3.118  0.00187 ** 
## s_iv_quality_of_evidenceQu Yes   -0.11243    0.09867 1169.17651  -1.139  0.25474    
## plsChristodoulou                 -0.11207    0.04236 1098.99177  -2.645  0.00827 ** 
## s_int_psy                         0.51019    0.04892 1181.27630  10.428  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y plsChr
## s_v_prtnlOY -0.563                     
## s_v_qlt__QY -0.563  0.012              
## plsChristdl -0.239 -0.001 -0.002       
## s_int_psy   -0.003  0.029 -0.013  0.002
r.squaredGLMM(m6)
##             R2m       R2c
## [1,] 0.08163412 0.7293484
r2beta(m6, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.082    0.105    0.063
## 5                      s_int_psy 0.073    0.094    0.054
## 2  s_iv_operationalizationOp Yes 0.007    0.016    0.002
## 3 s_iv_quality_of_evidenceQu Yes 0.001    0.005    0.000
## 4               plsChristodoulou 0.001    0.005    0.000
m7 <- lmer(accessibility ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])



summary(m7)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8407.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.9976 -0.3768  0.1395  0.3709  3.4315 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.3319   1.5270  
##  Residual             0.9825   0.9912  
## Number of obs: 2262, groups:  id, 1183
## 
## Fixed effects:
##                                           Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                6.34885    0.08758 1309.23258  72.490  < 2e-16 ***
## s_iv_operationalizationOp Yes             -0.30410    0.09841 1167.09311  -3.090  0.00205 ** 
## s_int_psy                                  0.37956    0.06804 1178.21774   5.579 3.01e-08 ***
## s_iv_quality_of_evidenceQu Yes            -0.12725    0.09851 1167.33435  -1.292  0.19672    
## plsChristodoulou                          -0.11199    0.04237 1097.95573  -2.643  0.00834 ** 
## s_iv_operationalizationOp Yes:s_int_psy    0.26909    0.09773 1179.69595   2.753  0.00599 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s_nt_p s____Y plsChr
## s_v_prtnlOY -0.562                            
## s_int_psy   -0.030  0.012                     
## s_v_qlt__QY -0.563  0.011  0.029              
## plsChristdl -0.240 -0.001  0.001 -0.002       
## s_v_prOY:__  0.040  0.013 -0.697 -0.055  0.000
r.squaredGLMM(m7)
##            R2m       R2c
## [1,] 0.0863755 0.7291743
r2beta(m7, method = "nsj")
##                                    Effect   Rsq upper.CL lower.CL
## 1                                   Model 0.086    0.111    0.067
## 3                               s_int_psy 0.022    0.035    0.012
## 2           s_iv_operationalizationOp Yes 0.007    0.015    0.002
## 6 s_iv_operationalizationOp Yes:s_int_psy 0.005    0.013    0.001
## 4          s_iv_quality_of_evidenceQu Yes 0.001    0.006    0.000
## 5                        plsChristodoulou 0.001    0.005    0.000
anova(m1,m6,m7)
## refitting model(s) with ML (instead of REML)
## Data: df_cama[!is.na(df_cama$s_int_psy), ]
## Models:
## m1: accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id)
## m6: accessibility ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy + (1 | id)
## m7: accessibility ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance    Chisq Df Pr(>Chisq)    
## m1    6 8510.6 8544.9 -4249.3   8498.6                           
## m6    7 8408.1 8448.2 -4197.1   8394.1 104.4646  1  < 2.2e-16 ***
## m7    8 8402.5 8448.3 -4193.3   8386.5   7.5869  1   0.005879 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
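The model comparison above retains the operationalization × interest interaction for accessibility; as an illustrative follow-up (not part of the original analysis, and assuming the emmeans package is installed), the simple slopes of interest within each operationalization condition could be summarized as follows.

# Illustrative sketch only (assumes the emmeans package; not used elsewhere in this script):
# slope of accessibility on interest in psychological research per operationalization condition in m7.
library(emmeans)
emtrends(m7, ~ s_iv_operationalization, var = "s_int_psy")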
#ICC Subject

m0 <- lmer(accessibility ~ (1 | id), data = df_cama)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ (1 | id)
##    Data: df_cama
## 
## REML criterion at convergence: 8521.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7701 -0.3288  0.1481  0.3071  3.2715 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.6327   1.6226  
##  Residual             0.9882   0.9941  
## Number of obs: 2262, groups:  id, 1183
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 6.068e+00  5.172e-02 1.172e+03   117.3   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.727
##   Conditional ICC: 0.727
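As an illustrative cross-check (not part of the original analysis), the adjusted ICC reported above can be reproduced directly from the variance components of the intercept-only model m0.

# Illustrative cross-check: adjusted ICC = between-subject variance / total variance.
vc <- as.data.frame(VarCorr(m0))
vc$vcov[vc$grp == "id"] / sum(vc$vcov)  # approx. 0.727, as reported by performance::icc()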

B Understanding

m1 <- lmer(understanding ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama
## 
## REML criterion at convergence: 8624.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5711 -0.4254  0.1251  0.4269  3.0112 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.268    1.506   
##  Residual             1.198    1.095   
## Number of obs: 2258, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.38915    0.08882 1338.07729  71.930  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.32029    0.09922 1168.57595  -3.228  0.00128 ** 
## s_iv_quality_of_evidenceQu Yes   -0.12150    0.09923 1168.53379  -1.224  0.22103    
## plsChristodoulou                 -0.20532    0.04683 1098.58679  -4.385 1.27e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.560              
## s_v_qlt__QY -0.559  0.012       
## plsChristdl -0.261 -0.002 -0.002
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.01134843 0.6582069
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.011    0.023    0.005
## 2  s_iv_operationalizationOp Yes 0.007    0.016    0.002
## 4               plsChristodoulou 0.003    0.009    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.001    0.005    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_operationalization  |   12.485 | 12.485 |     1 | 1168.576 |    10.419 |   0.001 |      | 0.008 |         0.008 |         |                 |           |    0.088 |      
## s_iv_quality_of_evidence |    1.797 |  1.797 |     1 | 1168.534 |     1.499 |   0.221 |      | 0.001 |         0.001 |         |                 |           |    0.034 |      
## pls                      |   23.038 | 23.038 |     1 | 1098.587 |    19.226 |  < .001 |      | 0.014 |         0.014 |         |                 |           |    0.120 |      
## Residuals                | 1599.460 |  0.708 |       |          |           |         | 2258 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_operationalization"  "s_iv_quality_of_evidence" "pls"                      "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "very small" "very small" "small"      NA          
## (Rules: field2013)
# Interest in psychological research


m1 <- lmer(understanding ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8624.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5711 -0.4254  0.1251  0.4269  3.0112 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.268    1.506   
##  Residual             1.198    1.095   
## Number of obs: 2258, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.38915    0.08882 1338.07729  71.930  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.32029    0.09922 1168.57595  -3.228  0.00128 ** 
## s_iv_quality_of_evidenceQu Yes   -0.12150    0.09923 1168.53379  -1.224  0.22103    
## plsChristodoulou                 -0.20532    0.04683 1098.58679  -4.385 1.27e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.560              
## s_v_qlt__QY -0.559  0.012       
## plsChristdl -0.261 -0.002 -0.002
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.01134843 0.6582069
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.011    0.023    0.005
## 2  s_iv_operationalizationOp Yes 0.007    0.016    0.002
## 4               plsChristodoulou 0.003    0.009    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.001    0.005    0.000
m6 <- lmer(understanding ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])

summary(m6)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + s_int_psy + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8498.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6247 -0.3847  0.1232  0.4376  3.0311 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.970    1.404   
##  Residual             1.196    1.094   
## Number of obs: 2258, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.38496    0.08445 1357.74238  75.602  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.28769    0.09403 1170.32286  -3.060  0.00227 ** 
## s_iv_quality_of_evidenceQu Yes   -0.13331    0.09399 1169.82406  -1.418  0.15635    
## plsChristodoulou                 -0.20405    0.04675 1104.56877  -4.365 1.39e-05 ***
## s_int_psy                         0.54753    0.04667 1185.84457  11.731  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y plsChr
## s_v_prtnlOY -0.557                     
## s_v_qlt__QY -0.557  0.011              
## plsChristdl -0.274 -0.002 -0.002       
## s_int_psy   -0.005  0.030 -0.011  0.001
r.squaredGLMM(m6)
##             R2m       R2c
## [1,] 0.09719461 0.6588952
r2beta(m6, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.097    0.122    0.077
## 5                      s_int_psy 0.086    0.109    0.066
## 2  s_iv_operationalizationOp Yes 0.006    0.015    0.002
## 4               plsChristodoulou 0.003    0.010    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.001    0.006    0.000
m7 <- lmer(understanding ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])



summary(m7)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8497.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5979 -0.3848  0.1316  0.4296  3.0368 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.962    1.401   
##  Residual             1.197    1.094   
## Number of obs: 2258, groups:  id, 1184
## 
## Fixed effects:
##                                           Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                6.39166    0.08441 1356.91835  75.717  < 2e-16 ***
## s_iv_operationalizationOp Yes             -0.28559    0.09390 1167.67040  -3.041  0.00241 ** 
## s_int_psy                                  0.46010    0.06504 1184.38944   7.074 2.58e-12 ***
## s_iv_quality_of_evidenceQu Yes            -0.14326    0.09400 1168.01998  -1.524  0.12774    
## plsChristodoulou                          -0.20412    0.04676 1103.36752  -4.365 1.39e-05 ***
## s_iv_operationalizationOp Yes:s_int_psy    0.17991    0.09338 1184.24046   1.927  0.05427 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s_nt_p s____Y plsChr
## s_v_prtnlOY -0.556                            
## s_int_psy   -0.032  0.013                     
## s_v_qlt__QY -0.558  0.011  0.031              
## plsChristdl -0.274 -0.002  0.002 -0.002       
## s_v_prOY:__  0.041  0.012 -0.698 -0.055 -0.001
r.squaredGLMM(m7)
##             R2m       R2c
## [1,] 0.09936772 0.6587064
r2beta(m7, method = "nsj")
##                                    Effect   Rsq upper.CL lower.CL
## 1                                   Model 0.099    0.125    0.079
## 3                               s_int_psy 0.033    0.049    0.020
## 2           s_iv_operationalizationOp Yes 0.006    0.015    0.002
## 5                        plsChristodoulou 0.003    0.010    0.000
## 6 s_iv_operationalizationOp Yes:s_int_psy 0.003    0.008    0.000
## 4          s_iv_quality_of_evidenceQu Yes 0.002    0.007    0.000
anova(m1,m6,m7)
## refitting model(s) with ML (instead of REML)
## Data: df_cama[!is.na(df_cama$s_int_psy), ]
## Models:
## m1: understanding ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id)
## m6: understanding ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy + (1 | id)
## m7: understanding ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance    Chisq Df Pr(>Chisq)    
## m1    6 8622.5 8656.8 -4305.2   8610.5                           
## m6    7 8493.7 8533.7 -4239.8   8479.7 130.8210  1    < 2e-16 ***
## m7    8 8492.0 8537.7 -4238.0   8476.0   3.7215  1    0.05372 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#ICC Subject

m0 <- lmer(understanding ~ (1 | id), data = df_cama)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ (1 | id)
##    Data: df_cama
## 
## REML criterion at convergence: 8645.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4699 -0.3949  0.1780  0.3690  2.8710 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.281    1.510   
##  Residual             1.219    1.104   
## Number of obs: 2258, groups:  id, 1184
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 6.068e+00  4.981e-02 1.172e+03   121.8   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.652
##   Conditional ICC: 0.652

C Knowledge

table(df_cama$knowledge, df_cama$s_iv_operationalization)/sum(table(df_cama$knowledge, df_cama$s_iv_operationalization)) * 100
##    
##        Op No   Op Yes
##   0 18.29596 16.99552
##   1 32.01794 32.69058
table(df_cama$knowledge[df_cama$s_iv_operationalization == "Op Yes"])
## 
##   0   1 
## 379 729
table(df_cama$knowledge[df_cama$s_iv_operationalization  == "Op Yes"])/sum(table(df_cama$knowledge[df_cama$s_iv_operationalization  == "Op Yes"])) * 100
## 
##        0        1 
## 34.20578 65.79422
table(df_cama$knowledge[df_cama$s_iv_operationalization == "Op No"])
## 
##   0   1 
## 408 714
table(df_cama$knowledge[df_cama$s_iv_operationalization  == "Op No"])/sum(table(df_cama$knowledge[df_cama$s_iv_operationalization  == "Op No"])) * 100
## 
##        0        1 
## 36.36364 63.63636
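The same conditional percentages can be obtained in a single step; this is an illustrative alternative, not part of the original script.

# Illustrative alternative: column-wise percentages of knowledge by operationalization
# (margin = 2 conditions on the operationalization factor).
round(prop.table(table(df_cama$knowledge, df_cama$s_iv_operationalization), margin = 2) * 100, 2)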
m1 <- glmer(knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), family = binomial("logit"), data = df_cama)

summary(m1)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama
## 
##      AIC      BIC   logLik deviance df.resid 
##   2790.2   2818.8  -1390.1   2780.2     2225 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.7455 -0.9916  0.4476  0.7026  1.0085 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 0.7623   0.8731  
## Number of obs: 2230, groups:  id, 1147
## 
## Fixed effects:
##                                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                     1.21282    0.11881  10.208   <2e-16 ***
## s_iv_operationalizationOp Yes   0.11695    0.11115   1.052    0.293    
## s_iv_quality_of_evidenceQu Yes -0.06672    0.11105  -0.601    0.548    
## plsChristodoulou               -0.99358    0.10552  -9.416   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.446              
## s_v_qlt__QY -0.482  0.015       
## plsChristdl -0.558 -0.025  0.009
r.squaredGLMM(m1)
## Warning: The null model is correct only if all variables used by the original model remain unchanged.
##                    R2m       R2c
## theoretical 0.05840006 0.2355287
## delta       0.04658602 0.1878824
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.042    0.060    0.028
## 4               plsChristodoulou 0.041    0.058    0.027
## 2  s_iv_operationalizationOp Yes 0.001    0.004    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.000    0.003    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: npar
## term                     | npar |    sumsq |  meansq | statistic |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_operationalization  |    1 |    0.935 |   0.935 |     0.935 |      | 0.000 |         0.000 |         |                 |           |    0.021 |      
## s_iv_quality_of_evidence |    1 |    0.331 |   0.331 |     0.331 |      | 0.000 |         0.000 |         |                 |           |    0.012 |      
## pls                      |    1 |  109.786 | 109.786 |   109.786 |      | 0.047 |         0.047 |         |                 |           |    0.222 |      
## Residuals                |      | 2220.857 |   0.161 |           | 2230 |       |               |         |                 |           |          |
exp(fixef(m1))
##                    (Intercept)  s_iv_operationalizationOp Yes s_iv_quality_of_evidenceQu Yes 
##                      3.3629391                      1.1240591                      0.9354533 
##               plsChristodoulou 
##                      0.3702491
interpret_oddsratio(exp(fixef(m1)), rules = "cohen1988")
##                    (Intercept)  s_iv_operationalizationOp Yes s_iv_quality_of_evidenceQu Yes 
##                       "medium"                   "very small"                   "very small" 
##               plsChristodoulou 
##                       "medium" 
## (Rules: cohen1988)
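To complement the point estimates above, the odds ratios can be paired with Wald confidence intervals; this is an illustrative sketch, not part of the original analysis.

# Illustrative sketch: odds ratios with Wald 95% CIs for the fixed effects of m1.
round(exp(cbind(OR = fixef(m1), confint(m1, parm = "beta_", method = "Wald"))), 3)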
m1 <- glmer(knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), family = binomial("logit"), data = df_cama[!is.na(df_cama$s_int_psy),])

summary(m1)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
##      AIC      BIC   logLik deviance df.resid 
##   2790.2   2818.8  -1390.1   2780.2     2225 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.7455 -0.9916  0.4476  0.7026  1.0085 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 0.7623   0.8731  
## Number of obs: 2230, groups:  id, 1147
## 
## Fixed effects:
##                                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                     1.21282    0.11881  10.208   <2e-16 ***
## s_iv_operationalizationOp Yes   0.11695    0.11115   1.052    0.293    
## s_iv_quality_of_evidenceQu Yes -0.06672    0.11105  -0.601    0.548    
## plsChristodoulou               -0.99358    0.10552  -9.416   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.446              
## s_v_qlt__QY -0.482  0.015       
## plsChristdl -0.558 -0.025  0.009
# Interest in psychological research

m6 <- glmer(knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy  + (1 | id), family = binomial("logit"), data = df_cama[!is.na(df_cama$s_int_psy),], glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))

summary(m6)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + s_int_psy + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2772.0   2806.3  -1380.0   2760.0     2224 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.0150 -0.9276  0.4529  0.6722  1.2087 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 0.7265   0.8524  
## Number of obs: 2230, groups:  id, 1147
## 
## Fixed effects:
##                                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                     1.20244    0.11834  10.161  < 2e-16 ***
## s_iv_operationalizationOp Yes   0.13505    0.11081   1.219    0.223    
## s_iv_quality_of_evidenceQu Yes -0.06786    0.11061  -0.614    0.540    
## plsChristodoulou               -0.99565    0.10554  -9.434  < 2e-16 ***
## s_int_psy                       0.24902    0.05617   4.433 9.28e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y plsChr
## s_v_prtnlOY -0.444                     
## s_v_qlt__QY -0.484  0.016              
## plsChristdl -0.558 -0.027  0.010       
## s_int_psy    0.065  0.046 -0.006 -0.085
m7 <- glmer(knowledge ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls   + (1 | id), family = binomial("logit"), data = df_cama[!is.na(df_cama$s_int_psy),], glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))

summary(m7)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2773.4   2813.4  -1379.7   2759.4     2223 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.0699 -0.9333  0.4521  0.6663  1.1865 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 0.727    0.8527  
## Number of obs: 2230, groups:  id, 1147
## 
## Fixed effects:
##                                         Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                              1.20622    0.11851  10.178  < 2e-16 ***
## s_iv_operationalizationOp Yes            0.13687    0.11089   1.234  0.21708    
## s_int_psy                                0.20750    0.07742   2.680  0.00736 ** 
## s_iv_quality_of_evidenceQu Yes          -0.07313    0.11084  -0.660  0.50942    
## plsChristodoulou                        -0.99597    0.10557  -9.434  < 2e-16 ***
## s_iv_operationalizationOp Yes:s_int_psy  0.08611    0.11126   0.774  0.43898    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s_nt_p s____Y plsChr
## s_v_prtnlOY -0.442                            
## s_int_psy    0.007  0.021                     
## s_v_qlt__QY -0.486  0.014  0.038              
## plsChristdl -0.559 -0.027 -0.049  0.011       
## s_v_prOY:__  0.056  0.023 -0.688 -0.062 -0.018
anova(m1,m6,m7)
## Data: df_cama[!is.na(df_cama$s_int_psy), ]
## Models:
## m1: knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id)
## m6: knowledge ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy + (1 | id)
## m7: knowledge ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance   Chisq Df Pr(>Chisq)    
## m1    5 2790.2 2818.8 -1390.1   2780.2                          
## m6    6 2772.0 2806.3 -1380.0   2760.0 20.1909  1  7.009e-06 ***
## m7    7 2773.4 2813.4 -1379.7   2759.4  0.5999  1     0.4386    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#ICC Subject


m0 <- glmer(knowledge ~ (1 | id), family = binomial("logit"), data = df_cama, glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))

summary(m0)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ (1 | id)
##    Data: df_cama
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2886.3   2897.7  -1441.1   2882.3     2228 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.3169 -1.0937  0.6287  0.6287  0.7594 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 0.4524   0.6726  
## Number of obs: 2230, groups:  id, 1147
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.67196    0.05486   12.25   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.121
##   Conditional ICC: 0.121
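For the logistic model the ICC is defined on the latent scale; as an illustrative cross-check (assuming the standard logistic residual variance of pi^2/3), the reported value can be reproduced from the random-intercept variance of m0.

# Illustrative cross-check (latent-scale approximation with residual variance pi^2/3 on the logit scale).
v_id <- as.data.frame(VarCorr(m0))$vcov[1]
v_id / (v_id + pi^2 / 3)  # approx. 0.121, as reported by performance::icc()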

D Empowerment

m1 <- lmer(empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama
## 
## REML criterion at convergence: 8863.1
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.11460 -0.45527  0.00239  0.45932  2.96964 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.814    1.677   
##  Residual             1.225    1.107   
## Number of obs: 2260, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.11035    0.09650 1316.33003  52.955  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.21953    0.10835 1171.12888  -2.026    0.043 *  
## s_iv_quality_of_evidenceQu Yes   -0.04851    0.10835 1171.09519  -0.448    0.654    
## plsChristodoulou                 -0.27469    0.04736 1097.86035  -5.800 8.65e-09 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.563              
## s_v_qlt__QY -0.562  0.012       
## plsChristdl -0.242 -0.003 -0.002
r.squaredGLMM(m1)
##              R2m       R2c
## [1,] 0.007757616 0.6990427
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.008    0.018    0.003
## 4               plsChristodoulou 0.005    0.012    0.001
## 2  s_iv_operationalizationOp Yes 0.003    0.009    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.000    0.003    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_operationalization  |    5.029 |  5.029 |     1 | 1171.129 |     4.105 |   0.043 |      | 0.003 |         0.003 |         |                 |           |    0.056 |      
## s_iv_quality_of_evidence |    0.246 |  0.246 |     1 | 1171.095 |     0.200 |   0.654 |      | 0.000 |         0.000 |         |                 |           |    0.012 |      
## pls                      |   41.212 | 41.212 |     1 | 1097.860 |    33.643 |  < .001 |      | 0.025 |         0.025 |         |                 |           |    0.161 |      
## Residuals                | 1591.937 |  0.704 |       |          |           |         | 2260 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_operationalization"  "s_iv_quality_of_evidence" "pls"                      "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "very small" "very small" "small"      NA          
## (Rules: field2013)
m1 <- lmer(empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8863.1
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.11460 -0.45527  0.00239  0.45932  2.96964 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.814    1.677   
##  Residual             1.225    1.107   
## Number of obs: 2260, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.11035    0.09650 1316.33003  52.955  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.21953    0.10835 1171.12888  -2.026    0.043 *  
## s_iv_quality_of_evidenceQu Yes   -0.04851    0.10835 1171.09519  -0.448    0.654    
## plsChristodoulou                 -0.27469    0.04736 1097.86035  -5.800 8.65e-09 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y
## s_v_prtnlOY -0.563              
## s_v_qlt__QY -0.562  0.012       
## plsChristdl -0.242 -0.003 -0.002
r.squaredGLMM(m1)
##              R2m       R2c
## [1,] 0.007757616 0.6990427
r2beta(m1, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.008    0.018    0.003
## 4               plsChristodoulou 0.005    0.012    0.001
## 2  s_iv_operationalizationOp Yes 0.003    0.009    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.000    0.003    0.000
# Interest in psychological research

m6 <- lmer(empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])

summary(m6)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence +      pls + s_int_psy + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8679
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3109 -0.4479  0.0078  0.4857  2.8654 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.312    1.521   
##  Residual             1.222    1.106   
## Number of obs: 2260, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.10726    0.08966 1342.29549  56.961  < 2e-16 ***
## s_iv_operationalizationOp Yes    -0.17849    0.10023 1174.12249  -1.781   0.0752 .  
## s_iv_quality_of_evidenceQu Yes   -0.06604    0.10019 1173.70480  -0.659   0.5099    
## plsChristodoulou                 -0.27293    0.04725 1106.19081  -5.776 9.95e-09 ***
## s_int_psy                         0.70926    0.04973 1188.17611  14.263  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s____Y plsChr
## s_v_prtnlOY -0.560                     
## s_v_qlt__QY -0.559  0.011              
## plsChristdl -0.260 -0.003 -0.002       
## s_int_psy   -0.004  0.030 -0.012  0.001
r.squaredGLMM(m6)
##            R2m       R2c
## [1,] 0.1316673 0.6997381
r2beta(m6, method = "nsj")
##                           Effect   Rsq upper.CL lower.CL
## 1                          Model 0.132    0.159    0.108
## 5                      s_int_psy 0.125    0.150    0.101
## 4               plsChristodoulou 0.005    0.013    0.001
## 2  s_iv_operationalizationOp Yes 0.002    0.008    0.000
## 3 s_iv_quality_of_evidenceQu Yes 0.000    0.003    0.000
m7 <- lmer(empowerment ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id), data = df_cama[!is.na(df_cama$s_int_psy),])



summary(m7)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence +      pls + (1 | id)
##    Data: df_cama[!is.na(df_cama$s_int_psy), ]
## 
## REML criterion at convergence: 8678.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2867 -0.4482  0.0158  0.4827  2.8389 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.305    1.518   
##  Residual             1.223    1.106   
## Number of obs: 2260, groups:  id, 1184
## 
## Fixed effects:
##                                           Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                5.11375    0.08964 1341.44472  57.048  < 2e-16 ***
## s_iv_operationalizationOp Yes             -0.17635    0.10012 1171.76880  -1.761   0.0784 .  
## s_int_psy                                  0.62283    0.06931 1186.10354   8.986  < 2e-16 ***
## s_iv_quality_of_evidenceQu Yes            -0.07587    0.10022 1172.16054  -0.757   0.4492    
## plsChristodoulou                          -0.27286    0.04727 1105.25549  -5.773 1.01e-08 ***
## s_iv_operationalizationOp Yes:s_int_psy    0.17792    0.09952 1186.83321   1.788   0.0741 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_OY s_nt_p s____Y plsChr
## s_v_prtnlOY -0.559                            
## s_int_psy   -0.031  0.013                     
## s_v_qlt__QY -0.560  0.011  0.030              
## plsChristdl -0.260 -0.003  0.001 -0.002       
## s_v_prOY:__  0.040  0.012 -0.697 -0.055  0.001
r.squaredGLMM(m7)
##            R2m       R2c
## [1,] 0.1334905 0.6996498
r2beta(m7, method = "nsj")
##                                    Effect   Rsq upper.CL lower.CL
## 1                                   Model 0.133    0.161    0.110
## 3                               s_int_psy 0.053    0.073    0.037
## 5                        plsChristodoulou 0.005    0.013    0.001
## 6 s_iv_operationalizationOp Yes:s_int_psy 0.002    0.008    0.000
## 2           s_iv_operationalizationOp Yes 0.002    0.008    0.000
## 4          s_iv_quality_of_evidenceQu Yes 0.000    0.004    0.000
anova(m1,m6,m7)
## refitting model(s) with ML (instead of REML)
## Data: df_cama[!is.na(df_cama$s_int_psy), ]
## Models:
## m1: empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + (1 | id)
## m6: empowerment ~ s_iv_operationalization + s_iv_quality_of_evidence + pls + s_int_psy + (1 | id)
## m7: empowerment ~ s_iv_operationalization * s_int_psy + s_iv_quality_of_evidence + pls + (1 | id)
##    npar    AIC    BIC  logLik deviance    Chisq Df Pr(>Chisq)    
## m1    6 8861.6 8895.9 -4424.8   8849.6                           
## m6    7 8674.9 8714.9 -4330.4   8660.9 188.7249  1     <2e-16 ***
## m7    8 8673.7 8719.4 -4328.8   8657.7   3.2053  1     0.0734 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#ICC Subject

m0 <- lmer(empowerment ~ (1 | id), data = df_cama)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ (1 | id)
##    Data: df_cama
## 
## REML criterion at convergence: 8891.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1705 -0.4653  0.0261  0.5175  3.0588 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.799    1.673   
##  Residual             1.263    1.124   
## Number of obs: 2260, groups:  id, 1184
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 4.841e+00  5.421e-02 1.174e+03    89.3   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.689
##   Conditional ICC: 0.689

Plots

library(readr)
library(tidyr)
## 
## Attaching package: 'tidyr'
## The following objects are masked from 'package:Matrix':
## 
##     expand, pack, unpack
library(ggplot2)
## 
## Attaching package: 'ggplot2'
## The following objects are masked from 'package:psych':
## 
##     %+%, alpha
library(Hmisc)
## Loading required package: lattice
## Loading required package: Formula
## 
## Attaching package: 'Hmisc'
## The following object is masked from 'package:psych':
## 
##     describe
## The following objects are masked from 'package:base':
## 
##     format.pval, units
library(plyr)
## 
## Attaching package: 'plyr'
## The following objects are masked from 'package:Hmisc':
## 
##     is.discrete, summarize
library(RColorBrewer)
library(reshape2)
## 
## Attaching package: 'reshape2'
## The following object is masked from 'package:tidyr':
## 
##     smiths
#R Code for Raincloud Plots
source("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:plyr':
## 
##     arrange, count, desc, failwith, id, mutate, rename, summarise, summarize
## The following objects are masked from 'package:Hmisc':
## 
##     src, summarize
## The following object is masked from 'package:MASS':
## 
##     select
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
raincloud_theme <- theme(
  text = element_text(size = 10),
  axis.title.x = element_blank(),
  axis.title.y = element_text(size = 16),
  axis.text = element_text(size = 12),
  axis.text.x = element_text(angle = 90, vjust = 0.5),
  legend.title = element_text(size = 16),
  legend.text = element_text(size = 16),
  legend.position = "right",
  plot.title = element_text(lineheight = .8, face = "bold", size = 16),
  panel.border = element_blank(),
  panel.grid.minor = element_blank(),
  panel.grid.major = element_blank(),
  axis.line.x = element_line(colour = 'black', size = 0.5, linetype = 'solid'),
  axis.line.y = element_line(colour = 'black', size = 0.5, linetype = 'solid'))


m000 <- lmer(accessibility ~ pls  + (1 | id) + s_iv_quality_of_evidence, data = df_cama)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ pls + (1 | id) + s_iv_quality_of_evidence
##    Data: df_cama
## 
## REML criterion at convergence: 8520.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.8443 -0.3771  0.1624  0.3557  3.3325 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.6370   1.6239  
##  Residual             0.9824   0.9911  
## Number of obs: 2262, groups:  id, 1183
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.17208    0.07578 1366.07505  81.448  < 2e-16 ***
## plsChristodoulou                 -0.11325    0.04240 1095.81293  -2.671  0.00767 ** 
## s_iv_quality_of_evidenceQu Yes   -0.09543    0.10347 1170.56191  -0.922  0.35659    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) plsChr
## plsChristdl -0.278       
## s_v_qlt__QY -0.675 -0.002
nrow(df_cama)
## [1] 2274
# Extract the conditional modes (BLUPs) of the subject-level random intercepts
# from m000, i.e. accessibility adjusted for PLS and quality of evidence.
ranef_accessibility <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

# Keep only subjects that entered the model and order by id so the rows line up
# with the random-effect vector before attaching it.
rain <- data[data$id %in% ranef_id, c("id", "s_iv_operationalization", "s_iv_quality_of_evidence")]

rain <- rain[
  with(rain, order(id)),
]

rain$accessibility <- ranef_accessibility



rain$PLS <- rain$s_iv_operationalization
levels(rain$PLS)
## [1] "Op No"  "Op Yes"
levels(rain$PLS) <- c("Subsection Excluded", "Subsection Included")



p1 <- ggplot(data = rain, aes(y = accessibility, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = accessibility, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme  + ylab("Accessibility") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p1
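If a file copy of the figure is needed, it could be written out as sketched below; the file name and dimensions are illustrative placeholders, not part of the original script.

# Illustrative only: file name and size are placeholders.
ggsave("raincloud_accessibility.png", plot = p1, width = 5, height = 5, dpi = 300)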

# Understanding


m000 <- lmer(understanding ~ pls +(1 | id) + s_iv_quality_of_evidence, data = df_cama)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ pls + (1 | id) + s_iv_quality_of_evidence
##    Data: df_cama
## 
## REML criterion at convergence: 8632.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5998 -0.4578  0.0937  0.4486  2.9845 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.293    1.514   
##  Residual             1.198    1.094   
## Number of obs: 2258, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       6.22864    0.07388 1424.19254  84.304  < 2e-16 ***
## plsChristodoulou                 -0.20566    0.04682 1099.28526  -4.393 1.23e-05 ***
## s_iv_quality_of_evidenceQu Yes   -0.11771    0.09964 1170.51435  -1.181    0.238    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) plsChr
## plsChristdl -0.315       
## s_v_qlt__QY -0.668 -0.002
nrow(df_cama)
## [1] 2274
ranef_understanding <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_operationalization", "s_iv_quality_of_evidence")]

rain <- rain[
  with(rain, order(id)),
]


rain$understanding <- ranef_understanding



rain$PLS <- rain$s_iv_operationalization
levels(rain$PLS)
## [1] "Op No"  "Op Yes"
levels(rain$PLS) <- c("Subsection Excluded", "Subsection Included")



p2 <- ggplot(data = rain, aes(y = understanding, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = understanding, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, guides = FALSE, outlier.shape = NA, alpha = 0.5) +
#expand_limits(x = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme  + ylab("Understanding") + scale_x_discrete(guide = guide_axis(angle = 60))  + theme(axis.text.x=element_text(size=10)) #+ labs(caption="Operationalization") + 
 # theme(plot.caption = element_text(hjust=0.5, size=rel(1.2)))

p2

# Empowerment

m000 <- lmer(empowerment ~ pls  + (1 | id) + s_iv_quality_of_evidence, data = df_cama)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ pls + (1 | id) + s_iv_quality_of_evidence
##    Data: df_cama
## 
## REML criterion at convergence: 8864.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.0971 -0.4675 -0.0073  0.4683  2.9876 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.824    1.681   
##  Residual             1.225    1.107   
## Number of obs: 2260, groups:  id, 1184
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.00034    0.07990 1392.43451  62.586  < 2e-16 ***
## plsChristodoulou                 -0.27495    0.04735 1098.52844  -5.807 8.33e-09 ***
## s_iv_quality_of_evidenceQu Yes   -0.04590    0.10851 1172.85147  -0.423    0.672    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) plsChr
## plsChristdl -0.294       
## s_v_qlt__QY -0.672 -0.002
nrow(df_cama)
## [1] 2274
ranef_empowerment <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_operationalization", "s_iv_quality_of_evidence")]

rain <- rain[
  with(rain, order(id)),
]

rain$empowerment <- ranef_empowerment


rain$PLS <- rain$s_iv_operationalization
levels(rain$PLS)
## [1] "Op No"  "Op Yes"
levels(rain$PLS) <- c("Subsection Excluded", "Subsection Included")


p3 <- ggplot(data = rain, aes(y = empowerment, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = empowerment, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(x = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Empowerment") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10)) 
p3

# Accessibility


m000 <- lmer(accessibility ~ pls + (1 | id) + s_iv_quality_of_evidence, data = df_resilience)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ pls + (1 | id) + s_iv_quality_of_evidence
##    Data: df_resilience
## 
## REML criterion at convergence: 8455.3
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.02142 -0.50125  0.03743  0.54772  2.83362 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.723    1.650   
##  Residual             1.674    1.294   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.34550    0.08454 1377.13171  63.233   <2e-16 ***
## plsGroth                         -0.54952    0.05817 1010.33584  -9.447   <2e-16 ***
## s_iv_quality_of_evidenceQu Yes   -0.10790    0.11527 1094.69919  -0.936    0.349    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) plsGrt
## plsGroth    -0.346       
## s_v_qlt__QY -0.647  0.003
nrow(df_resilience)
## [1] 2076
ranef_accessibility <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_technical_terms", "s_iv_quality_of_evidence")]

rain <- rain[
  with(rain, order(id)),
]

rain$accessibility <- ranef_accessibility


rain$PLS <- rain$s_iv_technical_terms
levels(rain$PLS)
## [1] "Expl None"    "Replace"      "Expl Glossar"
levels(rain$PLS) <- c("No Explanation", "Replacement", "Glossary")


p4 <- ggplot(data = rain, aes(y = accessibility, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = accessibility, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(x = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Accessibility") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p4

# Understanding


m000 <- lmer(understanding ~ pls  + (1 | id) + s_iv_quality_of_evidence, data = df_resilience)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ pls + (1 | id) + s_iv_quality_of_evidence
##    Data: df_resilience
## 
## REML criterion at convergence: 8309.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1648 -0.4624  0.0636  0.5270  2.9198 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.582    1.607   
##  Residual             1.537    1.240   
## Number of obs: 2065, groups:  id, 1101
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.43412    0.08190 1372.46175  66.350   <2e-16 ***
## plsGroth                         -0.54391    0.05574 1009.93724  -9.758   <2e-16 ***
## s_iv_quality_of_evidenceQu Yes   -0.15709    0.11179 1094.61595  -1.405     0.16    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) plsGrt
## plsGroth    -0.343       
## s_v_qlt__QY -0.648  0.003
nrow(df_resilience)
## [1] 2076
ranef_understanding <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_technical_terms", "s_iv_quality_of_evidence")]

rain <- rain[
  with(rain, order(id)),
]

rain$understanding <- ranef_understanding


rain$PLS <- rain$s_iv_technical_terms
levels(rain$PLS)
## [1] "Expl None"    "Replace"      "Expl Glossar"
levels(rain$PLS) <- c("No Explanation", "Replacement", "Glossary")

p5 <- ggplot(data = rain, aes(y = understanding, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = understanding, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(x = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme  + ylab("Understanding") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10)) #+ labs(caption="Technical Terms") + theme(plot.caption = element_text(hjust=0.5, size=rel(1.2)))
p5

# Empowerment

m000 <- lmer(empowerment ~ pls  + (1 | id) + s_iv_quality_of_evidence, data = df_resilience)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ pls + (1 | id) + s_iv_quality_of_evidence
##    Data: df_resilience
## 
## REML criterion at convergence: 8222.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1736 -0.5052 -0.0169  0.4751  3.3095 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 2.917    1.708   
##  Residual             1.316    1.147   
## Number of obs: 2064, groups:  id, 1101
## 
## Fixed effects:
##                                  Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       4.30944    0.08346 1319.79525  51.635  < 2e-16 ***
## plsGroth                         -0.43069    0.05171  998.79986  -8.330 2.66e-16 ***
## s_iv_quality_of_evidenceQu Yes   -0.03520    0.11526 1092.83988  -0.305     0.76    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) plsGrt
## plsGroth    -0.312       
## s_v_qlt__QY -0.655  0.004
nrow(df_resilience)
## [1] 2076
ranef_empowerment <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_technical_terms", "s_iv_quality_of_evidence")]

rain <- rain[
  with(rain, order(id)),
]

rain$empowerment <- ranef_empowerment


rain$PLS <- rain$s_iv_technical_terms
levels(rain$PLS)
## [1] "Expl None"    "Replace"      "Expl Glossar"
levels(rain$PLS) <- c("No Explanation", "Replacement", "Glossary")

p6 <- ggplot(data = rain, aes(y = empowerment, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = empowerment, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(x = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme  + ylab("Empowerment") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p6

require(ggpubr)
## Loading required package: ggpubr
## Registered S3 methods overwritten by 'car':
##   method                          from
##   influence.merMod                lme4
##   cooks.distance.influence.merMod lme4
##   dfbeta.influence.merMod         lme4
##   dfbetas.influence.merMod        lme4
## 
## Attaching package: 'ggpubr'
## The following object is masked from 'package:plyr':
## 
##     mutate
library(cowplot)
## 
## Attaching package: 'cowplot'
## The following object is masked from 'package:ggpubr':
## 
##     get_legend
library(ggimage)
## 
## Attaching package: 'ggimage'
## The following object is masked from 'package:cowplot':
## 
##     theme_nothing
## The following object is masked from 'package:ggpubr':
## 
##     theme_transparent
p9 <- plot_grid(p1, p2, p3, p4, p5, p6, ncol=3, labels=LETTERS[1:6]) 

p7 <- plot_grid(p1, p2, p3, ncol=3, labels=LETTERS[4:6]) 
p7 <- p7 +  labs(caption="OPERATIONALIZATION") + 
  theme(plot.caption = element_text(hjust=0.5, size=rel(1.2)))
p8 <- plot_grid(p4, p5, p6, ncol=3, labels=LETTERS[1:3]) +  labs(caption="TECHNICAL TERMS") +  theme(plot.caption = element_text(hjust=0.5, size=rel(1.2)))

p10 <- plot_grid(p8, p7, ncol=1) 

#ggsave("Figure_Study_1.tiff", p10, width = 16, height = 12, compression = "lzw")
#ggsave("Figure_Study_1.png", p10, width = 16, height = 12)


#pref_quality_of_evidence



m000 <- lmer(pref_quality_of_evidence  ~ (1|pls) + (1 | id), data = df)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: pref_quality_of_evidence ~ (1 | pls) + (1 | id)
##    Data: df
## 
## REML criterion at convergence: 16501.3
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.95716 -0.40792 -0.00403  0.46097  2.85804 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.68698  1.2988  
##  pls      (Intercept) 0.01472  0.1213  
##  Residual             1.53392  1.2385  
## Number of obs: 4276, groups:  id, 2264; pls, 4
## 
## Fixed effects:
##             Estimate Std. Error      df t value Pr(>|t|)    
## (Intercept)  4.73135    0.06924 3.99626   68.33 2.78e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
ranef_accessibility <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_operationalization", "s_iv_quality_of_evidence")]

rain <- rain[
  with(rain, order(id)),
]


rain$accessibility <- ranef_accessibility



rain$PLS <- rain$s_iv_quality_of_evidence
levels(rain$PLS)
## [1] "Qu No"  "Qu Yes"
levels(rain$PLS) <- c("No Quality of Evidence Statement", "Quality of Evidence Statement")



p1 <- ggplot(data = rain, aes(y = accessibility, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = accessibility, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme  + ylab("Preference of Meta-Analytical Evidence") +  xlab("Preference of Meta-Analytical Evidence") + scale_x_discrete(guide = guide_axis(title = "Quality of Evidence", angle = 0)) + theme(axis.text.x=element_text(size=10)) + theme(axis.title = element_text(size=10)) 
p1

#ggsave("Figure_Study_1_Quality_of_Evidence.tiff", p1, width = 8, height = 6, compression = "lzw")
#ggsave("Figure_Study_1_Quality_of_Evidence.png", p1, width = 8, height = 6)

Study 2

Read data files

Note: Anonymized datasets were created by removing potentially identifying information and data that are not relevant to the confirmatory analyses.

options(width=120)


df <- read.csv2(file = "20210901_study_2_df_long.csv",fileEncoding="UTF-8", stringsAsFactors = T)
df$s_iv_statistics <- factor(df$s_iv_statistics, levels = c("eff + qual + gloss","eff + qual","eff + gloss", "qual"))
df$s_iv_quality_of_evidence <- factor(df$s_iv_quality_of_evidence, levels = c("normal", "extended"))
df$s_iv_complex <- factor(df$s_iv_complex, levels = c("uncomplex","complex"))
df$s_iv_structured <- factor(df$s_iv_structured, levels = c("unstructured","structured"))
df$pls <- factor(df$pls, levels = c("Schwalm","Bucher", "Bergmann","Yule"))

df_complex <- df[df$s_iv_complex == "complex",]
df_uncomplex <- df[df$s_iv_complex == "uncomplex",]

data <- read.csv2(file = "20210719_study_2_df_wide.csv",fileEncoding="UTF-8", stringsAsFactors = T)
data$s_iv_complex <- factor(data$s_iv_complex, levels = c("uncomplex","complex"))
data$s_iv_quality_of_evidence <- factor(data$s_iv_quality_of_evidence, levels = c("normal", "extended"))
data$s_iv_statistics <- factor(data$s_iv_statistics, levels = c("eff + qual + gloss","eff + qual","eff + gloss", "qual"))
data$s_iv_structured <- factor(data$s_iv_structured, levels = c("unstructured","structured"))
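
A quick optional check that the releveling above worked as intended (illustrative only, not part of the confirmatory analyses):

# Print the factor levels of the recoded design variables as a sanity check
sapply(df[, c("s_iv_statistics", "s_iv_quality_of_evidence", "s_iv_complex", "s_iv_structured")], levels)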

Descriptive Within Level

Complex

names(df_complex)
##  [1] "id"                       "s_iv_complex"             "s_iv_quality_of_evidence" "s_iv_structured"         
##  [5] "s_iv_statistics"          "s_knowledge_evidence"     "pls"                      "accessibility"           
##  [9] "understanding"            "empowerment"              "knowledge_evidence"       "knowledge"
df1 <- df_complex[,c("accessibility", "understanding", "empowerment","knowledge_evidence","knowledge")]

describe(df1)
## df1 
## 
##  5  Variables      2116  Observations
## ------------------------------------------------------------------------------------------------------------------------
## accessibility 
##        n  missing distinct     Info     Mean      Gmd 
##     2100       16        8    0.968    5.768    2.016 
## 
## lowest : 1 2 3 4 5, highest: 4 5 6 7 8
##                                                           
## Value          1     2     3     4     5     6     7     8
## Frequency     57    57   134   271   288   472   384   437
## Proportion 0.027 0.027 0.064 0.129 0.137 0.225 0.183 0.208
## ------------------------------------------------------------------------------------------------------------------------
## understanding 
##        n  missing distinct     Info     Mean      Gmd 
##     2098       18        8    0.961    6.034     1.89 
## 
## lowest : 1 2 3 4 5, highest: 4 5 6 7 8
##                                                           
## Value          1     2     3     4     5     6     7     8
## Frequency     45    37   100   203   310   472   402   529
## Proportion 0.021 0.018 0.048 0.097 0.148 0.225 0.192 0.252
## ------------------------------------------------------------------------------------------------------------------------
## empowerment 
##        n  missing distinct     Info     Mean      Gmd 
##     2106       10        8    0.967    5.072    1.977 
## 
## lowest : 1 2 3 4 5, highest: 4 5 6 7 8
##                                                           
## Value          1     2     3     4     5     6     7     8
## Frequency    104    93   191   309   469   526   230   184
## Proportion 0.049 0.044 0.091 0.147 0.223 0.250 0.109 0.087
## ------------------------------------------------------------------------------------------------------------------------
## knowledge_evidence 
##        n  missing distinct     Info      Sum     Mean      Gmd 
##     2089       27        2    0.749     1008   0.4825   0.4996 
## 
## ------------------------------------------------------------------------------------------------------------------------
## knowledge 
##        n  missing distinct     Info      Sum     Mean      Gmd 
##     2089       27        2    0.659     1408    0.674   0.4397 
## 
## ------------------------------------------------------------------------------------------------------------------------
describeBy(df_complex$accessibility, group = df_complex$s_iv_statistics)
## 
##  Descriptive statistics by group 
## group: eff + qual + gloss
##    vars   n mean  sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 500 5.52 1.9      6    5.66 1.48   1   8     7 -0.49    -0.57 0.09
## ------------------------------------------------------------------------------------------ 
## group: eff + qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 494 6.01 1.71      6    6.18 1.48   1   8     7 -0.64    -0.28 0.08
## ------------------------------------------------------------------------------------------ 
## group: eff + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 573 5.43 1.81      6    5.56 1.48   1   8     7 -0.59    -0.25 0.08
## ------------------------------------------------------------------------------------------ 
## group: qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 533 6.13 1.74      6    6.34 1.48   1   8     7 -0.87     0.25 0.08
describeBy(df_complex$understanding, group = df_complex$s_iv_statistics)
## 
##  Descriptive statistics by group 
## group: eff + qual + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 498 5.76 1.84      6    5.96 1.48   1   8     7 -0.76        0 0.08
## ------------------------------------------------------------------------------------------ 
## group: eff + qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 496 6.32 1.57      7    6.51 1.48   1   8     7 -0.84     0.24 0.07
## ------------------------------------------------------------------------------------------ 
## group: eff + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 573 5.82 1.76      6    5.99 1.48   1   8     7 -0.71        0 0.07
## ------------------------------------------------------------------------------------------ 
## group: qual
##    vars   n mean   sd median trimmed  mad min max range skew kurtosis   se
## X1    1 531 6.25 1.66      6    6.43 1.48   1   8     7 -0.8     0.19 0.07
describeBy(df_complex$empowerment, group = df_complex$s_iv_statistics)
## 
##  Descriptive statistics by group 
## group: eff + qual + gloss
##    vars   n mean   sd median trimmed  mad min max range skew kurtosis   se
## X1    1 500 4.85 1.95      5    4.95 1.48   1   8     7 -0.4    -0.62 0.09
## ------------------------------------------------------------------------------------------ 
## group: eff + qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 499 5.25 1.61      5    5.28 1.48   1   8     7 -0.28    -0.17 0.07
## ------------------------------------------------------------------------------------------ 
## group: eff + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 574 4.93 1.85      5    5.01 1.48   1   8     7 -0.35    -0.43 0.08
## ------------------------------------------------------------------------------------------ 
## group: qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 533 5.26 1.66      5    5.34 1.48   1   8     7 -0.51     0.03 0.07
a <- aggregate(df_complex[, c("accessibility","understanding","empowerment")], list(df_complex$s_iv_statistics), mean, na.rm = TRUE)
b <- aggregate(df_complex[, c("accessibility","understanding","empowerment")], list(df_complex$s_iv_statistics), sd, na.rm = TRUE)

df5 <- cbind(t(a[,-c(1)]),t(b[,-c(1)]))
df5 <- as.data.frame(df5)

names(df5) <- c(paste(a[,1], "mean"),paste(b[,1], "sd"))

df5 <- df5[, order(names(df5))]


describeBy(df_complex$accessibility, group = df_complex$s_iv_quality_of_evidence)
## 
##  Descriptive statistics by group 
## group: normal
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1028 5.73 1.86      6    5.92 1.48   1   8     7 -0.68    -0.17 0.06
## ------------------------------------------------------------------------------------------ 
## group: extended
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1072  5.8 1.77      6    5.96 1.48   1   8     7 -0.59    -0.35 0.05
describeBy(df_complex$understanding, group = df_complex$s_iv_quality_of_evidence)
## 
##  Descriptive statistics by group 
## group: normal
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1030 5.99 1.78      6     6.2 1.48   1   8     7 -0.83     0.17 0.06
## ------------------------------------------------------------------------------------------ 
## group: extended
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1068 6.08 1.68      6    6.25 1.48   1   8     7 -0.75     0.12 0.05
describeBy(df_complex$empowerment, group = df_complex$s_iv_quality_of_evidence)
## 
##  Descriptive statistics by group 
## group: normal
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1033    5 1.82      5     5.1 1.48   1   8     7 -0.43    -0.33 0.06
## ------------------------------------------------------------------------------------------ 
## group: extended
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1073 5.14 1.74      5    5.22 1.48   1   8     7 -0.42     -0.2 0.05
a <- aggregate(df_complex[, c("accessibility","understanding","empowerment")], list(df_complex$s_iv_quality_of_evidence), mean, na.rm = TRUE)
b <-  aggregate(df_complex[, c("accessibility","understanding","empowerment")], list(df_complex$s_iv_quality_of_evidence), sd, na.rm = TRUE)

df5 <- cbind(t(a[,-c(1)]),t(b[,-c(1)]))
df5 <- as.data.frame(df5)

names(df5) <- c(paste(a[,1], "mean"),paste(b[,1], "sd"))

df5 <- df5[, order(names(df5))]


describeBy(df_complex$accessibility, group = df_complex$s_iv_structured)
## 
##  Descriptive statistics by group 
## group: unstructured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1026 5.66 1.83      6    5.82 1.48   1   8     7 -0.64    -0.21 0.06
## ------------------------------------------------------------------------------------------ 
## group: structured
##    vars    n mean  sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1074 5.87 1.8      6    6.04 1.48   1   8     7 -0.65    -0.28 0.05
describeBy(df_complex$understanding, group = df_complex$s_iv_structured)
## 
##  Descriptive statistics by group 
## group: unstructured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1026 5.95 1.73      6    6.14 1.48   1   8     7 -0.79     0.17 0.05
## ------------------------------------------------------------------------------------------ 
## group: structured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1072 6.11 1.72      6    6.31 1.48   1   8     7 -0.81     0.18 0.05
describeBy(df_complex$empowerment, group = df_complex$s_iv_structured)
## 
##  Descriptive statistics by group 
## group: unstructured
##    vars    n mean   sd median trimmed  mad min max range skew kurtosis   se
## X1    1 1031    5 1.77      5    5.08 1.48   1   8     7 -0.4    -0.29 0.06
## ------------------------------------------------------------------------------------------ 
## group: structured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1075 5.14 1.78      5    5.24 1.48   1   8     7 -0.46    -0.22 0.05
a <- aggregate(df_complex[, c("accessibility","understanding","empowerment")], list(df_complex$s_iv_structured), mean, na.rm = TRUE)
b <- aggregate(df_complex[, c("accessibility","understanding","empowerment")], list(df_complex$s_iv_structured), sd, na.rm = TRUE)

df5 <- cbind(t(a[,-c(1)]),t(b[,-c(1)]))
df5 <- as.data.frame(df5)

names(df5) <- c(paste(a[,1], "mean"),paste(b[,1], "sd"))

df5 <- df5[, order(names(df5))]




#correlations

cor(df_complex$understanding, df_complex$accessibility, use = "pairwise.complete.obs")
## [1] 0.6930322
cor(df_complex$understanding, df_complex$empowerment, use = "pairwise.complete.obs")
## [1] 0.6248939
cor(df_complex$accessibility, df_complex$empowerment, use = "pairwise.complete.obs")
## [1] 0.5557023
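
The same three pairwise correlations can also be obtained in a single call; shown here only as an equivalent cross-check:

# Pairwise-complete correlation matrix of the three rating scales
cor(df_complex[, c("accessibility", "understanding", "empowerment")], use = "pairwise.complete.obs")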

Uncomplex

names(df_uncomplex)
##  [1] "id"                       "s_iv_complex"             "s_iv_quality_of_evidence" "s_iv_structured"         
##  [5] "s_iv_statistics"          "s_knowledge_evidence"     "pls"                      "accessibility"           
##  [9] "understanding"            "empowerment"              "knowledge_evidence"       "knowledge"
df1 <- df_uncomplex[,c("accessibility", "understanding", "empowerment", "knowledge_evidence","knowledge")]

describe(df1)
## df1 
## 
##  5  Variables      2190  Observations
## ------------------------------------------------------------------------------------------------------------------------
## accessibility 
##        n  missing distinct     Info     Mean      Gmd 
##     2164       26        8    0.965    5.927    1.983 
## 
## lowest : 1 2 3 4 5, highest: 4 5 6 7 8
##                                                           
## Value          1     2     3     4     5     6     7     8
## Frequency     48    51   126   260   287   443   428   521
## Proportion 0.022 0.024 0.058 0.120 0.133 0.205 0.198 0.241
## ------------------------------------------------------------------------------------------------------------------------
## understanding 
##        n  missing distinct     Info     Mean      Gmd 
##     2173       17        8    0.957    6.126    1.909 
## 
## lowest : 1 2 3 4 5, highest: 4 5 6 7 8
##                                                           
## Value          1     2     3     4     5     6     7     8
## Frequency     32    48    98   232   299   414   417   633
## Proportion 0.015 0.022 0.045 0.107 0.138 0.191 0.192 0.291
## ------------------------------------------------------------------------------------------------------------------------
## empowerment 
##        n  missing distinct     Info     Mean      Gmd 
##     2175       15        8     0.97    5.103    2.008 
## 
## lowest : 1 2 3 4 5, highest: 4 5 6 7 8
##                                                           
## Value          1     2     3     4     5     6     7     8
## Frequency    105   100   189   328   477   506   265   205
## Proportion 0.048 0.046 0.087 0.151 0.219 0.233 0.122 0.094
## ------------------------------------------------------------------------------------------------------------------------
## knowledge_evidence 
##        n  missing distinct     Info      Sum     Mean      Gmd 
##     2161       29        2    0.749     1037   0.4799   0.4994 
## 
## ------------------------------------------------------------------------------------------------------------------------
## knowledge 
##        n  missing distinct     Info      Sum     Mean      Gmd 
##     2161       29        2    0.577     1600   0.7404   0.3846 
## 
## ------------------------------------------------------------------------------------------------------------------------
describeBy(df_uncomplex$accessibility, group = df_uncomplex$s_iv_statistics)
## 
##  Descriptive statistics by group 
## group: eff + qual + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 542 5.69 1.84      6    5.85 1.48   1   8     7 -0.59     -0.3 0.08
## ------------------------------------------------------------------------------------------ 
## group: eff + qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 601 6.12 1.76      6    6.33 1.48   1   8     7 -0.79    -0.13 0.07
## ------------------------------------------------------------------------------------------ 
## group: eff + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 491 5.54 1.86      6    5.67 1.48   1   8     7 -0.52    -0.52 0.08
## ------------------------------------------------------------------------------------------ 
## group: qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 530  6.3 1.61      7    6.48 1.48   1   8     7 -0.91     0.45 0.07
describeBy(df_uncomplex$understanding, group = df_uncomplex$s_iv_statistics)
## 
##  Descriptive statistics by group 
## group: eff + qual + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 544 5.93 1.84      6    6.14 1.48   1   8     7 -0.74    -0.13 0.08
## ------------------------------------------------------------------------------------------ 
## group: eff + qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 601 6.24 1.74      7    6.45 1.48   1   8     7 -0.83    -0.03 0.07
## ------------------------------------------------------------------------------------------ 
## group: eff + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 491 5.93 1.72      6    6.09 1.48   1   8     7 -0.62     -0.2 0.08
## ------------------------------------------------------------------------------------------ 
## group: qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 537 6.37 1.63      7    6.58 1.48   1   8     7 -0.86    -0.03 0.07
describeBy(df_uncomplex$empowerment, group = df_uncomplex$s_iv_statistics)
## 
##  Descriptive statistics by group 
## group: eff + qual + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 547 5.03 1.89      5    5.12 1.48   1   8     7 -0.43    -0.45 0.08
## ------------------------------------------------------------------------------------------ 
## group: eff + qual
##    vars   n mean   sd median trimmed  mad min max range skew kurtosis   se
## X1    1 602 5.23 1.84      5    5.34 1.48   1   8     7 -0.5    -0.29 0.08
## ------------------------------------------------------------------------------------------ 
## group: eff + gloss
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 493 4.85 1.75      5    4.93 1.48   1   8     7 -0.32    -0.44 0.08
## ------------------------------------------------------------------------------------------ 
## group: qual
##    vars   n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 533 5.27 1.68      5    5.33 1.48   1   8     7 -0.35    -0.13 0.07
a <- aggregate(df_uncomplex[, c("accessibility","understanding","empowerment")], list(df_uncomplex$s_iv_statistics), mean, na.rm = TRUE)
b <- aggregate(df_uncomplex[, c("accessibility","understanding","empowerment")], list(df_uncomplex$s_iv_statistics), sd, na.rm = TRUE)

df5 <- cbind(t(a[,-c(1)]),t(b[,-c(1)]))
df5 <- as.data.frame(df5)

names(df5) <- c(paste(a[,1], "mean"),paste(b[,1], "sd"))

df5 <- df5[, order(names(df5))]



describeBy(df_uncomplex$accessibility, group = df_uncomplex$s_iv_quality_of_evidence)
## 
##  Descriptive statistics by group 
## group: normal
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1065 5.93 1.83      6    6.13 1.48   1   8     7 -0.73    -0.12 0.06
## ------------------------------------------------------------------------------------------ 
## group: extended
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1099 5.92 1.76      6     6.1 1.48   1   8     7 -0.69    -0.25 0.05
describeBy(df_uncomplex$understanding, group = df_uncomplex$s_iv_quality_of_evidence)
## 
##  Descriptive statistics by group 
## group: normal
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1072 6.04 1.78      6    6.25 1.48   1   8     7 -0.77    -0.06 0.05
## ------------------------------------------------------------------------------------------ 
## group: extended
##    vars    n mean  sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1101 6.21 1.7      7     6.4 1.48   1   8     7 -0.77    -0.13 0.05
describeBy(df_uncomplex$empowerment, group = df_uncomplex$s_iv_quality_of_evidence)
## 
##  Descriptive statistics by group 
## group: normal
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1074 5.03 1.82      5    5.13 1.48   1   8     7 -0.44    -0.36 0.06
## ------------------------------------------------------------------------------------------ 
## group: extended
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1101 5.18 1.78      5    5.25 1.48   1   8     7 -0.39    -0.29 0.05
a <- aggregate(df_uncomplex[, c("accessibility","understanding","empowerment")], list(df_uncomplex$s_iv_quality_of_evidence), mean, na.rm = TRUE)
b <-  aggregate(df_uncomplex[, c("accessibility","understanding","empowerment")], list(df_uncomplex$s_iv_quality_of_evidence), sd, na.rm = TRUE)

df5 <- cbind(t(a[,-c(1)]),t(b[,-c(1)]))
df5 <- as.data.frame(df5)

names(df5) <- c(paste(a[,1], "mean"),paste(b[,1], "sd"))

df5 <- df5[, order(names(df5))]


describeBy(df_uncomplex$accessibility, group = df_uncomplex$s_iv_structured)
## 
##  Descriptive statistics by group 
## group: unstructured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1128 5.99 1.71      6    6.16 1.48   1   8     7 -0.69    -0.11 0.05
## ------------------------------------------------------------------------------------------ 
## group: structured
##    vars    n mean   sd median trimmed  mad min max range skew kurtosis   se
## X1    1 1036 5.86 1.88      6    6.05 1.48   1   8     7 -0.7     -0.3 0.06
describeBy(df_uncomplex$understanding, group = df_uncomplex$s_iv_structured)
## 
##  Descriptive statistics by group 
## group: unstructured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1136 6.21 1.68      6    6.39 1.48   1   8     7 -0.76    -0.09 0.05
## ------------------------------------------------------------------------------------------ 
## group: structured
##    vars    n mean  sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1037 6.04 1.8      6    6.25 1.48   1   8     7 -0.77    -0.12 0.06
describeBy(df_uncomplex$empowerment, group = df_uncomplex$s_iv_structured)
## 
##  Descriptive statistics by group 
## group: unstructured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1135 5.19 1.78      5    5.27 1.48   1   8     7 -0.42     -0.3 0.05
## ------------------------------------------------------------------------------------------ 
## group: structured
##    vars    n mean   sd median trimmed  mad min max range  skew kurtosis   se
## X1    1 1040    5 1.82      5    5.09 1.48   1   8     7 -0.41    -0.35 0.06
a <- aggregate(df_uncomplex[, c("accessibility","understanding","empowerment")], list(df_uncomplex$s_iv_structured), mean, na.rm = TRUE)
b <- aggregate(df_uncomplex[, c("accessibility","understanding","empowerment")], list(df_uncomplex$s_iv_structured), sd, na.rm = TRUE)

df5 <- cbind(t(a[,-c(1)]),t(b[,-c(1)]))
df5 <- as.data.frame(df5)

names(df5) <- c(paste(a[,1], "mean"),paste(b[,1], "sd"))

df5 <- df5[, order(names(df5))]





#correlations

cor(df_uncomplex$understanding, df_uncomplex$accessibility, use = "pairwise.complete.obs")
## [1] 0.6781916
cor(df_uncomplex$understanding, df_uncomplex$empowerment, use = "pairwise.complete.obs")
## [1] 0.6064299
cor(df_uncomplex$accessibility, df_uncomplex$empowerment, use = "pairwise.complete.obs")
## [1] 0.5200545

Confirmatory analysis

Complex

A Accessibility

m1 <- lmer(accessibility ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), data = df_complex)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_statistics + s_iv_quality_of_evidence +      s_iv_structured + pls + (1 | id)
##    Data: df_complex
## 
## REML criterion at convergence: 8013.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3394 -0.4614  0.0925  0.5061  2.6429 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.844    1.358   
##  Residual             1.363    1.168   
## Number of obs: 2100, groups:  id, 1085
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         5.38422    0.12591 1177.37674  42.762  < 2e-16 ***
## s_iv_statisticseff + qual           0.49952    0.14108 1083.99090   3.541 0.000416 ***
## s_iv_statisticseff + gloss         -0.06578    0.13646 1081.98645  -0.482 0.629841    
## s_iv_statisticsqual                 0.62976    0.13859 1081.27190   4.544 6.14e-06 ***
## s_iv_quality_of_evidenceextended    0.06255    0.09721 1080.84430   0.644 0.520032    
## s_iv_structuredstructured           0.19660    0.09739 1080.63125   2.019 0.043772 *  
## plsYule                            -0.01513    0.05145 1045.25710  -0.294 0.768846    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.561                                     
## s_v_sttst+g -0.605  0.514                              
## s_v_sttstcs -0.575  0.505  0.523                       
## s_v_qlty_f_ -0.393 -0.013  0.011  0.003                
## s_v_strctrd -0.426  0.024  0.064  0.020  -0.002        
## plsYule     -0.202 -0.002 -0.006 -0.004   0.001  0.004
r.squaredGLMM(m1)
##             R2m      R2c
## [1,] 0.03184354 0.588453
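
R2m is the marginal R squared (variance explained by the fixed effects alone) and R2c the conditional R squared (fixed plus random effects), following Nakagawa & Schielzeth. As a cross-check, the same two quantities could also be obtained from the performance package (sketch, output not shown):

# Alternative computation of marginal/conditional R2 for the same model
performance::r2(m1)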
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.032    0.050    0.021
## 4              s_iv_statisticsqual 0.015    0.027    0.006
## 2        s_iv_statisticseff + qual 0.009    0.019    0.003
## 6        s_iv_structuredstructured 0.003    0.009    0.000
## 5 s_iv_quality_of_evidenceextended 0.000    0.004    0.000
## 3       s_iv_statisticseff + gloss 0.000    0.003    0.000
## 7                          plsYule 0.000    0.002    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |   53.790 | 17.930 |     3 | 1080.828 |    13.152 |  < .001 |      | 0.029 |         0.029 |         |                 |           |    0.173 |      
## s_iv_quality_of_evidence |    0.565 |  0.565 |     1 | 1080.844 |     0.414 |   0.520 |      | 0.000 |         0.000 |         |                 |           |    0.018 |      
## s_iv_structured          |    5.555 |  5.555 |     1 | 1080.631 |     4.075 |   0.044 |      | 0.003 |         0.003 |         |                 |           |    0.056 |      
## pls                      |    0.118 |  0.118 |     1 | 1045.257 |     0.086 |   0.769 |      | 0.000 |         0.000 |         |                 |           |    0.008 |      
## Residuals                | 1794.135 |  0.854 |       |          |           |         | 2100 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_statistics"          "s_iv_quality_of_evidence" "s_iv_structured"          "pls"                     
## [5] "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "very small" "very small" NA          
## (Rules: field2013)
test = glht(m1,linfct=mcp(s_iv_statistics="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = accessibility ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)    
## eff + qual - eff + qual + gloss == 0   0.49952    0.14108   3.541 0.000399 ***
## eff + gloss - eff + qual + gloss == 0 -0.06578    0.13646  -0.482 0.629743    
## qual - eff + qual + gloss == 0         0.62976    0.13859   4.544 5.52e-06 ***
## eff + gloss - eff + qual == 0         -0.56530    0.13683  -4.132 3.60e-05 ***
## qual - eff + qual == 0                 0.13025    0.13909   0.936 0.349051    
## qual - eff + gloss == 0                0.69555    0.13427   5.180 2.22e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = accessibility ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)    
## eff + qual - eff + qual + gloss == 0   0.49952    0.14108   3.541 0.000599 ***
## eff + gloss - eff + qual + gloss == 0 -0.06578    0.13646  -0.482 0.629743    
## qual - eff + qual + gloss == 0         0.62976    0.13859   4.544 1.66e-05 ***
## eff + gloss - eff + qual == 0         -0.56530    0.13683  -4.132 7.21e-05 ***
## qual - eff + qual == 0                 0.13025    0.13909   0.936 0.418862    
## qual - eff + gloss == 0                0.69555    0.13427   5.180 1.33e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
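
The BH-adjusted p-values above follow directly from the unadjusted p-values in the previous table via the Benjamini-Hochberg correction; this can be reproduced by hand (inputs rounded as reported above):

# Reproduce the BH adjustment from the unadjusted p-values reported above
p_raw <- c(0.000399, 0.629743, 5.52e-06, 3.60e-05, 0.349051, 2.22e-07)
p.adjust(p_raw, method = "BH")
# approx. 0.000599, 0.629743, 1.66e-05, 7.21e-05, 0.418862, 1.33e-06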
#ICC Subject

m0 <- lmer(accessibility ~ (1 | id), data = df_complex)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ (1 | id)
##    Data: df_complex
## 
## REML criterion at convergence: 8040.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2807 -0.4881  0.0517  0.4978  2.7149 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.936    1.391   
##  Residual             1.363    1.168   
## Number of obs: 2100, groups:  id, 1085
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 5.768e+00  4.944e-02 1.085e+03   116.7   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.587
##   Conditional ICC: 0.587
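
The adjusted ICC follows directly from the variance components of the intercept-only model above: the between-subject (id) variance divided by the total variance.

# ICC by hand from the variance components reported above
1.936 / (1.936 + 1.363)  # approx. 0.587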

B Understanding

m1 <- lmer(understanding ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), data = df_complex)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_statistics + s_iv_quality_of_evidence +      s_iv_structured + pls + (1 | id)
##    Data: df_complex
## 
## REML criterion at convergence: 7761.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6324 -0.4330  0.1114  0.4866  2.9880 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.760    1.327   
##  Residual             1.162    1.078   
## Number of obs: 2098, groups:  id, 1086
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.617e+00  1.210e-01  1.168e+03  46.439  < 2e-16 ***
## s_iv_statisticseff + qual         5.721e-01  1.357e-01  1.084e+03   4.216 2.69e-05 ***
## s_iv_statisticseff + gloss        7.758e-02  1.314e-01  1.082e+03   0.591 0.554975    
## s_iv_statisticsqual               5.071e-01  1.335e-01  1.082e+03   3.799 0.000154 ***
## s_iv_quality_of_evidenceextended  9.070e-02  9.353e-02  1.080e+03   0.970 0.332420    
## s_iv_structuredstructured         1.680e-01  9.372e-02  1.080e+03   1.792 0.073334 .  
## plsYule                          -6.832e-03  4.757e-02  1.041e+03  -0.144 0.885821    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.563                                     
## s_v_sttst+g -0.607  0.515                              
## s_v_sttstcs -0.576  0.506  0.524                       
## s_v_qlty_f_ -0.393 -0.013  0.010  0.002                
## s_v_strctrd -0.428  0.024  0.064  0.021   0.000        
## plsYule     -0.192 -0.003 -0.006 -0.007  -0.001  0.003
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |   33.501 | 11.167 |     3 | 1080.542 |     9.614 |  < .001 |      | 0.022 |         0.022 |         |                 |           |    0.150 |      
## s_iv_quality_of_evidence |    1.092 |  1.092 |     1 | 1080.418 |     0.940 |   0.332 |      | 0.001 |         0.001 |         |                 |           |    0.027 |      
## s_iv_structured          |    3.732 |  3.732 |     1 | 1080.304 |     3.213 |   0.073 |      | 0.002 |         0.002 |         |                 |           |    0.050 |      
## pls                      |    0.024 |  0.024 |     1 | 1040.544 |     0.021 |   0.886 |      | 0.000 |         0.000 |         |                 |           |    0.004 |      
## Residuals                | 1498.331 |  0.714 |       |          |           |         | 2098 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_statistics"          "s_iv_quality_of_evidence" "s_iv_structured"          "pls"                     
## [5] "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "very small" "very small" NA          
## (Rules: field2013)
test = glht(m1,linfct=mcp(s_iv_statistics="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = understanding ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)    
## eff + qual - eff + qual + gloss == 0   0.57207    0.13568   4.216 2.48e-05 ***
## eff + gloss - eff + qual + gloss == 0  0.07758    0.13137   0.591 0.554852    
## qual - eff + qual + gloss == 0         0.50706    0.13348   3.799 0.000145 ***
## eff + gloss - eff + qual == 0         -0.49449    0.13153  -3.759 0.000170 ***
## qual - eff + qual == 0                -0.06500    0.13376  -0.486 0.627002    
## qual - eff + gloss == 0                0.42949    0.12927   3.322 0.000892 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = understanding ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)    
## eff + qual - eff + qual + gloss == 0   0.57207    0.13568   4.216 0.000149 ***
## eff + gloss - eff + qual + gloss == 0  0.07758    0.13137   0.591 0.627002    
## qual - eff + qual + gloss == 0         0.50706    0.13348   3.799 0.000341 ***
## eff + gloss - eff + qual == 0         -0.49449    0.13153  -3.759 0.000341 ***
## qual - eff + qual == 0                -0.06500    0.13376  -0.486 0.627002    
## qual - eff + gloss == 0                0.42949    0.12927   3.322 0.001338 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.02427837 0.6121035
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.024    0.041    0.015
## 2        s_iv_statisticseff + qual 0.013    0.024    0.005
## 4              s_iv_statisticsqual 0.011    0.021    0.004
## 6        s_iv_structuredstructured 0.002    0.008    0.000
## 5 s_iv_quality_of_evidenceextended 0.001    0.005    0.000
## 3       s_iv_statisticseff + gloss 0.000    0.003    0.000
## 7                          plsYule 0.000    0.002    0.000
#ICC Subject

m0 <- lmer(understanding ~ (1 | id), data = df_complex)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ (1 | id)
##    Data: df_complex
## 
## REML criterion at convergence: 7777.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5909 -0.4558  0.1204  0.4422  2.9038 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.820    1.349   
##  Residual             1.162    1.078   
## Number of obs: 2098, groups:  id, 1086
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 6.030e+00  4.733e-02 1.084e+03   127.4   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.610
##   Conditional ICC: 0.610

C Knowledge

table(df_complex$knowledge, df_complex$s_iv_statistics)/sum(table(df_complex$knowledge, df_complex$s_iv_statistics)) * 100
##    
##     eff + qual + gloss eff + qual eff + gloss      qual
##   0           9.047391   7.467688    8.329344  7.754907
##   1          14.648157  16.275730   19.004308 17.472475
table(df_complex[,c("knowledge", "s_iv_statistics")])
##          s_iv_statistics
## knowledge eff + qual + gloss eff + qual eff + gloss qual
##         0                189        156         174  162
##         1                306        340         397  365
# correct answers per condition
table(df_complex[,c("knowledge", "s_iv_statistics")])[2,]/colSums(table(df_complex[,c("knowledge", "s_iv_statistics")]))
## eff + qual + gloss         eff + qual        eff + gloss               qual 
##          0.6181818          0.6854839          0.6952715          0.6925996
# sanity check: 397 of 571 responses correct in the eff + gloss condition
397/571
## [1] 0.6952715
table(df_complex[,c("knowledge", "s_iv_structured")])
##          s_iv_structured
## knowledge unstructured structured
##         0          329        352
##         1          691        717
table(df_complex[,c("knowledge", "s_iv_structured")])[2,]/colSums(table(df_complex[,c("knowledge", "s_iv_structured")]))
## unstructured   structured 
##    0.6774510    0.6707203
table(df_complex[,c("knowledge", "s_iv_quality_of_evidence")])
##          s_iv_quality_of_evidence
## knowledge normal extended
##         0    307      374
##         1    721      687
table(df_complex[,c("knowledge", "s_iv_quality_of_evidence")])[2,]/colSums(table(df_complex[,c("knowledge", "s_iv_quality_of_evidence")]))
##    normal  extended 
## 0.7013619 0.6475024
m1 <- glmer(knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), family = binomial("logit"), data = df_complex, glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1000000)))

summary(m1)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured +      pls + (1 | id)
##    Data: df_complex
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+06))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2575.1   2620.2  -1279.5   2559.1     2081 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.5494 -0.8584  0.4447  0.5145  0.9349 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.514    1.23    
## Number of obs: 2089, groups:  id, 1064
## 
## Fixed effects:
##                                  Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                       0.68192    0.17761   3.839 0.000123 ***
## s_iv_statisticseff + qual         0.40094    0.19204   2.088 0.036813 *  
## s_iv_statisticseff + gloss        0.44108    0.18616   2.369 0.017818 *  
## s_iv_statisticsqual               0.44826    0.19018   2.357 0.018423 *  
## s_iv_quality_of_evidenceextended -0.32329    0.13386  -2.415 0.015728 *  
## s_iv_structuredstructured        -0.01943    0.13328  -0.146 0.884089    
## plsYule                           0.27716    0.10827   2.560 0.010468 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.507                                     
## s_v_sttst+g -0.548  0.507                              
## s_v_sttstcs -0.513  0.497  0.514                       
## s_v_qlty_f_ -0.400 -0.033 -0.006 -0.018                
## s_v_strctrd -0.421  0.026  0.064  0.023   0.004        
## plsYule     -0.274  0.013  0.011  0.012  -0.016  0.004
test = glht(m1,linfct=mcp(s_iv_statistics ="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: glmer(formula = knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex, family = binomial("logit"), 
##     control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+06)))
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)  
## eff + qual - eff + qual + gloss == 0  0.400945   0.192039   2.088   0.0368 *
## eff + gloss - eff + qual + gloss == 0 0.441084   0.186160   2.369   0.0178 *
## qual - eff + qual + gloss == 0        0.448258   0.190181   2.357   0.0184 *
## eff + gloss - eff + qual == 0         0.040139   0.187831   0.214   0.8308  
## qual - eff + qual == 0                0.047313   0.191644   0.247   0.8050  
## qual - eff + gloss == 0               0.007174   0.185516   0.039   0.9692  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: glmer(formula = knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex, family = binomial("logit"), 
##     control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+06)))
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)  
## eff + qual - eff + qual + gloss == 0  0.400945   0.192039   2.088   0.0736 .
## eff + gloss - eff + qual + gloss == 0 0.441084   0.186160   2.369   0.0553 .
## qual - eff + qual + gloss == 0        0.448258   0.190181   2.357   0.0553 .
## eff + gloss - eff + qual == 0         0.040139   0.187831   0.214   0.9692  
## qual - eff + qual == 0                0.047313   0.191644   0.247   0.9692  
## qual - eff + gloss == 0               0.007174   0.185516   0.039   0.9692  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
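
A quick cross-check: the BH column should be reproducible from the unadjusted contrast p values with p.adjust(), assuming (analogous to the coefficients extracted below) that the glht summary stores them in $test$pvalues:

# reproduce the BH adjustment from the raw Tukey-contrast p values
p_raw <- summary(test, test = adjusted("none"))$test$pvalues
round(p.adjust(as.vector(p_raw), method = "BH"), 4)
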
exp(fixef(m1))
##                      (Intercept)        s_iv_statisticseff + qual       s_iv_statisticseff + gloss 
##                        1.9776761                        1.4932351                        1.5543919 
##              s_iv_statisticsqual s_iv_quality_of_evidenceextended        s_iv_structuredstructured 
##                        1.5655826                        0.7237674                        0.9807567 
##                          plsYule 
##                        1.3193828
interpret_oddsratio(exp(fixef(m1)), rules = "cohen1988")
##                      (Intercept)        s_iv_statisticseff + qual       s_iv_statisticseff + gloss 
##                          "small"                          "small"                          "small" 
##              s_iv_statisticsqual s_iv_quality_of_evidenceextended        s_iv_structuredstructured 
##                          "small"                     "very small"                     "very small" 
##                          plsYule 
##                     "very small" 
## (Rules: cohen1988)
exp(summary(test, test = adjusted("none"))$test$coefficients)
##  eff + qual - eff + qual + gloss eff + gloss - eff + qual + gloss        qual - eff + qual + gloss 
##                         1.493235                         1.554392                         1.565583 
##         eff + gloss - eff + qual                qual - eff + qual               qual - eff + gloss 
##                         1.040956                         1.048450                         1.007199
interpret_oddsratio(exp(summary(test, test = adjusted("none"))$test$coefficients), rules = "cohen1988")
##  eff + qual - eff + qual + gloss eff + gloss - eff + qual + gloss        qual - eff + qual + gloss 
##                          "small"                          "small"                          "small" 
##         eff + gloss - eff + qual                qual - eff + qual               qual - eff + gloss 
##                     "very small"                     "very small"                     "very small" 
## (Rules: cohen1988)
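
For orientation: the cohen1988 labels appear to correspond to converting each odds ratio to a standardized mean difference, d = log(OR) * sqrt(3) / pi, and applying Cohen's (1988) d cut-offs (0.2, 0.5, 0.8); a minimal sketch of that assumed conversion:

# assumed mapping behind interpret_oddsratio(..., rules = "cohen1988"):
# log odds ratio -> Cohen's d via the logistic standard deviation pi / sqrt(3)
or_to_d <- function(or) log(or) * sqrt(3) / pi
round(or_to_d(exp(fixef(m1))), 2)  # e.g. ~0.22 for "eff + qual" (small), |d| < 0.2 for the "very small" terms
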
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: npar
## term                     | npar |    sumsq | meansq | statistic |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## ---------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |    3 |    8.642 |  2.881 |     2.881 |      | 0.005 |         0.005 |         |                 |           |    0.070 |      
## s_iv_quality_of_evidence |    1 |    6.359 |  6.359 |     6.359 |      | 0.004 |         0.004 |         |                 |           |    0.060 |      
## s_iv_structured          |    1 |    0.028 |  0.028 |     0.028 |      | 0.000 |         0.000 |         |                 |           |    0.004 |      
## pls                      |    1 |    7.479 |  7.479 |     7.479 |      | 0.004 |         0.004 |         |                 |           |    0.065 |      
## Residuals                |      | 1754.922 |  0.129 |           | 2089 |       |               |         |                 |           |          |
r.squaredGLMM(m1)
## Warning: The null model is correct only if all variables used by the original model remain unchanged.
##                    R2m       R2c
## theoretical 0.01635049 0.3262973
## delta       0.01295702 0.2585758
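
As a sanity check, the "theoretical" row can be roughly reproduced by hand from the Nakagawa & Schielzeth decomposition, using pi^2/3 as the distribution-specific variance of the logit link; a minimal sketch (variable names are ad hoc):

# hand computation of the theoretical marginal/conditional R2 for the logit model
var_fix  <- var(as.vector(model.matrix(m1) %*% fixef(m1)))  # fixed-effects variance on the latent scale
var_id   <- as.numeric(VarCorr(m1)$id)                      # random-intercept variance (~1.514 above)
var_dist <- pi^2 / 3                                        # distribution-specific variance, logit link
c(R2m = var_fix / (var_fix + var_id + var_dist),
  R2c = (var_fix + var_id) / (var_fix + var_id + var_dist))
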
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.010    0.024    0.005
## 4              s_iv_statisticsqual 0.003    0.010    0.000
## 5 s_iv_quality_of_evidenceextended 0.003    0.010    0.000
## 3       s_iv_statisticseff + gloss 0.003    0.010    0.000
## 7                          plsYule 0.003    0.009    0.000
## 2        s_iv_statisticseff + qual 0.002    0.008    0.000
## 6        s_iv_structuredstructured 0.000    0.002    0.000
# ICC for subjects (id): intercept-only null model

m0 <- glmer(knowledge ~ (1 | id), family = binomial("logit"), data = df_complex, glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1000000)))

summary(m0)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ (1 | id)
##    Data: df_complex
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+06))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2583.5   2594.8  -1289.8   2579.5     2087 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.3208 -0.8562  0.4698  0.4698  0.7571 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 1.508    1.228   
## Number of obs: 2089, groups:  id, 1064
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.96556    0.07888   12.24   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.314
##   Conditional ICC: 0.314
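
This corresponds to the latent-scale formula for a logit model, sigma^2_id / (sigma^2_id + pi^2/3); a quick check against the variance component above:

# latent-scale ICC from the null model's random-intercept variance (1.508 above)
v_id <- as.numeric(VarCorr(m0)$id)
v_id / (v_id + pi^2 / 3)  # ~0.314
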

D Empowerment

m1 <- lmer(empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), data = df_complex)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured +      pls + (1 | id)
##    Data: df_complex
## 
## REML criterion at convergence: 7912
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2877 -0.4763  0.0351  0.5213  3.0965 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.919    1.385   
##  Residual             1.210    1.100   
## Number of obs: 2106, groups:  id, 1086
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         4.70379    0.12542 1168.04629  37.505  < 2e-16 ***
## s_iv_statisticseff + qual           0.40388    0.14071 1084.53016   2.870  0.00418 ** 
## s_iv_statisticseff + gloss          0.09751    0.13630 1084.15704   0.715  0.47452    
## s_iv_statisticsqual                 0.40526    0.13846 1084.04787   2.927  0.00349 ** 
## s_iv_quality_of_evidenceextended    0.14705    0.09703 1082.24031   1.516  0.12992    
## s_iv_structuredstructured           0.14902    0.09722 1082.02863   1.533  0.12561    
## plsYule                            -0.02372    0.04842 1046.52480  -0.490  0.62427    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.563                                     
## s_v_sttst+g -0.607  0.515                              
## s_v_sttstcs -0.576  0.506  0.523                       
## s_v_qlty_f_ -0.394 -0.012  0.011  0.002                
## s_v_strctrd -0.427  0.024  0.064  0.021  -0.001        
## plsYule     -0.190 -0.002 -0.005 -0.005   0.000  0.003
test = glht(m1,linfct=mcp(s_iv_statistics="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex)
## 
## Linear Hypotheses:
##                                        Estimate Std. Error z value Pr(>|z|)   
## eff + qual - eff + qual + gloss == 0   0.403878   0.140709   2.870  0.00410 **
## eff + gloss - eff + qual + gloss == 0  0.097507   0.136298   0.715  0.47436   
## qual - eff + qual + gloss == 0         0.405259   0.138460   2.927  0.00342 **
## eff + gloss - eff + qual == 0         -0.306371   0.136443  -2.245  0.02474 * 
## qual - eff + qual == 0                 0.001381   0.138727   0.010  0.99206   
## qual - eff + gloss == 0                0.307752   0.134127   2.294  0.02176 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_complex)
## 
## Linear Hypotheses:
##                                        Estimate Std. Error z value Pr(>|z|)  
## eff + qual - eff + qual + gloss == 0   0.403878   0.140709   2.870   0.0123 *
## eff + gloss - eff + qual + gloss == 0  0.097507   0.136298   0.715   0.5692  
## qual - eff + qual + gloss == 0         0.405259   0.138460   2.927   0.0123 *
## eff + gloss - eff + qual == 0         -0.306371   0.136443  -2.245   0.0371 *
## qual - eff + qual == 0                 0.001381   0.138727   0.010   0.9921  
## qual - eff + gloss == 0                0.307752   0.134127   2.294   0.0371 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
r.squaredGLMM(m1)
##            R2m       R2c
## [1,] 0.0137975 0.6184957
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.014    0.028    0.008
## 4              s_iv_statisticsqual 0.006    0.015    0.001
## 2        s_iv_statisticseff + qual 0.006    0.015    0.001
## 6        s_iv_structuredstructured 0.002    0.007    0.000
## 5 s_iv_quality_of_evidenceextended 0.002    0.007    0.000
## 3       s_iv_statisticseff + gloss 0.000    0.004    0.000
## 7                          plsYule 0.000    0.003    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |   16.616 |  5.539 |     3 | 1082.248 |     4.576 |   0.003 |      | 0.011 |         0.011 |         |                 |           |    0.103 |      
## s_iv_quality_of_evidence |    2.780 |  2.780 |     1 | 1082.240 |     2.297 |   0.130 |      | 0.002 |         0.002 |         |                 |           |    0.042 |      
## s_iv_structured          |    2.844 |  2.844 |     1 | 1082.029 |     2.350 |   0.126 |      | 0.002 |         0.002 |         |                 |           |    0.043 |      
## pls                      |    0.291 |  0.291 |     1 | 1046.525 |     0.240 |   0.624 |      | 0.000 |         0.000 |         |                 |           |    0.014 |      
## Residuals                | 1558.557 |  0.740 |       |          |           |         | 2106 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_statistics"          "s_iv_quality_of_evidence" "s_iv_structured"          "pls"                     
## [5] "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "very small" "very small" NA          
## (Rules: field2013)
# ICC for subjects (id): intercept-only null model

m0 <- lmer(empowerment ~ (1 | id), data = df_complex)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ (1 | id)
##    Data: df_complex
## 
## REML criterion at convergence: 7913.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3042 -0.4450 -0.0145  0.5477  3.0599 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.95     1.396   
##  Residual             1.21     1.100   
## Number of obs: 2106, groups:  id, 1086
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 5.067e+00  4.879e-02 1.087e+03   103.9   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.617
##   Conditional ICC: 0.617
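
For the linear model, the ICC is the between-subject share of total variance, sigma^2_id / (sigma^2_id + sigma^2_residual); a quick check from the null model above:

# ICC as the proportion of between-subject variance (values from the null model above)
vc <- as.data.frame(VarCorr(m0))
vc$vcov[1] / sum(vc$vcov)  # ~0.617
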

Uncomplex

A Accessibility

m1 <- lmer(accessibility ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), data = df_uncomplex)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ s_iv_statistics + s_iv_quality_of_evidence +      s_iv_structured + pls + (1 | id)
##    Data: df_uncomplex
## 
## REML criterion at convergence: 8264
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4018 -0.4762  0.1075  0.5181  2.5986 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.738    1.318   
##  Residual             1.416    1.190   
## Number of obs: 2164, groups:  id, 1123
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         5.65779    0.11834 1216.59014  47.811  < 2e-16 ***
## s_iv_statisticseff + qual           0.44310    0.12995 1098.40767   3.410 0.000674 ***
## s_iv_statisticseff + gloss         -0.13991    0.13653 1100.52375  -1.025 0.305719    
## s_iv_statisticsqual                 0.60960    0.13367 1106.50501   4.561 5.67e-06 ***
## s_iv_quality_of_evidenceextended   -0.01968    0.09434 1101.55215  -0.209 0.834783    
## s_iv_structuredstructured          -0.09793    0.09450 1101.46638  -1.036 0.300309    
## plsBucher                           0.13926    0.05170 1060.57281   2.694 0.007179 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.581                                     
## s_v_sttst+g -0.528  0.499                              
## s_v_sttstcs -0.567  0.511  0.485                       
## s_v_qlty_f_ -0.392 -0.030 -0.046 -0.030                
## s_v_strctrd -0.425  0.041 -0.006  0.045   0.049        
## plsBucher   -0.222  0.008  0.006  0.005  -0.004 -0.005
test = glht(m1,linfct=mcp(s_iv_statistics="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = accessibility ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)    
## eff + qual - eff + qual + gloss == 0    0.4431     0.1299   3.410  0.00065 ***
## eff + gloss - eff + qual + gloss == 0  -0.1399     0.1365  -1.025  0.30549    
## qual - eff + qual + gloss == 0          0.6096     0.1337   4.561 5.10e-06 ***
## eff + gloss - eff + qual == 0          -0.5830     0.1335  -4.366 1.26e-05 ***
## qual - eff + qual == 0                  0.1665     0.1304   1.277  0.20160    
## qual - eff + gloss == 0                 0.7495     0.1372   5.465 4.64e-08 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = accessibility ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)    
## eff + qual - eff + qual + gloss == 0    0.4431     0.1299   3.410 0.000975 ***
## eff + gloss - eff + qual + gloss == 0  -0.1399     0.1365  -1.025 0.305494    
## qual - eff + qual + gloss == 0          0.6096     0.1337   4.561 1.53e-05 ***
## eff + gloss - eff + qual == 0          -0.5830     0.1335  -4.366 2.53e-05 ***
## qual - eff + qual == 0                  0.1665     0.1304   1.277 0.241921    
## qual - eff + gloss == 0                 0.7495     0.1372   5.465 2.78e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.03104364 0.5650063
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.031    0.049    0.020
## 4              s_iv_statisticsqual 0.014    0.026    0.006
## 2        s_iv_statisticseff + qual 0.008    0.017    0.002
## 7                        plsBucher 0.002    0.007    0.000
## 6        s_iv_structuredstructured 0.001    0.005    0.000
## 3       s_iv_statisticseff + gloss 0.001    0.005    0.000
## 5 s_iv_quality_of_evidenceextended 0.000    0.002    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |   58.705 | 19.568 |     3 | 1101.132 |    13.823 |  < .001 |      | 0.029 |         0.029 |         |                 |           |    0.174 |      
## s_iv_quality_of_evidence |    0.062 |  0.062 |     1 | 1101.552 |     0.044 |   0.835 |      | 0.000 |         0.000 |         |                 |           |    0.006 |      
## s_iv_structured          |    1.520 |  1.520 |     1 | 1101.466 |     1.074 |   0.300 |      | 0.001 |         0.001 |         |                 |           |    0.028 |      
## pls                      |   10.271 | 10.271 |     1 | 1060.573 |     7.256 |   0.007 |      | 0.005 |         0.005 |         |                 |           |    0.073 |      
## Residuals                | 1948.347 |  0.900 |       |          |           |         | 2164 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_statistics"          "s_iv_quality_of_evidence" "s_iv_structured"          "pls"                     
## [5] "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "very small" "very small" NA          
## (Rules: field2013)
# ICC for subjects (id): intercept-only null model

m0 <- lmer(accessibility ~ (1 | id), data = df_uncomplex)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ (1 | id)
##    Data: df_uncomplex
## 
## REML criterion at convergence: 8296.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2653 -0.4512  0.0208  0.4928  2.5989 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.817    1.348   
##  Residual             1.425    1.194   
## Number of obs: 2164, groups:  id, 1123
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 5.912e+00  4.784e-02 1.107e+03   123.6   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.560
##   Conditional ICC: 0.560

B Understanding

m1 <- lmer(understanding ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), data = df_uncomplex)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ s_iv_statistics + s_iv_quality_of_evidence +      s_iv_structured + pls + (1 | id)
##    Data: df_uncomplex
## 
## REML criterion at convergence: 8047
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7105 -0.4113  0.1365  0.4482  3.0289 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.884    1.372   
##  Residual             1.121    1.059   
## Number of obs: 2173, groups:  id, 1121
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                       5.861e+00  1.175e-01  1.198e+03  49.897  < 2e-16 ***
## s_iv_statisticseff + qual         3.004e-01  1.298e-01  1.106e+03   2.314  0.02087 *  
## s_iv_statisticseff + gloss       -5.239e-03  1.364e-01  1.108e+03  -0.038  0.96937    
## s_iv_statisticsqual               4.107e-01  1.332e-01  1.111e+03   3.084  0.00209 ** 
## s_iv_quality_of_evidenceextended  1.545e-01  9.414e-02  1.108e+03   1.642  0.10095    
## s_iv_structuredstructured        -1.564e-01  9.432e-02  1.108e+03  -1.659  0.09747 .  
## plsBucher                         1.361e-01  4.589e-02  1.069e+03   2.967  0.00308 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.584                                     
## s_v_sttst+g -0.531  0.498                              
## s_v_sttstcs -0.572  0.512  0.486                       
## s_v_qlty_f_ -0.395 -0.028 -0.045 -0.027                
## s_v_strctrd -0.425  0.039 -0.006  0.045   0.047        
## plsBucher   -0.199  0.007  0.004  0.004  -0.001 -0.004
test = glht(m1,linfct=mcp(s_iv_statistics="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = understanding ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex)
## 
## Linear Hypotheses:
##                                        Estimate Std. Error z value Pr(>|z|)   
## eff + qual - eff + qual + gloss == 0   0.300357   0.129821   2.314  0.02069 * 
## eff + gloss - eff + qual + gloss == 0 -0.005239   0.136419  -0.038  0.96937   
## qual - eff + qual + gloss == 0         0.410742   0.133190   3.084  0.00204 **
## eff + gloss - eff + qual == 0         -0.305596   0.133479  -2.289  0.02205 * 
## qual - eff + qual == 0                 0.110385   0.129978   0.849  0.39574   
## qual - eff + gloss == 0                0.415981   0.136767   3.042  0.00235 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = understanding ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex)
## 
## Linear Hypotheses:
##                                        Estimate Std. Error z value Pr(>|z|)   
## eff + qual - eff + qual + gloss == 0   0.300357   0.129821   2.314  0.03308 * 
## eff + gloss - eff + qual + gloss == 0 -0.005239   0.136419  -0.038  0.96937   
## qual - eff + qual + gloss == 0         0.410742   0.133190   3.084  0.00706 **
## eff + gloss - eff + qual == 0         -0.305596   0.133479  -2.289  0.03308 * 
## qual - eff + qual == 0                 0.110385   0.129978   0.849  0.47489   
## qual - eff + gloss == 0                0.415981   0.136767   3.042  0.00706 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.01710391 0.6332309
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.017    0.032    0.010
## 4              s_iv_statisticsqual 0.007    0.016    0.002
## 2        s_iv_statisticseff + qual 0.004    0.011    0.000
## 6        s_iv_structuredstructured 0.002    0.008    0.000
## 5 s_iv_quality_of_evidenceextended 0.002    0.007    0.000
## 7                        plsBucher 0.002    0.007    0.000
## 3       s_iv_statisticseff + gloss 0.000    0.002    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |   16.738 |  5.579 |     3 | 1107.715 |     4.976 |   0.002 |      | 0.011 |         0.011 |         |                 |           |    0.106 |      
## s_iv_quality_of_evidence |    3.022 |  3.022 |     1 | 1108.047 |     2.695 |   0.101 |      | 0.002 |         0.002 |         |                 |           |    0.045 |      
## s_iv_structured          |    3.085 |  3.085 |     1 | 1107.992 |     2.751 |   0.097 |      | 0.002 |         0.002 |         |                 |           |    0.046 |      
## pls                      |    9.869 |  9.869 |     1 | 1069.129 |     8.802 |   0.003 |      | 0.007 |         0.007 |         |                 |           |    0.082 |      
## Residuals                | 1476.245 |  0.679 |       |          |           |         | 2173 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_statistics"          "s_iv_quality_of_evidence" "s_iv_structured"          "pls"                     
## [5] "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "small"      "very small" "very small" "very small" NA          
## (Rules: field2013)
# ICC for subjects (id): intercept-only null model

m0 <- lmer(understanding ~ (1 | id), data = df_uncomplex)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ (1 | id)
##    Data: df_uncomplex
## 
## REML criterion at convergence: 8059.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6390 -0.3881  0.1894  0.4038  2.9459 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.914    1.384   
##  Residual             1.130    1.063   
## Number of obs: 2173, groups:  id, 1121
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)    6.1165     0.0473 1112.8386   129.3   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.629
##   Conditional ICC: 0.629

C Knowledge

table(df_uncomplex$knowledge, df_uncomplex$s_iv_statistics)/sum(table(df_uncomplex$knowledge, df_uncomplex$s_iv_statistics)) * 100
##    
##     eff + qual + gloss eff + qual eff + gloss      qual
##   0           6.941231   7.172605    6.108283  5.738084
##   1          18.186025  20.592318   16.658954 18.602499
table(df_uncomplex[,c("knowledge", "s_iv_statistics")])
##          s_iv_statistics
## knowledge eff + qual + gloss eff + qual eff + gloss qual
##         0                150        155         132  124
##         1                393        445         360  402
table(df_uncomplex[,c("knowledge", "s_iv_statistics")])[2,]/colSums(table(df_uncomplex[,c("knowledge", "s_iv_statistics")]))
## eff + qual + gloss         eff + qual        eff + gloss               qual 
##          0.7237569          0.7416667          0.7317073          0.7642586
table(df_uncomplex[,c("knowledge", "s_iv_structured")])
##          s_iv_structured
## knowledge unstructured structured
##         0          285        276
##         1          846        754
table(df_uncomplex[,c("knowledge", "s_iv_structured")])[2,]/colSums(table(df_uncomplex[,c("knowledge", "s_iv_structured")]))
## unstructured   structured 
##    0.7480106    0.7320388
table(df_uncomplex[,c("knowledge", "s_iv_quality_of_evidence")])
##          s_iv_quality_of_evidence
## knowledge normal extended
##         0    298      263
##         1    765      835
table(df_uncomplex[,c("knowledge", "s_iv_quality_of_evidence")])[2,]/colSums(table(df_uncomplex[,c("knowledge", "s_iv_quality_of_evidence")]))
##    normal  extended 
## 0.7196613 0.7604736
m1 <- glmer(knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), family = binomial("logit"), data = df_uncomplex, glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))

summary(m1)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured +      pls + (1 | id)
##    Data: df_uncomplex
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2352.1   2397.5  -1168.0   2336.1     2153 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.6854 -0.5729  0.2741  0.3434  0.8926 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 3.877    1.969   
## Number of obs: 2161, groups:  id, 1100
## 
## Fixed effects:
##                                  Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                       1.28587    0.24300   5.292 1.21e-07 ***
## s_iv_statisticseff + qual         0.17533    0.24651   0.711   0.4769    
## s_iv_statisticseff + gloss        0.08457    0.25839   0.327   0.7434    
## s_iv_statisticsqual               0.30010    0.25462   1.179   0.2386    
## s_iv_quality_of_evidenceextended  0.32267    0.18029   1.790   0.0735 .  
## s_iv_structuredstructured        -0.12875    0.18019  -0.715   0.4749    
## plsBucher                         0.54143    0.12659   4.277 1.89e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.511                                     
## s_v_sttst+g -0.466  0.492                              
## s_v_sttstcs -0.496  0.503  0.476                       
## s_v_qlty_f_ -0.328 -0.010 -0.038 -0.020                
## s_v_strctrd -0.417  0.045 -0.005  0.047   0.037        
## plsBucher   -0.165  0.022  0.012  0.021   0.025 -0.017
test = glht(m1,linfct=mcp(s_iv_statistics ="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: glmer(formula = knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex, family = binomial("logit"), 
##     control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05)))
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)
## eff + qual - eff + qual + gloss == 0   0.17533    0.24651   0.711    0.477
## eff + gloss - eff + qual + gloss == 0  0.08457    0.25839   0.327    0.743
## qual - eff + qual + gloss == 0         0.30010    0.25462   1.179    0.239
## eff + gloss - eff + qual == 0         -0.09076    0.25468  -0.356    0.722
## qual - eff + qual == 0                 0.12477    0.25003   0.499    0.618
## qual - eff + gloss == 0                0.21553    0.26250   0.821    0.412
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: glmer(formula = knowledge ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex, family = binomial("logit"), 
##     control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05)))
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)
## eff + qual - eff + qual + gloss == 0   0.17533    0.24651   0.711    0.743
## eff + gloss - eff + qual + gloss == 0  0.08457    0.25839   0.327    0.743
## qual - eff + qual + gloss == 0         0.30010    0.25462   1.179    0.743
## eff + gloss - eff + qual == 0         -0.09076    0.25468  -0.356    0.743
## qual - eff + qual == 0                 0.12477    0.25003   0.499    0.743
## qual - eff + gloss == 0                0.21553    0.26250   0.821    0.743
## (Adjusted p values reported -- BH method)
exp(fixef(m1))
##                      (Intercept)        s_iv_statisticseff + qual       s_iv_statisticseff + gloss 
##                        3.6178299                        1.1916428                        1.0882509 
##              s_iv_statisticsqual s_iv_quality_of_evidenceextended        s_iv_structuredstructured 
##                        1.3499963                        1.3808112                        0.8791897 
##                        plsBucher 
##                        1.7184543
interpret_oddsratio(exp(fixef(m1)), rules = "cohen1988")
##                      (Intercept)        s_iv_statisticseff + qual       s_iv_statisticseff + gloss 
##                         "medium"                     "very small"                     "very small" 
##              s_iv_statisticsqual s_iv_quality_of_evidenceextended        s_iv_structuredstructured 
##                     "very small"                     "very small"                     "very small" 
##                        plsBucher 
##                          "small" 
## (Rules: cohen1988)
exp(summary(test, test = adjusted("none"))$test$coefficients)
##  eff + qual - eff + qual + gloss eff + gloss - eff + qual + gloss        qual - eff + qual + gloss 
##                        1.1916428                        1.0882509                        1.3499963 
##         eff + gloss - eff + qual                qual - eff + qual               qual - eff + gloss 
##                        0.9132358                        1.1328867                        1.2405193
interpret_oddsratio(exp(summary(test, test = adjusted("none"))$test$coefficients), rules = "cohen1988")
##  eff + qual - eff + qual + gloss eff + gloss - eff + qual + gloss        qual - eff + qual + gloss 
##                     "very small"                     "very small"                     "very small" 
##         eff + gloss - eff + qual                qual - eff + qual               qual - eff + gloss 
##                     "very small"                     "very small"                     "very small" 
## (Rules: cohen1988)
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: npar
## term                     | npar |    sumsq | meansq | statistic |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## ---------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |    3 |    1.645 |  0.548 |     0.548 |      | 0.001 |         0.001 |         |                 |           |    0.037 |      
## s_iv_quality_of_evidence |    1 |    3.361 |  3.361 |     3.361 |      | 0.003 |         0.003 |         |                 |           |    0.053 |      
## s_iv_structured          |    1 |    0.506 |  0.506 |     0.506 |      | 0.000 |         0.000 |         |                 |           |    0.021 |      
## pls                      |    1 |   20.034 | 20.034 |    20.034 |      | 0.016 |         0.016 |         |                 |           |    0.129 |      
## Residuals                |      | 1202.001 |  0.081 |           | 2161 |       |               |         |                 |           |          |
r.squaredGLMM(m1)
## Warning: The null model is correct only if all variables used by the original model remain unchanged.
##                    R2m       R2c
## theoretical 0.01619252 0.5483932
## delta       0.01270776 0.4303746
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.011    0.024    0.006
## 7                        plsBucher 0.008    0.018    0.002
## 5 s_iv_quality_of_evidenceextended 0.002    0.007    0.000
## 4              s_iv_statisticsqual 0.001    0.005    0.000
## 6        s_iv_structuredstructured 0.000    0.004    0.000
## 2        s_iv_statisticseff + qual 0.000    0.004    0.000
## 3       s_iv_statisticseff + gloss 0.000    0.003    0.000
# ICC for subjects (id): intercept-only null model

m0 <- glmer(knowledge ~ (1 | id), family = binomial("logit"), data = df_uncomplex, glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1000000)))

summary(m0)
## Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
##  Family: binomial  ( logit )
## Formula: knowledge ~ (1 | id)
##    Data: df_uncomplex
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+06))
## 
##      AIC      BIC   logLik deviance df.resid 
##   2364.6   2375.9  -1180.3   2360.6     2159 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.3744 -0.7118  0.3064  0.3064  0.7276 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  id     (Intercept) 3.608    1.899   
## Number of obs: 2161, groups:  id, 1100
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)   1.7463     0.1396   12.51   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.523
##   Conditional ICC: 0.523

D Empowerment

m1 <- lmer(empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured + pls + (1 | id), data = df_uncomplex)



summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + s_iv_structured +      pls + (1 | id)
##    Data: df_uncomplex
## 
## REML criterion at convergence: 8241.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3598 -0.4361  0.0440  0.5185  3.0483 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.952    1.397   
##  Residual             1.262    1.123   
## Number of obs: 2175, groups:  id, 1123
## 
## Fixed effects:
##                                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                         4.98718    0.12081 1209.92447  41.281   <2e-16 ***
## s_iv_statisticseff + qual           0.18926    0.13328 1111.47218   1.420   0.1559    
## s_iv_statisticseff + gloss         -0.18242    0.14008 1111.44206  -1.302   0.1931    
## s_iv_statisticsqual                 0.22074    0.13692 1118.59298   1.612   0.1072    
## s_iv_quality_of_evidenceextended    0.13201    0.09675 1114.78535   1.364   0.1727    
## s_iv_structuredstructured          -0.17448    0.09693 1114.50092  -1.800   0.0721 .  
## plsBucher                           0.12257    0.04865 1075.91347   2.519   0.0119 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_+q s_v_+g s_v_stt s_v___ s_v_str
## s_v_sttst+q -0.582                                     
## s_v_sttst+g -0.530  0.498                              
## s_v_sttstcs -0.570  0.511  0.484                       
## s_v_qlty_f_ -0.394 -0.030 -0.045 -0.027                
## s_v_strctrd -0.427  0.040 -0.005  0.046   0.049        
## plsBucher   -0.205  0.007  0.005  0.004  -0.004 -0.003
test = glht(m1,linfct=mcp(s_iv_statistics="Tukey"))
summary(test, test = adjusted("none"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)   
## eff + qual - eff + qual + gloss == 0   0.18926    0.13328   1.420  0.15559   
## eff + gloss - eff + qual + gloss == 0 -0.18242    0.14008  -1.302  0.19284   
## qual - eff + qual + gloss == 0         0.22074    0.13692   1.612  0.10693   
## eff + gloss - eff + qual == 0         -0.37168    0.13711  -2.711  0.00671 **
## qual - eff + qual == 0                 0.03148    0.13368   0.236  0.81382   
## qual - eff + gloss == 0                0.40316    0.14068   2.866  0.00416 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- none method)
summary(test, test = adjusted("BH"))
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: lmer(formula = empowerment ~ s_iv_statistics + s_iv_quality_of_evidence + 
##     s_iv_structured + pls + (1 | id), data = df_uncomplex)
## 
## Linear Hypotheses:
##                                       Estimate Std. Error z value Pr(>|z|)  
## eff + qual - eff + qual + gloss == 0   0.18926    0.13328   1.420   0.2314  
## eff + gloss - eff + qual + gloss == 0 -0.18242    0.14008  -1.302   0.2314  
## qual - eff + qual + gloss == 0         0.22074    0.13692   1.612   0.2139  
## eff + gloss - eff + qual == 0         -0.37168    0.13711  -2.711   0.0201 *
## qual - eff + qual == 0                 0.03148    0.13368   0.236   0.8138  
## qual - eff + gloss == 0                0.40316    0.14068   2.866   0.0201 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- BH method)
r.squaredGLMM(m1)
##             R2m       R2c
## [1,] 0.01319522 0.6126359
r2beta(m1, method = "nsj")
##                             Effect   Rsq upper.CL lower.CL
## 1                            Model 0.013    0.027    0.007
## 6        s_iv_structuredstructured 0.002    0.008    0.000
## 4              s_iv_statisticsqual 0.002    0.007    0.000
## 2        s_iv_statisticseff + qual 0.001    0.006    0.000
## 5 s_iv_quality_of_evidenceextended 0.001    0.006    0.000
## 3       s_iv_statisticseff + gloss 0.001    0.006    0.000
## 7                        plsBucher 0.001    0.006    0.000
anova_stats(m1)
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## term                     |    sumsq | meansq | NumDF |    DenDF | statistic | p.value |   df | etasq | partial.etasq | omegasq | partial.omegasq | epsilonsq | cohens.f | power
## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## s_iv_statistics          |   13.566 |  4.522 |     3 | 1114.179 |     3.585 |   0.013 |      | 0.008 |         0.008 |         |                 |           |    0.090 |      
## s_iv_quality_of_evidence |    2.349 |  2.349 |     1 | 1114.785 |     1.862 |   0.173 |      | 0.001 |         0.001 |         |                 |           |    0.037 |      
## s_iv_structured          |    4.088 |  4.088 |     1 | 1114.501 |     3.240 |   0.072 |      | 0.002 |         0.002 |         |                 |           |    0.049 |      
## pls                      |    8.006 |  8.006 |     1 | 1075.913 |     6.346 |   0.012 |      | 0.005 |         0.005 |         |                 |           |    0.069 |      
## Residuals                | 1683.200 |  0.774 |       |          |           |         | 2175 |       |               |         |                 |           |          |
anova_stats(m1)$term
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "s_iv_statistics"          "s_iv_quality_of_evidence" "s_iv_structured"          "pls"                     
## [5] "Residuals"
interpret_omega_squared(anova_stats(m1)$partial.etasq, rules = "field2013")
## Warning in tidy.anova(model): The following column names in ANOVA output were not recognized or transformed: NumDF,
## DenDF
## [1] "very small" "very small" "very small" "very small" NA          
## (Rules: field2013)
# ICC for subjects (id): intercept-only null model

m0 <- lmer(empowerment ~ (1 | id), data = df_uncomplex)

summary(m0)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ (1 | id)
##    Data: df_uncomplex
## 
## REML criterion at convergence: 8247.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2372 -0.4525 -0.0211  0.5308  2.9793 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.976    1.406   
##  Residual             1.268    1.126   
## Number of obs: 2175, groups:  id, 1123
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept) 5.098e+00  4.851e-02 1.120e+03   105.1   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
performance::icc(m0)
## # Intraclass Correlation Coefficient
## 
##      Adjusted ICC: 0.609
##   Conditional ICC: 0.609

Plots

library(readr)
library(tidyr)
library(ggplot2)
library(Hmisc)
library(plyr)
library(RColorBrewer)
library(reshape2)
library(gridExtra)
## 
## Attaching package: 'gridExtra'
## The following object is masked from 'package:dplyr':
## 
##     combine
source("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")

raincloud_theme = theme(
text = element_text(size = 10),
axis.title.x = element_blank(),
axis.title.y = element_text(size = 16),
axis.text = element_text(size = 12),
axis.text.x = element_text(angle = 90, vjust = 0.5),
legend.title=element_text(size=16),
legend.text=element_text(size=16),
legend.position = "right",
plot.title = element_text(lineheight=.8, face="bold", size = 16),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'))

# Accessibility: raincloud plot of subject-level random intercepts from m000 (model without s_iv_statistics), grouped by PLS variant (s_iv_statistics)

m000 <- lmer(accessibility ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_structured + s_iv_complex * s_iv_quality_of_evidence, data = df)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_structured +  
##     s_iv_complex * s_iv_quality_of_evidence
##    Data: df
## 
## REML criterion at convergence: 16339.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3484 -0.4628  0.0744  0.5090  2.7063 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.87586  1.36962 
##  pls      (Intercept) 0.00357  0.05975 
##  Residual             1.38973  1.17887 
## Number of obs: 4264, groups:  id, 2208; pls, 4
## 
## Fixed effects:
##                                                        Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                             5.98551    0.09419   26.18111  63.546   <2e-16 ***
## s_iv_complexcomplex                                    -0.36046    0.13436   27.08647  -2.683   0.0123 *  
## s_iv_structuredstructured                              -0.13653    0.09665 2192.62507  -1.413   0.1579    
## s_iv_quality_of_evidenceextended                       -0.01794    0.09657 2192.69061  -0.186   0.8526    
## s_iv_complexcomplex:s_iv_structuredstructured           0.34589    0.13768 2188.80023   2.512   0.0121 *  
## s_iv_complexcomplex:s_iv_quality_of_evidenceextended    0.08886    0.13762 2188.86071   0.646   0.5185    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_cm s_v_st s_v___ s_v_:__
## s_v_cmplxcm -0.701                             
## s_v_strctrd -0.517  0.363                      
## s_v_qlty_f_ -0.544  0.381  0.050               
## s_v_cmpl:__  0.363 -0.519 -0.702 -0.035        
## s_v_cm:____  0.382 -0.532 -0.035 -0.702  0.023
nrow(df)
## [1] 4306
ranef_accessibility <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_statistics")]

rain <- rain[
  with(rain, order(id)),
]


rain$accessibility <- ranef_accessibility



rain$PLS <- rain$s_iv_statistics
levels(rain$PLS)
## [1] "eff + qual + gloss" "eff + qual"         "eff + gloss"        "qual"
levels(rain$PLS) <- c("Statement + Effect + Glossary","Statement + Effect", "Effect + Glossary", "Statement")


p1 <- ggplot(data = rain, aes(y = accessibility, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = accessibility, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Accessibility") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p1

# Understanding


m000 <- lmer(understanding ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_structured + s_iv_complex * s_iv_quality_of_evidence, data = df)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_structured +  
##     s_iv_complex * s_iv_quality_of_evidence
##    Data: df
## 
## REML criterion at convergence: 15834.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6415 -0.4132  0.1429  0.4606  3.0078 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.86327  1.36502 
##  pls      (Intercept) 0.00353  0.05941 
##  Residual             1.14137  1.06835 
## Number of obs: 4271, groups:  id, 2207; pls, 4
## 
## Fixed effects:
##                                                        Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                             6.12249    0.09181   26.62704  66.689  < 2e-16 ***
## s_iv_complexcomplex                                    -0.23046    0.13108   27.65751  -1.758  0.08980 .  
## s_iv_structuredstructured                              -0.17884    0.09394 2191.95368  -1.904  0.05707 .  
## s_iv_quality_of_evidenceextended                        0.15697    0.09385 2191.80563   1.673  0.09456 .  
## s_iv_complexcomplex:s_iv_structuredstructured           0.35109    0.13383 2193.82076   2.623  0.00877 ** 
## s_iv_complexcomplex:s_iv_quality_of_evidenceextended   -0.05835    0.13377 2193.77782  -0.436  0.66274    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_cm s_v_st s_v___ s_v_:__
## s_v_cmplxcm -0.700                             
## s_v_strctrd -0.513  0.359                      
## s_v_qlty_f_ -0.540  0.378  0.047               
## s_v_cmpl:__  0.360 -0.517 -0.702 -0.033        
## s_v_cm:____  0.379 -0.529 -0.033 -0.702  0.023
nrow(df)
## [1] 4306
ranef_understanding <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_statistics")]

rain <- rain[
  with(rain, order(id)),
]


rain$understanding <- ranef_understanding



rain$PLS <- rain$s_iv_statistics
levels(rain$PLS)
## [1] "eff + qual + gloss" "eff + qual"         "eff + gloss"        "qual"
levels(rain$PLS) <- c("Statement + Effect + Glossary","Statement + Effect", "Effect + Glossary", "Statement")



p2 <- ggplot(data = rain, aes(y = understanding, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = understanding, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Understanding") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p2

# Empowerment

m000 <- lmer(empowerment ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_structured + s_iv_complex * s_iv_quality_of_evidence, data = df)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_structured +  
##     s_iv_complex * s_iv_quality_of_evidence
##    Data: df
## 
## REML criterion at convergence: 16160.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3493 -0.4418  0.0260  0.5338  3.0935 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.957692 1.39918 
##  pls      (Intercept) 0.002736 0.05231 
##  Residual             1.236177 1.11183 
## Number of obs: 4281, groups:  id, 2209; pls, 4
## 
## Fixed effects:
##                                                        Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                             5.12476    0.09179   36.93301  55.831   <2e-16 ***
## s_iv_complexcomplex                                    -0.21126    0.13107   38.35870  -1.612   0.1152    
## s_iv_structuredstructured                              -0.19354    0.09659 2204.71796  -2.004   0.0452 *  
## s_iv_quality_of_evidenceextended                        0.12903    0.09650 2204.79545   1.337   0.1813    
## s_iv_complexcomplex:s_iv_structuredstructured           0.34373    0.13760 2203.29099   2.498   0.0126 *  
## s_iv_complexcomplex:s_iv_quality_of_evidenceextended    0.02264    0.13753 2203.41526   0.165   0.8693    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_cm s_v_st s_v___ s_v_:__
## s_v_cmplxcm -0.700                             
## s_v_strctrd -0.530  0.371                      
## s_v_qlty_f_ -0.556  0.390  0.050               
## s_v_cmpl:__  0.372 -0.532 -0.702 -0.035        
## s_v_cm:____  0.390 -0.544 -0.035 -0.702  0.024
nrow(df)
## [1] 4306
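Both the structure main effect and the structure x complexity interaction fall below .05 in the empowerment model, which suggests that the effect of structuring depends on text complexity. A hypothetical follow-up (assuming the emmeans package is available; not part of the original analysis plan) would contrast structured vs. unstructured summaries within each complexity level:

# Sketch: simple effects of structure within each level of complexity for the empowerment model
library(emmeans)
emm_struct <- emmeans(m000, ~ s_iv_structured | s_iv_complex)
pairs(emm_struct)  # structured vs. unstructured contrast, separately for uncomplex and complex summaries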
ranef_empowerment <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_statistics")]

rain <- rain[
  with(rain, order(id)),
]


rain$empowerment <- ranef_empowerment



rain$PLS <- rain$s_iv_statistics
levels(rain$PLS)
## [1] "eff + qual + gloss" "eff + qual"         "eff + gloss"        "qual"
levels(rain$PLS) <- c("Statement + Effect + Glossary","Statement + Effect", "Effect + Glossary", "Statement")




p3 <- ggplot(data = rain, aes(y = empowerment, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = empowerment, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Empowerment") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p3

## Structured x Complex


# Accessibility

m000 <- lmer(accessibility ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_statistics + s_iv_complex * s_iv_quality_of_evidence, data = df)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: accessibility ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_statistics +  
##     s_iv_complex * s_iv_quality_of_evidence
##    Data: df
## 
## REML criterion at convergence: 16274.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3934 -0.4721  0.1145  0.5072  2.6314 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.793898 1.33936 
##  pls      (Intercept) 0.003617 0.06014 
##  Residual             1.389648 1.17883 
## Number of obs: 4264, groups:  id, 2208; pls, 4
## 
## Fixed effects:
##                                                        Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                           5.675e+00  1.130e-01  5.264e+01  50.226  < 2e-16 ***
## s_iv_complexcomplex                                  -1.902e-01  1.635e-01  5.760e+01  -1.163 0.249523    
## s_iv_statisticseff + qual                             4.481e-01  1.310e-01  2.183e+03   3.422 0.000633 ***
## s_iv_statisticseff + gloss                           -1.410e-01  1.377e-01  2.187e+03  -1.024 0.305867    
## s_iv_statisticsqual                                   6.154e-01  1.347e-01  2.198e+03   4.570 5.15e-06 ***
## s_iv_quality_of_evidenceextended                     -1.457e-02  9.503e-02  2.188e+03  -0.153 0.878190    
## s_iv_complexcomplex:s_iv_statisticseff + qual         4.441e-02  1.917e-01  2.186e+03   0.232 0.816847    
## s_iv_complexcomplex:s_iv_statisticseff + gloss        5.756e-02  1.930e-01  2.185e+03   0.298 0.765519    
## s_iv_complexcomplex:s_iv_statisticsqual               8.341e-03  1.925e-01  2.190e+03   0.043 0.965446    
## s_iv_complexcomplex:s_iv_quality_of_evidenceextended  7.734e-02  1.354e-01  2.184e+03   0.571 0.568037    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_cm s_v_+q s_v_+g s_v_st s_v___ s__:+q s__:+g s_v_:__
## s_v_cmplxcm -0.691                                                         
## s_v_sttst+q -0.594  0.411                                                  
## s_v_sttst+g -0.559  0.386  0.499                                           
## s_v_sttstcs -0.578  0.399  0.510  0.486                                    
## s_v_qlty_f_ -0.394  0.272 -0.032 -0.046 -0.032                             
## s_v_cm:__+q  0.406 -0.588 -0.683 -0.341 -0.348  0.022                      
## s_v_cm:__+g  0.399 -0.587 -0.356 -0.714 -0.347  0.033  0.506               
## s_v_cmpl:__  0.404 -0.589 -0.357 -0.340 -0.700  0.022  0.507  0.504        
## s_v_cm:____  0.276 -0.406  0.023  0.032  0.022 -0.702 -0.022 -0.017 -0.014
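Because s_iv_statistics enters with four levels, the coefficient table above only shows contrasts against the reference summary. Omnibus F-tests per term (Satterthwaite degrees of freedom, as provided by lmerTest) can be requested directly from the fitted model; a minimal sketch:

# Sketch: Type III omnibus tests for each fixed-effects term of the accessibility model
anova(m000)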
nrow(df)
## [1] 4306
ranef_accessibility <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_structured","s_iv_complex")]

rain <- rain[
  with(rain, order(id)),
]



rain$accessibility <- ranef_accessibility



rain$PLS <- interaction(rain$s_iv_structured, rain$s_iv_complex)
levels(rain$PLS)
## [1] "unstructured.uncomplex" "structured.uncomplex"   "unstructured.complex"   "structured.complex"
levels(rain$PLS) <- c("Unstructured + Uncomplex", "Structured + Uncomplex","Unstructured + Complex", "Structured + Complex")
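interaction() crosses the structure and complexity factors into the four cells plotted below; a quick sanity check (sketch only) is to compare the relabelled cell counts with the 2 x 2 cross-tabulation of the original factors:

# Sketch: the counts of the four relabelled PLS cells should equal the 2 x 2 cross-tab of the source factors
table(rain$PLS)
table(rain$s_iv_structured, rain$s_iv_complex)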





p4 <- ggplot(data = rain, aes(y = accessibility, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = accessibility, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Accessibility") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p4

# Understanding


m000 <- lmer(understanding ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_statistics + s_iv_complex * s_iv_quality_of_evidence, data = df)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: understanding ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_statistics +  
##     s_iv_complex * s_iv_quality_of_evidence
##    Data: df
## 
## REML criterion at convergence: 15806
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6935 -0.4120  0.1394  0.4601  3.0045 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.825887 1.35125 
##  pls      (Intercept) 0.003562 0.05968 
##  Residual             1.141552 1.06843 
## Number of obs: 4271, groups:  id, 2207; pls, 4
## 
## Fixed effects:
##                                                        Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                           5.847e+00  1.110e-01  5.554e+01  52.661  < 2e-16 ***
## s_iv_complexcomplex                                  -1.408e-01  1.606e-01  6.067e+01  -0.876  0.38424    
## s_iv_statisticseff + qual                             3.080e-01  1.285e-01  2.183e+03   2.397  0.01659 *  
## s_iv_statisticseff + gloss                           -6.997e-03  1.351e-01  2.187e+03  -0.052  0.95870    
## s_iv_statisticsqual                                   4.206e-01  1.318e-01  2.192e+03   3.191  0.00144 ** 
## s_iv_quality_of_evidenceextended                      1.619e-01  9.313e-02  2.187e+03   1.738  0.08230 .  
## s_iv_complexcomplex:s_iv_statisticseff + qual         2.583e-01  1.880e-01  2.191e+03   1.374  0.16961    
## s_iv_complexcomplex:s_iv_statisticseff + gloss        6.946e-02  1.893e-01  2.190e+03   0.367  0.71373    
## s_iv_complexcomplex:s_iv_statisticsqual               8.172e-02  1.887e-01  2.194e+03   0.433  0.66495    
## s_iv_complexcomplex:s_iv_quality_of_evidenceextended -7.106e-02  1.328e-01  2.189e+03  -0.535  0.59255    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_cm s_v_+q s_v_+g s_v_st s_v___ s__:+q s__:+g s_v_:__
## s_v_cmplxcm -0.691                                                         
## s_v_sttst+q -0.594  0.410                                                  
## s_v_sttst+g -0.558  0.386  0.499                                           
## s_v_sttstcs -0.579  0.400  0.511  0.486                                    
## s_v_qlty_f_ -0.394  0.272 -0.030 -0.045 -0.029                             
## s_v_cm:__+q  0.406 -0.588 -0.683 -0.341 -0.349  0.021                      
## s_v_cm:__+g  0.398 -0.586 -0.356 -0.714 -0.347  0.032  0.507               
## s_v_cmpl:__  0.404 -0.589 -0.357 -0.340 -0.699  0.020  0.508  0.505        
## s_v_cm:____  0.276 -0.404  0.021  0.031  0.020 -0.701 -0.021 -0.017 -0.013
nrow(df)
## [1] 4306
ranef_understanding <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_structured","s_iv_complex")]

rain <- rain[
  with(rain, order(id)),
]


rain$understanding <- ranef_understanding



rain$PLS <- interaction(rain$s_iv_structured, rain$s_iv_complex)
levels(rain$PLS)
## [1] "unstructured.uncomplex" "structured.uncomplex"   "unstructured.complex"   "structured.complex"
levels(rain$PLS) <- c("Unstructured + Uncomplex", "Structured + Uncomplex","Unstructured + Complex", "Structured + Complex")



p5 <- ggplot(data = rain, aes(y = understanding, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = understanding, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Understanding") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p5

# Empowerment

m000 <- lmer(empowerment ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_statistics + s_iv_complex * s_iv_quality_of_evidence, data = df)

summary(m000)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
## Formula: empowerment ~ (1 | pls) + (1 | id) + s_iv_complex * s_iv_statistics +  
##     s_iv_complex * s_iv_quality_of_evidence
##    Data: df
## 
## REML criterion at convergence: 16149.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3590 -0.4437  0.0318  0.5278  3.0771 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  id       (Intercept) 1.939343 1.39260 
##  pls      (Intercept) 0.002741 0.05235 
##  Residual             1.236556 1.11201 
## Number of obs: 4281, groups:  id, 2209; pls, 4
## 
## Fixed effects:
##                                                        Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)                                           4.956e+00  1.121e-01  8.058e+01  44.198   <2e-16 ***
## s_iv_complexcomplex                                  -1.819e-01  1.625e-01  8.856e+01  -1.119    0.266    
## s_iv_statisticseff + qual                             1.983e-01  1.325e-01  2.194e+03   1.496    0.135    
## s_iv_statisticseff + gloss                           -1.842e-01  1.394e-01  2.193e+03  -1.321    0.187    
## s_iv_statisticsqual                                   2.315e-01  1.361e-01  2.207e+03   1.701    0.089 .  
## s_iv_quality_of_evidenceextended                      1.408e-01  9.615e-02  2.199e+03   1.464    0.143    
## s_iv_complexcomplex:s_iv_statisticseff + qual         2.005e-01  1.939e-01  2.198e+03   1.034    0.301    
## s_iv_complexcomplex:s_iv_statisticseff + gloss        2.682e-01  1.954e-01  2.197e+03   1.373    0.170    
## s_iv_complexcomplex:s_iv_statisticsqual               1.692e-01  1.948e-01  2.204e+03   0.869    0.385    
## s_iv_complexcomplex:s_iv_quality_of_evidenceextended  6.287e-03  1.371e-01  2.198e+03   0.046    0.963    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) s_v_cm s_v_+q s_v_+g s_v_st s_v___ s__:+q s__:+g s_v_:__
## s_v_cmplxcm -0.690                                                         
## s_v_sttst+q -0.605  0.417                                                  
## s_v_sttst+g -0.569  0.393  0.498                                           
## s_v_sttstcs -0.590  0.407  0.510  0.485                                    
## s_v_qlty_f_ -0.401  0.277 -0.032 -0.045 -0.029                             
## s_v_cm:__+q  0.413 -0.599 -0.683 -0.341 -0.348  0.022                      
## s_v_cm:__+g  0.406 -0.597 -0.356 -0.713 -0.346  0.032  0.506               
## s_v_cmpl:__  0.412 -0.600 -0.356 -0.339 -0.699  0.020  0.508  0.504        
## s_v_cm:____  0.282 -0.412  0.023  0.032  0.021 -0.702 -0.022 -0.017 -0.014
nrow(df)
## [1] 4306
ranef_empowerment <- ranef(m000)$id[,1]
ranef_id <- as.numeric(rownames(ranef(m000)$id))

rain <- data[data$id %in% ranef_id,c("id","s_iv_structured","s_iv_complex")]

rain <- rain[
  with(rain, order(id)),
]


rain$empowerment <- ranef_empowerment



rain$PLS <- interaction(rain$s_iv_structured, rain$s_iv_complex)
levels(rain$PLS)
## [1] "unstructured.uncomplex" "structured.uncomplex"   "unstructured.complex"   "structured.complex"
levels(rain$PLS) <- c("Unstructured + Uncomplex", "Structured + Uncomplex","Unstructured + Complex", "Structured + Complex")


p6 <- ggplot(data = rain, aes(y = empowerment, x = PLS, fill = PLS)) +
geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = .8) +
geom_point(aes(y = empowerment, color = PLS), position = position_jitter(width = .15), size = .5, alpha = 0.8) +
geom_boxplot(width = .1, outlier.shape = NA, alpha = 0.5) +
#expand_limits(y = 5.25) +
guides(fill = FALSE) +
guides(color = FALSE) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
# coord_flip() +
theme_bw() +
raincloud_theme + ylab("Empowerment") + scale_x_discrete(guide = guide_axis(angle = 60)) + theme(axis.text.x=element_text(size=10))
p6

require(ggpubr)
library(cowplot)
library(ggimage)

# Combine all six raincloud panels into a single 2 x 3 grid
p9 <- plot_grid(p1, p2, p3, p4, p5, p6, ncol=3, labels=LETTERS[1:6])

p9

# Alternative layout: one captioned row per grouping, stacked vertically
p7 <- plot_grid(p1, p2, p3, ncol=3, labels=LETTERS[1:3])
p7 <- p7 + labs(caption="STATISTICAL TERMS") +
  theme(plot.caption = element_text(hjust=0.5, size=rel(1.2)))
p8 <- plot_grid(p4, p5, p6, ncol=3, labels=LETTERS[4:6]) + labs(caption="STRUCTURE X COMPLEXITY") + theme(plot.caption = element_text(hjust=0.5, size=rel(1.2)))

p10 <- plot_grid(p7, p8, ncol=1)

p10

#ggsave("Figure_Study_2.tiff", p10, width = 16, height = 12, compression = "lzw")
#ggsave("Figure_Study_2.png", p10, width = 16, height = 12)