In this report we define five exclusion criteria. First, exclude participants with a completion time lower than a third of the median value of the Length of Interview [\(LOI_{subject} < {Mdn(LOI) \over 3}\)]. Second, exclude participants whose Country group is different from their Residence OR Native Language [\((Residence | Native~Language) \neq Country\)]. Third, exclude participants who report a value below 4 (i.e., 1-3) on the “attentional check” question (i.e., “How serious have you been about filling in the survey?”) [\(attentional~check < 4\)]. Fourth, exclude participants who report a value below 3 (i.e., 1-2) on the “attentional check” question [\(attentional~check < 3\)]. Fifth, exclude countries with a sample size lower than 250 participants [\(Sample < 250\)]. In this report we will run the entire analysis protocol for each of these five exclusion criteria.
In this section we show the effect size of each study for each country. We use log Odds Ratios (OR) to quantify the effect size of binary variables, while we use Standardized Mean Differences (SMD) as a measure of effect size of continuous variables. Each dot corresponds to a country. The size of the dots indicates the inverse of the standard error (i.e., 1/SE). The red horizontal lines indicate the effect size calculated using the results reported in the original papers. For the Drink and Gym study, we could not calculate the effect size of the original papers, as information about the pooled standard deviation is not reported. To facilitate the interpretation of the results, the variables are coded in such a way that the effects reported in the original papers are positive in all studies.
In this section we perform a Bayesian meta-analysis. The purple dots indicate the population-level estimates, while the gray dots indicate the estimates of each country. Our results show that the population effect replicates the original findings for each study, regardless of the exclusion criterion used.
In this section we perform a Bayesian unpooled analysis. We will compute the effect size for each scenario and country independently. We will then compare each of them with the effect sizes found in the original papers.
# Modelling data: standardized GDP per capita (x) against effect size (y),
# with the binomial-family study excluded.
d <- df %>%
  mutate(x = zscore(GDPxCapita), y = choensd) %>%
  filter(family != "binomial1")

# Fit (or load from cache) a non-linear logistic-growth model of effect size
# as a function of standardized GDP per capita, with study-level random
# effects on all three non-linear parameters.
fit_and_save_nlbmGDPxCapita <- function() {
  cache_path <- "scr/country-level-analysis/models/Others/nlbmGDPxCapita.rds"
  if (file.exists(cache_path)) {
    model <- readRDS(file = cache_path)
  } else {
    # Non-linear logistic formula: K = asymptote, r = growth rate,
    # x0 = inflection point; each gets a study-level random intercept.
    nlform <- bf(
      y ~ K / (1 + exp(-r * (x - x0))),
      K ~ 1 + (1 | study),   # random effect for K
      r ~ 1 + (1 | study),   # random effect for r
      x0 ~ 1 + (1 | study),  # random effect for x0
      nl = TRUE
    )

    # Weakly informative priors (adjust these based on domain knowledge).
    priors <- c(
      prior(normal(1, 1), nlpar = "K"),
      prior(normal(0, 2.5), nlpar = "r"),
      prior(normal(-1, 1), nlpar = "x0")
    )

    model <- brm(
      formula = nlform,
      data = d,
      prior = priors,
      family = gaussian(),
      control = list(adapt_delta = 0.95),
      chains = 4,
      iter = 20000,
      cores = 4,
      seed = 123
    )
    saveRDS(model, file = cache_path)
  }
  model
}

bm1 <- fit_and_save_nlbmGDPxCapita()
### Linear GDP -----

# Standardize the new input values on the scale of the original data.
mean_GDPxCapita <- mean(df$GDPxCapita, na.rm = TRUE)
sd_GDPxCapita <- sd(df$GDPxCapita, na.rm = TRUE)
new_GDPxCapita_values <- seq(0, 90000, length = 2)  # replace with actual values
new_zGDPxCapita <- (new_GDPxCapita_values - mean_GDPxCapita) / sd_GDPxCapita

# Population-level predictions (random effects excluded via re_formula = NA),
# with all other standardized covariates held at their mean (0).
new_data <- data.frame(
  zGDPxCapita = new_zGDPxCapita,
  zFinancialLiteracy = 0,
  zEducation = 0,
  zAge = 0,
  zIncome = 0
)
predictions <- predict(mAll, newdata = new_data, re_formula = NA)

# Summary statistics of the outcome on its original scale.
mean_choensd <- mean(df$choensd, na.rm = TRUE)
sd_choensd <- sd(df$choensd, na.rm = TRUE)

preddf <- cbind(new_data, predictions) %>%
  mutate(GDPxCapita = new_GDPxCapita_values)

# Country-level mean effect size against GDP per capita, with the model's
# population-level regression line and one flag per country.
plot_gdp <- df %>%
  filter(family != "binomial1") %>%
  group_by(Country) %>%
  mutate(choensd = mean(choensd)) %>%
  filter(row_number() == 1) %>%
  ggplot(aes(GDPxCapita, choensd)) +
  geom_line(
    data = preddf, aes(GDPxCapita, Estimate),
    linewidth = 2, color = "#5f7e96"
  ) +
  geom_point(size = 6, shape = 1) +
  geom_flag(aes(country = flag)) +
  theme_pubr() +
  coord_cartesian(xlim = c(-5000, 102000), ylim = c(-0.05, 1.1)) +
  scale_x_continuous(
    breaks = c(0, 50000, 100000),
    labels = c("$0", "$50K\n", "$100K"),
    guide = "prism_offset"
  ) +
  scale_y_continuous(breaks = seq(0., .9, 0.3), guide = "prism_offset") +
  xlab("GDP per Capita\n") +
  ylab("Mental Accounting\n(Choen's d)") +
  theme(text = element_text(size = 15, family = "Arial"))

### Betas All Linear -----
pp <- prepare_predictions(mAll)

# Study-level slopes: fixed effect plus the study-specific random offset for
# each of the five standardized predictors (rows 2-6 of fixef()).
re <- data.frame(
  re = c(
    fixef(mAll)[2, 1] + ranef(mAll)$study[, 1, "zGDPxCapita"],
    fixef(mAll)[3, 1] + ranef(mAll)$study[, 1, "zFinancialLiteracy"],
    fixef(mAll)[4, 1] + ranef(mAll)$study[, 1, "zEducation"],
    fixef(mAll)[5, 1] + ranef(mAll)$study[, 1, "zAge"],
    fixef(mAll)[6, 1] + ranef(mAll)$study[, 1, "zIncome"]
  ),
  # Horizontal positions: one cluster per predictor, nudged 0.2 left.
  x = c(
    rep(1 - 0.2, 6), rep(2 - 0.2, 6), rep(3 - 0.2, 6),
    rep(4 - 0.2, 6), rep(5 - 0.2, 6)
  ),
  study = rep(names(ranef(mAll)$study[, 1, "zFinancialLiteracy"]), 5)
) %>%
  mutate(
    study = factor(
      study,
      labels = c("Game", "Jacket", "Play", "Plane", "Drink", "Gym"),
      levels = c("Game", "Jacket", "Play", "Plane", "Drink", "Gym"),
    )
  )

# Fixed-effect point estimates with credible intervals for the same predictors.
fe <- data.frame(
  fe = fixef(mAll)[2:6, 1],
  low = ci(mAll)$CI_low[2:6],
  high = ci(mAll)$CI_high[2:6]
)

# Posterior densities of the fixed-effect betas, overlaid with the interval
# estimates and the beeswarm of per-study slopes.
plot_betas <- pp$dpars$mu$fe$b %>%
  as_tibble() %>%
  select(-b_Intercept) %>%
  reshape2::melt() %>%
  mutate(issign = ifelse(variable == "b_zGDPxCapita", "yes", "no")) %>%
  ggplot(aes(variable, value)) +
  stat_halfeye(
    aes(fill = issign),
    adjust = .5,           # bandwidth adjustment
    justification = -0.2,  # move density to the right
    width = .6,
    .width = 0,            # remove the slab interval
    point_colour = NA
  ) +
  geom_pointinterval(
    data = fe,
    aes(1:5 + 0.1, y = fe, ymin = low, ymax = high),
    color = c("#234666", rep("#a85100", 4))
  ) +
  geom_beeswarm(
    data = re, aes(x, re, color = study),
    cex = 2, size = 2, shape = 1, stroke = 1
  ) +
  geom_line(
    data = data.frame(x = c(0.2, 5.5), y = c(0, 0)),
    aes(x, y), linewidth = 0.1
  ) +
  scale_color_manual(
    values = colorspace::qualitative_hcl(7, "Dark 3")[c(2, 4, 7, 6, 1, 3)]
  ) +
  theme_pubr() +
  coord_cartesian(xlim = c(0.9, 5.1), ylim = c(-0.3, .45)) +
  scale_x_discrete(
    labels = c("GDP per\nCapita", "Finantial\nLiteracy", "Education", "Age", "Income"),
    guide = "prism_offset"
  ) +
  scale_y_continuous(breaks = c(-0.25, 0, 0.25), guide = "prism_offset") +
  theme(
    text = element_text(size = 15, family = "Arial"),
    legend.position = c(0.6, 0.75),
    legend.direction = "horizontal",
    axis.text.x = element_text(angle = 45, vjust = 0.5, hjust = .5)
  ) +
  labs(x = NULL, y = "Beta Estimate ", color = NULL) +
  guides(fill = "none") +
  scale_fill_manual(values = c("#f6f0e8", "#E7ECEF"))

cowplot::plot_grid(
  plot_betas, plot_gdp,
  rel_widths = c(0.55, 0.45),
  labels = LETTERS[1:2],
  label_size = 20,
  label_y = 0.93
)
Show model fit
# Fit (or load from cache) a linear model of effect size on standardized
# GDP per capita, with study-level random intercepts and slopes.
fit_and_save_mGDPxCapita_Others <- function() {
  cache_path <- "scr/country-level-analysis/models/Others/mGDPxCapita.rds"
  if (file.exists(cache_path)) {
    model <- readRDS(file = cache_path)
  } else {
    model <- brm(
      formula = choensd ~ zGDPxCapita + (zGDPxCapita | study),
      prior = prior_string("normal(0,2.5)", class = "b"),
      data = df,
      iter = 20000,
      refresh = 100,
      cores = 4
    )
    saveRDS(model, file = cache_path)
  }
  model
}

mGDPxCapita <- fit_and_save_mGDPxCapita_Others()

## Standardize New Input Data ----
# Means and standard deviations of the original data.
mean_GDPxCapita <- mean(df$GDPxCapita, na.rm = TRUE)
sd_GDPxCapita <- sd(df$GDPxCapita, na.rm = TRUE)

# New input data (original scale), then standardized for prediction.
new_GDPxCapita_values <- seq(0, 1e5, length = 2)  # replace with actual values
new_zGDPxCapita <- (new_GDPxCapita_values - mean_GDPxCapita) / sd_GDPxCapita

# Population-level predictions (random effects excluded via re_formula = NA).
new_data <- data.frame(zGDPxCapita = new_zGDPxCapita)
predictions <- predict(mGDPxCapita, newdata = new_data, re_formula = NA)

# Summary statistics of the outcome on its original scale.
mean_choensd <- mean(df$choensd, na.rm = TRUE)
sd_choensd <- sd(df$choensd, na.rm = TRUE)

preddf <- cbind(new_data, predictions) %>%
  mutate(GDPxCapita = new_GDPxCapita_values)
Plot
## Compare Linear and Non-Linear Model -----
loo1 <- loo(mGDPxCapita)  # linear model
loo2 <- loo(bm1)          # non-linear model

df_loo <- loo_compare(loo1, loo2) %>% as.data.frame()

# BUG FIX: loo_compare() sorts its rows by ELPD (best model first), so
# assigning labels by position can attach "Non-Linear"/"Linear" to the wrong
# model. Map labels through the row names loo_compare() produces instead
# (these are the argument names, or the brms model names, depending on
# version); fall back to leaving the row names untouched if neither matches.
loo_labels <- c(
  loo1 = "Linear", mGDPxCapita = "Linear",
  loo2 = "Non-Linear", bm1 = "Non-Linear"
)
mapped_labels <- loo_labels[row.names(df_loo)]
if (!anyNA(mapped_labels)) {
  row.names(df_loo) <- mapped_labels
}

df_loo %>% kable
# One fixed color per scenario so hues are consistent across figures.
scenario_colors <- colorspace::qualitative_hcl(7, "Dark 3")[c(5, 2, 4, 7, 6, 1, 3)]
names(scenario_colors) <- c("MrAB", "Game", "Play", "Drink", "Jacket", "Plane", "Gym")

# Interactive 3D scatter of the MDS solution: one labelled point per scenario,
# colored with the palette above, axes fixed to a common [-4, 4] range.
plot_ly(
  mds_df,
  x = ~x, y = ~y, z = ~z,
  type = "scatter3d",
  mode = "markers+text",
  text = ~scenario,
  textposition = "top center",
  color = ~scenario,       # map scenario to color
  colors = scenario_colors,
  marker = list(size = 5)
) %>%
  layout(
    showlegend = FALSE,
    scene = list(
      xaxis = list(title = "Dimension 1", tickvals = -3:3, range = c(-4, 4)),
      yaxis = list(title = "Dimension 2", tickvals = -3:3, range = c(-4, 4)),
      zaxis = list(title = "Dimension 3", tickvals = -3:3, range = c(-4, 4))
    )
  )
Individual-level Analysis
In this section we perform a Bayesian unpooled analysis to explore the role of financial literacy (i.e., are people with higher levels of financial literacy less susceptible to the mental accounting effect?), age, gender, and income.
# Join the per-scenario bias scores into one wide data frame keyed by subject.
list_of_dfs <- list(
  mrab1_bias, mrab2_bias, game_bias, drink_bias,
  jacket_bias, play_bias, gym_bias, plane_bias
)
df_bias <- reduce(list_of_dfs, ~ inner_join(.x, .y, by = "subject"))

library(corrplot)

# Derive the number of bias columns from the input list instead of
# hard-coding 8 / 2:9 (column 1 of df_bias is `subject`); also use `<-`
# for assignment throughout.
n_bias <- length(list_of_dfs)
bias_cols <- seq_len(n_bias) + 1

M <- cor(as.matrix(na.omit(df_bias[, bias_cols])))
testRes <- cor.mtest(df_bias[bias_cols], conf.level = 0.95)

# Diverging palette clipped at the +/-0.3 color limit: a blue-white-red ramp
# padded with solid endpoint colors so correlations beyond the limit stay
# fully saturated.
num_colors_each_side <- 150  # colors per half of the ramp
custom_palette <- c(
  rep("#053061", 300),
  colorRampPalette(c("#053061", "white"))(num_colors_each_side),
  colorRampPalette(c("white", "#67001F"))(num_colors_each_side),
  rep("#67001F", 300)
)

# Zero the diagonal so the 1s do not exceed col.lim; the diagonal is hidden
# anyway (diag = FALSE). Significance stars mark p < .05/.01/.001.
corrplot(
  M * (1 - diag(n_bias)),
  p.mat = testRes$p,
  method = 'color',
  diag = FALSE,
  type = 'lower',
  sig.level = c(0.001, 0.01, 0.05),
  pch.cex = 0.9,
  col.lim = c(-0.3, 0.3),
  insig = 'label_sig',
  pch.col = 'grey20',
  col = custom_palette,
  tl.col = 'black',
  tl.srt = 45,
  tl.offset = 0.8
)