#######################################################
# Jamieson, Mewhort, & Hockley
# A computational account of the production effect
# Canadian Journal of Experimental Psychology
# Simulation 1: The mixed-list production effect
#######################################################

#######################################################
# Clear the work environment before running the
# experiment; this avoids any conflicts with functions
# and variables left over from previous simulations
#######################################################

rm(list = ls())

#######################################################
# Activation function: the similarity between probe p
# and trace t (their dot product over the number of
# nonzero features) raised to the third power
#######################################################

Activation <- function (p, t) {
  S <- sum(p * t) / sum(abs(p) + abs(t) > 0)
  return(S^3)
}

#######################################################
# Iterative retrieval function: P = probe, M = memory,
# and n = number of iterations. On every iteration
# after the first, the normalized echo content replaces
# the probe, so features stored at study but absent
# from the probe can be recovered and re-used
#######################################################

Compute_intensity <- function (P, M, n) {
  for (i in 1:n) {
    if (i > 1) P <- C / max(abs(C))
    I <- 0
    C <- numeric(length(P))
    for (j in 1:nrow(M)) {
      A <- Activation(P, M[j, ])
      I <- I + A           # echo intensity: summed activations
      C <- C + A * M[j, ]  # echo content: activation-weighted sum of traces
    }
  }
  return(I)
}
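#######################################################
# Illustrative check (added for exposition; not part of
# the original simulation, and safe to delete). A
# studied probe should return a much higher echo
# intensity than a novel probe. The vector length of 30
# is an assumption chosen to match N_features below
#######################################################

Demo_memory <- matrix(sample(c(+1,-1), 5*30, replace=TRUE), 5, 30)
print(Compute_intensity(Demo_memory[1, ], Demo_memory, 1))                    # studied probe: near 1
print(Compute_intensity(sample(c(+1,-1), 30, replace=TRUE), Demo_memory, 1)) # novel probe: near 0
rm(Demo_memory)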
#######################################################
# Encode the matrix M at learning rate L: each feature
# is stored intact with probability L and lost (set to
# zero) otherwise
#######################################################

Encode <- function (M, L) {
  for (i in 1:nrow(M)) {
    for (j in 1:ncol(M)) {
      if (runif(1, 0, 1) < (1 - L)) M[i, j] <- 0
    }
  }
  return(M)
}
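#######################################################
# Illustrative check (added for exposition; not part of
# the original simulation, and safe to delete). Because
# Encode() retains each feature with probability L,
# roughly a proportion L of the entries should survive
# encoding
#######################################################

print(mean(Encode(matrix(1, 100, 100), 0.6) != 0))  # should be close to 0.6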
#######################################################
# Plot error bars
# From http://monkeysuncle.stanford.edu/?p=485
#######################################################

error.bar <- function (x, y, upper, lower = upper, length = 0.1, ...) {
  if (length(x) != length(y) | length(y) != length(lower) | length(lower) != length(upper))
    stop("vectors must be same length")
  arrows(x, y + upper, x, y - lower, angle = 90, code = 3, length = length, ...)
}

#######################################################
# Simulation parameters
#######################################################

N_features    <- 30
N_retrievals  <- 3
N_learning    <- 5
N_simulations <- 100
N_words       <- 80
N_produced    <- N_words / 2

#######################################################
# Make data structures to store echo intensities over
# all simulations
#######################################################

Positive_results <- array(0, dim = c(N_learning, N_simulations, N_words))
Negative_results <- array(0, dim = c(N_learning, N_simulations, N_words))

#######################################################
# Start the simulation
#######################################################

for (i in 1:N_learning) {

  #######################################################
  # Assign learning rate
  #######################################################

  Learning_rate <- i * 1 / N_learning

  #######################################################
  # Compute N_simulations replications for each level of
  # the learning rate
  #######################################################

  for (j in 1:N_simulations) {

    #######################################################
    # Print information about the simulation number
    #######################################################

    print(paste("Simulating subject number", j, "of", N_simulations, "at L =", Learning_rate))

    #######################################################
    # Generate representations for positives and negatives.
    # Both Positives and Negatives are matrices where rows
    # correspond to items and columns correspond to features
    #######################################################

    Positives <- matrix(sample(c(+1, -1), N_words * N_features, replace = TRUE), N_words, N_features)
    Negatives <- matrix(sample(c(+1, -1), N_words * N_features, replace = TRUE), N_words, N_features)

    #######################################################
    # Store positives to memory at L = Learning_rate
    #######################################################

    Memory <- Encode(Positives, Learning_rate)

    #######################################################
    # Introduce the production/generation/read
    # manipulations by deleting unstored features from
    # traces in memory: base features = 1..20, production
    # features = 21..25, and generation features = 26..30.
    # Produced items lose only the generation features;
    # unproduced items lose both production and generation
    # features
    #######################################################

    Memory[1:N_produced, 26:30] <- 0
    Memory[(N_produced + 1):N_words, 21:30] <- 0

    #######################################################
    # Compute and store the echo intensity for each
    # positive and negative probe by presenting just the
    # base features in each probe and using the iterative
    # retrieval algorithm to pull out and make use of the
    # production features stored at study
    #######################################################

    Positives[, 21:N_features] <- 0
    Negatives[, 21:N_features] <- 0

    for (k in 1:N_words) {
      Positive_results[i, j, k] <- Compute_intensity(Positives[k, ], Memory, N_retrievals)
      Negative_results[i, j, k] <- Compute_intensity(Negatives[k, ], Memory, N_retrievals)
    }
  }
}

#######################################################
# Convert echo intensities to old/new decisions: a probe
# is called "old" when its intensity exceeds a criterion
# set at the mean intensity over all positive and
# negative probes for that simulated subject
#######################################################

for (i in 1:N_learning) {
  for (j in 1:N_simulations) {
    Criterion <- mean(c(Positive_results[i, j, ], Negative_results[i, j, ]))
    Positive_results[i, j, ] <- as.numeric(Positive_results[i, j, ] > Criterion)
    Negative_results[i, j, ] <- as.numeric(Negative_results[i, j, ] > Criterion)
  }
}

#######################################################
# Summarize the old/new data as a function of both
# learning rate and probe type: slices 1-3 hold the
# percentage of "old" responses to produced targets,
# unproduced targets, and foils, respectively
#######################################################

Percent_old <- array(0, dim = c(N_learning, N_simulations, 3))

for (j in 1:N_learning) {
  for (k in 1:N_simulations) {
    Percent_old[j, k, 1] <- mean(Positive_results[j, k, 1:N_produced]) * 100
    Percent_old[j, k, 2] <- mean(Positive_results[j, k, (N_produced + 1):N_words]) * 100
    Percent_old[j, k, 3] <- mean(Negative_results[j, k, 1:N_words]) * 100
  }
}

#######################################################
# Construct table of means (the column labels assume
# the five learning rates 0.2, 0.4, 0.6, 0.8, and 1.0)
#######################################################

MixedList.means <- matrix(0, 3, N_learning)
rownames(MixedList.means) <- c("Produced", "Unproduced", "Foils")
colnames(MixedList.means) <- c("0.2", "0.4", "0.6", "0.8", "1.0")

for (i in 1:N_learning) {
  MixedList.means[1, i] <- mean(Percent_old[i, , 1])
  MixedList.means[2, i] <- mean(Percent_old[i, , 2])
  MixedList.means[3, i] <- mean(Percent_old[i, , 3])
}

#######################################################
# Construct table of standard deviations
#######################################################

MixedList.sd <- matrix(0, 3, N_learning)
rownames(MixedList.sd) <- c("Produced", "Unproduced", "Foils")
colnames(MixedList.sd) <- c("0.2", "0.4", "0.6", "0.8", "1.0")

for (i in 1:N_learning) {
  MixedList.sd[1, i] <- sd(Percent_old[i, , 1])
  MixedList.sd[2, i] <- sd(Percent_old[i, , 2])
  MixedList.sd[3, i] <- sd(Percent_old[i, , 3])
}

#######################################################
# Graph the results to match the outcome in Figure 1 of
# the manuscript: the top panel shows percent "old"
# responses by probe type, and the bottom panel shows
# the production advantage (produced minus unproduced)
#######################################################

layout(matrix(c(1, 2), 2, 1, byrow = TRUE))

Graph <- barplot(MixedList.means, beside = TRUE,
                 legend = rownames(MixedList.means),
                 args.legend = list(x = 5.5, y = 130, bty = "n", cex = .9),
                 main = "", xlab = "", ylab = "Percent 'old' responses",
                 col = c("gray35", "gray80", "white"), ylim = c(0, 100))
error.bar(Graph, MixedList.means, MixedList.sd)

Graph <- barplot(MixedList.means[1, ] - MixedList.means[2, ], beside = TRUE,
                 main = "", xlab = "Encoding quality (L)",
                 ylab = "Production advantage", col = c("gray35"),
                 ylim = c(0, 27))
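#######################################################
# Optional summary (added for exposition; not part of
# the original script): print the tables of means and
# standard deviations to the console, rounded for
# readability
#######################################################

print(round(MixedList.means, 1))
print(round(MixedList.sd, 1))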