# STATS 4CI3/6CI3 Winter 2019
# March 27, 2019
# Simulation Study for Parametric Bootstrap Example

set.seed(27032019)

lambda <- 1/10
n <- 10

# Let us just look at one sample first.
x <- rexp(n, rate=lambda)
lambda.hat <- 1/mean(x)

# The true bias and standard deviation of the estimator.
bias.true <- lambda/(n-1)
sd.true <- n*lambda/((n-1)*sqrt(n-2))

# The bias and standard error based on asymptotic results for
# the maximum likelihood estimator.
bias.asy <- 0
se.asy <- lambda.hat/sqrt(n)

# Now we will implement a parametric bootstrap.
R <- 100000
x.boot <- matrix(rexp(R*n, rate=lambda.hat), ncol=n)
lambda.star <- 1/rowMeans(x.boot)
bias.boot <- mean(lambda.star) - lambda.hat
se.boot <- sd(lambda.star)

# Now let us run a simulation study to examine this over multiple
# samples to get a better idea of the characteristics.
N <- 1000
samples <- matrix(rexp(n*N, rate=lambda), ncol=n)
estimates <- 1/rowMeans(samples)

# The bias and standard deviation estimated from the
# simulated samples.
bias.mc <- mean(estimates) - lambda
sd.mc <- sd(estimates)

# The bias and standard error using the exact distribution
# of the estimator.
bias.exact <- estimates/(n-1)
se.exact <- n*estimates/((n-1)*sqrt(n-2))

# The bias and standard error based on asymptotic results for
# the maximum likelihood estimator.
bias.asy <- 0
se.asy <- estimates/sqrt(n)

# The Monte Carlo bootstrap estimates of bias and standard error,
# one parametric bootstrap per simulated sample.
R <- 10000
bias.boot <- rep(NA, N)
se.boot <- rep(NA, N)
for (i in 1:N) {
  x.boot <- matrix(rexp(R*n, rate=estimates[i]), ncol=n)
  lambda.star <- 1/rowMeans(x.boot)
  bias.boot[i] <- mean(lambda.star) - estimates[i]
  se.boot[i] <- sd(lambda.star)
}

# Compare the exact and bootstrap bias estimates across the simulated samples.
plot(bias.exact, bias.boot)
abline(0, 1, col="red")
bias.true
mean(bias.exact)
mean(bias.boot)

# Compare the exact and bootstrap standard error estimates.
plot(se.exact, se.boot)
abline(0, 1, col="red", lwd=2)
sd.true
mean(se.asy)
mean(se.exact)
mean(se.boot)
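
# A minimal follow-up sketch, not part of the original handout: collect the
# average bias and standard-error figures from each approach into one data
# frame for a side-by-side comparison. The object name `comparison` and the
# method labels are my own additions, and this assumes the full script above
# has already been run in the current session.
comparison <- data.frame(
  method = c("true", "exact (plug-in)", "asymptotic", "parametric bootstrap"),
  bias   = c(bias.true, mean(bias.exact), bias.asy, mean(bias.boot)),
  se     = c(sd.true, mean(se.exact), mean(se.asy), mean(se.boot))
)
print(comparison)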