Question

In the following code I compare the kernel density estimate of simulated gamma data to the gamma density at the MLE computed from that same data. I want to compute the integral of the absolute difference between the two densities (the area between the curves) and report that as my error.
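
In symbols, with $\hat f$ the kernel density estimate of the simulated data and $g(\cdot;\hat\alpha,\hat\beta)$ the gamma density at the MLE, the error I am after is

$$ \text{error} = \int_{\min(x)}^{\max(x)} \bigl| \hat f(t) - g(t;\hat\alpha,\hat\beta) \bigr| \, dt. $$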

The graphs come out okay, so I'm not sure why the error calculation doesn't work.

Thank you in advance for any help!

library("maxLik");

#GAMMA MLE Process
GammaLogLikelihood <- function(t){
    # log-likelihood for Gamma(shape = alpha, rate = beta), i.e. scale = 1/beta;
    # x is the data vector from the enclosing (global) environment
    alpha <- t[1]
    beta <- t[2]
    loglik <- sum(dgamma(x, shape = alpha, scale = 1/beta, log = TRUE))
    return(loglik)
}
GetGammaParameters<-function(x){
    # note: GammaLogLikelihood() reads x from the global environment, so this
    # works only because the same data vector x is passed in here as well
    start <- c(mean(x), mean(x))   # crude starting values for (alpha, beta)
    GammaEst <- maxLik(GammaLogLikelihood, start = start)
    return(GammaEst$estimate)
}

#Simulation
x<-rgamma(100,3,2);
mleEst<-GetGammaParameters(x);

#Graph
color2 <- rgb(1,0,0,0.2)
color1 <- rgb(0,0,1,0.2)
xax<-seq(0,max(x),.01);
plot(density(x),type="l",xlim=c(0,max(x)),ylim=c(0,1.1));
lines(xax,dgamma(xax,mleEst[1],mleEst[2]),type="l",lty=2);
polygon(density(x),density=-1,col=color1);
polygon(c(xax,max(x)),c(dgamma(xax,mleEst[1],mleEst[2]),0),density=-1,col=color2);

#Find Error
finderror<-function(data,est,l,u){
    integrand<-function(x){
        abs(data(x)-est(x));
    }
    integrate(integrand, lower = l, upper = u)
}
dataDensity<-function(x){
    density(x)
}
estDensity<-function(x){
    dgamma(x,mleEst[1],mleEst[2]);
}
finderror(dataDensity,estDensity,min(x),max(x));
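
I suspect (though I'm not certain) that the problem is in how I pass the data density: density(x) returns a density object (a list with $x and $y components), not a function that can be evaluated at arbitrary points, so inside integrand() the call data(x) hands integrate() a whole object rather than numeric values. A quick check of what integrate() actually receives:

class(density(x))   # returns "density", a list with $x and $y, not a function f(t)
# so abs(data(x) - est(x)) tries to subtract numbers from a density object and fails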

No correct solution

Other tips

library(sfsmisc);

#Find Error
finderror<-function(densx,estDensity){
    # integrate |KDE - fitted gamma| over the grid stored in the density object
    newy<-abs(densx$y-estDensity(densx$x));
    integrate.xy(densx$x,newy);
}

estDensity<-function(x){
    dgamma(x,mleEst[1],mleEst[2]);
}

densx<-density(x);   # kernel density estimate of the simulated data
finderror(densx,estDensity);

This solved it.
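
For anyone else who lands here: as far as I understand, integrate.xy() from sfsmisc spline-interpolates the (x, y) pairs and integrates the interpolant, so it works directly on the grid stored in the density object. If you prefer to stay in base R, the same idea appears to work by turning the KDE into a function with approxfun() and reusing integrate(). A rough sketch using the x and mleEst objects from above (not carefully tested):

# base-R alternative: make the KDE evaluable at arbitrary points
kdeFun <- approxfun(density(x))   # linear interpolation of the KDE grid
integrand <- function(t) abs(kdeFun(t) - dgamma(t, mleEst[1], mleEst[2]))
integrate(integrand, lower = min(x), upper = max(x))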

License: CC-BY-SA with attribution