# Package name marker (legacy mechanism; normally written by R's package
# installer rather than by hand).
.packageName <- "merror"
"beta.bar" <-
function (x) 
{
# compute beta.bar (Jaech, p. 184)
# model for ith instrument and kth item:
# x[i,k] = alpha[i] + beta[i]mu[k] + epsilon[i,k]

  # no. of instruments
  N <- dim(x)[2]
  denominator <- vector("numeric",N)
  
  s <- var(x)
 
  numerator <- (apply(s,1,prod)/diag(s))^(1/N)
  
  for(i in 1:N)
  {
    denominator[i] <- prod(s[-i,-i][upper.tri(s[-i,-i])])^(2/(N*(N-2)))
  }
  
  numerator/denominator # beta[i]
  
}
"cb.pd" <-
function(x, conf.level=0.95, M=40)
{
# Maximum Likelihood precision estimates for Constant Bias Model using Paired Data
# ME (method of moments estimator) and MLE are the same for i=3 instruments
#  except for a factor of (n-1)/n: MLE = (n-1)/n * ME
#
# Using paired differences forces Constant Bias model
#
# This function first computes the Grubbs ME and then uses it as the starting
#   point for iteratively computing the MLEs (required for N > 3)
# 
#
# Jaech 1985, Chapters 3, 4, and 5 p. 144 in particular, eq. 5.3.1
#
# Written for any no. of methods N
#
# x[i,k] = alpha[i] + beta[i]*mu[k] + epsilon[i,k]
#   with beta[1] = beta[2] = ... = beta[N]
#   N = no. of methods
#   n = no. of items
# 

  n <- dim(x)[1]
  N <- dim(x)[2]

#  Compute alphas for unconstrained betas (NCB) and betas=1 (CB) cases
#    for completeness 

   alpha.cb  <- mean(x) - rep(1,N)*mean(mean(x))
   alpha.ncb <- mean(x) - beta.bar(x)*mean(mean(x))

# compute paired differences 

  y <- array(NA,c(N,N,n)) # not all elements are used
  
  for(i in 1:(N-1))
  {
    for(j in (i+1):N)
    {
      y[i,j,] <- x[,i] - x[,j]
    }
  }
  
# print(y[1,2,])

# compute variances for each paired difference

  V <- matrix(NA,N,N) # not all elements are used
  
  for(i in 1:(N-1))
  {
    for(j in (i+1):N)
    {
      V[i,j] <- var(y[i,j,])
      V[j,i] <- var(y[i,j,])
    }
  }
  
#  print(V)

# Compute Grubbs (Squared) Precions estimates - stored in r

  S <- vector("numeric",N)
  
  for(i in 1:N)
  {
    S[i] <- 0
    for(j in 1:N)
    {
      if(j!=i) S[i] <- S[i] + V[i,j]
    }
  }
  
# print(S)
  
  VT <- sum(S)/2  

  r <- vector("numeric",N)
  
  for(i in 1:N)
  {
    r[i] <- ((N-1)*S[i] - VT)/((N-1)*(N-2))
  }

# cat("\nGrubbs initial r =",r)

  initial.r <- r

# Use Grubbs MEs for squared precisions to find MLEs interatively

# compute MLE

  for(i in 1:M) r <- mle(V, r, n)
# cat("\nr =",r)

# compute squared se's for MLE
  sigma2.se2 <- mle.se2(V,r,n)
# cat("\nsigma2.se2 =",sigma2.se2)

# compute degrees of freedom

  df <- 2*r^2/sigma2.se2
  
# cat("\ndf =",df,"\n")

# compute confidence intervals

  sig.level <- 1 - conf.level

  lb <- df*r/qchisq(1-sig.level/2,df)
  ub <- df*r/qchisq(sig.level/2,df)

  sigma.table <- data.frame(n=rep(n,N),sigma=sqrt(r),sigma.se=sigma2.se2^(1/4),
    alpha.cb=alpha.cb,alpha.ncb=alpha.ncb,
    beta=rep(1,N),df=df,chisq.low=qchisq(sig.level/2,df),
    chisq.upper=qchisq(1-sig.level/2,df),lb=sqrt(lb),ub=sqrt(ub))
      
  dimnames(sigma.table)[[1]] <- names(x)

# return all the table and all the pieces

 
list(
    conf.level=conf.level,
    sigma.table=sigma.table,
    n.items=n,
    N.methods=N,
    Grubbs.initial.sigma2=initial.r,
    sigma2=r,
    sigma2.se2=sigma2.se2,
    alpha.cb=alpha.cb,
    alpha.ncb=alpha.ncb,
    beta=rep(1,N),df=df,
    chisq.low=qchisq(sig.level/2,df),
    chisq.upper=qchisq(1-sig.level/2,df),
    lb=lb,
    ub=ub
    )
  
}
"errors.cb" <-
function(x) { ncb.od(x)$errors.cb }
"errors.nb" <-
function(x) { ncb.od(x)$errors.nb }
"errors.ncb" <-
function(x) { ncb.od(x)$errors.ncb }
"lrt" <-
function(x,M=40) 
{ 
# Likelihood-ratio test for beta.bars for NonConstant Bias model using MLE
# Jaech, pp. 204-205
# sigma2.cb and sigma.mu2.cb and sigma2.ncb and sigma.mu2.ncb should be the MLE

  N <- dim(x)[2]
  n <- dim(x)[1]
  sig <- matrix(NA,N,N)
  beta.bars <- beta.bar(x)
  
  ncb.model <- ncb.od(x,M=M)
  cb.model  <- ncb.od(x,beta=rep(1,N),M=M)

  sigma2.ncb <- ncb.model$sigma2
  sigma.mu2.ncb <- ncb.model$sigma.mu2  
  
  sigma2.cb <- cb.model$sigma2
  sigma.mu2.cb <- cb.model$sigma.mu2  
 
# cat("\nbeta.bars=",beta.bars)
# cat("\nsigma.mu2.ncb=",sigma.mu2.ncb)
# cat("\nsigma.mu2.cb=",sigma.mu2.cb)
# cat("\nsigma2.ncb=",sigma2.ncb)
# cat("\nsigma2.cb=",sigma2.cb)

  # Likelihood L.mle for ncb mle
  
  a <- beta.bars*sqrt(sigma.mu2.ncb)
  
# cat("\na=",a)

  V <- prod(sigma2.ncb)
  Q <- V*(sum(a^2/sigma2.ncb)+1)
  
# cat("\nV=",V)
# cat("\nQ=",Q)

  for(i in 1:N)
  {
    for(j in 1:N)
    {
      if(j==i) sig[i,j] <- V*(sum(a[-i]^2/sigma2.ncb[-i])+1)/(Q*sigma2.ncb[i])
      else sig[i,j] <- -V*a[i]*a[j]/(Q*sigma2.ncb[i]*sigma2.ncb[j])
    }
  }
  
# cat("\nsig\n")
# print(sig)
# cat("\n")
  
  L.mle <- -0.5*n*log(Q) - 0.5*(n-1)*sum(sig*var(x))
  
# cat("\n-0.5*n*log(Q)=",-0.5*n*log(Q))
# cat("\n-0.5*(n-1)*sum(sig*var(x))=",-0.5*(n-1)*sum(sig*var(x)))
  
# cat("\nL.mle=",L.mle)

  # Likelihood L.hyp for constant bias

  a <- rep(1,N)*sqrt(sigma.mu2.cb)

# cat("\na=",a)

  V <- prod(sigma2.cb)
  Q <- V*(sum(a^2/sigma2.cb)+1)
  
# cat("\nV=",V)
# cat("\nQ=",Q)

  for(i in 1:N)
  {
    for(j in 1:N)
    {
      if(j==i) sig[i,j] <- V*(sum(a[-i]^2/sigma2.cb[-i])+1)/(Q*sigma2.cb[i])
      else sig[i,j] <- -V*a[i]*a[j]/(Q*sigma2.cb[i]*sigma2.cb[j])
    }
  }
  
# cat("\nsig\n")
# print(sig)
# cat("\n")
  
  L.hyp <- -0.5*n*log(Q) - 0.5*(n-1)*sum(sig*var(x))
  
# cat("\n-0.5*n*log(Q)=",-0.5*n*log(Q))
# cat("\n-0.5*(n-1)*sum(sig*var(x))=",-0.5*(n-1)*sum(sig*var(x)))
  
# cat("\nL.hyp=",L.hyp,"\n\n")
  
  lambda <- -2*(L.hyp - L.mle)
  
# cat("\nlambda = ",lambda,"\n")
  
  # df for Likelihood-Ratio Test = no. of parameters lost
  # For this test there are 2*N unrestricted parameters 
  #   (N sigma2s and N betas) and
  #   there are only N + 1 parameters in the restricted model
  #   because all the betas are reduced to just one. Thus
  #   N - 1 parameters are lost (or 2*N - (N + 1) = 2*N - N - 1 = N - 1)
  # So the degrees of freedom = N - 1
  
  df <- N - 1
  
# cat("\ndf = ",df,"\n")
  
  list(N.methods=N,n.items=n,beta.bars=beta.bars,lambda=lambda, df=df,
  p.value=1-pchisq(lambda,df)) 
}
"mle" <-
function(v, r, ni)
{
# v is the var/cov matrix of paired differences
# r initially should be the grubbs estimate of the 
#   squared precision (vector 1 to N)
# r when returned is the MLE
# N is the no. of measuring devices
# ni is the no. of measured items

  N <- length(r)

  for(i in 1:N)
  {
    b0 <- 0
    b1 <- 0
    b2 <- 0
  
    for(j in 1:N)
    {
      if(i != j)
      {
        b0 <- b0 + 1/r[j]
        b1 <- b1 + v[i,j]/r[j]
        jplus1 <- j + 1
        if(jplus1 <= N)
        {
          for(k in jplus1:N)
          {
            if(k != i) b2 <- b2 + v[j,k]/(r[j]*r[k])
          }
        }
      }
    }
    r[i] <- (((ni-1)*(b0*b1-b2))/(ni*b0^2))-1/b0
#   cat("i = ",i, " r = ",r[i], " b0 = ", b0, " b1 = ",b1, " b2 = ",b2,"\n",sep="")
  }
  r
}
"mle.se2" <-
function(v, r, ni)
{
# Computes the squared standard errors for the squared precisions
# before calling this function, compute the MLE's 
# MLE's stored in r
# ni is the no. of items

   N <- length(r)
   
   H <- matrix(0,N,N)
   D <- matrix(0,N,N)
   
   B0 <- vector("numeric",N)
   B1 <- vector("numeric",N)
   
   for(i in 1:N)
   {
     for(j in 1:N)
     {
       if(j!=i) 
       {
         B0[i] <- B0[i] + 1/r[j]
         B1[i] <- B1[i] + v[i,j]/r[j]
       }
     }
   }
   for(i in 1:N)
   {
     H[i,i] <- 0.5*ni*B0[i]^2/(1+B0[i]*r[i])^2  
     for(j in (1:N))
     {
       if(j!=i)
       {
         for(k in 1:N)
         {
           if(k!=i&k!=j) D[i,j] <- D[i,j] + v[j,k]/r[k]
         }
#      cat("\ni =",i," j =",j)
#      print(D)
       H[i,j] <- -(0.5*ni/(r[j]^2*(1+B0[i]*r[i])^2))*
         (1+2*B0[i]*r[i]-((ni-1)/ni)*(B0[i]*v[i,j]+B1[i]-D[i,j]))  
       } 
     }
   }
#  print(B0)
#  print(B1)
#  print(v)
#  print(r)
#  print(ni)
#  print(N)
#  print(H)
   diag(solve(H))
   
}
"ncb.od" <-
function(x, beta=beta.bar(x),M=40,conf.level=0.95)
{
# MLE Confidence Intervals for Precision
# Jaech 1985, pp. 201-202
#
# squared precisions stored in sigma2
# n is the no. of items
# N is the no. of methods
# beta is a vector of slope biases

   n <- dim(x)[1]
   N <- dim(x)[2]
   s <- var(x)

   H <- matrix(0,N+1,N+1)

#  Compute alphas for unconstrained betas (NCB) and betas=1 (CB) cases
#    for completeness

   alpha.cb  <- mean(x) - rep(1,N)*mean(mean(x))
   alpha.ncb <- mean(x) - beta.bar(x)*mean(mean(x))


   mles <- precision.mle.ncb.od(x, beta.bars=beta,M=M)

   sigma2 <- mles[[1]]
#  cat("\nsigma2 =",sigma2)
   sigma.mu2 <- mles[[2]]
#  cat("\nsigma.mu2 =",sigma.mu2)

   a <- beta*sqrt(sigma.mu2)
   c0 <- a^2
   d2 <- sum(beta^2/sigma2)
   c1 <- vector("numeric",N)
   c2 <- vector("numeric",N)
   c3 <- vector("numeric",N)
   c4 <- vector("numeric",N)
   b0 <- vector("numeric",N)
   b1 <- vector("numeric",N)
   b2 <- vector("numeric",N)


   for(i in 1:N)
   {
  #  cat("\ni=",i)
     c1[i] <- sum(a[-i]^2/sigma2[-i]) + 1
  #  cat("\nc1=",c1[i])
     c2[i] <- sum(diag(s[-i,-i])/sigma2[-i])
  #  cat("\nc2=",c2[i])
     c3[i] <- sum(((c1[i] - a[-i]^2/sigma2[-i])*diag(s[-i,-i]))/sigma2[-i])
  #  cat("\nc3=",c3[i])
     c4[i] <- sum(a[-i]*s[i,-i]/sigma2[-i])
  #  cat("\nc4=",c4[i])
     b0[i] = c0[i]
  #  cat("\nb0=",b0[i])
     b1[i] = c1[i]
  #  cat("\nb1=",b1)
     b2[i] = c1[i]*s[i,i] + c0[i]*c2[i] - 2*a[i]*c4[i]
  #  cat("\nb2=",b2[i])

     H[i,i] <- -0.5*n*b1[i]^2/(b0[i] + b1[i]*sigma2[i])^2
  #  cat("\nH[",i,",",i,"]=",H[i,i])
   }

   for(i in 1:(N-1))
   {
     for(j in (i+1):N)
     {
    #  cat("\nj=",j)
       g1 <- -beta[j]^2*sigma.mu2/sigma2[j]^2
    #  cat("\ng1=",g1)
       g2 <- sigma.mu2*(-beta[j]^2*s[i,i] - beta[i]^2*s[j,j] + 2*beta[i]*beta[j]*s[i,j])/sigma2[j]^2
    #  cat("\ng2=",g2)

       g3.b <- 0

       for(k in 1:N)
       {
         if(k!=i&k!=j) g3.b <- g3.b + (beta[k]^2*s[j,j] + beta[j]^2*s[k,k] - 2*beta[j]*beta[k]*s[j,k])/sigma2[k]
       }

       g3 <- -s[j,j]/sigma2[j]^2 - sigma.mu2/sigma2[j]^2*g3.b
    #  cat("\ng3=",g3,"\n\n")

       H[i,j] <- n*beta[j]^2*sigma.mu2*(b1[i]*sigma2[i] + 0.5*b0[i])/(sigma2[j]^2*(b0[i] + b1[i]*sigma2[i])^2) -
   0.5*(n-1)*(b0[i]*g3 - b1[i]*g2 - b2[i]*g1)/(b0[i] + b1[i]*sigma2[i])^2
       H[j,i] <- H[i,j]
    #  cat("\nH[",i,",",j,"]=",H[i,j])
     }
   }

   H[N+1,N+1] <- -0.5*n*d2^2/(d2*sigma.mu2 + 1)^2
#  cat("\nH[",N+1,",",N+1,"]=",H[N+1,N+1])

   for(i in 1:N)
   {
     H[i,N+1] <- (n*beta[i]^2*(d2*sigma.mu2 + 0.5) - (n - 1)*beta[i]*(beta[i]*s[i,i]/sigma2[i] + sum(beta[-i]*s[i,-i]/sigma2[-i])))/
       (sigma2[i]^2*(d2*sigma.mu2 + 1)^2)
     H[N+1,i] <- H[i,N+1]
  #  cat("\nH[",i,",",N+1,"]=",H[i,N+1])
   }
   H <- -H

#  cat("\nH matrix\n")
#  print(H)
#  cat("\nVariances for Squared Precision Estimates\n")

   se2.sigma2 <- diag(solve(H))
   names(se2.sigma2) <- c(dimnames(x)[[2]],"Process")
   names(sigma2) <- dimnames(x)[[2]]
   names(beta) <- dimnames(x)[[2]]
#  print(se2.sigma2)

#  cat("\n\n")

   # Jaech 1985, p. 71, eq. 3.4.2
   df <- 2*c(sigma2^2,sigma.mu2^2)/se2.sigma2
   names(df) <- c(dimnames(x)[[2]],"Process")
   sig.level <- 1 - conf.level
   lb <- df*c(sigma2,sigma.mu2)/qchisq(1 - sig.level/2,df)
   ub <- df*c(sigma2,sigma.mu2)/qchisq(sig.level/2,df)

   out <- data.frame(n=rep(n,N+1),
     sigma=c(sqrt(sigma2),sqrt(sigma.mu2)),se.sigma=se2.sigma2^(1/4),
     alpha.cb=c(alpha.cb,NA),alpha.ncb=c(alpha.ncb,NA),beta=c(beta,NA),
     df,chisq.l=qchisq(sig.level/2,df),
     chisq.u=qchisq(1 - sig.level/2,df),lb=sqrt(lb),ub=sqrt(ub))

   dimnames(out)[[1]] <- c(dimnames(x)[[2]],"Process")

# Compute errors
#   for no bias model (alpha=0, beta=1)

    errors.nb <- x - 1*apply(x,1,mean)

#   for constant bias model (alpha, beta=1)

    alpha.cb.mat <- matrix(alpha.cb,n,N,byrow=TRUE)
    errors.cb <- x - (alpha.cb.mat + 1*apply(x,1,mean))

#   for nonconstant bias model (alpha, beta)

    alpha.ncb.mat <- matrix(alpha.ncb,n,N,byrow=TRUE)
    errors.ncb <- x - (alpha.ncb.mat + matrix(apply(x,1,mean),n,N)*matrix(beta,n,N,byrow=TRUE))

   list(conf.level=conf.level,sigma.table=out,n.items=n,N.methods=N,sigma2=sigma2,sigma.mu2=sigma.mu2,
     se2.sigma2=se2.sigma2,alpha.cb=alpha.cb,alpha.ncb=alpha.ncb,
     beta=beta,df=df,lb=sqrt(lb),ub=sqrt(ub),H=H,
     errors.nb=errors.nb,errors.cb=errors.cb,errors.ncb=errors.ncb)

}
"precision.grubbs.cb.pd" <-
function(x)
{
# Grubbs ME precision estimates for Constant Bias Model using Paired Data
# Confidence intervals not computed (because we should use MLE instead!)
#
# Jaech 1985, Chapters 3 & 4, p. 144 in particular
#
# Written for any no. of methods N
#
# x[i,k] = alpha[i] + beta[i]*mu[k] + epsilon[i,k]
#   with beta[1] = beta[2] = ... = beta[N]
#   N = no. of methods
#   n = no. of items
# 
# ME (method of moments estimator) and MLE are the same for i=3 instruments
#  except for a factor of (n-1)/n: MLE = (n-1)/n * ME
#  so to get the MLE just multiply the output by (n-1)/n

  n <- dim(x)[1]
  N <- dim(x)[2]

# compute paired differences 

  y <- array(NA,c(N,N,n)) # not all elements are used
  
  for(i in 1:(N-1))
  {
    for(j in (i+1):N)
    {
      y[i,j,] <- x[,i] - x[,j]
    }
  }
  
# print(y[1,2,])

# compute variances for each paired difference

  V <- matrix(NA,N,N) # not all elements are used
  
  for(i in 1:(N-1))
  {
    for(j in (i+1):N)
    {
      V[i,j] <- var(y[i,j,])
      V[j,i] <- var(y[i,j,])
    }
  }
  
#  print(V)

# Compute Grubbs (Squared) Precions estimates - stored in r

  S <- vector("numeric",N)
  
  for(i in 1:N)
  {
    S[i] <- 0
    for(j in 1:N)
    {
      if(j!=i) S[i] <- S[i] + V[i,j]
    }
  }
  
# print(S)
  
  VT <- sum(S)/2  

  r <- vector("numeric",N)
  
  for(i in 1:N)
  {
    r[i] <- ((N-1)*S[i] - VT)/((N-1)*(N-2))
  }

#  print(r)

# Return squared precisons
  r 
}
"precision.grubbs.ncb.od" <-
function (x,beta.bar.x=beta.bar(x)) 
{
# Grubbs Precision estimates for NonConstant Bias using Original Data
# Jaech, p. 184

  sqrt(diag(var(x))-beta.bar.x^2*process.sd(x)^2)

}
"precision.mle.ncb.od" <-
function(x, M=20, beta.bars=beta.bar(x),jaech.errors=FALSE) 
{
# Iterative Approximation to MLE precision estimates for NonConstant Bias model
#   using Original Data
# Jaech, p. 185-186
# Use Grubbs NonConstant Bias using Original Data for initial values for
#   precision (sigma)
# Default for beta.bars is NonConstant Bians (betas differ from 1.0) by
#   using Grubbs least squares estimate via function "beta.bar"
# To compute under assumption of constant bias, use a vector of N ones.

  N <- dim(x)[2]  # no. of instruments or methods
  n <- dim(x)[1]  # no. of items
  
# cat("\nPrecision Estimates Using MLE\nAssuming A Model With NonConstant Bias Using Original Data\n")
# cat("Jaech, Chapter 6, p. 185\n")
# cat("Note errors in Jaech when estimating sigma.mu^2 - See function process.var.mle.jaech.err\n")
  
  if(jaech.errors==TRUE)
    cat("\n***Using Same Errors in Jaech's Fortran Program (p. 288) for sigma.mu^2 for compatibility***\n")
  
# cat("\nNo. of methods N=",N,"\nNo. of items n=",n,"\nNo. of iterations for MLE M=",M,"\n")

  s <- var(x)
# cat("\nvariance-covariance matrix s\n")
# print(s)
# cat("\n")

  sigma2 <- round(precision.grubbs.cb.pd(x),4) # initial precision estimates 1 to N
# sigma2[sigma2<0] <- abs(sigma2[sigma2<0]) # take absolute value (remove negative values)
# cat("\nInitial Squared Precision Estiamtes (Grubbs)\n")
# print(sigma2)
# cat("\n")
  
# cat("\nDefault Least Squares Estiamtes of Betas (Grubbs) -or- Assumed Values\n")
# print(beta.bars)
# cat("\n")
    
  if(jaech.errors==TRUE) sigma.mu2 <- process.var.mle.jaech.err(sigma2,s,beta.bars,N,n)
  else sigma.mu2 <- process.var.mle(sigma2,s,beta.bars,N,n)
  
# if(sigma.mu2 < 1) sigma.mu2 <- 0 # set sigma.mu2 = 0 if negative
  
# cat("\nInitial Squared Precision Estimates (Grubbs)\n")
# cat(0,sigma2,"sigma.mu^2=",sigma.mu2,"\n")
# cat("\n")
  
  for(m in 1:M)
  {
    for(i in 1:N)
    {
      sigma2[i] <- sigma.mle(i,s,sigma2,sigma.mu2,beta.bars,N,n)
#      cat("\n","i=",i,"sigma2",sigma2)
    }
#   cat("\n",m,"Sq.Prec.=",sigma2,"sigma.mu^2=",sigma.mu2)
        
    if(jaech.errors==TRUE) sigma.mu2 <- process.var.mle.jaech.err(sigma2,s,beta.bars,N,n)
    else sigma.mu2 <- process.var.mle(sigma2,s,beta.bars,N,n)
  }
# cat("\n\n")    
# cat("\nPrecision (Final)=",sqrt(sigma2),"sigma.mu (Final)=",sqrt(sigma.mu2),"\n\n")

  list(sigma2=sigma2,sigma.mu2=sigma.mu2)

}
"process.var.mle" <-
function (sigma2,s,beta.bars,N,n) 
{
# Jaech p. 186  equations 6.37 - 6.3.10

  d2 <- sum(beta.bars^2/sigma2)
  d3 <- sum(diag(s)/sigma2)
  
  d4.1 <- 0
  
  for(i in 1:N)
  {
    for(j in 1:N)
    {
       if(i!=j) d4.1 <- d4.1 + beta.bars[i]^2*s[j,j]/(sigma2[i]*sigma2[j])
    }
  }
  
  d4.2 <- 0
  
  for(i in 1:(N-1))
  {
    for(j in (i+1):N)
    {
      d4.2 <- d4.2 + beta.bars[i]*beta.bars[j]*s[i,j]/(sigma2[i]*sigma2[j])
    }
  }
  
  d4 <- d4.1 - 2*d4.2
  
# cat("\nprocess.var.mle sigma.mu2 =",(n-1)*(d2*d3 - d4)/(n*d2^2) - 1/d2)
  
  # return process variance
  (n-1)*(d2*d3 - d4)/(n*d2^2) - 1/d2
}
"process.var.mle.jaech.err" <-
function (sigma2,s,beta.bars,N,n) 
{
# Jaech p. 186  equations 6.37 - 6.3.10

  d2 <- sum(beta.bars^2/sigma2)
  d3 <- sum(diag(s)/sigma2)
  
  d4.1 <- 0
  
  for(i in 1:N)
  {
    for(j in 1:N)
    {
       # Jaech p. 288 line 2330 has s[i,j] instead of s[j,j]
       if(i!=j) d4.1 <- d4.1 + beta.bars[i]^2*s[i,j]/(sigma2[i]*sigma2[j])
    }
  }
  
  d4.2 <- 0
  
  for(i in 1:(N-1))
  {
    for(j in (i+1):N)
    {
      d4.2 <- d4.2 + beta.bars[i]*beta.bars[j]*s[i,j]/(sigma2[i]*sigma2[j])
    }
  }
  
  d4 <- d4.1 - 2*d4.2
  
  # Jaech p. 288 line 2410 leaves of "- 1/d2"
  (n-1)*(d2*d3 - d4)/(n*d2^2) # return process var

}
"sigma.mle" <-
function (i,s,sigma2,sigma.mu2,beta.bars,N,n) 
{
# Jaech p. 185-186 equations 6.3.1 - 6.3.6

# this is the ith iteration

# if(sigma.mu2 < 0) sigma.mu2 <- 0

  a <- beta.bars*sqrt(sigma.mu2)
  
#  cat("\na=",a)
  
  b0 <- a[i]^2
  
#  cat("\nb0=",b0)
  
  b1 <- 0
  for(j in 1:N) { if(j!=i) { b1 <- b1 + a[j]^2/sigma2[j] }}
  b1 <- b1 + 1
  
#  cat("\nb1=",b1)
  
  b2.1 <- 0
  for(j in 1:N) { if(j!=i) { b2.1 <- b2.1 + s[j,j]/sigma2[j] }}
  
#  cat("\nb2.1=",b2.1)
  
  b2.2 <- 0
  for(j in 1:N) { if(j!=i) { b2.2 <- b2.2 + a[j]*s[i,j]/sigma2[j] }}
  
#  cat("\nb2.2=",b2.2)
  
  b2 <- b1*s[i,i] + a[i]^2*b2.1 - 2*a[i]*b2.2
  
#  cat("\nb2=",b2)
  
  b3.1 <-0
  for(j in 1:N) { if(j!=i) { b3.1 <- b3.1 + s[j,j]/sigma2[j] }}
  
#  cat("\nb3.1=",b3.1)

  b3.2 <-0
  for(j in 1:N) { if(j!=i) { b3.2 <- b3.2 + a[j]^2*s[j,j]/sigma2[j]^2}}
  
#  cat("\nb3.2=",b3.2)
  
  b3.3 <-0
  for(j in 1:N)
  {
    if(j!=i) 
    {
      for(k in 1:N)
      {
        if(k>j & k!=i) b3.3 <- b3.3 + a[j]*a[k]*s[j,k]/(sigma2[j]*sigma2[k])
      }
    }  
  }
  b3 <- b1*b3.1 - b3.2 - 2*b3.3
  
#  cat("\nb3=",b3)
  
  (n-1)*(b1*b2 - b0*b3)/(n*b1^2) - b0/b1 # return sigma2[i]     

}
