## Package-name marker used by R's (legacy) package loading mechanism.
.packageName <- "geoR"
##
## "wrappers" for pieces of C code in geoR package
## -----------------------------------------------
##
"bilinearformXAY" <-
  function(X, lowerA, diagA, Y)
  {
    nA <- length(diagA)
    nX <- length(X)/nA
    nY <- length(Y)/nA
    if(length(lowerA) != (nA * (nA -1)/2))
      stop("lowerA and diagA have incompatible dimentions")
    out <- .C("bilinearform_XAY",
              as.double(as.vector(lowerA)),
              as.double(as.vector(diagA)),
              as.double(as.vector(X)),
              as.double(as.vector(Y)),
              as.integer(nX),
              as.integer(nY),
              as.integer(nA),
              res=as.double(rep(0,(nX*nY))),
              PACKAGE = "geoR")$res
    attr(out, "dim") <- c(nX, nY)
    return(out)
  }

"diagquadraticformXAX" <-
  function(X, lowerA, diagA)
  {
    nA <- length(diagA)
    nX <- length(X)/nA
    if(length(lowerA) != (nA * (nA -1)/2))
      stop("lowerA and diagA have incompatible dimentions")
    out <- .C("diag_quadraticform_XAX",
              as.double(as.vector(lowerA)),
              as.double(as.vector(diagA)),
              as.double(as.vector(X)),
              as.integer(nX),
              as.integer(nA),
              res = as.double(rep(0,nX)),
              PACKAGE = "geoR")$res
    return(out)
  }

"loccoords" <-
  function(coords, locations)
  {
    ## Computes a matrix for which each row has the distances between
    ## each point in 'locations' to all the points in 'coords'
    coords <- as.matrix(coords)
    locations <- as.matrix(locations)
    dimc <- dim(coords)
    diml <- dim(locations)
    if((dimc[2] != 2) | (diml[2] != 2))
      stop("coords and locations must have two columns")
    nc <- dimc[1]
    nl <- diml[1]
    out <- as.double(rep(0, nc*nl))
    .C("loccoords",
       as.double(as.vector(locations[,1])),
       as.double(as.vector(locations[,2])),
       as.double(as.vector(coords[,1])),
       as.double(as.vector(coords[,2])),
       as.integer(nl),
       as.integer(nc),
       out, DUP=FALSE,
       PACKAGE = "geoR")
    attr(out, "dim") <- c(nc, nl)
    return(out)
  }

"diffpairs" <-
  function(coords, data)
  {
    ## returns:
    ##   - the lower triangle of the matrix with euclidean distances
    ##     between pairs of points, 
    ##   - the difference between data values at these locations
    ##
    coords <- as.matrix(coords)
    data <- as.vector(data)
    if(length(data) != nrow(coords)) stop('incompatible dimensions between data and coords')
    dimc <- dim(coords)
    if(dimc[2] == 1 & dimc[1] == 2) return(0)
    else{
      if(dimc[2] != 2)
        stop("coords must have two columns")
      nc <- dimc[1]
      out <- as.double(rep(0, (nc * (nc-1))/2))
      res <- .C("diffpairs",
                as.double(coords[,1]),
                as.double(coords[,2]),
                as.double(data),
                as.integer(nc),
                dist = out, diff = out,
                PACKAGE = "geoR")[c('dist','diff')]
      return(res)
    }
  }

"corr.diaglowertri" <-
  function(coords, cov.model, phi, kappa)
{
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave", "power",
                           "powered.exponential", "cauchy", "gneiting",
                           "gneiting.matern", "pure.nugget"))
  if(any(cov.model == c("cauchy", "matern", "powered.exponential", "power", "gneiting.matern"))){
    if(missing(kappa))
      stop("argument kappa is needed for this choice of correlation function")
  }
  else kappa <- 1
  coords <- as.matrix(coords)
  dimc <- dim(coords)
  if(dimc[2] == 1 & dimc[1] == 2)
    return(0)
  else{
    if(dimc[2] != 2)
      stop("coords must have two columns")
    nc <- dimc[1]
    out <- as.double(rep(0, (nc * (nc+1)/2)))
    .C("cor_diag",
       as.double(coords[,1]),
       as.double(coords[,2]),
       as.integer(nc),
       as.integer(cor.number(cov.model)),
       as.double(phi),
       as.double(kappa),
       out, DUP = FALSE,
       PACKAGE = "geoR")
    return(out)
  }
}

"cond.sim" <-
  function(env.loc, env.iter, loc.coincide, coincide.cond, tmean, Rinv, mod, vbetai,
           fixed.sigmasq)
  {
    NTOT <- mod$nloc * mod$Nsims
    if(fixed.sigmasq)
      invchisc <- rep(1, NTOT)
    else
      invchisc <- sqrt(mod$df.model/rchisq(mod$Nsims, df=mod$df.model))
    ##
    if(mod$beta.size == 1){
      Blower <- 0
      Bdiag <- vbetai
    }
    else{
      Blower <- vbetai[lower.tri(vbetai)]
      Bdiag <- diag(vbetai)
    }
    ##
    if((length(tmean) %% mod$nloc) > 0)
      stop("cond.sim: wrong size of tmean")
    tmean <- matrix(tmean, nrow = mod$nloc)
    ncol.tmean <- ncol(tmean)
    if(ncol(tmean) > 1){
      if(ncol.tmean != mod$Nsims)
        stop("cond.sim: size of tmean does not matches with Nsims")
      diff.mean <- as.integer(1)
    }      
    else
      diff.mean <- as.integer(0)
    ##
    normalsc <- rnorm(NTOT)
##    if(is.null(loc.coincide))
##      locations <- get("locations", envir=env.loc)
##    else
##      locations <- get("locations", envir=env.loc)[-loc.coincide,,drop=FALSE]
    ##
    ##
##    R0 <- varcov.spatial(coords = locations,
##                         cov.pars=c(mod$Dval, mod$phi))[[1]]
##    iR <- matrix(0, mod$n,mod$n)
##    iR[lower.tri(iR)] <- Rinv$lower
##    iR <- iR  + t(iR)
##    diag(iR) <- Rinv$diag
##    v0iRv0 <- crossprod(get("v0", envir=env.iter), iR) %*%
##      get("v0", envir=env.iter)
##    V <- vbetai
##    bVb <- t(get("b", envir=env.iter)) %*% V %*% get("b", envir=env.iter)
##    Vmat <- R0 - v0iRv0  + bVb
##    Vchol <- try(chol(Vmat))
##    if(!is.numeric(Vchol)) print(try(chol(Vmat)))
    ##
    ##
    if(coincide.cond) loccoin <- -loc.coincide
    else loccoin <- TRUE
    normalsc <- .C("kb_sim_new",
                   as.double(as.vector(tmean)),
                   out = as.double(normalsc),
                   as.double(as.vector(Rinv$lower)),
                   as.double(as.vector(Rinv$diag)),
                   as.double(as.vector(get("v0", envir=env.iter))),
                   as.integer(mod$nloc),
                   as.integer(mod$n),
                   as.double(mod$Dval),
                   as.integer(mod$Nsims),
                   as.double(invchisc),
                   as.double(mod$s2),
                   as.double(Blower),
                   as.double(Bdiag),
                   as.double(as.vector(get("b", envir=env.iter))),
                   as.integer(mod$beta.size),
                   as.double(get("locations", envir=env.loc)[loccoin,1]),
                   as.double(get("locations", envir=env.loc)[loccoin,2]),
                   as.integer(mod$cov.model.number),
                   as.double(mod$phi),
                   as.double(mod$kappa),
                   as.integer(diff.mean),
                   PACKAGE = "geoR")$out      
    attr(normalsc, "dim") <- c(mod$nloc, mod$Nsims)
    return(normalsc)
  }

"Ksat" <-
structure(list(coords = structure(c(2.6, 3.3, 3.4, 2.5, 2.6, 
5.5, 5.7, 6, 6.2, 7.6, 7.8, 11.3, 11.6, 11.9, 12.1, 12.4, 15.3, 
17, 18.9, 16.3, 17.9, 17, 13.4, 15.2, 17.1, 18.9, 20.8, 13.3, 
15.3, 17.1, 18.9, 21, 2.4, 2.2, 1.6, 1.3, 0.8, 5.1, 4.6, 3.3, 
2.6, 3.8, 3.2, 7.9, 7, 6.2, 5.3, 4.5, 7.1, 7.3, 7.5, 9.1, 8.4, 
9.1, 3.4, 3.5, 3.6, 3.7, 4, 2.1, 2.2, 2.3, 2.4, 2.7), .Dim = as.integer(c(32, 
2)), .Dimnames = list(c("1", "2", "3", "4", "5", "6", "7", "8", 
"9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", 
"20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", 
"31", "32"), c("x", "y"))), data = c(0.02, 0.08, 0.036, 0.093, 
0.036, 0.82, 0.41, 0.093, 0.086, 0.25, 0.81, 3.24, 4.3, 2.3, 
0.3, 0.2, 0.56, 0.07, 0.33, 0.28, 0.062, 0.31, 3.38, 2.22, 0.13, 
0.67, 1.11, 0.09, 2.63, 0.29, 1.46, 1.24), borders = structure(list(
    x = c(0, 0.7, 2.2, 5, 8.7, 13.9, 19.8, 21, 22.4, 22.5), y = c(0, 
    1.6, 3.3, 5.7, 7.8, 10.4, 13, 10.5, 5.3, 0)), .Names = c("x", 
"y"), class = "data.frame", row.names = c("1", "2", "3", "4", 
"5", "6", "7", "8", "9", "10"))), .Names = c("coords", "data", 
"borders"), class = "geodata")
##
## Auxiliary functions for the geoR library
## ----------------------------------------
##
## These functions are typically called by other main functions
## to perform internal calculations
##

"solve.geoR" <-
  function (a, b = NULL, ...) 
{
  require(methods)
  a <- eval(a)
  b <- eval(b)
  if(exists("trySilent")){
    if (is.null(b)) res <- trySilent(solve(a, ...))
    else res <- trySilent(solve(a, b, ...))
  }
  else{
    error.now <- options()$show.error.messages
    if (is.null(error.now) | error.now) 
      on.exit(options(show.error.messages = TRUE))
    options(show.error.messages = FALSE)
    if (is.null(b)) res <- try(solve(a, ...))
    else res <- try(solve(a, b, ...))
  }
  if (inherits(res, "try-error")) {
    test <- all.equal.numeric(a, t(a), 100 * .Machine$double.eps)
    if(!(is.logical(test) && test)){
      ##      options(show.error.messages = TRUE)
      stop("matrix `a' is not symmetric")
    }
    t.ei <- eigen(a, symmetric = TRUE)
    if(exists("trySilent")){
      if (is.null(b)) res <- trySilent(t.ei$vec %*% diag(t.ei$val^(-1)) %*% t(t.ei$vec))
      else res <- trySilent(t.ei$vec %*% diag(t.ei$val^(-1)) %*% t(t.ei$vec) %*% b)
    }
    else{
      if (is.null(b)) res <- try(t.ei$vec %*% diag(t.ei$val^(-1)) %*% t(t.ei$vec))
      else res <- try(t.ei$vec %*% diag(t.ei$val^(-1)) %*% t(t.ei$vec) %*% b)
    }
    if (any(is.na(res)) | any(is.nan(res)) | any(is.infinite(res))) 
      class(res) <- "try-error"
  }
  if (inherits(res, "try-error")) 
    stop("Singular matrix. Covariates may have different orders of magnitude.")
  return(res)
}

"coords.aniso" <- 
  function(coords, aniso.pars, reverse=FALSE)
{
  coords <- as.matrix(coords)
  n <- nrow(coords)
  if(length(aniso.pars) != 2)
    stop("argument aniso.pars must be a vector with 2 elementsm the anisotropy angle and anisotropy ratio, respectively")
  psiA <- aniso.pars[1]
  psiR <- aniso.pars[2]
  if(psiR < 1){
    psiR <- round(psiR, dig=8)
    if(psiR < 1)
      stop("anisotropy ratio must be greater than 1")
  }
  rm <- matrix(c(cos(psiA), -sin(psiA),
                 sin(psiA), cos(psiA)),
               ncol = 2)
  tm <- diag(c(1, 1/psiR))
  if(reverse)
    coords.mod <- coords %*% solve(rm %*% tm)
  else
    coords.mod <- coords %*% rm %*% tm
  return(coords.mod)
}

#"dist0.krige" <-
#function (x0, coords) 
#{
#  if (length(x0) != 2) 
#    stop(paste("length of x0 is", length(x0), "(it must be 2)"))
#  coords[, 1] <- coords[, 1] - x0[1]
#  coords[, 2] <- coords[, 2] - x0[2]
#  return(sqrt(coords[, 1]^2 + coords[, 2]^2))
#}


"set.coords.lims" <-
  function(coords, xlim, ylim)
{
  coords.lims <- apply(coords, 2, range, na.rm=TRUE)
  if(!missing(xlim) && is.numeric(xlim)) coords.lims[,1] <- xlim[order(xlim)]
  if(!missing(ylim) && is.numeric(ylim)) coords.lims[,2] <- ylim[order(ylim)]
  coords.diff <- diff(coords.lims)
  if (coords.diff[1] != coords.diff[2]) {
    coords.diff.diff <- abs(diff(as.vector(coords.diff)))
    ind.min <- which(coords.diff == min(coords.diff))
    coords.lims[, ind.min] <-
      coords.lims[, ind.min] +
        c(-coords.diff.diff, coords.diff.diff)/2
  }
  return(coords.lims)
}


"dinvchisq" <-
  function(x, df, scale=1/df, log = FALSE)
{
  if(df <= 0)
    stop("df must be greater than zero")
  if(scale <= 0)
    stop("scale must be greater than zero")
  nu <- df/2
  if(log)
    return(ifelse(x > 0, nu*log(nu) - log(gamma(nu)) + nu*log(scale) -
                  (nu+1)*log(x) - (nu*scale/x), NA))
  else
    return(ifelse(x > 0,
                  (((nu)^(nu))/gamma(nu)) * (scale^nu) *
                  (x^(-(nu+1))) * exp(-nu*scale/x), NA))
}


"rinvchisq" <- 
  function (n, df, scale = 1/df)
{
  if((length(scale)!= 1) & (length(scale) != n))
    stop("scale should be a scalar or a vector of the same length as x")
  if(df <= 0)
    stop("df must be greater than zero")
  if(any(scale <= 0))
    stop("scale must be greater than zero")
  return((df*scale)/rchisq(n, df=df)) 
}

"locations.inside" <-
  function(locations, borders)
{
  if(is.list(borders))
    borders <- matrix(unlist(borders[1:2]), ncol=2)
  borders <- as.matrix(borders)
  if(ncol(borders) != 2)
    stop("borders must be a matrix or data-frame with two columns")
  if (!require(splancs))
    cat("package splancs in required to select points inside the borders\n")
  locations <- locations[as.vector(inout(pts = locations,
                                         poly = borders)),]
  return(locations)
}


"polygrid" <- 
  function(xgrid, ygrid, borders, vec.inout = FALSE)
{
  ## checking for splancs
  if(!require(splancs))
    cat("ERROR: cannot run the function\npackage \"splancs\" should be installed/loaded")
  else library(splancs)
  ## checking input
  if(!is.list(xgrid) && is.vector(drop(xgrid))){
    if(missing(ygrid)) stop("xgrid must have x and y coordinates or a vector must be provided for ygrid")
    if(!is.vector(ygrid)) stop("ygrid must be a vector")
    xygrid <- expand.grid(x = xgrid, y = ygrid)
  }
  if(is.matrix(xgrid) || is.data.frame(xgrid)){
    if(ncol(xgrid) != 2) stop("xgrid must be a vector or a 2 column matrix or data-frame")
    xygrid <- xgrid
    if(!missing(ygrid)) warning("xgrid has 2 column, ygrid was ignored")
  }
  else
    if(is.list(xgrid)){
      if(length(xgrid) != 2) stop("if xgrid is a list it must have 2 elements")
      xygrid <- expand.grid(x = xgrid[[1]], y = xgrid[[2]])
      if(!missing(ygrid)) warning("xgrid is a list, ygrid was ignored")
    }
  if(nrow(borders) < 3) stop("borders must have at least 3 points")
  if(exists("inout")){
    ind <- as.vector(inout(pts=xygrid, poly=borders))
    xypoly <- xygrid[ind == TRUE,  ]
    if(vec.inout == FALSE)
      return(xypoly)
    else return(list(xypoly = xypoly, vec.inout = ind))
  }
  else{
    cat("ERROR: cannot run the function\n")
    cat("package \"splancs\" should be installed/loaded")
    return(invisible())
  }
}

"trend.spatial" <-
  function (trend, geodata) 
{
  if(!missing(geodata)){
    attach(geodata, pos=2)
    if(!is.null(geodata$covariate)){
      attach(geodata$covariate, pos=3)
      on.exit(detach("geodata$covariate"), add=TRUE)
    }
    on.exit(detach("geodata"), add=TRUE)
  }
  if (inherits(trend, "formula")) {
    require(methods)
    if(exists("trySilent")){
      trend.mat <- trySilent(model.matrix(trend))
    }
    else{
      error.now <- options()$show.error.messages
      if (is.null(error.now) | error.now) 
        on.exit(options(show.error.messages = TRUE))
      options(show.error.messages = FALSE)
      trend.mat <- try(model.matrix(trend))
    }    
    if (inherits(trend.mat, "try-error")) 
      stop("\ntrend elements not found")
  }
  else {
    if(is.numeric(trend))
      trend.mat <- unclass(trend)
    else if (trend == "cte"){
      if(missing(geodata))
        stop("argument geodata must be provided with trend=\"cte\"")
      trend.mat <- as.matrix(rep(1, nrow(geodata$coords)))
    }
    else if (trend == "1st"){
      if(missing(geodata))
        stop("argument geodata must be provided with trend=\"1st\"")
      trend.mat <- cbind(1, geodata$coords)
    }
    else if (trend == "2nd"){ 
      if(missing(geodata))
        stop("argument geodata must be provided with trend=\"2nd\"")
      trend.mat <- cbind(1, geodata$coords, geodata$coords[,1]^2,
                         geodata$coords[,2]^2,
                         geodata$coords[,1] * geodata$coords[,2])
    }
    else stop("external trend must be provided for data locations to be estimated using the arguments trend.d and trend.l. Allowed values are \"cte\", \"1st\", \"2nd\" or  a model formula")
  }
  trend.mat <- as.matrix(trend.mat)
  dimnames(trend.mat) <- list(NULL, NULL)
  class(trend.mat) <- "trend.spatial"
  return(trend.mat)
}

"nlmP" <- function(objfunc, params, lower = rep( -Inf, length(params)),
                   upper = rep(+Inf, length(params)), ... )
{
  ## minimizer, using nlm with transformation of variables
  ## to allow for limits for the parameters   
  ##
  ## objfunc is a function to be optimised
  ## params is a starting value for the parameters
  ##
  ## NOTE: this function was used before optim() becomes available for R
  ##       It has limited usage now.
  ##
  ## Adapted from a function from Patrick E. Brown, Lancaster University 
  ##
  Nparams <- length(params)
  if(length(lower) != Nparams)
    stop(" lower boundry different length than params")
  if(length(upper) != Nparams)
    stop(" upper boundry different length than params")
  checklimits <- upper - lower
  if(any(checklimits <= 0))
    stop(" bad boundries")
  if(any(params < lower))
    stop(" starting params too low")
  if(any(params > upper))
    stop(" starting params too high")
  
  bothlimQQ <- (lower != (-Inf)) & (upper != +Inf)
  loweronlyQQ <- (lower != (-Inf)) & (upper == +Inf)
  upperonlyQQ <- (lower == (-Inf)) & (upper != +Inf)
  ubothQQ <- upper[bothlimQQ]
  lbothQQ <- lower[bothlimQQ]
  dbothQQ <- ubothQQ - lbothQQ
  loneQQ <- lower[loweronlyQQ]
  uoneQQ <- upper[upperonlyQQ]
  
  .bounds.list <- list(bothlimQQ = bothlimQQ, 
                       loweronlyQQ = loweronlyQQ,
                       upperonlyQQ = upperonlyQQ,
                       ubothQQ = ubothQQ,
                       lbothQQ = lbothQQ,
                       dbothQQ = dbothQQ,
                       loneQQ = loneQQ,
                       uoneQQ = uoneQQ)
  
  assign(".objfuncQQ", objfunc, pos=1)
  assign(".bounds.list", .bounds.list, pos=1)
  
  ## reduce the parameter space by a scale to keep parameters
  ## away from the boundries
  
  normaltomad <- function(normalparamsX)
    {
      madparamsX <- normalparamsX
      if(any(.bounds.list$bothlimQQ)) {
        noughtone <- (normalparamsX[.bounds.list$bothlimQQ] -
                      .bounds.list$lbothQQ)/.bounds.list$dbothQQ
        madparamsX[.bounds.list$bothlimQQ] <- log(noughtone/(1 - noughtone))
      }
      
      if(any(.bounds.list$loweronlyQQ))
        madparamsX[.bounds.list$loweronlyQQ] <-
          log(normalparamsX[.bounds.list$loweronlyQQ] - .bounds.list$loneQQ)
      
      if(any(.bounds.list$upperonlyQQ))
        madparamsX[.bounds.list$upperonlyQQ] <-
          log(.bounds.list$uoneQQ - normalparamsX[.bounds.list$upperonlyQQ])
      
      return(madparamsX)
    }
  
  madtonormalQQ <<- function(madparamsX)
    {
      normalparamsX <- madparamsX
      
      if(any(.bounds.list$bothlimQQ)) {
###        madparamsX[((.bounds.list$bothlimQQ) & (madparamsX > 300))] <- 300
        emad <- exp(madparamsX[.bounds.list$bothlimQQ])
        normalparamsX[.bounds.list$bothlimQQ] <-
          .bounds.list$dbothQQ * (emad/(1 + emad)) + .bounds.list$lbothQQ
      }
      
      if(any(.bounds.list$loweronlyQQ)){
        normalparamsX[.bounds.list$loweronlyQQ] <-
          exp(madparamsX[.bounds.list$loweronlyQQ]) + .bounds.list$loneQQ
      }
      
      if(any(.bounds.list$upperonlyQQ))
        normalparamsX[.bounds.list$upperonlyQQ] <-
          - exp(madparamsX[.bounds.list$upperonlyQQ]) + .bounds.list$uoneQQ
      
      if(exists(".ind.prof.phi"))
        if(is.nan(normalparamsX[.ind.prof.phi]))
          normalparamsX[.ind.prof.phi] <- 0
      
      return(normalparamsX)
    }
  
  newobjfunc <- function(madparams) {
    normalparams <-  madtonormalQQ(madparams)
    
    .objfuncQQ(normalparams)
    
  }
  
  startmadparams <- normaltomad(params)
  result <- nlm(newobjfunc, startmadparams, ...)
  result$madestimate <- result$estimate
  result$estimate <- madtonormalQQ(result$madestimate)
  remove(".bounds.list", pos=1, inherits=TRUE)
  remove(".objfuncQQ", pos=1, inherits=TRUE)
  remove("madtonormalQQ", pos=1, inherits=TRUE)
  
###  return(result, madtonormalQQ(normaltomad(params)),params)
  return(result)
}



##
## Box-Cox transformation in the package geoR
## ------------------------------------------
##

"boxcox.geodata" <- function(object, trend = "cte", ...)
{
  require(MASS)
  xmat <- unclass(trend.spatial(trend = trend, geodata = object))
  if (nrow(xmat) != length(object$data)) 
    stop("coords and trend have incompatible sizes")
  require(MASS)
  boxcox(object$data ~ xmat + 0, ...)
}

"boxcox.fit" <-
  function(data, xmat, lambda, lambda2 = NULL, add.to.data = 0,...)
{
  call.fc <- match.call()
  data <- data + add.to.data
  if(is.null(lambda2) && any(data <= 0))
    stop("Transformation requires positive data")
  ##
  data <- as.vector(data)
  n <- length(data)
  if(missing(xmat)) xmat <- rep(1, n)
  xmat <- as.matrix(xmat)
  if(any(xmat[,1] != 1)) xmat <- cbind(1, xmat)
  ## do not reverse order of the next two lines:
  xmat <- xmat[!is.na(data),,drop=FALSE]
  data <- data[!is.na(data)]
  n <- length(data)
  ##
  beta.size <- ncol(xmat)
  if(nrow(xmat) != length(data))
    stop("xmat and data have incompatible lengths")
  ##  lik.method <- match.arg(lik.method, choices = c("ML", "RML"))
  lik.method <- "ML"
  ##
  absmin <- abs(min(data)) + 0.0001 * diff(range(data)) 
  if(!is.null(lambda2)){
    if(missing(lambda)) lambda.ini <- seq(-2, 2, by=0.2)
    else lambda.ini <- lambda
    lambda2.ini <- 0
    if(lambda2 == TRUE) lambda2.ini <- absmin
    if(is.numeric(lambda2)) lambda2.ini <- lambda2
    lambdas.ini <- as.matrix(expand.grid(lambda.ini, lambda2.ini))
    ##
    if(length(as.matrix(lambdas.ini)) > 2){
      lamlik <- apply(lambdas.ini, 1, boxcox.negloglik, data=data + absmin,
                      xmat=xmat, lik.method=lik.method)
      lambdas.ini <- drop(lambdas.ini[which(lamlik == min(lamlik)),])
    }
    names(lambdas.ini) <- NULL
    lik.lambda <- optim(par=lambdas.ini, fn = boxcox.negloglik,
                        method="L-BFGS-B", hessian = TRUE, 
                        lower = c(-Inf, absmin), 
                        data = data, xmat = xmat, lik.method = lik.method)
  }
  else{
    lik.lambda <- optimize(boxcox.negloglik, int = c(-5, 5), data = data, xmat = xmat, lik.method = lik.method)
    lik.lambda <- list(par = lik.lambda$minimum, value = lik.lambda$objective, convergence = 0, message = "function optimize used")
  }
  ##
  ##  hess <- sqrt(diag(solve(as.matrix(lik.lambda$hessian))))
  lambda.fit <- lik.lambda$par
  if(length(lambda.fit) == 1) lambda.fit <- c(lambda.fit, 0)
  data <- data + lambda.fit[2]
  ##
  if(abs(lambda.fit[1]) < 0.0001) yt <- log(data)
  else yt <- ((data^lambda.fit[1]) - 1)/lambda.fit[1]
  beta <- solve(crossprod(xmat), crossprod(xmat, yt))
  mu <- drop(xmat %*% beta)
  sigmasq <- sum((yt - mu)^2)/n
  if(lik.method == "ML")
    loglik <- drop((-(n/2) * (log(2*pi) + log(sigmasq) + 1)) + (lambda.fit[1]-1) * sum(log(data)))
  ##  if(lik.method == "RML")
  ##    loglik <- drop(-lik.lambda$value - (n/2)*log(2*pi) - (n-beta.size)*(log(n) - 1))
  ##
  temp <- 1 + lambda.fit[1] * mu
  fitted.y <- ((temp^((1/lambda.fit[1]) - 2)) *
               (temp^2 + ((1-lambda.fit[1])/2) * sigmasq))
  variance.y <-  (temp^((2/lambda.fit[1]) - 2)) * sigmasq
  if(beta.size == 1){
    fitted.y <- unique(fitted.y)
    variance.y <- unique(fitted.y)
  }
  ##
  beta <- drop(beta)
  if(length(beta) > 1)
    names(beta) <- paste("beta", 0:(beta.size-1), sep="")
  if(length(lik.lambda$par) == 1) lambda.fit <- lambda.fit[1]
  if(length(lik.lambda$par) == 2) names(lambda.fit) <- c("lambda", "lambda2")
  res <- list(lambda = lambda.fit, beta.normal = drop(beta),
              sigmasq.normal = sigmasq, 
              loglik = loglik, optim.results = lik.lambda)
  ## res$hessian <- c(lambda = hess) 
  res$call <- call.fc
  class(res) <- "boxcox.fit"
  return(res)
}

"boxcox.negloglik" <-
  function(lambda.val, data, xmat, lik.method = "ML")
{
  if(length(lambda.val) == 2){
    data <- data + lambda.val[2]
    lambda <- lambda.val[1]
  }
  else lambda <- lambda.val
  n <- length(data)
  beta.size <- ncol(xmat)
  if(abs(lambda) < 0.0001)
    yt <- log(data)
  else yt <- ((data^lambda) - 1)/lambda
  beta <- solve(crossprod(xmat), crossprod(xmat, yt))
  ss <- sum((drop(yt) - drop(xmat %*% beta))^2)
  if(lik.method == "ML")
    neglik <- (n/2) * log(ss) - ((lambda - 1) * sum(log(data)))
  if(lik.method == "RML"){
    xx <- crossprod(xmat)
    if(length(as.vector(xx)) == 1)
      choldet <- 0.5 * log(xx)
    else
      choldet <- sum(log(diag(chol(xx))))
    neglik <- ((n-beta.size)/2) * log(ss) + choldet -
      ((lambda - 1) * sum(log(data)))
  }  
  if(!is.numeric(neglik)) neglik <- Inf
  return(drop(neglik))
}

"print.boxcox.fit" <-
  function(x, ...)
{
  if(length(x$lambda) == 1) names(x$lambda) <- "lambda"
  if(length(x$beta.normal) == 1) names(x$beta.normal) <- "beta"
  res <- c(x$lambda, x$beta.normal, sigmasq = x$sigmasq.normal)
  cat("Fitted parameters:\n")
  print(res)
  cat("\nConvergence code returned by optim: ")
  cat(x$optim.results$convergence)
  cat("\n")
  return(invisible())
}

"plot.boxcox.fit" <-
  function(x, hist = TRUE, data = eval(x$call$data), ...)
{
  if(is.null(data)) stop("data object not provided or not found")
  if(!is.null(x$call$xmat) && ncol(eval(x$call$xmat)) > 1)
    stop("plot.boxcox.fit not valid when covariates are included")
  if(!is.null(x$call$add.to.data))
    data <- data + eval(x$call$add.to.data)
  y <- data
  rd <- range(y)
  obj <- x
  x <- NULL
  f <- function(x, res = obj){
    dboxcox(x, lambda=res$lambda[1],
            lambda2 = ifelse(res$lambda[2], res$lambda[2], 0),
            mean=res$beta, sd=sqrt(res$sigmasq))
  } 
  ldots <- list()
  if(hist){
    if(is.null(ldots$ylim)){
      lim.hist <- max(hist(y, prob=TRUE, ...)$dens)
      lim.dens <- max(f(seq(rd[1], rd[2], l=200)))
      hist(y, prob=TRUE, ylim=c(0, max(lim.hist, lim.dens)), ...)
    }
    else
      hist(y, prob=TRUE, ...)
    curve(f(x), from=rd[1], to=rd[2], add=TRUE)
  }
  else{
    if(is.null(ldots$from)) ini <- rd[1]
    else ini <- ldots$from
    if(is.null(ldots$to)) fim <- rd[2]
    else fim <- ldots$to
    curve(f(x), from=ini, to=fim, ...)
  }
  return(invisible())
}

"lines.boxcox.fit" <-
  function(x, data = eval(x$call$data), ...)
{
  if(is.null(data)) stop("data object not provided or not found")
  if(!is.null(x$call$xmat) && ncol(eval(x$call$xmat)) > 1)
    stop("lines.boxcox.fit not valid when covariates are included")
  y <- data
  rd <- range(y)
  obj <- x
  x <- NULL
  rd <- range(data)
  ldots <- list()
  if(is.null(ldots$from)) ini <- rd[1]
  else ini <- ldots$from
  if(is.null(ldots$to)) fim <- rd[2]
  else fim <- ldots$to
  f <- function(x, res = obj){
    dboxcox(x, lambda=res$lambda,
            lambda2 = ifelse(res$lambda[2], res$lambda[2], 0),
            mean=res$beta, sd=sqrt(res$sigmasq))
  }
  curve(f(x), from=rd[1], to=rd[2], add=TRUE, ...)
  return(invisible())
}

"rboxcox" <-
  function(n, lambda, lambda2 = NULL, mean = 0, sd = 1)
{
  if(is.null(lambda2)) lambda2 <- 0
  if(is.na(lambda2)) lambda2 <- 0
  xn <- rnorm(n = n, mean = mean, sd = sd)
  if(abs(lambda) < 0.001) xbc <- exp(xn)
  else{
    xbc <- rep(NA, n)
    ind <- xn < -1/lambda
    sum.ind <- sum(ind)
    if(sum.ind > 0)
      cat(paste("rboxcox: WARNING ", sum.ind, "values truncated to 0")) 
    xn[ind] <- -1/lambda
    xbc <- ((xn * lambda) + 1)^(1/lambda)
  }
  return(xbc - lambda2)
}

"dboxcox" <-
  function(x, lambda, lambda2 = NULL, mean = 0, sd = 1)
{
  if(is.null(lambda2)) lambda2 <- 0
  if(is.na(lambda2)) lambda2 <- 0
  x <- x + lambda2
  lx <- length(x)
  dval <- rep(0, lx)
  for(i in 1:lx){
    if(x[i] <=0) dval[i] <- 0
    else{
      if(abs(lambda) < 0.0001) xt <- log(x[i])
      else xt <- ((x[i]^lambda) - 1)/lambda 
      dval[i] <- ((1/sqrt(2*pi)) * (1/sd) * x[i]^(lambda-1) *
                  exp(-((xt-mean)^2)/(2*sd^2)))
    }
  }
  return(dval)
}

"BCtransform" <-
  function(x, lambda, add.to.data = 0,
           inverse = FALSE, log.jacobian = FALSE)
{ 
  x <- x + add.to.data
  if(inverse){
    if(log.jacobian) stop("options log.jacobian not allowed with inverse = TRUE")
    if(abs(lambda) < 0.001)
      x <- exp(x)
    else{
      if(lambda > 0.001)
        x[x < (-1/lambda)] <- -1/lambda
      if(lambda < -0.001)
        x[x > (-1/lambda)] <- -1/lambda
      x <- ((x * lambda) + 1)^(1/lambda)
    }
    return(list(data = x))
  }
  else{
    if(abs(lambda-1) > 0.001) {
      if(any(x <= 0))
        stop("Transformation requires positive data")
      if(log.jacobian){
        Jdata <- x^(lambda - 1)
        if(any(Jdata <= 0))
          temp.list$log.jacobian <- log(prod(Jdata))
        else temp.list$log.jacobian <- sum(log(Jdata))
        Jdata <- NULL
      }
      if(abs(lambda) < 0.001)
        x <- log(x)
      else x <- ((x^lambda) - 1)/lambda
      if(any(c(is.na(x), is.nan(x))))
        stop("transformation has generated NA or NaN values")
      if(any(abs(x) == Inf))
        stop("transformation has generated Inf values")
    }
    else
      if(log.jacobian) log.jacobian <- 0
    if(log.jacobian)
      return(list(data = x, log.jacobian = log.jacobian))
    else
      return(list(data = x))
  }
}

"backtransform.moments" <-
  function(lambda, mean, variance, distribution,
           simul.back = FALSE, n.simul = 1000) 
{
  ##
  ## This function is called by krige.bayes and krige.conv functions 
  ## in the package geoR to backtransform predictions when the original variable
  ## was transformed by the Box-Cox transformation.
  ##
  ## WARNING: The Box-Cox transformation internal to the functions are:
  ##    Z = ((Y^lambda) -1)/lambda  for Y != 0 and Y != 1
  ##    Z = log(Y) for Y = 0
  ##    NO TRANSFORMATION is performed when lambda = 1
  ##
  ## The transformations can be done:
  ##    - by analytical approximation (setting simul.back = FALSE)
  ##  or  
  ##    - by simulation (setting simul.back = TRUE)
  res <- list(mean = numeric(), variance = numeric(), distribution = character()) 
  ni <- length(mean)
  mean <- as.vector(mean)
  variance <- as.vector(variance)
  if (ni != length(variance)) stop("mean and variances must have same length")
  if(abs(lambda-1) > 0.001){
    if(simul.back){
      res$distribution <- "back-transformed (Box-Cox) from Gaussian by simulation"
      ap.warn <- options()$warn
      options(warn = -1)
      temp.data <- matrix(rnorm(n = ni * n.simul,
                                mean = mean,
                                sd = sqrt(variance)),
                          nrow = ni)
      options(warn = ap.warn)
      ind.zero <- (variance < 1e-12)
      temp.data[ind.zero,  ] <- mean[ind.zero]
      remove(ind.zero)
      temp.data <- BCtransform(lambda = lambda, data = temp.data,
                               inverse = TRUE)$data
      if(lambda < -0.001) {
        res$mean  <-  "resulting distribution has no mean for negative lambda. Medians returned"
        res$variance  <-  "resulting distribution has no variance for negative lambda"
      }
      else{
        res$mean <- as.vector(apply(temp.data, 1, mean))
        res$variance <- as.vector(apply(temp.data, 1, var))
        quants <- apply(temp.data, 1, quantile, prob=c(.025,.5, .975))
        res$median <- drop(quants[2,])
        res$uncertainty <- drop((quants[3,] - quants[1,])/4)
      }
    }
    else{
      res$distribution <- "back-transformed (Box-Cox) from Gaussian"
      if(abs(lambda) < 0.001) {
        temp <- mean
        res$mean <- exp(mean + 0.5 * (variance))
        res$variance <- (exp(2 * temp + variance)) * (exp(variance) - 1)
        res$median <- exp(mean)
      }
      else{
        temp <- 1 + (lambda * mean)
        res$mean <- (temp^((1/lambda)-2) *
                     ((temp^2) + ((1-lambda)/2) * variance)) 
        if(abs(lambda - 0.5) < 0.001)
          res$variance <- variance * ((variance/8) + temp^2)
        else
          res$variance <- temp^((2/lambda)-2) *  variance
        res$median <- temp^(1/lambda)
      }
      res$uncertainty <- (qnorm(.975, mean = mean, sd = sqrt(variance)) -
                          qnorm(.025, mean = mean, sd = sqrt(variance)))/4
    }
  }
  else{
    res$distribution <- ifelse(missing(distribution), NULL, distribution)
    res$mean <- mean
    res$variance <- variance
  }
  return(res) 
}

##
## Correlations and covariances for the package geoR
## -------------------------------------------------
##
## Includes functions to compute cor. and cov,
## vectors and matrices and related operations
## 

"matern" <-
  function (u, phi, kappa) 
{
  if(is.vector(u)) names(u) <- NULL
  if(is.matrix(u)) dimnames(u) <- list(NULL, NULL)
  uphi <- u/phi
  uphi <- ifelse(u > 0,
                 (((2^(-(kappa-1)))/gamma(kappa)) *
                  (uphi^kappa) *
                  besselK(x=uphi, nu=kappa)), 1)    
  uphi[u > 600*phi] <- 0 
  return(uphi)
}

"cor.number" <- 
  function(cov.model= c("exponential", "matern", "gaussian",
             "spherical", "circular", "linear", "cubic", "wave", "power",
             "powered.exponential", "cauchy", "gneiting",
             "gneiting.matern", "pure.nugget"))
{
###	WARNING: codes below MUST be the same as in the C code
###              "cor_diag"
  cov.model <- match.arg(cov.model)
  cornumber <- switch(cov.model,
                      pure.nugget = as.integer(1),
                      exponential = as.integer(2),
                      spherical = as.integer(3),
                      gaussian = as.integer(4),
                      wave = as.integer(5),
                      cubic = as.integer(6),
                      power = as.integer(7),
                      powered.exponential = as.integer(8),
                      cauchy = as.integer(9),
                      gneiting = as.integer(10),
                      circular = as.integer(11),
                      matern = as.integer(12),
                      gneiting.matern = as.integer(13),
                      stop("wrong or no specification of cov.model")
                      )
  return(cornumber)
}

"cov.spatial" <-
  function(obj, cov.model = 'matern', cov.pars = stop("no cov.pars argument provided"),
           kappa = 0.5)
{
  ## extracting covariance paramters
  if(is.vector(cov.pars)) sigmasq <- cov.pars[1]
  else sigmasq <- cov.pars[, 1]
  if(is.vector(cov.pars)) phi <- cov.pars[2]
  else phi <- cov.pars[, 2]
  if(is.null(kappa)) kappa <- NA
  ## checking for nested models 
  if(is.vector(cov.pars)) ns <- 1
  else{
    ns <- nrow(cov.pars)
    if(length(cov.model) == 1) cov.model <- rep(cov.model, ns)
    if(length(kappa) == 1) kappa <- rep(kappa, ns)
  }
  if(length(cov.model) != ns) stop('wrong length for cov.model')
  if(length(kappa) != ns) stop('wrong length for kappa')
  ##
  cov.model <- sapply(cov.model, match.arg, c("matern", "exponential", "gaussian",
                                              "spherical", "circular", "cubic", "wave",
                                              "linear", "power", "powered.exponential", "cauchy",
                                              "gneiting", "gneiting.matern", "pure.nugget"))
  ## settings for power model (do not reverse order of the next two lines!)
  phi[cov.model == "linear"] <- 1
  cov.model[cov.model == "linear"] <- "power"
  ## checking input for cov. models with extra parameter(s)
  if(any(cov.model == 'gneiting.model') && ns > 1)
    stop('nested models including the gneiting.matern are not implemented') 
    for(i in 1:ns){
    if(cov.model[i] == "matern" | cov.model[i] == "powered.exponential" | 
       cov.model[i] == "cauchy" | cov.model[i] == "gneiting.matern"){
      if(is.na(kappa[i]))
        stop("for matern, powered.exponential, cauchy and gneiting.matern covariance functions the parameter kappa must be provided")
      if(cov.model[i] == "gneiting.matern" & length(kappa) != 2*ns)
        stop("gneiting.matern correlation function model requires a vector with 2 parameters in the argument kappa")
      if((cov.model[i] == "matern" | cov.model[i] == "powered.exponential" | 
          cov.model[i] == "cauchy") & length(kappa) != 1*ns)
        stop("kappa must have 1 parameter for this correlation function")
      if(cov.model[i] == "matern" & kappa[i] == 0.5) cov.model[i] == "exponential"
    }
    if(cov.model[i] == "power")
      if(any(phi[i] >= 2) | any(phi[i] <= 0))
        stop("for power model the phi parameters must be in the interval ]0,2[")
  }
  ##
  ## computing correlations/covariances
  ##
  covs <- array(0, dim = dim(obj))
  for(i in 1:ns) {
    if(phi[i] < 1e-12)
      cov.model[i] <- "pure.nugget"
    cov.values <- switch(cov.model[i],
                         pure.nugget = rep(0, length(obj)),
                         wave = (1/obj) * (phi[i] * sin(obj/phi[i])),
                         exponential = exp( - (obj/phi[i])),
                         matern = matern(u = obj, phi = phi[i], kappa = kappa[i]),
                         gaussian = exp( - ((obj/phi[i])^2)),
                         spherical = ifelse(obj < phi[i], (1 - 1.5 * (obj/phi[i]) +
                           0.5 * (obj/phi[i])^3), 0),
                         circular = {
                           obj.sc <- obj/phi[i];
                           obj.sc[obj.sc > 1] <- 1;
                           ifelse(obj < phi[i], (1 - (2 * ((obj.sc) *
                                                           sqrt(1 - ((obj.sc)^2)) +
                                                           asin(obj.sc)))/pi), 0)
                         },
                         cubic = {
                           obj.sc <- obj/phi[i];
                           ifelse(obj < phi[i], (1 - (7 * (obj.sc^2) -
                                                      8.75 * (obj.sc^3) +
                                                      3.5 * (obj.sc^5) -
                                                      0.75 * (obj.sc^7))), 0)
                         },
                         power = (obj)^phi,
                         powered.exponential = exp( - ((obj/phi[i])^kappa[i])),
                         cauchy = (1 + (obj/phi[i])^2)^(-kappa[i]),
                         gneiting = {
                           obj.sc <- obj/phi[i];
                           t2 <- (1 - obj.sc);
                           t2 <- ifelse(t2 > 0, (t2^8), 0);
                           (1 + 8 * obj.sc + 25 * (obj.sc^2) + 32 * (obj.sc^
                                                                     3)) * t2
                         },
                         gneiting.matern = { 
                           obj.sc <- obj/(phi[i] * kappa[2]);
                           t2 <- (1 - obj.sc);
                           t2 <- ifelse(t2 > 0, (t2^8), 0);
                    cov.values <- (1 + 8 * obj.sc + 25 * (obj.sc^2) + 32 * (obj.sc^3)) * t2;
                    cov.values * matern(u = obj, phi = phi[i], kappa = kappa[1])
                  },
                  stop("wrong or no specification of cov.model")
                  )
    cov.values <- sigmasq[i] * cov.values
    covs <- covs + cov.values
  }
  if(all(cov.model == "power")) covs <- max(covs) - covs
  else covs[obj < 1e-15] <- sum(sigmasq)
  return(covs)
}

"varcov.spatial" <-
  function(coords = NULL, dists.lowertri = NULL, cov.model = "matern",
            kappa = 0.5, nugget = 0, cov.pars = stop("no cov.pars argument"), 
            inv = FALSE, det = FALSE,
            func.inv = c("cholesky", "eigen", "svd", "solve"),
            scaled = FALSE, only.decomposition = FALSE, 
            sqrt.inv = FALSE, try.another.decomposition = TRUE,
            only.inv.lower.diag = FALSE) 
{
  if(! "package:stats" %in% search()) require(mva)
  ##
##  op.sem <- options()$show.error.message
##  options(show.error.message = FALSE)
##  on.exit(options(show.error.message = op.sem))
  require(methods)
  if(!exists("trySilent")){
    error.now <- options()$show.error.messages
    if (is.null(error.now) | error.now) 
      on.exit(options(show.error.messages = TRUE))
    options(show.error.messages = FALSE)
  }
  ##
  func.inv <- match.arg(func.inv)
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave", "linear", "power",
                           "powered.exponential", "cauchy", "gneiting",
                           "gneiting.matern", "pure.nugget"))
  if (only.inv.lower.diag)  inv <- TRUE
  if (is.null(coords) & is.null(dists.lowertri)) 
    stop("one of the arguments, coords or dists.lowertri must be provided")
  if (!is.null(coords) & !is.null(dists.lowertri)) 
    stop("only ONE argument, either coords or dists.lowertri must be provided")
  if (!is.null(coords))  n <- nrow(coords)
  if (!is.null(dists.lowertri))
    n <- as.integer(round(0.5 * (1 + sqrt(1 + 8 * length(dists.lowertri)))))
  tausq <- nugget
  if (is.vector(cov.pars)) {
    sigmasq <- cov.pars[1]
    phi <- cov.pars[2]
  }
  else {
    sigmasq <- cov.pars[, 1]
    phi <- cov.pars[, 2]
  }
##  print(c(tausq=tausq, sigmasq=sigmasq, phi=phi, kappa=kappa))
  if (!is.null(coords)) dists.lowertri <- as.vector(dist(coords))
  if (round(1e+12 * min(dists.lowertri)) == 0) 
    warning("Two or more pairs of data at coincident (or very close) locations. \nThis may cause crashes in some matrices operations.\n")
  varcov <- matrix(0, n, n)
  if (scaled) {
    if (all(phi < 1e-12)) 
      varcov <- diag(x = (1 + (tausq/sum(sigmasq))), n)
    else {
      if (is.vector(cov.pars)) cov.pars.sc <- c(1, phi)
      else cov.pars.sc <- cbind(1, phi)
      covvec <- cov.spatial(obj = dists.lowertri, cov.model = cov.model, 
                            kappa = kappa, cov.pars = cov.pars.sc)
      varcov[lower.tri(varcov)] <- covvec
      varcov <- t(varcov)
      varcov[lower.tri(varcov)] <- covvec
      if (is.R()) remove("covvec")
      else remove("covvec", frame = sys.nframe())
      diag(varcov) <- 1 + (tausq/sum(sigmasq))
    }
  }
  else {
    if (all(sigmasq < 1e-10) | all(phi < 1e-10)) {
      varcov <- diag(x = (tausq + sum(sigmasq)), n)
    }
    else {
      covvec <- cov.spatial(obj = dists.lowertri, cov.model = cov.model, 
                            kappa = kappa, cov.pars = cov.pars)
      varcov[lower.tri(varcov)] <- covvec
      varcov <- t(varcov)
      varcov[lower.tri(varcov)] <- covvec
      if (is.R()) remove("covvec")
      else remove("covvec", frame = sys.nframe())
      diag(varcov) <- tausq + sum(sigmasq)
    }
  }
  if (inv | det | only.decomposition | sqrt.inv | only.inv.lower.diag) {
    if (func.inv == "cholesky") {
      if(exists("trySilent"))
        varcov.sqrt <- trySilent(chol(varcov))
      else
        varcov.sqrt <- try(chol(varcov))
      if (inherits(varcov.sqrt, "try-error")) {
        if (try.another.decomposition){
          cat("trying another decomposition (svd)\n")
          func.inv <- "svd"
        }
        else {
          print(varcov.sqrt[1])
          stop()
        }
      }
      else {
        if (only.decomposition | inv) 
          if (is.R()) remove("varcov")
          else remove("varcov", frame = sys.nframe())
        if (only.decomposition == FALSE) {
          if (det) cov.logdeth <- sum(log(diag(varcov.sqrt)))
          if (sqrt.inv) inverse.sqrt <- solve(varcov.sqrt)
          if (inv) {
            if (is.R()) {
              invcov <- chol2inv(varcov.sqrt)
              if (!sqrt.inv)
                remove("varcov.sqrt")
            }
            else {
              invcov.sqrt <- solve.upper(varcov.sqrt)
              invcov <- invcov.sqrt %*% t(invcov.sqrt)
              if (!sqrt.inv) 
                remove("varcov.sqrt", frame = sys.nframe())
            }
          }
        }
      }
    }
    if (func.inv == "svd") {
      varcov.svd <- svd(varcov, nv = 0)
      if(exists("trySilent"))
        cov.logdeth <- trySilent(sum(log(sqrt(varcov.svd$d))))
      else
        cov.logdeth <- try(sum(log(sqrt(varcov.svd$d))))
      if (inherits(cov.logdeth, "try-error")) {
        if (try.another.decomposition){
          cat("trying another decomposition (eigen)\n")
          func.inv <- "eigen"
        }
        else {
          print(cov.logdeth[1])
          stop()
        }
      }
      else {
        if (only.decomposition | inv) 
          if (is.R())  remove("varcov")
          else remove("varcov", frame = sys.nframe())
        if (only.decomposition) 
          varcov.sqrt <- t(varcov.svd$u %*% (t(varcov.svd$u) * 
                                             sqrt(varcov.svd$d)))
        if (inv) {
          invcov <- t(varcov.svd$u %*% (t(varcov.svd$u) * 
                                        (1/varcov.svd$d)))
        }
        if (sqrt.inv) 
          inverse.sqrt <- t(varcov.svd$u %*% (t(varcov.svd$u) * 
                                              (1/sqrt(varcov.svd$d))))
      }
    }
    if (func.inv == "solve") {
      if (det) 
        stop("the option func.inv == \"solve\" does not allow computation of determinants. \nUse func.inv = \"chol\",\"svd\" or \"eigen\"\n")
      if(exists("trySilent"))
        invcov <- trySilent(solve(varcov))
      else
        invcov <- try(solve(varcov))
      if (inherits(cov.logdeth, "try-error")) {
        if (try.another.decomposition) 
          func.inv <- "eigen"
        else {
          print(invcov[1])
          stop()
        }
      }
      if (is.R()) remove("varcov")
      else remove("varcov", frame = sys.nframe())
    }
    if (func.inv == "eigen") {
      if(exists("trySilent")){
        varcov.eig <- trySilent(eigen(varcov, symmetric = TRUE))
        cov.logdeth <- trySilent(sum(log(sqrt(varcov.eig$val))))
      }
      else{
        varcov.eig <- try(eigen(varcov, symmetric = TRUE))
        cov.logdeth <- try(sum(log(sqrt(varcov.eig$val))))
      }
      if (inherits(cov.logdeth, "try.error") | inherits(varcov.eig, "try-error")) {
        diag(varcov) <- 1.0001 * diag(varcov)
        if(exists("trySilent")){
          varcov.eig <- trySilent(eigen(varcov, symmetric = TRUE))
          cov.logdeth <- trySilent(sum(log(sqrt(varcov.eig$val))))
        }
        else{
          varcov.eig <- try(eigen(varcov, symmetric = TRUE))
          cov.logdeth <- try(sum(log(sqrt(varcov.eig$val))))
        }
        if (inherits(cov.logdeth, "try.error") | inherits(varcov.eig, "try-error")) {
          return(list(crash.parms = c(tausq=tausq, sigmasq=sigmasq, phi=phi, kappa=kappa)))
        }
      }
      else {
        if (only.decomposition | inv) 
          if (is.R()) remove("varcov")
          else remove("varcov", frame = sys.nframe())
        if (only.decomposition) 
          varcov.sqrt <- (varcov.eig$vec %*% diag(sqrt(varcov.eig$val)) %*% 
                          t(varcov.eig$vec))
        if (inv) 
          invcov <- (varcov.eig$vec %*% diag(1/varcov.eig$val) %*% 
                     t(varcov.eig$vec))
        if (sqrt.inv) 
          inverse.sqrt <- (varcov.eig$vec %*% diag(1/sqrt(varcov.eig$val)) %*% 
                           t(varcov.eig$vec))
      }
    }
  }
  if (only.decomposition == FALSE) {
    if (det) {
      if (inv) {
        if (only.inv.lower.diag) 
          result <- list(lower.inverse = invcov[lower.tri(invcov)], 
                         diag.inverse = diag(invcov), log.det.to.half = cov.logdeth)
        else result <- list(inverse = invcov, log.det.to.half = cov.logdeth)
      }
      else {
        result <- list(varcov = varcov, log.det.to.half = cov.logdeth)
      }
      if (sqrt.inv) 
        result$sqrt.inverse <- inverse.sqrt
    }
    else {
      if (inv) {
        if (only.inv.lower.diag) 
          result <- list(lower.inverse = invcov[lower.tri(invcov)], 
                         diag.inverse = diag(invcov))
        else {
          if (sqrt.inv) 
            result <- list(inverse = invcov, sqrt.inverse = inverse.sqrt)
          else result <- list(inverse = invcov)
        }
      }
      else result <- list(varcov = varcov)
    }
  }
  else result <- list(sqrt.varcov = varcov.sqrt)
  result$crash.parms <- NULL
  return(result)
}


"geoRdefunct" <-
  function()
  {
    cat("\n")
    cat("The following functions are no longer used in geoR:")
    cat("---------------------------------------------------")
    cat("\nolsfit: use variofit() instead")
    cat("\nwlsfit: use variofit() instead")
    cat("\nlikfit.old: use likfit() instead")
    cat("\n")
  }
    
"olsfit" <- function(...)
  stop("this function is now obsolete.\nuse variofit() instead.")

"wlsfit" <- function(...)
  stop("this function is now obsolete.\nuse variofit() instead.")


"distdiag" <-
  function(coords)
  {
    ## returns the lower triangle of the matrix with euclidean distances
    ## between pairs of points, including the diagonal. 
    ##
    coords <- as.matrix(coords)
    dimc <- dim(coords)
    if(dimc[2] == 1 & dimc[1] == 2)
      return(0)
    else{
      if(dimc[2] != 2)
        stop("coords must have two columns")
      nc <- dimc[1]
      out <- as.double(rep(0, (nc * (nc+1)/2)))
      .C("distdiag",
         as.double(coords[,1]),
         as.double(coords[,2]),
         as.integer(nc),
         out, DUP = FALSE,
         PACKAGE = "geoR")
      return(out)
    }
  }

##
## Miscellaneous geoR functions
##

"cite.geoR" <- function()
{
    cat("\n")
    cat("To cite geoR in publications, use\n\n")
    msg <- "RIBEIRO Jr., P.J. & DIGGLE, P.J. (2001) geoR: A package for geostatistical analysis. R-NEWS, Vol 1, No 2, 15-18. ISSN 1609-3631."
    writeLines(strwrap(msg, prefix = "  "))
    cat("\n")
    msg <- paste("Please cite geoR when using it for data analysis!")
    writeLines(strwrap(msg))
    cat("\nA BibTeX entry for LaTeX users is\n\n")
    cat("  @Article{,\n")
    cat("     title	   = {{geoR}: a package for geostatistical analysis},\n")
    cat("     author        = {Ribeiro Jr., P.J. and Diggle, P.J.},\n")
    cat("     journal       = {R-NEWS},\n")
    cat("     year	   = {2001},\n")
    cat("     volume	   = {1},\n")
    cat("     number	   = {2},\n")
    cat("     pages	   = {15--18},\n")
    cat("     issn          = {1609-3631},\n")
    cat("     url           = {http://cran.R-project.org/doc/Rnews}\n")
    cat("   }\n\n")
}

#geoR.options <- function(messages = TRUE, ...)
#{
#  res <- list(...)
#  res$messages <- messages
#  .geoR.options <<- res
#  return(invisible())
#}

"coords2coords" <-
  function(coords, xlim, ylim, xlim.ori, ylim.ori)
{
  if(missing(ylim.ori)) xlim.ori <- range(coords[,1], na.rm=TRUE)
  if(missing(ylim.ori)) ylim.ori <- range(coords[,2], na.rm=TRUE)
  coords[,1] <- xlim[1] + (coords[,1] - xlim.ori[1]) * diff(xlim)/diff(xlim.ori)
  coords[,2] <- ylim[1] + (coords[,2] - ylim.ori[1]) * diff(ylim)/diff(ylim.ori)
  return(coords)
}

"zoom.coords" <-
  function(x, ...)
{
  UseMethod("zoom.coords")
}

"zoom.coords.default" <-
    function(x, xzoom, yzoom=xzoom, xlim.ori, ylim.ori, xoff=0, yoff=0, ...)
{
  if(missing(ylim.ori)) xlim.ori <- range(x[,1], na.rm=TRUE)
  if(missing(ylim.ori)) ylim.ori <- range(x[,2], na.rm=TRUE)
  xlim <- xlim.ori + c(-1,1) * (diff(xlim.ori)/2) * (xzoom - 1)
  ylim <- ylim.ori + c(-1,1) * (diff(ylim.ori)/2) * (yzoom - 1)
  res <- coords2coords(x, xlim=xlim, ylim=ylim, xlim.ori = xlim.ori, ylim.ori=ylim.ori)
  res[,1] <- res[,1] + xoff
  res[,2] <- res[,2] + yoff
  return(res)
}

"zoom.coords.geodata" <-
  function(x, ...)
{
  x$coords <- zoom.coords.default(x$coords, ...)
  if(!is.null(x$borders)) x$borders <- zoom.coords.default(x$borders, ...)
  if(!is.null(x$subarea.lims)) x$subarea.lims <- zoom.coords.default(x$subarea.lims, ...)
  return(x)
}

"rect.coords" <-
  function(coords, xzoom = 1, yzoom=xzoom, add.to.plot=TRUE, quiet=FALSE, ...)
{
  rx <- range(coords[,1], na.rm=TRUE)
  ry <- range(coords[,2], na.rm=TRUE)
  res <- cbind(c(rx,rev(rx)), rep(ry,c(2,2)))
  res <- zoom.coords(res, xzoom=xzoom, yzoom=yzoom)
  if(add.to.plot) rect(res[1,1], res[1,2], res[3,1], res[3,2], ...)
  if(quiet)
    return(invisible())
  else
    return(res)  
}

# make it generic with a method for geoR
"dup.coords" <-
  function(x)
{
  UseMethod("dup.coords")
}

"dup.coords.default" <-
  function(x)
{
  ap1 <- unclass(factor(paste("x",x[,1],"y",x[,2], sep="")))
  ap2 <- table(ap1)
  ap2 <- ap2[ap2 > 1]
  takecoords <- function(n){
    if(!is.null(rownames(x))) rownames(x[ap1 == n,])
    else (1:nrow(x))[ap1 == n]
    }
  res <- sapply(as.numeric(names(ap2)), takecoords)
  if(length(res) == 0) res <- NULL
  return(res)
}

"dup.coords.geodata" <-
  function(x)
{
  return(dup.coords.default(x$coords))
}

"subarea" <-
  function(geodata, xlim, ylim, ...)
{
  if(class(geodata) != "geodata")
    stop("an object of the class geodata must be provided")
  if(missing(xlim) & !missing(ylim)) xlim <- c(-Inf, +Inf)
  if(!missing(xlim) & missing(ylim)) ylim <- c(-Inf, +Inf)
  if(missing(xlim) & missing(ylim)){
    cat("Enter 2 points defining the corners of the subarea\n")
    pt <- locator(2)
    xlim <- sort(pt[[1]])
    ylim <- sort(pt[[2]])
  }
  if(!is.vector(xlim) || length(xlim) != 2)
    stop("xlim must be a vector with 2 elements")
  if(!is.vector(ylim) || length(ylim) != 2)
    stop("ylim must be a vector with 2 elements")
  geo <- geodata
  ind <- (geodata$coords[,1] > xlim[1] & geodata$coords[,1] < xlim[2] &  
          geodata$coords[,2] > ylim[1] & geodata$coords[,2] < ylim[2])  
  geo$coords <- geodata$coords[ind,]
  xlim.all <- c(xlim, range(geo$coords[,1]))
  ylim.all <- c(ylim, range(geo$coords[,2]))
  if(is.vector(geodata$data)) geo$data <- geodata$data[ind]
  else geo$data <- geodata$data[ind,]
  geo$units.m <- geodata$units.m[ind]
  if(!is.null(geodata$covariate)){
    if(is.vector(geodata$covariate))
      geo$covariate <- geodata$covariate[ind]
    if(is.matrix(geodata$covariate) |  is.data.frame(geodata$covariate))   
      geo$covariate <- geodata$covariate[ind,]
  }
  if(!is.null(geodata$borders)){
    geo$borders <- geodata$borders[(geodata$borders[,1]>xlim[1] & geodata$borders[,1]<xlim[2] &  
                                    geodata$borders[,2]>ylim[1] & geodata$borders[,2] < ylim[2]),]
    xlim.all <- c(xlim.all, range(geo$borders[,1]))
    ylim.all <- c(ylim.all, range(geo$borders[,2]))
  }
  geo$subarea.lims <- cbind(range(xlim.all[is.finite(xlim.all)]),
                            range(ylim.all[is.finite(ylim.all)]))
  return(geo)
}

"krige.bayes" <- 
  function(geodata, coords=geodata$coords, data=geodata$data,
           locations = "no", borders = NULL, model, prior, output)
{
  ##
  ## ======================= PART 1 ==============================
  ##                Reading and Checking Input
  ## =============================================================
  ##
  ## setting output object and environments
  ##
  if(missing(geodata))
    geodata <- list(coords = coords, data = data)
  if(! "package:stats" %in% search()) require(mva)
  call.fc <- match.call()
  seed <- get(".Random.seed", envir=.GlobalEnv, inherits = FALSE)
  do.prediction <- ifelse(all(locations == "no"), FALSE, TRUE)
  base.env <- sys.frame(sys.nframe())
  message.prediction <- character()
  phidist <- list()
  "krige.bayes.counter" <- 
    function(.temp.ap, n.points){  
      if(n.points <= 50) cat(paste(.temp.ap, ", ", sep=""))
      if(n.points > 50 & n.points <= 500)
        if(.temp.ap %% 10 == 1) cat(paste(.temp.ap, ", ", sep=""))
      if(n.points > 500)
        if(.temp.ap %% 100 == 1) cat(paste(.temp.ap, ", ", sep=""))
      if(n.points == .temp.ap) cat("\n")
    }
  kb <- list(posterior = list(beta=list(), sigmasq=list(),
               phi=list(), tausq.rel=list()),
             predictive=list(mean = NULL, variance = NULL, distribution = NULL))
  class(kb$posterior) <- c("posterior.krige.bayes", "variomodel")
  class(kb$predictive) <- "predictive.krige.bayes"
  pred.env <- new.env()
  ##
  ## reading model input
  ##
  if(missing(model))
    model <- model.control()
  else{
##    if(is.null(class(model)) || class(model) != "model.geoR"){
    if(length(class(model)) == 0 || class(model) != "model.geoR"){
      if(!is.list(model))
        stop("krige.bayes: the argument model only takes a list or an output of the function model.control")
      else{
        model.names <- c("trend.d", "trend.l", "cov.model", "kappa", "aniso.pars", "lambda") 
        model.user <- model
        model <- list()
        if(length(model.user) > 0){
          for(i in 1:length(model.user)){
            n.match <- match.arg(names(model.user)[i], model.names)
            model[[n.match]] <- model.user[[i]]
          }
        }    
        if(is.null(model$trend.d)) model$trend.d <- "cte"  
        if(is.null(model$trend.l)) model$trend.l <- "cte"  
        if(is.null(model$cov.model)) model$cov.model <- "matern"  
        if(is.null(model$kappa)) model$kappa <- 0.5
        if(is.null(model$aniso.pars)) model$aniso.pars <- NULL 
        if(is.null(model$lambda)) model$lambda <- 1
        model <- model.control(trend.d = model$trend.d,
                               trend.l = model$trend.l,
                               cov.model = model$cov.model,
                               kappa = model$kappa,
                               aniso.pars = model$aniso.pars,
                               lambda = model$lambda)
      }
    }
  }
  cov.model <- model$cov.model
  cov.model.number <- cor.number(cov.model)
  kappa <- model$kappa
  lambda <- model$lambda
  ##
  ## reading prior specification
  ##
  if(missing(prior))
    prior <- prior.control()
  else{
##    if(is.null(class(prior)) || class(prior) != "prior.geoR"){
    if(length(class(prior)) == 0 || class(prior) != "prior.geoR"){
      if(!is.list(prior))
        stop("krige.bayes: the argument prior only takes a list or an output of the function prior.control")
      else{
        prior.names <- c("beta.prior", "beta", "beta.var.std", "sigmasq.prior",
                         "sigmasq", "df.sigmasq", "phi.prior", "phi", "phi.discrete",
                         "tausq.rel.prior", "tausq.rel", "tausq.rel.discrete") 
        prior.user <- prior
        prior <- list()
        if(length(prior.user) > 0){
          for(i in 1:length(prior.user)){
            n.match <- match.arg(names(prior.user)[i], prior.names)
            prior[[n.match]] <- prior.user[[i]]
          }
        }
        ## DO NOT CHANGE ORDER OF THE NEXT 3 LINES
        if(is.null(prior$beta)) prior$beta <-  NULL
        if(is.null(prior$beta.prior)) prior$beta.prior <-  c("flat", "normal", "fixed")
        if(is.null(prior$beta.var.std)) prior$beta.var.std <-  NULL
        ## DO NOT CHANGE ORDER OF THE NEXT 3 LINES
        if(is.null(prior$sigmasq)) prior$sigmasq <- NULL
        if(is.null(prior$sigmasq.prior))
          prior$sigmasq.prior <- c("reciprocal",  "uniform", "sc.inv.chisq",  "fixed") 
        if(is.null(prior$df.sigmasq)) prior$df.sigmasq <- NULL
        ## DO NOT CHANGE ORDER OF THE NEXT 3 LINES
        if(is.null(prior$phi)) prior$phi <- NULL
        if(is.null(prior$phi.prior))
          prior$phi.prior <- c("uniform", "exponential", "fixed", "squared.reciprocal","reciprocal")
        if(is.null(prior$phi.discrete)) prior$phi.discrete <- NULL
        ## DO NOT CHANGE ORDER OF THE NEXT 3 LINES
        if(is.null(prior$tausq.rel)) prior$tausq.rel <- 0
        if(is.null(prior$tausq.rel.prior))
          prior$tausq.rel.prior <- c("fixed", "uniform")
        if(is.null(prior$tausq.rel.discrete)) prior$tausq.rel.discrete <- NULL 
        prior <- prior.control(beta.prior = prior$beta.prior,
                               beta = prior$beta,
                               beta.var.std = prior$beta.var.std,
                               sigmasq.prior = prior$sigmasq.prior,
                               sigmasq = prior$sigmasq,
                               df.sigmasq = prior$df.sigmasq,
                               phi.prior = prior$phi.prior,
                               phi = prior$phi,
                               phi.discrete = prior$phi.discrete, 
                               tausq.rel.prior = prior$tausq.rel.prior,
                               tausq.rel = prior$tausq.rel,
                               tausq.rel.discrete = prior$tausq.rel.discrete)
      }
    }
  }
  kb$prior <- prior$priors.info
  kb$model <- model
  class(kb$prior) <- "prior.geoR"
  ##
  if(prior$dep.prior){
    npr <- length(prior$sigmasq)
    nphipr <- nrow(as.matrix(prior$sigmasq))
    ntaupr <- ncol(as.matrix(prior$sigmasq))
  }
  else nphipr <- ntaupr <- npr <- 1
  beta <- prior$beta
  if(prior$beta.prior == "fixed") beta.fixed <- beta
  if(prior$beta.prior == "normal"){
    nbeta <- attr(prior$beta.var.std, "Size")
    betares <- list()
    for(j in 1:ntaupr){
      for(i in 1:nphipr){
        beta <- array(prior$beta, dim=c(nphipr, ntaupr, nbeta))[i,j,]
        beta.var.std <- array(prior$beta.var.std,
                              dim=c(nphipr, ntaupr, nbeta^2))[i,j,]
        beta.var.std <- matrix(beta.var.std, nbeta, nbeta)
        ind.pos <- (j-1)*nphipr + i
        betares[[ind.pos]] <-
          list(iv = solve.geoR(beta.var.std),
               ivm = drop(solve.geoR(beta.var.std, beta)),
               mivm = drop(crossprod(beta, solve.geoR(beta.var.std, beta))))
      }
    }
  }
  if(prior$sigmasq.prior != "fixed") S2.prior <- prior$sigmasq
  else sigmasq.fixed <- S2.prior <- prior$sigmasq
  df.sigmasq.prior <- prior$df.sigmasq
  ##
  phi.discrete <- prior$phi.discrete
  exponential.par <- prior$phi  
  ##
  tausq.rel.fixed <- tausq.rel <- prior$tausq.rel
  if(tausq.rel.fixed > 2)
    print("WARNING: relative (NOT absolute) nugget should be specified.")
  tausq.rel.discrete <- prior$tausq.rel.discrete
  ##
  ## checking data configuration
  ##
  n <- length(data)
  ##
  if(is.vector(coords)){
    coords <- cbind(coords, 0)
    warning("krige.bayes: vector of coordinates: assuming one spatial dimension (transect)")
  }
  coords <- as.matrix(coords)
  dists.env <- new.env()
  assign("data.dist", as.vector(dist(coords)), envir=dists.env)
  data.dist.range <- range(get("data.dist", envir=dists.env))
  data.dist.min <- data.dist.range[1]
  data.dist.max <- data.dist.range[2]
  if(round(1e12*data.dist.min) == 0)
    stop("krige.bayes: this function does not allow two data at same location")
  ##
  ## reading output options
  ##
  ## `output` may be an "output.geoR" object (from output.control()) or a
  ## plain list; list names are matched (with partial matching, via
  ## match.arg) against the valid option names, missing entries get
  ## defaults, and the result is normalised through output.control().
  if(missing(output)) output <- output.control()
  else{
    ## NOTE(review): `class(output) != "output.geoR"` — inherits() would be
    ## the robust test here since class() can have length > 1; confirm
    ## before changing, as this is load-bearing dispatch.
    if(length(class(output)) == 0 || class(output) != "output.geoR"){
      if(!is.list(output))
        stop("krige.bayes: the argument output only takes a list or an output of the function output.control")
      else{
        ## valid user-settable option names
        output.names <- c("n.posterior","n.predictive","moments","n.back.moments","simulations.predictive",
                          "mean.var","quantile","threshold","signal","messages.screen")
        output.user <- output
        output <- list()
        if(length(output.user) > 0){
          ## copy user options across, expanding abbreviated names
          for(i in 1:length(output.user)){
            n.match <- match.arg(names(output.user)[i], output.names)
            output[[n.match]] <- output.user[[i]]
          }
        }
        ## fill defaults for anything the user did not set
        if(is.null(output$n.posterior)) output$n.posterior <- 1000 
        if(is.null(output$n.predictive)) output$n.predictive <- NULL
        if(is.null(output$moments)) output$moments <- TRUE
        if(is.null(output$n.back.moments)) output$n.back.moments <- 1000 
        if(is.null(output$simulations.predictive)){
          if(is.null(output$n.predictive)) output$simulations.predictive <- NULL
          else
            output$simulations.predictive <- ifelse(output$n.predictive > 0, TRUE, FALSE)
        }
        if(is.null(output$mean.var)) output$mean.var <- NULL
        if(is.null(output$quantile)) output$quantile <- NULL
        if(is.null(output$threshold)) output$threshold <- NULL
        if(is.null(output$signal)) output$signal <- NULL
        if(is.null(output$messages.screen)) output$messages.screen <- TRUE
        ## rebuild a proper "output.geoR" object from the merged options
        output <- output.control(n.posterior = output$n.posterior,
                                 n.predictive = output$n.predictive,
                                 moments = output$moments,
                                 n.back.moments = output$n.back.moments, 
                                 simulations.predictive = output$simulations.predictive,
                                 mean.var = output$mean.var, quantile = output$quantile,
                                 threshold = output$threshold, signal = output$signal,
                                 message = output$messages.screen)
      }
    }
  }
  ## frequently used options copied to local variables
  n.posterior <- output$n.posterior
  messages.screen <- output$messages.screen
  if(do.prediction){
    ## resolve prediction-related options, with defaults that depend on
    ## whether phi is fixed (fixed phi => closed-form moments, no simulation)
    n.predictive <- output$n.predictive
    if(is.null(n.predictive))
      n.predictive <- ifelse(prior$phi.prior == "fixed", 0, n.posterior)
    simulations.predictive <- output$simulations.predictive
    if(is.null(simulations.predictive))
      simulations.predictive <- ifelse(prior$phi.prior == "fixed", FALSE, TRUE)
    keep.simulations <- output$keep.simulations
    if(is.null(keep.simulations))
      keep.simulations <- simulations.predictive
    mean.estimator <- output$mean.estimator
    if(is.null(mean.estimator))
      mean.estimator <- ifelse(simulations.predictive, TRUE, FALSE)
    moments <- output$moments    
    ## moments are always computed when phi is fixed
    if(is.null(moments) | prior$phi.prior == "fixed")
      moments <- TRUE
    n.back.moments <- output$n.back.moments
    signal <- ifelse(is.null(output$signal), TRUE, output$signal)
    quantile.estimator <- output$quantile.estimator
    probability.estimator <- output$probability.estimator
    ## simulations requested but no sample size given: use 1000
    if(simulations.predictive & n.predictive == 0) n.predictive <- 1000
  }
  ##
  if(!do.prediction) {
    if(prior$beta.prior != "fixed" & prior$sigmasq.prior != "fixed"  &
       prior$phi.prior != "fixed" & output$messages.screen){
      cat("krige.bayes: no prediction locations provided.\n")
      cat("             Only samples of the posterior for the parameters will be returned.\n")
    }
  }
  ##
  ## Box-Cox transformation
  ##
  ## lambda == 1 (within 1e-3 tolerance) means no transformation.
  if(abs(lambda-1) > 0.001) {
    if(messages.screen)
      ## `dig` is a partially-matched abbreviation of `digits`
      cat(paste("krige.bayes: Box-Cox's transformation performed for lambda =", round(lambda,dig=3), "\n"))
    data <- BCtransform(data, lambda = lambda)$data
  }
  ##
  ## Building trend (covariates/design) matrices:   
  ##
  dimnames(coords) <- list(NULL, NULL)
  if(nrow(coords) != length(data))
    stop("krige.bayes: number of data is different of number of data locations (coordinates)")
  ## design matrix at the data locations
  trend.data <- unclass(trend.spatial(trend=model$trend.d, geodata = geodata))
  if(nrow(trend.data) != nrow(coords))
    stop("trend specification not compatible with the length of the data") 
  ## number of mean parameters and their labels (beta0, beta1, ...)
  beta.size <- ncol(trend.data)
  if(beta.size > 1)
    beta.names <- paste("beta", (0:(beta.size-1)), sep="")
  else beta.names <- "beta"
  if(prior$beta.prior == "normal" |  prior$beta.prior == "fixed"){
    if(beta.size != length(beta))
      stop("size of beta incompatible with the trend model (covariates)")
  }
  if(do.prediction) {
    ##
    ## selecting locations inside the borders 
    ##
    if(!is.null(borders)){
      locations <- locations.inside(locations, borders)
      ## prediction is cancelled when no location survives the border filter
      if(nrow(locations) == 0){
        warning("\nkrige.bayes: no prediction to be performed.\n             There are no prediction locations inside the borders")
        do.prediction <- FALSE
      }
      if(messages.screen)
        cat("krige.bayes: results will be returned only for prediction locations inside the borders\n")
    }
    ##
    ## Checking the spatial dimension for prediction
    ##  1 (data/prediction on a transect) or 2 (data/prediction on an area)
    ##
    if(is.vector(locations)) {
      if(length(locations) == 2) {
        ## ambiguous input: a length-2 vector is taken as ONE 2-D point
        locations <- t(as.matrix(locations))
        warning("krige.bayes: THE FUNCTION IS CONSIDERING THAT YOU HAVE ENTERED WITH 1 POINT TO BE PREDICTED IN A TWO DIMENSION REGION\n")
      }
      ## otherwise treat as a 1-D transect: second coordinate set to 0
      else locations <- as.matrix(cbind(locations, 0))
    }
    else locations <- as.matrix(locations)
    ##
    ## Checking for 1D prediction 
    ##
    if(length(unique(locations[,1])) == 1 | length(unique(locations[,2])) == 1)
      krige1d <- TRUE
    else krige1d <- FALSE
    ##
    ## Checking trend specification
    ##
    ## trend.d (data) and trend.l (prediction) must be given in the same
    ## form: both formulas, both trend.spatial objects, or both names.
    if(inherits(model$trend.d, "formula") | inherits(model$trend.l, "formula")){
      if((inherits(model$trend.d, "formula") == FALSE) | (inherits(model$trend.l, "formula") == FALSE))
        stop("krige.bayes: model$trend.d and model$trend.l must have similar specification\n")
    }
    else{
##      if((!is.null(class(model$trend.d)) && class(model$trend.d) == "trend.spatial") &
##         (!is.null(class(model$trend.l)) && class(model$trend.l) == "trend.spatial")){
      if((length(class(model$trend.d)) > 0 && class(model$trend.d) == "trend.spatial") &
         (length(class(model$trend.l)) > 0 && class(model$trend.l) == "trend.spatial")){
        if(ncol(model$trend.d) != ncol(model$trend.l))
          stop("krige.bayes: trend.d and trend.l do not have the same number of columns")
      }
      else
        if(model$trend.d != model$trend.l)
          stop("krige.bayes: especification of model$trend.l and model$trend.d must be similar")
    }
    ##
    if(messages.screen){
      cat(switch(as.character(model$trend.d)[1],
                 "cte" = "krige.bayes: model with constant mean",
                 "1st" = "krige.bayes: model with mean given by a 1st order polynomial on the coordinates",
                 "2nd" = "krige.bayes: model with mean given by a 2nd order polynomial on the coordinates",
                 "krige.bayes: model with mean defined by covariates provided by the user"))
      cat("\n")
    }
    ##
    ## design matrix at the prediction locations, stored in pred.env so the
    ## inner prediction functions can reach it without copying
    dimnames(locations) <- list(NULL, NULL)
    assign("trend.loc", unclass(trend.spatial(trend=model$trend.l, geodata = list(coords = locations))),
           envir=pred.env)
    ni <- nrow(get("trend.loc", envir=pred.env))
    if(nrow(locations) != ni)
      stop("trend.l is not compatible with number of prediction locations")
    ## accumulators for E[Y] and E[Y^2] across the discrete (phi,tausq) grid
    expect.env <- new.env()
    assign("expect", 0, envir=expect.env)
    assign("expect2", 0, envir=expect.env)
  }
  ##
  ## Anisotropy correction
  ##   (warning: this must be placed here, AFTER trend matrices be defined)
  ##
  if(!is.null(model$aniso.pars)) {
#    if((abs(model$aniso.pars[1]) > 0.001) & (abs(model$aniso.pars[2] - 1) > 0.001)){
    ## only apply when the anisotropy ratio differs from 1
    if(abs(model$aniso.pars[2] - 1) > 0.001){
      if(messages.screen)
        cat("krige.bayes: anisotropy parameters provided and assumed to be constants\n")
      ## transform coordinates to the isotropic space
      coords <- coords.aniso(coords = coords, aniso.pars = model$aniso.pars)
      if(do.prediction)
        locations <- coords.aniso(coords = locations, 
                                  aniso.pars = model$aniso.pars)
      ## data-to-data distances must be recomputed in the transformed space
      remove(dists.env)
      dists.env <- new.env()
      assign("data.dist", as.vector(dist(coords)), envir=dists.env)
    }
  }  
  ##
  ## Distances between data and prediction locations
  ## Must be here, AFTER anisotropy be checked
  ##
  if(do.prediction){
    ## d0: n x ni matrix of distances, kept in pred.env
    assign("d0", loccoords(coords = coords, locations = locations), envir=pred.env)
    ##
    ## checking coincident data and prediction locations
    ##
    ## columns of d0 with a (near) zero entry mark prediction locations that
    ## coincide with a data location; at those points the predictive is
    ## degenerate and handled specially later.
    loc.coincide <- apply(get("d0", envir=pred.env), 2, function(x){any(x < 1e-10)})
    if(any(loc.coincide))
      loc.coincide <- (1:ni)[loc.coincide]
    else
      loc.coincide <- NULL
    if(!is.null(loc.coincide)){
      temp.f <- function(x, data){return(data[x < 1e-10])}
      data.coincide <- apply(get("d0", envir=pred.env)[,loc.coincide, drop=FALSE],
                             2,temp.f, data=data)
    }
    else
      data.coincide <- NULL
    n.loc.coincide <- length(loc.coincide)
    assign("loc.coincide", loc.coincide, envir=pred.env)
    assign("data.coincide", data.coincide, envir=pred.env)
    remove(data.coincide, loc.coincide)
    if(is.R()) gc(verbose=FALSE)
  }
  ##
  ## Preparing prior information on beta and sigmasq
  ##
  ## One list entry per prior specification (npr > 1 only for the "normal"
  ## beta prior with several prior sets).  The encodings are:
  ##   beta:    mivm/ivm/iv are (prior mean x inv-var), inv-var x mean terms;
  ##            p counts flat-prior parameters absorbed in the df correction.
  ##   sigmasq: df.sigmasq/n0S0 parameterise a scaled-inv-chisq prior
  ##            (df = Inf encodes "fixed").
  beta.info <- list()
  sigmasq.info <- list()
  for(i in 1:npr){
    beta.info[[i]] <-
      switch(prior$beta.prior,
             fixed = list(mivm = 0, ivm = 0, iv = Inf, beta.fixed = beta.fixed, p = 0),
             flat = list(mivm = 0, ivm = 0, iv = 0, p = beta.size),
             normal = list(mivm = betares[[i]]$mivm, ivm = betares[[i]]$ivm,
               iv = betares[[i]]$iv, p = 0))
    sigmasq.info[[i]] <-
      switch(prior$sigmasq.prior,
             fixed = list(df.sigmasq = Inf, n0S0 = 0,
               sigmasq.fixed = sigmasq.fixed),
             reciprocal = list(df.sigmasq = 0, n0S0 = 0),
             uniform = list(df.sigmasq = -2, n0S0 = 0),
             sc.inv.chisq = list(df.sigmasq = df.sigmasq.prior, n0S0 = df.sigmasq.prior*S2.prior[i]))
  }
  ## scalar copies attached to the list itself (used where the per-prior
  ## index is irrelevant, e.g. the degrees-of-freedom computation)
  beta.info$p <- switch(prior$beta.prior,
                        fixed = 0,
                        flat = beta.size,
                        normal = 0)
  sigmasq.info$df.sigmasq <- switch(prior$sigmasq.prior,
                                    fixed = Inf,
                                    reciprocal = 0,
                                    uniform = -2, 
                                    sc.inv.chisq = df.sigmasq.prior)
  ##
  ## ====================== PART 2 =============================
  ##                 FIXED PHI AND TAUSQ.REL
  ## ===========================================================
  ## 
  if(prior$phi.prior == "fixed"){
    phi.fixed <- prior$phi
    ##
    ## Computing parameters of the posterior for $\(\beta, \sigma^2)$ 
    ## and moments of the predictive (if applies)
    ##
    ## With phi and tausq.rel fixed the posterior is available in closed
    ## form; beta.sigmasq.post does all the linear algebra.
    bsp <- beta.sigmasq.post(n = n, beta.info = beta.info[[1]],
                             sigmasq.info = sigmasq.info[[1]], 
                             env.dists = dists.env,
                             model = list(cov.model = model$cov.model, kappa = model$kappa),
                             xmat = trend.data, y = data,
                             phi = phi.fixed, tausq.rel = tausq.rel.fixed,
                             do.prediction.moments = (do.prediction && moments),
                             do.prediction.simulations = (do.prediction && simulations.predictive),
                             env.pred = pred.env,
                             signal = (do.prediction && signal))
    ##
    ## Preparing output of the posterior distribution
    ##    
    if(prior$beta.prior == "fixed")
      kb$posterior$beta <- list(status = "fixed", fixed.value = beta.fixed)
    else{
      ## beta | sigmasq is normal; marginally t unless sigmasq is fixed
      if(prior$sigmasq.prior == "fixed")
        kb$posterior$beta <- list(distribution = "normal")
      else
        kb$posterior$beta <- list(distribution = "t",
                                  conditional = "normal")
      kb$posterior$beta$pars <- list(mean = bsp$beta.post,
                                     var = bsp$S2.post *
                                     bsp$beta.var.std.post)
      attr(kb$posterior$beta$pars$var, "Size") <- beta.size
      class(kb$posterior$beta$pars$var) <- "betavar"
    }        
    if(prior$sigmasq.prior == "fixed")
      kb$posterior$sigmasq <- list(status="fixed", fixed.value=sigmasq.fixed)
    else
      kb$posterior$sigmasq <- list(distribution = "sc.inv.chisq",
                                   pars = list(df = bsp$df.post,
                                     S2 = bsp$S2.post))
    kb$posterior$phi<- list(status= "fixed", fixed.value = phi.fixed)
    kb$posterior$tausq.rel <-
      list(status= "fixed", fixed.value = tausq.rel.fixed)
    ##
    ## Preparing output of the predictive distribution
    ##
    kb$predictive$mean <- bsp$pred.mean
    kb$predictive$variance <- bsp$pred.var
    kb$predictive$distribution <- ifelse(prior$sigmasq.prior == "fixed",
                                         "normal", "t")
    ## free the (possibly large) prediction vectors held inside bsp
    bsp[c("pred.mean", "pred.var")] <- NULL
    ##
    ## preparing objects for simulating from the predictive
    ##
    ## The fixed-phi case is recast as a degenerate one-point discrete
    ## distribution so PART 4 can treat both cases uniformly.
    if(do.prediction && simulations.predictive && n.predictive > 0){
      phidist$s2 <-  as.matrix(bsp$S2.post) 
      phidist$probphitausq <-  as.matrix(1)
      phidist$beta <- array(bsp$beta.post, c(1,1,beta.size)) 
      phidist$varbeta  <- array(bsp$beta.var.std.post, c(1,1,beta.size^2))
      phi.unique <- phidist$phitausq <- t(c(phi.fixed, tausq.rel.fixed))
      df.model <- bsp$df.post 
      ind.length <- 1
      inv.lower <- array(bsp$inv.lower, dim=c(1,1,(n*(n-1)/2)))
      inv.diag <- array(bsp$inv.diag, dim=c(1,1,n))
      ind.table <- n.predictive
      phi.discrete <- phi.fixed
      tausq.rel.discrete <- tausq.rel.fixed
    }
  }
  else{
    ##
    ## ====================== PART 3 =============================
    ##                 RANDOM PHI AND TAUSQ.REL
    ## ===========================================================
    ##
    if(messages.screen)
      cat("krige.bayes: computing the discrete posterior of phi/tausq.rel\n")
    ##
    ## Preparing discrete set for phi and/or tausq.rel
    ##
    if(is.null(phi.discrete)){
      ## default grid: 50 equally spaced values in (0, 2*max distance];
      ## `l` is a partially-matched abbreviation of `length.out`
      phi.discrete <- seq(0, 2 * data.dist.max, l=51)[-1]
      if(messages.screen)
        cat("krige.bayes: argument `phi.discrete` not provided, using default values\n")
    } 
    if(!is.numeric(phi.discrete)) stop("non-numerical value provided in phi.discrete")
    if(length(phi.discrete) == 1)
      stop("only one value provided in phi.discrete. Use prior.phi=`fixed`")
    n.phi.discrete <- length(phi.discrete)
    n.tausq.rel.discrete <- length(tausq.rel.discrete)
    phi.names <- paste("phi", phi.discrete, sep="")
    tausq.rel.names <- paste("tausqrel", tausq.rel.discrete, sep="")
    ## joint support: all (phi, tausq.rel) combinations, one row each
    phidist$phitausq <- as.matrix(expand.grid(phi.discrete, tausq.rel.discrete))
    ## user-supplied prior probabilities are appended as extra columns
    ## (columns 3 and 4), read later by phi.tausq.rel.post
    if(prior$phi.prior == "user" | prior$tausq.rel.prior == "user"){
      if(prior$tausq.rel.prior == "fixed")
        phidist$phitausq <-
          cbind(phidist$phitausq, prior$priors.info$phi$probs, 1)
      else{
        ## NOTE(review): the test reads prior$joint.phi.tausq but the value
        ## used is prior$joint.phi.tausq.rel — verify the prior object
        ## actually carries both names.
        if(is.null(prior$joint.phi.tausq))
          phidist$phitausq <-
            cbind(phidist$phitausq,
                  as.matrix(expand.grid(prior$priors.info$phi$probs,
                                        prior$priors.info$tausq.rel$probs)))
        else
          phidist$phitausq <-
            cbind(phidist$phitausq, as.vector(prior$joint.phi.tausq.rel), 1)
      }
    }
    dimnames(phidist$phitausq) <- list(NULL, NULL)
    ##
    ##  Degrees of freedom for the posteriors
    ##
    df.model <- ifelse(sigmasq.info$df.sigmasq == Inf, Inf,
                       (n + sigmasq.info$df.sigmasq - beta.info$p))
    ##
    ## Function to compute the posterior probabilities
    ## for each parameter sets (phi, tausq.rel)
    ## 
    phi.tausq.rel.post <- function(phinug){
      ## Worker applied to one row of phidist$phitausq:
      ##   phinug = c(phi, tausq.rel[, prior.prob.phi, prior.prob.tausq]).
      ## Computes the (unnormalised) posterior probability of this
      ## (phi, tausq.rel) pair and writes results back into the caller's
      ## frame (`fn.frame`) and environments — it returns nothing.
      ## Closure over: counter.env, messages.screen, ntocount, prior, npr,
      ## beta.info, sigmasq.info, dists.env, model, trend.data, data,
      ## do.prediction, moments, simulations.predictive, pred.env, signal,
      ## exponential.par, expect.env, phi.discrete, tausq.rel.discrete,
      ## fn.frame.
      par.set <- get("parset", envir=counter.env)
      if(messages.screen){
        krige.bayes.counter(.temp.ap = par.set,
                            n.points = ntocount)
      }
      assign("parset", get("parset", envir=counter.env)+1, envir=counter.env)
      phi <- phinug[1]
      tausq.rel <- phinug[2]
      ## with multiple normal beta priors, each grid point has its own prior
      if(prior$beta.prior == "normal" && npr > 1)
        info.id <- par.set
      else info.id <- 1
      bsp <- beta.sigmasq.post(n = n, beta.info = beta.info[[info.id]],
                               sigmasq.info = sigmasq.info[[info.id]],
                               env.dists = dists.env,
                               model = list(cov.model = model$cov.model,
                                 kappa = model$kappa),
                               xmat = trend.data, y = data, phi = phi,
                               tausq.rel = tausq.rel, dets = TRUE,
                               do.prediction.moments = (do.prediction && moments),
                               do.prediction.simulations = (do.prediction && simulations.predictive),
                               env.pred = pred.env, signal = signal)
      ## log of the marginal posterior kernel for (phi, tausq.rel)
      logprobphitausq <-  (-0.5) * log(bsp$det.XiRX) - (bsp$log.det.to.half) -
        (bsp$df.post/2) * log(bsp$S2.post)
      ##
      ## add the log-prior term for phi (column 3 holds user prior probs)
      if(prior$phi.prior == "user"){
        if(phinug[3] > 0) logprobphitausq <- logprobphitausq + log(phinug[3])
        else logprobphitausq <- -Inf
      }
      if(prior$phi.prior == "reciprocal"){
        if(phi > 0) logprobphitausq <- logprobphitausq - log(phi)
        else logprobphitausq <- -Inf
      }
      if(prior$phi.prior == "squared.reciprocal"){
        if(phi > 0) logprobphitausq <- logprobphitausq - 2*log(phi)
        else logprobphitausq <- -Inf
      }
      if(prior$phi.prior == "exponential"){
        logprobphitausq <- logprobphitausq - log(exponential.par) - (phi/exponential.par)
       }
      ##
      ## log-prior term for tausq.rel (column 4 holds user prior probs)
      if(prior$tausq.rel.prior == "user"){
        if(phinug[4] > 0) logprobphitausq <- logprobphitausq + log(phinug[4])
        else logprobphitausq <- -Inf
      }
      ##
      bsp$probphitausq <- drop(exp(logprobphitausq))
      ##
      ## accumulate weighted E[Y] and E[Y^2] for the predictive moments;
      ## `env =` is a partially-matched abbreviation of `envir =`
      if(do.prediction && moments){
        assign("expect", (get("expect", envir=expect.env) +
                          (bsp$pred.mean * bsp$probphitausq)),
               env = expect.env)
        assign("expect2", (get("expect2", envir=expect.env) +
                           ((bsp$pred.var + (bsp$pred.mean^2)) *
                             bsp$probphitausq)),
               env = expect.env)
      }
      ## locate this pair's cell in the results grid and write the results
      ## directly into the enclosing frame via eval/expression
      phi.ind <- which.min(abs(phi.discrete - phi))
      nug.ind <- which.min(abs(tausq.rel.discrete - tausq.rel))
      assign("pn.ind", c(phi.ind, nug.ind), envir=fn.frame)
      assign("bsp", bsp, envir=fn.frame)
      eval(expression(phidist$s2[pn.ind[1], pn.ind[2]] <- bsp$S2.post), envir=fn.frame)
      eval(expression(phidist$probphitausq[pn.ind[1], pn.ind[2]] <- bsp$probphitausq), envir=fn.frame)
      eval(expression(phidist$beta[pn.ind[1], pn.ind[2],] <- drop(bsp$beta.post)), envir=fn.frame)
      eval(expression(phidist$varbeta[pn.ind[1], pn.ind[2],] <- drop(bsp$beta.var.std.post)), envir=fn.frame)
      if(do.prediction && simulations.predictive){
        eval(expression(inv.lower[pn.ind[1], pn.ind[2],] <- bsp$inv.lower),
             envir=fn.frame)
        eval(expression(inv.diag[pn.ind[1], pn.ind[2],] <- bsp$inv.diag),
             envir=fn.frame)
      }
      return(invisible())
    }
    ##
    ## Computing the posterior probabilities and organising results
    ##
    ## fn.frame is this function's evaluation frame; phi.tausq.rel.post
    ## writes its per-grid-point results into it via eval/assign.
    fn.frame <- sys.frame(sys.nframe())
    ## result containers over the (phi, tausq.rel) grid
    phidist$s2 <- matrix(NA, n.phi.discrete, n.tausq.rel.discrete)
    dimnames(phidist$s2) <- list(phi.names, tausq.rel.names)
    phidist$probphitausq <- matrix(NA, n.phi.discrete, n.tausq.rel.discrete)
    phidist$beta <- array(NA, dim=c(n.phi.discrete, n.tausq.rel.discrete, beta.size))
    dimnames(phidist$beta) <- list(phi.names, tausq.rel.names, beta.names)
    phidist$varbeta <- array(NA, dim=c(n.phi.discrete, n.tausq.rel.discrete, beta.size^2))
    dimnames(phidist$varbeta) <- list(phi.names, tausq.rel.names, NULL)
    if(do.prediction && simulations.predictive){
      ## lower triangles and diagonals of the inverse correlation matrices,
      ## one per grid point, needed later for simulation
      inv.lower <- array(NA, dim=c(n.phi.discrete, n.tausq.rel.discrete, (n * (n - 1))/2))
      inv.diag <- array(NA, dim=c(n.phi.discrete, n.tausq.rel.discrete, n))
      frame.inv <- sys.frame(sys.nframe())
    }
    ##
    counter.env <- new.env()
    assign("parset", 1, envir=counter.env)
    if(messages.screen){
      ntocount <- nrow(phidist$phitausq)
      cat(paste("krige.bayes: computing the posterior probabilities.\n             Number of parameter sets: ", ntocount,"\n"))
    }
    ## the apply() is run for its side effects (results land in fn.frame)
    temp.res <- apply(phidist$phitausq, 1, phi.tausq.rel.post)
    remove("bsp")
    ## NOTE(review): counter.env is only removed when messages.screen is
    ## TRUE — presumably harmless, but asymmetric; confirm intent.
    if(messages.screen){
      remove(counter.env)
      cat("\n")
    }
    ##
    ## normalise the discrete joint posterior of (phi, tausq.rel)
    phidist$sum.prob <- sum(phidist$probphitausq)
    phidist$probphitausq <- phidist$probphitausq/phidist$sum.prob
    ##
    ## Preparing output of the posterior distribution
    ##
    kb$posterior$beta <- list(conditional.distribution = "normal",
                              pars = list(mean=phidist$beta,
                                var = phidist$varbeta))
    attr(kb$posterior$beta$pars$var, "Size") <- beta.size
#    class(kb$posterior$beta$pars$var) <- "betavar"
    kb$posterior$sigmasq <- list(conditional.distribution = "sc.inv.chisq",
                                 pars = list(df = df.model,
                                   S2 = drop(phidist$s2))) 
    ## marginal posterior of phi (row sums of the joint)
    ## NOTE(review): names come from prior$phi.discrete here but from the
    ## local tausq.rel.discrete below — verify these agree when the default
    ## phi grid was generated above.
    kb$posterior$phi$distribution <- drop(apply(phidist$probphitausq, 1, sum))
    names(kb$posterior$phi$distribution) <- prior$phi.discrete
    if(prior$tausq.rel.prior != "fixed"){
      kb$posterior$tausq.rel$distribution <- drop(apply(phidist$probphitausq, 2, sum))
      names(kb$posterior$tausq.rel$distribution) <- tausq.rel.discrete
    }
    else{
      kb$posterior$tausq.rel <-
        list(status= "fixed", fixed.value = tausq.rel.fixed)
    }
    if(prior$phi.prior != "fixed" | prior$tausq.rel.prior != "fixed"){
      kb$posterior$joint.phi.tausq.rel <- phidist$probphitausq
      dimnames(kb$posterior$joint.phi.tausq.rel) <-
        list(phi.names, tausq.rel.names)
    }
    ##
    ##  Sampling from the posterior distribution
    ##
    ## Draw n.posterior samples of (phi, tausq.rel, sigmasq, beta) by
    ## composition: first (phi, tausq.rel) from the discrete joint posterior,
    ## then sigmasq | (phi, tausq.rel), then beta | (sigmasq, phi, tausq.rel).
    if(n.posterior > 0){
      if(messages.screen)
        cat("krige.bayes: sampling from posterior distribution\n")
      ##
      ## sampling phi and/or tausq
      ##
      n.points <- length(phidist$probphitausq)
      ind <- sample((1:n.points), n.posterior, replace = TRUE,
                    prob = as.vector(phidist$probphitausq))
      phi.sam <- phidist$phitausq[ind,  ]
      ##
      ## frequencies for sampled phi/tausq
      ##
      ## work only with the distinct sampled grid points; ind.table gives
      ## how many draws fall on each
      ind.unique <- sort(unique(ind))
      ind.length <- length(ind.unique)
      ind.table <- table(ind)
      names(ind.table) <- NULL
      ##
      phi.unique <- phidist$phitausq[ind.unique,, drop=FALSE]
      if(messages.screen) {
        cat("krige.bayes: sample from the (joint) posterior of phi and tausq.rel\n")
        print(rbind(phi = phi.unique[, 1], tausq.rel = 
                    phi.unique[, 2], frequency = ind.table))
        cat("\n")
      }
      ## permutation restoring the original (unsorted) draw order
      vecpars.back.order <- order(order(ind))
      ##
      ## sampling sigmasq
      ##
      sigmasq.sam <- rinvchisq(n = n.posterior, df = df.model,
                               scale = rep(as.vector(phidist$s2)[ind.unique], ind.table))
      ##
      ## sampling beta
      ##
      if(beta.size == 1) {
        ## scalar beta: direct normal draws
        vec.beta <- rep(as.vector(phidist$beta)[ind.unique],ind.table)
        vec.vbeta <- rep(as.vector(phidist$varbeta)[ind.unique], ind.table)
        beta.sam <- vec.beta + sqrt(sigmasq.sam * vec.vbeta) * rnorm(n.posterior, mean = 0, sd = 1)
      }
      else {
        ## vector beta: one multivariate normal draw per sample
        ind.beta <- matrix(phidist$beta, ncol = beta.size)[ind.unique,  ]
        ind.beta <- ind.beta[rep(1:ind.length, ind.table),]
        ind.vbeta <- matrix(phidist$varbeta, ncol = 
                            beta.size^2)[ind.unique,  ]
        ind.vbeta <- ind.vbeta[rep(1:ind.length, ind.table),] * sigmasq.sam
        ##      print("2.4: try to speed up this bit!")
        temp.res <- apply(ind.vbeta, 1, rMVnorm,
                          beta.size = beta.size)
        beta.sam <- ind.beta + t(temp.res)
        remove("temp.res")
      }
      ##
      ## summaries of the posterior
      ##
      if(beta.size == 1) {
        trend.mean <- mean(beta.sam)
        trend.median <- median(beta.sam)
      }
      else {
        trend.mean <- apply(beta.sam, 2, mean)
        trend.median <- apply(beta.sam, 2, median)
      }
      S2.mean <- mean(sigmasq.sam)
      S2.median <- median(sigmasq.sam)
      ## Marginal posterior summaries for phi and tausq.rel, taken from the
      ## discrete joint posterior probphitausq (already normalised above).
      phi.marg <- apply(phidist$probphitausq, 1, sum)
      ## Renormalise in place.  Numerically a no-op here (the joint already
      ## sums to 1), but the original code assigned this to a stray variable
      ## `.marg` and never used it, leaving the phi branch inconsistent with
      ## the tausq.rel branch below — fixed.
      phi.marg <- phi.marg/(sum(phi.marg))
      phi.mean <- phi.discrete %*% phi.marg
      phi.median <- median(phi.sam[, 1])
      ## grid value closest to the marginal maximum (i.e. which.max)
      phi.mode <- phi.discrete[which.min(abs(phi.marg - max(phi.marg)))]
      tausq.rel.marg <- apply(phidist$probphitausq, 2, sum)
      tausq.rel.marg <- tausq.rel.marg/(sum(tausq.rel.marg))
      tausq.rel.mean <- tausq.rel.discrete %*% tausq.rel.marg
      tausq.rel.median <- median(phi.sam[, 2])
      tausq.rel.mode <- tausq.rel.discrete[which.min(abs(tausq.rel.marg - max(tausq.rel.marg)))]
      ##
      ## Computing the conditional mode of beta and sigmasq;
      ## conditional on the mode of (phi, tausq.rel)
      ##
      mode.ind <- which(phidist$probphitausq == max(phidist$probphitausq))
      phi.tausq.rel.mode <- phidist$phitausq[mode.ind, 1:2, drop = FALSE]
      ## ties are broken by taking the first mode found
      if(nrow(phi.tausq.rel.mode) > 1){
        cat("krige.bayes: WARNING: multiple modes for phi.tausq.rel. Using the first one to compute modes of beta and sigmasq.\n")
        cat("krige.bayes: modes found are:\n")
        print(phi.tausq.rel.mode)
        phi.tausq.rel.mode <- phi.tausq.rel.mode[1,]
      }
      ## NOTE(review): mode.ind indexes the joint grid — confirm beta.info
      ## is indexed the same way when npr > 1
      if(prior$beta.prior == "normal" && npr > 1)
        info.id <- mode.ind
      else info.id <- 1
      ## re-run the closed-form posterior at the modal (phi, tausq.rel)
      modes <- beta.sigmasq.post(n = n, beta.info = beta.info[[info.id]],
                                 sigmasq.info = sigmasq.info[[info.id]],
                                 env.dists = dists.env,
                                 model = list(cov.model = model$cov.model,
                                   kappa = model$kappa),
                                 xmat = trend.data, y = data,
                                 phi = phi.tausq.rel.mode[1],
                                 tausq.rel = phi.tausq.rel.mode[2],
                                 dets = FALSE,
                                 do.prediction.moments = FALSE,
                                 do.prediction.simulations = FALSE,
                                 env.pred = pred.env, signal = signal)
      beta.mode.cond <- modes$beta.post
      S2.mode.cond <- modes$S2.post
      rm(modes)
      ##
      ## preparing output on posterior distribution
      ##
      if(beta.size == 1)
        kb$posterior$beta$summary <-
          c(mean = trend.mean, median = trend.median, mode.cond = beta.mode.cond)
      else kb$posterior$beta$summary <-
        cbind(mean = trend.mean, median = trend.median, mode.cond = beta.mode.cond)
      kb$posterior$sigmasq$summary <-
        c(mean = S2.mean, median = S2.median, mode.cond = S2.mode.cond)
      kb$posterior$phi$summary <-
        c(mean = phi.mean, median = phi.median, mode = phi.mode)
      if(prior$tausq.rel.prior != "fixed")
        kb$posterior$tausq.rel$summary <- c(mean = tausq.rel.mean,
                                            median = tausq.rel.median,
                                            mode = tausq.rel.mode)
      ## samples returned in the original draw order (vecpars.back.order)
      kb$posterior$sample <-
        as.data.frame(cbind(drop(as.matrix(beta.sam)[vecpars.back.order,  ]),
                            sigmasq.sam[vecpars.back.order], phi.sam[,1]))
      beta.sam <- sigmasq.sam <- NULL
      names(kb$posterior$sample) <- c(beta.names, "sigmasq", "phi")
      kb$posterior$sample$tausq.rel <- phi.sam[,2]
      ## marginal tables: posterior probabilities vs empirical frequencies
      phi.lev <- unique(phidist$phitausq[, 1])
      kb$posterior$phi$phi.marginal <-
        data.frame(phi = phi.lev, expected = apply(phidist$probphitausq, 1, sum),
                   sampled = as.vector(table(factor(phi.sam[, 1],
                     levels = phi.lev)))/n.posterior)
      tausq.rel.lev <- unique(phidist$phitausq[, 2])
      if(prior$tausq.rel.prior != "fixed")
        kb$posterior$tausq.rel$tausq.rel.marginal <-
          data.frame(tausq.rel = tausq.rel.lev, expected = apply(phidist$probphitausq, 2, sum),
                     sampled = as.vector(table(factor(phi.sam[, 2],
                       levels = tausq.rel.lev)))/n.posterior)
    }
    ##
    ##  computing results for the predictive distribution 
    ##
    if(do.prediction){
      kb$predictive$distribution <- "obtained by numerical approximation" 
      if(messages.screen)
        cat("krige.bayes: starting prediction at the provided locations\n")
      ##
      ## defining samples to be taken from the predictive
      ##
      ## when n.predictive equals n.posterior, the (phi, tausq.rel) draws
      ## from the posterior sampling above are reused (ind, ind.table etc.
      ## are still in scope); otherwise a fresh sample is drawn
      if(n.predictive == n.posterior) {
        include.it <- FALSE
        n.predictive <- n.posterior
        phi.sam <- phidist$phitausq[ind,  ]
        message.prediction <- c(message.prediction, "krige.bayes: phi/tausq.rel samples for the predictive are same as for the posterior")
        if(messages.screen)
          cat(message.prediction, "\n")
      }
      else {
        include.it <- TRUE
        ind <- sample((1:(dim(phidist$phitausq)[1])), n.predictive,
                      replace = TRUE, prob = as.vector(phidist$probphitausq))
        ind.unique <- sort(unique(ind))
        ind.length <- length(ind.unique)
        ind.table <- table(ind)
        phi.unique <- phidist$phitausq[ind.unique,, drop=FALSE]
        message.prediction <- c(message.prediction, 
                                "krige.bayes: phi/tausq.rel samples for the predictive are NOT the same as for the posterior ")
        if(messages.screen) {
          cat(message.prediction, "\n")
          cat("krige.bayes: samples and their frequencies from the distribution of  phi and tau.rel when drawing from the predictive distribution\n")
          print(rbind(phi = phi.unique[, 1], tausq.rel
                      = phi.unique[, 2], frequency = ind.table))
        }
        phi.sam <- phidist$phitausq[ind,  ]
        vecpars.back.order <- order(order(ind))
      }
      if(moments){
        if(messages.screen)
          cat("krige.bayes: computing moments of the predictive distribution\n")
        ## moments accumulated in expect.env by phi.tausq.rel.post, divided
        ## by the normalising constant; Var = E[Y^2] - (E[Y])^2
        kb$predictive$mean <- get("expect", envir=expect.env)/phidist$sum.prob
        remove("expect", envir=expect.env)
        kb$predictive$variance <-
          (get("expect2", envir=expect.env)/phidist$sum.prob) -
            ((kb$predictive$mean)^2)
        remove("expect2", envir=expect.env)
      }
    }
  }
  ##
  ## Backtransforming predictions
  ##
  ## undo the Box-Cox transformation on the predictive moments
  if((do.prediction && moments) & (abs(lambda-1) > 0.001)){
    kb$predictive <-
      backtransform.moments(lambda = lambda,
                            mean = kb$predictive$mean,
                            variance = kb$predictive$variance,
                            distribution = kb$predictive$distribution,
                            n.simul = n.back.moments)
  }
  ##
  ## ======================= PART 4 ==============================
  ##                Sampling from the predictive
  ## =============================================================
  ##
  if(do.prediction && simulations.predictive){
    ## the C simulation code only supports a subset of correlation models
    if(is.R()){
      if(cov.model.number > 12)
        stop("simulation in krige.bayes not implemented for the choice of correlation function")
    }
    else
      if(cov.model.number > 10)
        stop("simulation in krige.bayes not implemented for the chosen correlation function")
    krige.bayes.aux20 <- function(phinug){
      ## Simulates from the predictive for ONE (phi, tausq.rel) pair
      ## (a row of phi.unique), drawing ind.table[iter] realisations.
      ## Returns an ni x Nsims matrix of simulated values (back-transformed
      ## when a Box-Cox lambda != 1 is in effect).  Closure over most of
      ## the enclosing krige.bayes state (phidist, inv.lower, inv.diag,
      ## pred.env, counter.env, ind.table, trend.data, data, ...).
      iter <- get("counter", envir=counter.env)
      if(messages.screen & prior$phi.prior != "fixed")
        krige.bayes.counter(.temp.ap = iter, n.points = ind.length)
      phinug <- as.vector(phinug)
      phi <- phinug[1]
      tausq.rel <- phinug[2]
      ## locate this pair's cell in the precomputed grids
      phi.ind <- which.min(abs(phi.discrete - phi))
      nug.ind <- which.min(abs(tausq.rel.discrete - tausq.rel))
      ## correlations between data and prediction locations
      v0 <- cov.spatial(obj = get("d0", envir=pred.env),
                        cov.model = cov.model, kappa = kappa,
                        cov.pars = c(1, phi))
      ## care here, reusing object b
      ## b row 1: t(v0) %*% Rinv %*% data; remaining rows: the same for the
      ## trend columns
      b <- bilinearformXAY(X = as.vector(cbind(data, trend.data)),
                           lowerA = as.vector(inv.lower[phi.ind, nug.ind,,drop=TRUE]),
                           diagA = as.vector(inv.diag[phi.ind, nug.ind,,drop=TRUE]), 
                           Y = as.vector(v0))
      tv0ivdata <- drop(b[1,])
      b <- t(get("trend.loc", envir=pred.env)) -  b[-1,, drop=FALSE]
      ##
      ## kriging mean at the prediction locations
      tmean <- tv0ivdata + drop(crossprod(b,as.vector(phidist$
                                                 beta[phi.ind, nug.ind,  ])))
      tv0ivdata <- NULL
      Nsims <- ind.table[iter]
      ## Dval scales the diagonal: signal excludes the nugget
      if (signal) Dval <- 1.0 else Dval <-  1.0 + tausq.rel
      iter.env <- sys.frame(sys.nframe())
      ## removing this because seens redundant with part (**) below
      ##      if((tausq.rel < 1e-12) & (!is.null(get("loc.coincide", envir=pred.env))))
      ##        tmean[get("loc.coincide", envir=pred.env)] <- get("data.coincide", envir=pred.env)
      ## at coincident locations (no nugget, or predicting the signal) the
      ## predictive is degenerate: drop them from simulation and plug in
      ## the observed data afterwards (see (**) below)
      coincide.cond <- (((tausq.rel < 1e-12) | !signal) & !is.null(get("loc.coincide", envir=pred.env)))
      if(coincide.cond){
        nloc <- ni - n.loc.coincide
        ind.not.coincide <- -(get("loc.coincide", envir=pred.env))
        v0 <- v0[, ind.not.coincide, drop=FALSE]
        tmean <- tmean[ind.not.coincide]
        b <- b[,ind.not.coincide, drop=FALSE]
      }
      else{
        nloc <- ni
        ind.not.coincide <- TRUE
      }
      ## NOTE(review): `par.set` is not defined in this function — it is a
      ## local of phi.tausq.rel.post.  This branch (normal beta prior with
      ## npr > 1) would rely on lexical lookup finding some `par.set`;
      ## verify this path is ever exercised.
      if(prior$beta.prior == "normal" && npr > 1)
        info.id <- par.set
      else info.id <- 1
      if(beta.info[[info.id]]$iv == Inf)
        vbetai <- matrix(0, ncol = beta.size, nrow = beta.size)
      else
        vbetai <- matrix(drop(phidist$varbeta[phi.ind, nug.ind,  ]),
                         ncol = beta.size, nrow = beta.size)
      simul <- matrix(NA, nrow=ni, ncol=Nsims)
      if(nloc > 0)
        simul[ind.not.coincide,] <-
          cond.sim(env.loc = base.env, env.iter = iter.env,
                   loc.coincide = get("loc.coincide", envir=pred.env),
                   coincide.cond = coincide.cond,
                   tmean = tmean,
                   Rinv = list(lower= drop(inv.lower[phi.ind, nug.ind,]),
                     diag = drop(inv.diag[phi.ind, nug.ind,])),
                   mod = list(beta.size = beta.size, nloc = nloc,
                     Nsims = Nsims, n = n, Dval = Dval,
                     df.model = df.model,
                     s2 = phidist$s2[phi.ind, nug.ind],
                     cov.model.number = cov.model.number,
                     phi = phi, kappa = kappa),
                   vbetai = vbetai,
                   fixed.sigmasq = (sigmasq.info$df.sigmasq == Inf))
      ## (**) this made previous bit redundant
      if(coincide.cond)
        simul[get("loc.coincide", envir=pred.env),] <-
          rep(get("data.coincide", envir=pred.env), Nsims)
      remove("v0", "b", "tmean")
      assign("counter", (iter + 1), envir=counter.env)
      ##
      ## Back transforming (To be include in C code???)
      ##
      if(abs(lambda - 1) > 0.001){
        return(BCtransform(simul, lambda, inv=TRUE)$data)
      }
      else
        return(simul)
    }
    ##
    counter.env <- new.env()
    if(messages.screen){
      cat("krige.bayes: sampling from the predictive\n")
      if(prior$phi.prior != "fixed")
        cat(paste("             Number of parameter sets: ", ind.length,"\n"))
    }
    assign("counter", 1, envir=counter.env)
     kb$predictive$simulations <- 
      matrix(unlist(apply(phi.unique, 1, krige.bayes.aux20)),
             ncol = n.predictive)
    remove("inv.lower", "inv.diag", "counter.env", "pred.env")
    if(is.R()) gc(verbose=FALSE)
    if(messages.screen)
      if(abs(lambda-1) > 0.001) 
        cat("krige.bayes: Box-Cox data transformation performed.\n             Simulations back-transformed to the original scale\n")
    if(messages.screen)
      cat("krige.bayes: preparing summaries of the predictive distribution\n")
    ##
    ## mean/quantiles/probabilities estimators from simulations
    ##
    kb$predictive <- c(kb$predictive, 
                       statistics.predictive(simuls=kb$predictive$simulations,
                                             mean.var = mean.estimator,
                                             quantile = quantile.estimator,
                                             threshold = probability.estimator))
    ##
    ## keeping or not samples from  predictive
    ##
    if(keep.simulations){
      if(prior$phi.prior != "fixed")
        kb$predictive$simulations <-
          kb$predictive$simulations[, vecpars.back.order]
    }
    else{
      kb$predictive$simulations <- NULL
      if(is.R()) gc(verbose=FALSE)
    }
    ##
    ## recording samples from  predictive if different from the posterior
    ##
    if(prior$phi.prior != "fixed"){
      if(include.it){
        phi.lev <- unique(phidist$phitausq[, 1])
        kb$predictive$phi.marginal <-
          data.frame(phi = phi.lev,
                     expected = apply(phidist$probphitausq, 1, sum),
                     sampled = as.vector(table(factor(phi.sam[, 1],
                       levels = phi.lev)))/n.predictive)
        tausq.rel.lev <- unique(phidist$phitausq[, 2])
        if(prior$tausq.rel.prior != "fixed")
          data.frame(tausq.rel = tausq.rel.lev,
                     expected = apply(phidist$probphitausq, 2, sum),
                     sampled = as.vector(table(factor(phi.sam[, 2],
                       levels = tausq.rel.lev)))/n.predictive)
        else
          kb$predictive$tausq.rel.marginal <-
            paste("fixed tausq.rel with value =", tausq.rel)
        kb$predictive$tausq.rel.marginal <-
          data.frame(tausq.rel = tausq.rel.lev,
                     expected = apply(phidist$probphitausq, 2, sum),
                     sampled = as.vector(table(factor(phi.sam[, 2],
                       levels = tausq.rel.lev)))/n.predictive)
      }
    }
  }
  if(!do.prediction) kb$predictive <- "no prediction locations provided"
  kb$.Random.seed <- seed
  kb$max.dist <- data.dist.max
  kb$call <- call.fc
  attr(kb, "prediction.locations") <- call.fc$locations
  if(do.prediction) attr(kb, 'sp.dim') <- ifelse(krige1d, "1d", "2d")
  if(!is.null(call.fc$borders))
    attr(kb, "borders") <- call.fc$borders
  class(kb) <- c("krige.bayes", "variomodel")
  if(messages.screen) cat("krige.bayes: done!\n")
  return(kb)
}

"prepare.graph.krige.bayes" <-
  function (obj, locations, borders, borders.obj=NULL,
            values.to.plot, number.col, xlim, ylim, messages) 
{
  ## Helper for the image/contour/persp methods of "krige.bayes" objects.
  ## Extracts the requested summary of the predictive distribution from
  ## `obj`, arranges it over the regular grid defined by `locations`
  ## (masking grid points outside `borders` with NA) and returns a list
  ## with components x, y, values (a matrix) and coords.lims, in the
  ## format expected by image()/contour()/persp().
  ##
  ## obj            : object of class "krige.bayes".
  ## locations      : two-column matrix/data-frame of prediction locations.
  ## borders        : polygon used to mask grid points, or NULL.
  ## borders.obj    : polygon the predictions were restricted to when the
  ##                  object was created, or NULL; `values` are assumed to
  ##                  correspond to the points inside it.
  ## values.to.plot : name of the summary to extract, or a numeric vector
  ##                  of values to be used as given.
  ## number.col     : column to pick when the summary has several columns
  ##                  (quantiles, probabilities, simulations).
  ## xlim, ylim     : optional axis limits (default: range of borders).
  ## messages       : logical; print a message naming what is mapped.
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  ## character values.to.plot selects a component of obj$predictive;
  ## a numeric vector is used directly as the values to map
  if (!is.numeric(values.to.plot)){
    switch(values.to.plot,
           mean = {
             values <- obj$predictive$mean
             if(messages.screen) cat("mapping the means of the predictive distribution\n")
           },
           variance = {
             values <- obj$predictive$variance
             if(messages.screen) cat("mapping the variances of the predictive distribution\n")
           },
           mean.simulations = {
             values <- obj$predictive$mean.simulations
             if(messages.screen) cat("mapping the means of simulations from the predictive distribution\n")
           },
           variance.simulations =
           {
             values <- obj$predictive$variance.simulations
             if(messages.screen) cat("mapping the variances of simulations from the predictive distribution\n")
           },
           median = {
             values <- obj$predictive$median
             if(messages.screen) cat("mapping the medians of the predictive distribution\n")
           },
           uncertainty = {
             values <- obj$predictive$uncertainty
             if(messages.screen) cat("mapping the uncertainty of the predictive distribution\n")
           },
           quantiles =
           {
             ## multi-column summaries need number.col to pick one column
             if(!is.vector(obj$predictive$quantiles))
               if(is.null(number.col))
                 stop("argument number.col must be provided")
               else
                 values <- as.matrix(obj$predictive$quantiles)[,number.col]
             else
               values <- as.matrix(obj$predictive$quantiles)[,1]
             if(messages.screen) cat("mapping a quantile of the predictive distribution\n")
           },
           probabilities =
           {
             if(!is.vector(obj$predictive$probab)){
               if(is.null(number.col))
                 stop("argument number.col must be provided")
               else
                 values <- as.matrix(obj$predictive$probab)[,number.col]
             }
             else{
               values <- as.matrix(obj$predictive$probab)[,1]
             }
             if(messages.screen) cat("mapping a probability of beeing bellow threshold of the predictive distribution\n")
           },
           simulation =
           {
             values <- as.matrix(obj$predictive$simulations)[,number.col]
             if(messages.screen) cat("mapping a simulation from the predictive distribution\n")
           },
           stop("wrong specification for values to plot")
           )
  }
  else values <- values.to.plot
  remove("values.to.plot")
  ## sort locations row-major (by y, then x) so the values can be folded
  ## into a matrix with ncol = number of distinct y coordinates
  locations <- locations[order(locations[, 2], locations[,1]), ]
  xx <- as.numeric(levels(as.factor(locations[, 1])))
  nx <- length(xx)
  yy <- as.numeric(levels(as.factor(locations[, 2])))
  ny <- length(yy)
  ## start with all-NA and fill in only where values are available
  values.loc <- rep(NA, nrow(locations))
  if(length(values.loc) == length(values)) values.loc <- values
  ## when predictions were restricted to borders.obj, spread `values` over
  ## the grid points inside that polygon (requires splancs::inout)
  if(!is.null(borders.obj)){
    borders.obj <- as.matrix(as.data.frame(borders.obj))
    if(require(splancs))
      inout.vec <- as.vector(inout(pts = locations, poly = borders.obj))
    else
      stop("argument borders requires the package splancs - please install it")
    values.loc[inout.vec] <- values
    rm("inout.vec")
  }
  ## an explicit `borders` masks the grid again, unless it is the same
  ## polygon already applied above
  if (!is.null(borders)){
    borders <- as.matrix(as.data.frame(borders))
    dimnames(borders) <- list(NULL, NULL)
    if(!(!is.null(borders.obj) && identical(borders,borders.obj))){
      if(require(splancs))
        inout.vec <- as.vector(inout(pts = locations, poly = borders))
      else
        stop("argument borders requires the package splancs - please install it")
      if(length(values.loc[inout.vec]) == length(values))
        values.loc[inout.vec] <- values
      values.loc[!inout.vec] <- NA
      rm("inout.vec")
    }
  }
  ##
  ## default plotting limits from the borders, when available
  if (missing(xlim) || is.null(xlim))
    if(is.null(borders)) xlim <- NULL
    else xlim <- range(borders[,1]) 
  if (missing(ylim) || is.null(ylim))
    if(is.null(borders)) ylim <- NULL
    else ylim <- range(borders[,2])
  ## pad the limits by 2.5% on each side so symbols at the edge are visible
  coords.lims <- set.coords.lims(coords=locations, xlim=xlim, ylim=ylim)
  coords.lims[,1] <- coords.lims[,1] + c(-.025, .025) * diff(coords.lims[,1])
  coords.lims[,2] <- coords.lims[,2] + c(-.025, .025) * diff(coords.lims[,2])
  return(list(x=xx, y=yy, values = matrix(values.loc,ncol=ny), coords.lims=coords.lims))
}

"image.krige.bayes" <-
  function (x, locations, borders, 
            values.to.plot = c("mean", "variance",
              "mean.simulations", "variance.simulations",
              "quantiles", "probabilities", "simulation"),
            number.col, coords.data, xlim, ylim,
            x.leg, y.leg, messages, ...) 
{
  ## image() method for "krige.bayes" objects: maps a summary of the
  ## predictive distribution over the prediction grid.
  ##
  ## x              : object of class "krige.bayes".
  ## locations      : prediction locations (default: recovered from x).
  ## borders        : polygon delimiting the prediction area.
  ## values.to.plot : which summary to map, or a numeric vector of values.
  ## number.col     : column to use for multi-column summaries.
  ## coords.data    : if provided, data coordinates are added as points.
  ## xlim, ylim     : axis limits.
  ## x.leg, y.leg   : position of the colour legend (both must be given).
  ## ...            : further arguments passed to image()/legend.krige().
  ## Returns invisible(); called for its side effect (the plot).
  ## par("pty") is restored on exit.
  pty.prev <- par()$pty
  ## unevaluated "..." arguments, to be forwarded to image()
  ldots <- match.call(expand.dots = FALSE)$...
  ## "offset.leg" is consumed by the legend, not by image(): drop it
  ## before forwarding.  Bug fix: the former
  ##   ldots[[match(names(ldots), "offset.leg")]] <- NULL
  ## had the match() arguments reversed, so it errored whenever several
  ## extra arguments were supplied or "offset.leg" was absent.
  ldots[["offset.leg"]] <- NULL
  ## default axis labels unless the caller supplied them
  if(!("xlab" %in% names(ldots))) ldots$xlab <- "X Coord"
  if(!("ylab" %in% names(ldots))) ldots$ylab <- "Y Coord"
  if(missing(x)) x <- NULL
  attach(x)
  on.exit(detach(x))
  if(missing(locations))
    locations <-  eval(attr(x, "prediction.locations"))
  if(is.null(locations)) stop("prediction locations must be provided")
  if(ncol(locations) != 2)
    stop("locations must be a matrix or data-frame with two columns")
  if(!is.numeric(values.to.plot))
    values.to.plot <-
      match.arg(values.to.plot,
                choices = c("mean", "variance",
                  "mean.simulations", "variance.simulations",
                  "quantiles", "probabilities", "simulation"))
  ## borders: explicit argument takes precedence over the attribute on x
  if(missing(borders)){
    if(!is.null(attr(x, "borders"))) borders.arg <- borders <- eval(attr(x, "borders"))
    else borders.arg <- borders <- NULL
  }
  else{
    borders.arg <- borders
    if(is.null(borders)) borders <- eval(attr(x, "borders"))
  }
  if(missing(number.col)) number.col <- NULL
  if(missing(coords.data)) coords.data <- NULL
  if(missing(xlim)) xlim <- NULL
  if(missing(ylim)) ylim <- NULL
  if(missing(x.leg)) x.leg <- NULL
  if(missing(y.leg)) y.leg <- NULL
  ##
  ## Plotting 1D or 2D
  ##
  ## NOTE(review): krige.bayes() stores attr 'sp.dim' as "1d"/"2d"
  ## (lower case) while this tests '1D'; the 1D branch also references
  ## `values`, which is not defined in this function (presumably expected
  ## from attach(x)) -- confirm before relying on the 1D path.
  if(!is.null(attr(x, 'sp.dim')) && attr(x, 'sp.dim') == '1D')
    plot.1d(values, xlim=xlim, ylim = ylim,
            x1vals = unique(round(locations[,1], dig=12)), ...)
  else{
    locations <- prepare.graph.krige.bayes(obj=x,
                                           locations=locations,
                                           borders=borders,
                                           borders.obj = eval(attr(x, "borders")),
                                           values.to.plot=values.to.plot,
                                           number.col = number.col,
                                           xlim = xlim, ylim = ylim, messages=messages)
    par(pty = "s")
    do.call("image", c(list(x=locations$x, y=locations$y,
                          z=locations$values,
                          xlim = locations$coords.lims[,1],
                          ylim = locations$coords.lims[,2]),
                     ldots))
    if(!is.null(coords.data))
      points(coords.data)
    if(!is.null(borders.arg)) polygon(borders, lwd=2)
    dots.l <- list(...)
    if(is.null(dots.l$col)) dots.l$col <- heat.colors(12)
    if(!is.null(x.leg) && !is.null(y.leg)){
      ## NOTE(review): `vertical` and `cex.leg` are not defined in this
      ## function; presumably meant to come from the caller's "..." --
      ## confirm before using the legend.
      legend.krige(x.leg=x.leg, y.leg=y.leg,
                   values=locations$values,
                   vertical = vertical, cex=cex.leg,
                   col=dots.l$col, ...)
    }
  }
  par(pty=pty.prev)
  return(invisible())
}

"contour.krige.bayes" <-
  function (x, locations, borders, 
            values.to.plot = c("mean", "variance",
              "mean.simulations", "variance.simulations",
              "quantiles", "probabilities", "simulation"),
            number.col, coords.data, xlim, ylim,
            x.leg, y.leg, messages, filled = FALSE, ...) 
{
  ## contour() method for "krige.bayes" objects: contour plot of a
  ## summary of the predictive distribution over the prediction grid.
  ##
  ## x              : object of class "krige.bayes".
  ## locations      : prediction locations (default: recovered from x).
  ## borders        : polygon delimiting the prediction area.
  ## values.to.plot : which summary to map, or a numeric vector of values.
  ## number.col     : column to use for multi-column summaries.
  ## coords.data    : if provided, data coordinates are added as points.
  ## xlim, ylim     : axis limits.
  ## x.leg, y.leg   : accepted for interface consistency with the image
  ##                  method; not used here.
  ## filled         : use filled.contour() instead of contour().
  ##                  Bug fix: `filled` was referenced in the body but was
  ##                  never an argument, so the test errored unless a
  ##                  global variable happened to exist.  It is now an
  ##                  explicit argument defaulting to FALSE.
  ## ...            : further arguments passed to (filled.)contour().
  ## Returns invisible(); called for its side effect (the plot).
  pty.prev <- par()$pty
  ## unevaluated "..." arguments, to be forwarded to the contour function
  ldots <- match.call(expand.dots = FALSE)$...
  ## "offset.leg" is not a contour() argument: drop it before forwarding.
  ## Bug fix: the former ldots[[match(names(ldots), "offset.leg")]] had
  ## the match() arguments reversed and errored whenever several extra
  ## arguments were supplied or "offset.leg" was absent.
  ldots[["offset.leg"]] <- NULL
  ## default axis labels unless the caller supplied them
  if(!("xlab" %in% names(ldots))) ldots$xlab <- "X Coord"
  if(!("ylab" %in% names(ldots))) ldots$ylab <- "Y Coord"
  if(missing(x)) x <- NULL
  attach(x)
  on.exit(detach(x))
  if(missing(locations))
    locations <-  eval(attr(x, "prediction.locations"))
  if(is.null(locations)) stop("prediction locations must be provided")
  if(ncol(locations) != 2)
    stop("locations must be a matrix or data-frame with two columns")
  if(!is.numeric(values.to.plot))
    values.to.plot <-
      match.arg(values.to.plot,
                choices = c("mean", "variance",
                  "mean.simulations", "variance.simulations",
                  "quantiles", "probabilities", "simulation"))
  ## borders: explicit argument takes precedence over the attribute on x
  if(missing(borders)){
    if(!is.null(attr(x, "borders"))) borders.arg <- borders <- eval(attr(x, "borders"))
    else borders.arg <- borders <- NULL
  }
  else{
    borders.arg <- borders
    if(is.null(borders)) borders <- eval(attr(x, "borders"))
  }
  if(missing(xlim)) xlim <- NULL
  if(missing(ylim)) ylim <- NULL
  if(missing(number.col)) number.col <- NULL
  if(missing(coords.data)) coords.data <- NULL
  ##
  ## Plotting 1D or 2D
  ##
  ## NOTE(review): krige.bayes() stores attr 'sp.dim' as "1d"/"2d"
  ## (lower case) while this tests '1D'; the 1D branch also references
  ## `values`, which is not defined in this function (presumably expected
  ## from attach(x)) -- confirm before relying on the 1D path.
  if(!is.null(attr(x, 'sp.dim')) && attr(x, 'sp.dim') == '1D')
    plot.1d(values, xlim=xlim, ylim = ylim,
            x1vals = unique(round(locations[,1], dig=12)), ...)
  else{
    locations <- prepare.graph.krige.bayes(obj=x, locations=locations,
                                           borders=borders,
                                           borders.obj = eval(attr(x, "borders")),
                                           values.to.plot=values.to.plot,
                                           number.col = number.col,
                                           xlim = xlim, ylim = ylim, messages=messages)
    par(pty = "s")
    if(filled){
      ## filled.contour draws its own axes; data points and borders are
      ## added through the plot.axes hook
      temp.contour <- function(){
        axis(1)
        axis(2)
        if(!is.null(coords.data)) points(coords.data, pch=20)
        if(!is.null(borders)) polygon(borders, lwd=2)
      }
      do.call("filled.contour", c(list(x=locations$x,
                                       y=locations$y,
                                       z=locations$values,
                                       xlim = locations$coords.lims[,1],
                                       ylim = locations$coords.lims[,2],
                                       plot.axes={temp.contour()}),
                                  ldots))
    }
    else{
      do.call("contour", c(list(x=locations$x, y=locations$y,
                                z=locations$values,
                                xlim = locations$coords.lims[,1],
                                ylim = locations$coords.lims[,2]),
                           ldots))
      ##
      ## adding borders
      ##
      if(!is.null(borders.arg)) polygon(borders, lwd=2)
    }
  }
  par(pty=pty.prev)
  return(invisible())
}

"persp.krige.bayes" <-
  function (x, locations, borders, 
            values.to.plot = c("mean", "variance",
              "mean.simulations", "variance.simulations",
              "quantiles", "probabilities", "simulation"), number.col, messages, ...) 
{
  ## persp() method for "krige.bayes" objects: 3-D surface plot of a
  ## summary of the predictive distribution over the prediction grid.
  ##
  ## x              : object of class "krige.bayes".
  ## locations      : prediction locations (default: recovered from x).
  ## borders        : polygon passed on to prepare.graph.krige.bayes.
  ## values.to.plot : which summary to plot, or a numeric vector of values.
  ## number.col     : column to use for multi-column summaries.
  ## messages       : logical; forwarded to prepare.graph.krige.bayes.
  ## ...            : further arguments passed to persp().
  ## Returns invisible(); called for its side effect (the plot).
  if(missing(x)) x <- NULL
  ## components of x are made visible by name for the duration of the call
  attach(x)
  on.exit(detach(x))
  if(missing(locations)) locations <-  eval(attr(x, "prediction.locations"))
  if(is.null(locations)) stop("prediction locations must be provided")
  if(ncol(locations) != 2) stop("locations must be a matrix or data-frame with two columns")
  if(!is.numeric(values.to.plot)){
    values.to.plot <- match.arg(values.to.plot,
                                choices = c("mean", "variance",
                                  "mean.simulations",
                                  "variance.simulations",
                                  "quantiles", "probabilities",
                                  "simulation"))
  }
  if(missing(borders)) borders <- NULL
  if(missing(number.col)) number.col <- NULL
  ##
  ## Plotting 1D or 2D
  ##
  ## NOTE(review): `values`, `xlim` and `ylim` below are not defined in
  ## this function (they are neither arguments nor locals); presumably
  ## they are expected to be found via attach(x) or the calling frame.
  ## Also, krige.bayes() stores attr 'sp.dim' as "1d"/"2d" (lower case)
  ## while this tests '1D' -- confirm before relying on the 1D path.
  if(!is.null(attr(x, 'sp.dim')) && attr(x, 'sp.dim') == '1D')
    plot.1d(values, xlim=xlim, ylim = ylim,
            x1vals = unique(round(locations[,1], dig=12)), ...)
  else{
    locations <- prepare.graph.krige.bayes(obj=x, locations=locations,
                                           borders=borders,
                                           borders.obj = eval(attr(x, "borders")),
                                           values.to.plot=values.to.plot,
                                           number.col = number.col, messages=messages)
    persp(locations$x, locations$y, locations$values, ...)
  }
  return(invisible())
}

"model.control" <-
  function(trend.d = "cte", trend.l = "cte", cov.model = "matern",
           kappa = 0.5, aniso.pars = NULL, lambda = 1)
{
  ## Collects the model specification (trend, correlation model,
  ## anisotropy and Box-Cox transformation) into a "model.geoR" object.
  ##
  ## trend.d, trend.l : trend specification for the data and for the
  ##                    prediction locations, respectively.
  ## cov.model  : name of the correlation function (partial matching).
  ## kappa      : extra smoothness parameter used by some models.
  ## aniso.pars : NULL or c(rotation angle in radians, anisotropy ratio).
  ## lambda     : Box-Cox transformation parameter (1 = no transformation).
  ##
  ## Returns a list of class "model.geoR".
  cov.model <-
    match.arg(cov.model,
              choices = c("matern", "exponential", "gaussian",
                "spherical", "circular", "cubic", "wave", "power",
                "powered.exponential", "cauchy", "gneiting",
                "gneiting.matern", "pure.nugget"))
  ## kappa must lie in (0, 2] for the powered exponential family.
  ## Bug fix: the message previously contained "\(0,2\]" -- "\(" and "\]"
  ## are not valid escape sequences in R string literals.
  ## Scalar conditions, hence && / ||.
  if(cov.model == "powered.exponential" && (kappa <= 0 || kappa > 2))
    stop("model.control: for power exponential correlation model the parameter kappa must be in the interval (0,2]")
  if(!is.null(aniso.pars)) 
    if(length(aniso.pars) != 2 || !is.numeric(aniso.pars))
      stop("model.control: anisotropy parameters must be a vector with two elements: rotation angle (in radians) and anisotropy ratio (a number > 1)")
  res <- list(trend.d = trend.d, trend.l = trend.l,
              cov.model = cov.model,
              kappa=kappa, aniso.pars=aniso.pars, lambda=lambda)
  class(res) <- "model.geoR"
  return(res)
}

"post2prior" <- function(obj)
{
  ## Converts the posterior distribution from a Bayesian kriging fit into
  ## a prior specification, so the output of one krige.bayes() call can
  ## be used as the prior for a subsequent analysis.
  ##
  ## obj : object of class "krige.bayes" (its $posterior component is
  ##       used) or "posterior.krige.bayes".
  ## Returns a "prior.geoR" object (see prior.control) with the joint
  ## phi/tausq.rel information carried over and dep.prior set to TRUE.
  if(length(class(obj)) == 0)
    stop("post.prior: argument must be an object of the class `krige.bayes` or `posterior.krige.bayes`")
  if(inherits(obj, "krige.bayes")) obj <- obj$posterior
  if(!inherits(obj, "posterior.krige.bayes"))
    stop("post.prior: argument must be an object of the class `krige.bayes` or `posterior.krige.bayes`")
  ##
  ## beta: carry over the fixed value or the conditional distribution
  ##
  if(!is.null(obj$beta$status) &&
     obj$beta$status == "fixed"){
    beta.prior <- "fixed"
    beta <- obj$beta$fixed.value
    beta.var.std <- NULL
  }
  else{
    beta.prior <- obj$beta$conditional.distribution
    beta <- obj$beta$pars$mean
    beta.var.std <- obj$beta$pars$var
  }
  ##
  ## sigmasq
  ##
  if(!is.null(obj$sigmasq$status) &&
     obj$sigmasq$status == "fixed"){
    ## Bug fix: this assignment was misspelled "sigamsq.prior", which
    ## left `sigmasq.prior` undefined and made the prior.control() call
    ## below fail whenever sigmasq had a fixed posterior.
    sigmasq.prior <- "fixed"
    sigmasq <- obj$sigmasq$fixed.value
    df.sigmasq <- NULL
  }
  else{
    sigmasq.prior <- obj$sigmasq$conditional.distribution
    sigmasq <- obj$sigmasq$pars$S2
    df.sigmasq <-  obj$sigmasq$pars$df
  }
  ##
  ## phi: either a fixed value or the discrete marginal as a "user" prior
  ##
  if(!is.null(obj$phi$status) &&
     obj$phi$status == "fixed"){
    phi.prior <- "fixed"
    phi <- obj$phi$fixed.value
    phi.discrete <- NULL
  }
  else{
    phi.prior <- obj$phi$phi.marginal[,"expected"]
    phi <- NULL
    phi.discrete <- obj$phi$phi.marginal[,"phi"]
  }
  ##
  ## tausq.rel: same scheme as phi
  ##
  if(!is.null(obj$tausq.rel$status) &&
     obj$tausq.rel$status == "fixed"){
    tausq.rel.prior <- "fixed"
    tausq.rel <- obj$tausq.rel$fixed.value
    tausq.rel.discrete <- NULL 
  }
  else{
    tausq.rel.prior <- obj$tausq.rel$tausq.rel.marginal[,"expected"]
    tausq.rel <- 0
    tausq.rel.discrete <- obj$tausq.rel$tausq.rel.marginal[,"tausq.rel"]
  }
  ##
  res <- prior.control(beta.prior = beta.prior, beta = beta,
                       beta.var.std = beta.var.std,
                       sigmasq.prior = sigmasq.prior, 
                       sigmasq = sigmasq,  df.sigmasq = df.sigmasq,
                       phi.prior = phi.prior, 
                       phi = phi, phi.discrete = phi.discrete, 
                       tausq.rel.prior = tausq.rel.prior,
                       tausq.rel = tausq.rel,
                       tausq.rel.discrete = tausq.rel.discrete)
  res$joint.phi.tausq.rel <- obj$joint.phi.tausq.rel
  res$dep.prior <- TRUE
  return(res)
}

"prior.control" <-
  function(beta.prior = c("flat", "normal", "fixed"),
           beta = NULL, beta.var.std = NULL,
           sigmasq.prior = c("reciprocal",  "uniform", "sc.inv.chisq",  "fixed"),
           sigmasq = NULL,  df.sigmasq = NULL,
           phi.prior = c("uniform", "exponential", "fixed", "squared.reciprocal","reciprocal"),
           phi = NULL, phi.discrete = NULL, 
           tausq.rel.prior = c("fixed", "uniform"),
           tausq.rel = 0,
           tausq.rel.discrete = NULL)
{
  ## Builds and validates the prior specification for the Bayesian
  ## analysis performed by krige.bayes().
  ##
  ## beta.prior    : prior for the mean (trend) parameters; "normal"
  ##                 requires beta and beta.var.std.
  ## sigmasq.prior : prior for the partial sill; "sc.inv.chisq" requires
  ##                 sigmasq and df.sigmasq.
  ## phi.prior     : prior for the range parameter phi; a numeric vector
  ##                 is taken as user-supplied probabilities over the
  ##                 support points in phi.discrete.
  ## tausq.rel.prior : prior for the relative nugget; "fixed", "uniform"
  ##                 or a numeric probability vector over
  ##                 tausq.rel.discrete.
  ## phi.discrete, tausq.rel.discrete : equally spaced support points for
  ##                 the discretised priors.
  ##
  ## Returns an object of class "prior.geoR".
  ##
  ## 1. Checking parameters for the priors
  ##
  ##
  ## beta
  ##
  beta.prior <- match.arg(beta.prior)
  if(beta.prior == "fixed" && is.null(beta))
    stop("prior.control: argument beta must be provided with fixed prior for this parameter")
  if(beta.prior == "normal"){
    if(is.null(beta) || is.null(beta.var.std))
      stop("prior.control: arguments `beta` and `beta.var.std` must be provided with normal prior for the parameter beta")
  }
  ##
  ## sigmasq
  ##
  sigmasq.prior <- match.arg(sigmasq.prior)
  if(sigmasq.prior == "fixed" && is.null(sigmasq))
    stop("prior.control: argument `sigmasq' must be provided with fixed prior for the parameter sigmasq")
  if(sigmasq.prior == "sc.inv.chisq")
    if(is.null(sigmasq) || is.null(df.sigmasq))
      stop("prior.control: arguments `sigmasq` and `df.sigmasq' must be provided for this choice of prior distribution")
  if(!is.null(sigmasq))
    if(any(sigmasq < 0))
      stop("prior.control: negative values not allowed for `sigmasq'")
  ##
  ## phi
  ##
  if(!is.null(phi) && length(phi) > 1)
    stop("prior.control: length of phi must be one. Use phi.prior and phi.discrete to specify the prior for phi or enter a single fixed value for phi")
  ## a numeric phi.prior is interpreted as user-supplied probabilities
  ## over the support points in phi.discrete
  if(is.numeric(phi.prior)){
    phi.prior.probs <- phi.prior
    phi.prior <- "user"
    if(is.null(phi.discrete))
      stop("prior.control: argument phi.discrete with support points for phi must be provided\n")
    if(length(phi.prior.probs) != length(phi.discrete))
      stop("prior.control: user provided phi.prior and phi.discrete have incompatible dimensions\n")
    if(round(sum(phi.prior.probs), digits = 6) != 1)
      stop("prior.control: prior probabilities provided for phi do not sum up to 1")
  }
  else
    phi.prior <- match.arg(phi.prior, choices = c("uniform", "exponential", "fixed", "squared.reciprocal","reciprocal"))
  if(phi.prior == "fixed"){
    if(is.null(phi)){
      stop("prior.control: argument `phi` must be provided with fixed prior for this parameter")
    }
    phi.discrete <- phi
  }
  else{
    if(phi.prior == "exponential" && (is.null(phi) || (length(phi) > 1)))
      stop("prior.control: argument `phi` must be provided when using the exponential prior for the parameter phi")
    ## the discretisation must be an equally spaced grid (checked up to
    ## rounding at the 1e-08 level)
    if(!is.null(phi.discrete)){
      discrete.diff <- diff(phi.discrete)
      if(round(max(1e08 * discrete.diff)) != round(min(1e08 * discrete.diff)))
        stop("prior.control: the current implementation requires equally spaced values in the argument `phi.discrete`\n")
    } 
  }
  if(any(phi.discrete < 0)) stop("prior.control: negative values not allowed for parameter phi")
  ##
  ## tausq
  ##
  if(length(tausq.rel) > 1)
    stop("prior.control: length of tausq.rel must be one. Use tausq.rel.prior and tausq.rel.discrete to specify the prior for tausq.rel or enter a single fixed value for tausq.rel")
  ## a numeric tausq.rel.prior is a user-supplied discrete prior
  if(is.numeric(tausq.rel.prior)){
    tausq.rel.prior.probs <- tausq.rel.prior
    tausq.rel.prior <- "user"
    if(is.null(tausq.rel.discrete))
      stop("prior.control: argument tausq.rel.discrete with support points for tausq.rel must be provided\n")
    if(length(tausq.rel.prior.probs) != length(tausq.rel.discrete))
      stop("prior.control: user provided tausq.rel.prior and tausq.rel.discrete have incompatible dimensions\n")
    if(round(sum(tausq.rel.prior.probs), digits = 6) != 1)
      stop("prior.control: prior probabilities for tausq.rel provided do not add up to 1")
  }
  else
    tausq.rel.prior <- match.arg(tausq.rel.prior)
  if(tausq.rel.prior == "fixed"){
    if(is.null(tausq.rel))
      stop("prior.control: argument `tausq.rel` must be provided with fixed prior for the parameter tausq.rel")
    tausq.rel.discrete <- tausq.rel
  }
  else{
    if(is.null(tausq.rel.discrete))
      stop("prior.control: argument `tausq.rel.discrete` must be provided with chosen prior for the parameter tausq.rel")  
    discrete.diff <- diff(tausq.rel.discrete)
    if(round(max(1e08 * discrete.diff)) != round(min(1e08 * discrete.diff)))
      stop("prior.control: the current implementation requires equally spaced values in the argument `tausq.rel.discrete`\n")
  }
  ## Bug fix: this was `any(tausq.rel.discrete) < 0`, which compares the
  ## logical result of any() with 0 and can never be TRUE, so negative
  ## support points were silently accepted.
  if(any(tausq.rel.discrete < 0))
    stop("prior.control: negative values not allowed for parameter tausq.rel")
  ##
  ## Further checks on dimensions
  ##
  if(phi.prior != "fixed"){
    ## nsets = number of (phi, tausq.rel) support points; used to detect
    ## priors specified jointly over the discretised parameter space
    if(is.numeric(phi.discrete)){
      if(is.null(tausq.rel.discrete)) nsets <- length(phi.discrete)
      else nsets <- length(phi.discrete) * length(tausq.rel.discrete)
    }
    else nsets <- 0
    if(sigmasq.prior == "sc.inv.chisq"){
      if(length(sigmasq) == nsets) dep.prior <- TRUE
      else dep.prior <- FALSE
    }
    else dep.prior <- FALSE
    if(beta.prior == "normal"){
      if(dep.prior){
        if(((length(beta)/nsets)^2) != (length(beta.var.std)/nsets))
          stop("prior.control: beta and beta.var.std have incompatible dimensions")
      }
      else{
        if((length(beta))^2 != length(beta.var.std))
          stop("prior.control: beta and beta.var.std have incompatible dimensions")
        require(methods)
        ## beta.var.std must be symmetric, non-singular and admit a
        ## Cholesky factor; trySilent is a geoR compatibility wrapper
        if(exists("trySilent")){
          if(inherits(trySilent(solve.geoR(beta.var.std)), "try-error"))
            stop("prior.control: singular matrix in beta.var.std")
          if(inherits(trySilent(chol(beta.var.std)), "try-error"))
            stop("prior.control: no Cholesky decomposition for beta.var.std")
        }
        else{
          error.now <- options()$show.error.messages
          ## Bug fix: was `is.null(error.now) | error.now`, which yields a
          ## zero-length condition (an error) when the option is unset;
          ## || short-circuits safely on the NULL case.
          if (is.null(error.now) || error.now) 
            on.exit(options(show.error.messages = TRUE))
          options(show.error.messages = FALSE)
          if(inherits(try(solve.geoR(beta.var.std)), "try-error"))
            stop("prior.control: singular matrix in beta.var.std")
          if(inherits(try(chol(beta.var.std)), "try-error"))
            stop("prior.control: no Cholesky decomposition for beta.var.std")
        }
        if(any(beta.var.std != t(beta.var.std)))
          stop("prior.control: non symmetric matrix in beta.var.std")
      }
    }
  }
  else dep.prior <- FALSE
  if(!dep.prior && beta.prior == "normal"){
    attr(beta.var.std, "Size") <- length(beta)
  }
  ##
  ## 2. Assembling the priors.info component, one entry per parameter
  ##
  ip <- list(beta=list(), sigmasq=list(), phi=list(), tausq.rel=list())
  ##
  if(beta.prior == "fixed"){
    ip$beta$status <- "fixed"
    ip$beta$fixed.value <- beta 
  }
  else{
    ip$beta$status <- "random"
    ip$beta$dist <- beta.prior
    if(beta.prior == "flat")
      ip$beta$pars <- c(0, +Inf)
    if(beta.prior == "normal"){
      if(length(beta) == 1)
        ip$beta$pars <- c(mean=beta, var.std=beta.var.std)
      else
        ip$beta$pars <- list(mean=beta, var.std=beta.var.std)
    }
  }
  ##
  if(sigmasq.prior == "fixed"){
    ip$sigmasq$status <- "fixed"
    ip$sigmasq$fixed.value <- sigmasq 
  }
  else{
    ip$sigmasq$status <- "random"
    ip$sigmasq$dist <-  sigmasq.prior
    ## the improper priors are parameterised as limiting cases of the
    ## scaled inverse chi-squared distribution
    if(sigmasq.prior == "reciprocal")
      ip$sigmasq$pars <- c(df=0, var=+Inf)
    if(sigmasq.prior == "uniform")
      ip$sigmasq$pars <- c(df=-2, var=+Inf)
    if(sigmasq.prior == "sc.inv.chisq")
      ip$sigmasq$pars <- c(df=df.sigmasq, var=sigmasq)
  }
  ##
  if(phi.prior == "fixed"){
    ip$phi$status <- "fixed"
    ip$phi$fixed.value <- phi
  }
  else{
    ip$phi$status <- "random"
    ip$phi$dist <- phi.prior
    if(is.null(phi.discrete))
      ip$phi$probs <- NULL
    else{
      pd <- as.vector(phi.discrete)
      names(pd) <- NULL
      ## prior weights evaluated at the support points
      ip$phi$probs <-
        switch(phi.prior,
               uniform = rep(1, length(pd)),
               exponential = dexp(pd, rate=1/phi),
               squared.reciprocal = ifelse((pd > 0), 1/(pd^2),0),
               reciprocal = ifelse((pd > 0), 1/pd, 0),
               user = phi.prior.probs)
      names(ip$phi$probs) <- phi.discrete
    }
    if(phi.prior == "exponential")
      ip$phi$pars <- c(ip$phi$pars, exp.par=phi)
    else
      ip$phi$probs <- ip$phi$probs/sum(ip$phi$probs)
  }
  ##
  if(tausq.rel.prior == "fixed"){
    ip$tausq.rel$status <- "fixed"
    ip$tausq.rel$fixed.value <- tausq.rel 
  }
  else{
    ip$tausq.rel$status <- "random"
    ip$tausq.rel$dist <- tausq.rel.prior
    if(tausq.rel.prior == "user")
      ip$tausq.rel$probs <- tausq.rel.prior.probs
    else      
      ip$tausq.rel$probs <- rep(1/length(tausq.rel.discrete), length(tausq.rel.discrete))
    names(ip$tausq.rel$probs) <- tausq.rel.discrete
  }
  ## checking valid options for random/fixed parameters
  if(ip$phi$status == "random")
    if(ip$beta$status == "fixed" || ip$sigmasq$status == "fixed")
      stop("random phi with fixed sigmasq and/or beta not implemented")
  ##
  res <- list(beta.prior = beta.prior, beta = beta,
              beta.var.std = beta.var.std,
              sigmasq.prior = sigmasq.prior,
              sigmasq = sigmasq, df.sigmasq = df.sigmasq,
              phi.prior = phi.prior, phi = phi,
              phi.discrete = phi.discrete,  
              tausq.rel.prior = tausq.rel.prior,
              tausq.rel = tausq.rel,
              tausq.rel.discrete = tausq.rel.discrete, 
              priors.info = ip, dep.prior = dep.prior)
  class(res) <- "prior.geoR"
  return(res)
}

"output.control" <-
  function(n.posterior, n.predictive, moments, n.back.moments, 
           simulations.predictive, mean.var, quantile,
           threshold, signal, messages)
{
  ## Collects output options for krige.bayes() and related functions:
  ## sample sizes for the posterior/predictive distributions and which
  ## summaries of the predictive distribution to compute.
  ##
  ## n.posterior  : number of samples from the posterior (default 1000).
  ## n.predictive : number of samples from the predictive (default NULL).
  ## moments      : compute moments of the predictive? (default TRUE)
  ## n.back.moments: samples used for back-transformed moments (1000).
  ## simulations.predictive : simulate from the predictive distribution?
  ## mean.var     : compute mean/variance from the simulations?
  ## quantile     : quantile orders in [0,1] (TRUE = c(.025, .5, .975)).
  ## threshold    : numeric cut-off value(s) for exceedance probabilities.
  ## signal       : predict the signal (TRUE) or the noisy variable?
  ## messages     : print status messages? (default: geoR.messages option)
  ##
  ## Returns an object of class "output.geoR".
  ##
  ## Assigning default values
  ##
  if(missing(n.posterior)) n.posterior <- 1000
  if(missing(n.predictive)) n.predictive <- NULL
  if(missing(moments)) moments <- TRUE
  if(missing(n.back.moments)) n.back.moments <- 1000
  if(missing(simulations.predictive)){
    if(is.null(n.predictive)) simulations.predictive <- NULL
    else
      simulations.predictive <- ifelse(n.predictive > 0, TRUE, FALSE)
  }
  if(missing(mean.var)) mean.estimator <- NULL
  else mean.estimator <- mean.var
  if(missing(quantile))  quantile.estimator <- NULL
  else quantile.estimator <- quantile
  if(missing(threshold)) probability.estimator <- NULL 
  else probability.estimator <- threshold
  if(missing(signal)) signal <- NULL
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  ##
  ## any simulation-based summary forces simulation from the predictive;
  ## keep.simulations records whether the raw simulations are retained
  ##
  if(!is.null(quantile.estimator) || !is.null(probability.estimator) || !is.null(mean.estimator)){
    if(is.null(simulations.predictive)) keep.simulations <- FALSE
    else  keep.simulations <- ifelse(simulations.predictive, TRUE, FALSE)
    simulations.predictive <- TRUE
  }
  else keep.simulations <- NULL
  ##
  if(!is.null(quantile.estimator)){
    ## Bug fix: was `any(quantile.estimator) < 0 | any(quantile.estimator) > 1`,
    ## which compares the logical result of any() with 0/1 and never
    ## triggers, so out-of-range quantile orders were silently accepted.
    if(is.numeric(quantile.estimator))
      if(any(quantile.estimator < 0 | quantile.estimator > 1))
        stop("output.control: quantiles indicators must be numbers in the interval [0,1]\n")
    ## quantile = TRUE requests the default quantiles; restrict the test
    ## to logical input so a numeric value of 1 (a valid quantile order)
    ## is not silently replaced by the defaults
    if(is.logical(quantile.estimator) && all(quantile.estimator))
      quantile.estimator <- c(0.025, 0.5, 0.975)
  }
  if(!is.null(probability.estimator)){
    if(!is.numeric(probability.estimator))
      stop("output.control: probability.estimator must be a numeric value (or vector) of cut-off value(s)\n")
  }
  res <- list(n.posterior = n.posterior, n.predictive = n.predictive,
              moments = moments, n.back.moments = n.back.moments,
              simulations.predictive = simulations.predictive,
              keep.simulations = keep.simulations,
              mean.estimator = mean.estimator,
              quantile.estimator = quantile.estimator,
              probability.estimator = probability.estimator,
              signal = signal, messages.screen = messages.screen)
  class(res) <- "output.geoR"
  return(res)
}

"beta.sigmasq.post" <-
  function(n, beta.info, sigmasq.info, env.dists,
           model, xmat, y, phi, tausq.rel, do.prediction.moments,
           do.prediction.simulations,
           dets = FALSE, env.pred = NULL, signal)
{
  ##-----------------------------------------------------------------
  ## Computes the parameters of the conditional posterior
  ## [beta, sigmasq | phi, tausq.rel, data] and, optionally, the
  ## prediction mean/variance at new locations.
  ##
  ## env.dists is an environment containing the object "data.dist"
  ## with the distances between pairs of points (output of dists())
  ##
  ## sigmasq.info should contain:
  ##        df.sigmasq: df in prior for sigmasq
  ##                  df.sigmasq = 0 : reciprocal prior for sigmasq
  ##                  df.sigmasq = Inf : fixed sigmasq
  ##        n0S0 : sum of squares in prior for sigmasq
  ## beta.info should contain:
  ##        mivm : computed from the prior of beta: m\prime V^{-1} m
  ##        ivm  : computed from the prior of beta: V^{-1} m
  ##        iv   : computed from the prior of beta: V^{-1}
  ##                  iv = 0 : flat prior for beta
  ##                  iv = Inf : fixed beta
  ##        p    : degrees of freedom correction
  ##                  p = 0  : beta fixed or w/ normal prior
  ##                  p = beta.size : flat prior for beta  
  ## might contain
  ##        beta.fixed
  ##        sigmasq.fixed
  ##
  ## env.pred, when prediction is requested, is an environment holding
  ## "d0" (distances data/prediction locations), "trend.loc",
  ## "loc.coincide" and "data.coincide" -- see callers for details.
  ##-----------------------------------------------------------------
  ##
  ## Using C code to compute bilinear forms (faster)
  ##
  ## iR holds the lower triangle and diagonal of the inverse of the
  ## (unit-sill) correlation matrix R of the data locations
  iR <- varcov.spatial(dists.lowertri = get("data.dist", envir=env.dists),
                       cov.model = model$cov.model,
                       kappa = model$kappa, nugget = tausq.rel,
                       cov.pars = c(1, phi), inv = TRUE,
                       only.inv.lower.diag = TRUE, det = dets)
  ## y' R^{-1} y  (scalar) and  X' R^{-1} [y|X]  (first column is X'R^{-1}y)
  yiRy <- bilinearformXAY(X = y, lowerA = iR$lower.inverse,
                          diagA = iR$diag.inverse, Y = y)
  xiRy.x <- bilinearformXAY(X = xmat, lowerA = iR$lower.inverse,
                          diagA = iR$diag.inverse, Y = cbind(y, xmat))
  ##
  ## Using R alone (not convenient if det = TRUE !!!) 
  ##
###    R <- varcov.spatial(dists.lowertri=dists.lowertri, cov.model = model$cov.model,
###             kappa = model$kappa, nugget = tausq.rel,
###                        cov.pars = c(1, phi))$varcov
###    iRy.x <- solve.geoR(R, cbind(y,xmat))
###    xiRy.x <- crossprod(xmat, iRy.x)
###    yiRy <- crossprod(y, iRy.x[,1])
###    iRy.x <- NULL
  ##
  ## 1. Computing parameters of posterior for beta
  ##
  if(any(beta.info$iv == Inf)){
    ## degenerate case: beta fixed a priori, zero posterior variance
    beta.post <- beta.info$beta.fixed
    beta.var.std.post <- 0
    inv.beta.var.std.post <- Inf
  }
  else{
    ## posterior precision = prior precision + X'R^{-1}X
    inv.beta.var.std.post <- drop(beta.info$iv + xiRy.x[,-1])
    beta.var.std.post <- solve.geoR(inv.beta.var.std.post)
    beta.post <- drop(beta.var.std.post %*% (beta.info$ivm + xiRy.x[,1]))
  }
  ##
  ## 2. Computing parameters of posterior for sigmasq
  ##
  if(sigmasq.info$df.sigmasq == Inf){
    ## sigmasq fixed a priori
    S2.post <- sigmasq.info$sigmasq.fixed
    df.post <- Inf
  }
  else{
    df.post <- n + sigmasq.info$df.sigmasq - beta.info$p
    ##
    if(any(beta.info$iv == Inf)){
      ## residual sum of squares with beta held at its fixed value
      S2.post <- sigmasq.info$n0S0 + yiRy -
        2*crossprod(beta.post, xiRy.x[,1]) +
        crossprod(beta.post, (xiRy.x[,-1] %*% beta.post))
    }
    else
      S2.post <- sigmasq.info$n0S0 + beta.info$mivm + yiRy -
        crossprod(beta.post, (inv.beta.var.std.post %*% beta.post))
    S2.post <- drop(S2.post/df.post)
  }
  ##
  res <- list(beta.post = beta.post,
              beta.var.std.post = beta.var.std.post,
              df.post = df.post, S2.post = S2.post)
  if(dets){
    ## determinants needed by the marginal posterior of (phi, tausq.rel)
    res$log.det.to.half <- iR$log.det.to.half
    res$det.XiRX <- det(xiRy.x[,-1, drop=FALSE])
  }
  ##
  if(do.prediction.moments){
    ## correlations between prediction and data locations
    env.r0 <- new.env()
    assign("r0",cov.spatial(obj = get("d0", envir=env.pred),
                            cov.model = model$cov.model,
                            kappa = model$kappa, cov.pars = c(1, phi)),
           envir=env.r0)
    ## care here, reusing b: first r0' R^{-1} [y|X], then the trend residual
    b <- bilinearformXAY(X = get("r0", envir=env.r0),
                         lowerA = iR$lower.inverse,
                         diagA = iR$diag.inverse, Y = cbind(y, xmat))
    riRy <- b[,1, drop=FALSE]
    b <- get("trend.loc", envir=env.pred) -  b[,-1, drop=FALSE]
    ##
    ## kriging mean; honour exact interpolation at coincident locations
    res$pred.mean <- drop(riRy + b %*% beta.post)
    if((tausq.rel < 1e-12) & (!is.null(get("loc.coincide", envir=env.pred))))
      res$pred.mean[get("loc.coincide", envir=env.pred)] <- get("data.coincide", envir=env.pred)
    ##
    ## standardized prediction variance: 1 - r0'R^{-1}r0 (+ beta uncertainty)
    R.riRr.bVb <- 1 - diagquadraticformXAX(X = get("r0", envir=env.r0),
                                           lowerA = iR$lower.inverse,
                                           diagA = iR$diag.inverse)
    remove("env.r0")
     if(all(beta.info$iv != Inf))
       R.riRr.bVb <- R.riRr.bVb +
         diagquadraticformXAX(X = t(b),
                              lowerA=beta.var.std.post[lower.tri(beta.var.std.post)],
                              diagA = diag(beta.var.std.post))
    ##
    ## add the nugget only when predicting the variable (not the signal)
    nug.factor <- ifelse(signal, 0, tausq.rel)
    res$pred.var <- S2.post * (nug.factor + R.riRr.bVb)
    if(((tausq.rel < 1e-12) | signal) & !is.null(get("loc.coincide", envir=env.pred)))
      res$pred.var[get("loc.coincide", envir=env.pred)] <- 0
    res$pred.var[res$pred.var < 1e-16] <- 0
    ## t-distribution variance inflation when sigmasq is random
    if(sigmasq.info$df.sigmasq != Inf)
      res$pred.var <- (df.post/(df.post-2)) * res$pred.var
  }
  ##
  if(do.prediction.simulations){
    ## pass the inverse correlation back for the simulation step
    res$inv.diag <- iR$diag.inverse
    res$inv.lower <- iR$lower.inverse
  }
  return(res)
}

"sample.prior" <-
  function(n, kb.obj = NULL, prior = prior.control())
{
  ## Draws n independent samples from the prior distribution of
  ## (beta, sigmasq, phi, tausq.rel). The prior is taken either from
  ## `prior` or from the call stored in a krige.bayes output `kb.obj`.
  ## Returns a data.frame with n rows and one column per parameter.
  call.fn <- match.call()
  ##
  if(!is.null(kb.obj))
    prior <- eval(kb.obj$call$prior)
  ##
  ## Checking for improper priors (these cannot be sampled)
  ##
  if(prior$beta.prior == "flat")
    stop("sampling is not possible: improper prior for beta")
  if(any(prior$sigmasq.prior == c("reciprocal", "uniform")))
    stop("sampling is not possible: improper prior for sigmasq")
  ##
  ## preparing output object
  ##
  beta.size <- length(prior$beta)
  if(beta.size == 1)
    beta.name <- "beta"
  else
    beta.name <- paste("beta", (0:(beta.size-1)), sep="")
  simul <- as.data.frame(matrix(0, nrow=n, ncol = beta.size+3))
  names(simul) <- c(beta.name, "sigmasq","phi","tausq.rel")
  ##
  ## Sampling phi and tausq.rel
  ##
  if(prior$phi.prior == "fixed" & prior$tausq.rel.prior == "fixed"){
    simul$phi <- rep(prior$phi, n)
    ## bug fix: this previously assigned simul$tausq, creating a spurious
    ## extra column and leaving the tausq.rel column at zero
    simul$tausq.rel <- rep(prior$tausq.rel, n)
  }
  else{
    ##
    ## Building the discrete joint prior for (phi, tausq.rel)
    ## (independent; tausq.rel taken as uniform over its support)
    ##
    phi.discrete <- prior$phi.discrete
    tausq.rel.discrete <- prior$tausq.rel.discrete
    both.discrete <- expand.grid(phi.discrete, tausq.rel.discrete)
    prob.discrete <- function(phi.discrete, tausq.discrete, prior){
      pd <- phi.discrete
      ## bug fix: use the function argument rather than silently reading
      ## tausq.rel.discrete from the enclosing scope
      td <- tausq.discrete
      probs <- switch(prior$phi.prior,
                      ## bug fix: outer() requires FUN to return a vector of
                      ## length length(x); the original scalar 1 errored
                      uniform = outer(pd, td, function(x,y){rep(1, length(x))}),
                      reciprocal = outer(pd, td, function(x,y){ifelse(x>0, 1/x, 0.0)}),
                      squared.reciprocal = outer(pd, td, function(x,y){ifelse(x>0, 1/(x^2), 0.0)}),
                      ## bug fix: exponential density is (1/par) exp(-x/par);
                      ## the original computed exp(x^(1/par)).
                      ## NOTE(review): field name `exponential.par` kept from
                      ## the original -- confirm against prior.control()
                      exponential = outer(pd, td, function(x,y){(1/prior$exponential.par) * exp(-x/prior$exponential.par)}),
                      fixed = outer(pd, td, function(x,y){rep(1, length(x))}))
      return(probs/sum(probs))
    }
    ## bug fix: the original passed the undefined object `tausq.discrete`
    both.discrete$probs <- as.vector(prob.discrete(phi.discrete = phi.discrete,
                                                   tausq.discrete = tausq.rel.discrete,
                                                   prior = prior))
    n.points <- nrow(both.discrete)
    ind <- sample((1:n.points), n, replace = TRUE,
                  prob = both.discrete$probs)
    simul$phi <- both.discrete[ind, 1]
    simul$tausq.rel <- both.discrete[ind, 2]
  }
  ##
  ## Sampling sigmasq (scaled-inverse-chisquare)
  ##
  if(prior$sigmasq.prior == "fixed")
    simul$sigmasq <- rep(prior$sigmasq, n)
  else
    simul$sigmasq <- rinvchisq(n, df = prior$df.sigmasq,
                               scale = prior$sigmasq)
  ##
  ## Sampling beta | sigmasq (normal, variance scaled by sigmasq)
  ##
  if(prior$beta.prior == "fixed")
    simul[,1:beta.size] <- matrix(rep(prior$beta, rep(n, beta.size)), ncol=beta.size)
  else{
    if(beta.size == 1){
      simul$beta <- rnorm(n, mean = prior$beta,
                          sd=sqrt(simul$sigmasq * prior$beta.var.std))
    }
    else{
      ## one multivariate normal draw for each simulated sigmasq,
      ## using the SVD square root of sigmasq * beta.var.std
      "sample.beta" <- function(sigmasq, beta, beta.var.std){
        cov.values <- sigmasq * beta.var.std
        cov.svd <- svd(cov.values)
        cov.decomp <- cov.svd$u %*% (t(cov.svd$u) * sqrt(cov.svd$d))
        zsim <- beta + drop(cov.decomp %*% rnorm(length(beta)))
        return(zsim)
      }
      simul[,1:beta.size] <-
        t(sapply(simul$sigmasq, sample.beta, beta = prior$beta,
                 beta.var.std = prior$beta.var.std))
    }
  }
  attr(simul, "Call") <- call.fn
  return(simul) 
}

"sample.posterior" <-
  function(n, kb.obj)
{
  ## Draws n samples from the posterior distribution of the model
  ## parameters stored in a krige.bayes output (or directly in its
  ## $posterior component). Returns a data.frame with columns
  ## beta*, sigmasq, phi and tausq.rel.
  call.fn <- match.call()
  ##
  if(length(class(kb.obj)) == 0)
    stop("kb.obj must be an object with an output of krige.bayes")
  if(any(class(kb.obj) == "krige.bayes")) post <- kb.obj$posterior
  if(any(class(kb.obj) == "posterior.krige.bayes")) post <- kb.obj
  if(all(class(kb.obj) != "krige.bayes") &
     all(class(kb.obj) != "posterior.krige.bayes"))
    stop("kb.obj must be an object with an output of krige.bayes")
  ##
  ## preparing data frame to store the output
  ##
  ## beta dimension: a 2-d mean array signals a scalar beta, otherwise
  ## the third dimension of the (phi x tausq.rel x beta) array is used
  if(length(dim(post$beta$pars$mean)) == 2) beta.size <- 1
  else beta.size <- dim(post$beta$pars$mean)[3]
  if(beta.size == 1)
    beta.name <- "beta"
  else
    beta.name <- paste("beta", (0:(beta.size-1)), sep="")
  simul <- as.data.frame(matrix(0, nrow=n, ncol = beta.size+3))
  names(simul) <- c(beta.name, "sigmasq","phi","tausq.rel")
  ##
  ## Sampling phi and tausq.rel
  ##
  if(post$phi$status == "fixed" & post$tausq.rel$status == "fixed"){
    simul$phi <- rep(post$phi$fixed.value, n)
    simul$tausq.rel <- rep(post$tausq.rel$fixed.value, n)
    ind <- 1
    phi.discrete <- post$phi$fixed.value
  }
  else{
    ##
    ## sampling (phi, tausq.rel) jointly from the discretised posterior
    ##
    n.points <- length(post$joint.phi.tausq.rel)
    phi.discrete <- post$phi$phi.marginal[,"phi"]
    tausq.rel.discrete <- post$tausq.rel$tausq.rel.marginal[,"tausq.rel"]
    phi.tau.grid <- expand.grid(phi.discrete, tausq.rel.discrete)
    ind <- sample((1:n.points), n, replace = TRUE,
                  prob = as.vector(post$joint.phi.tausq.rel))
    simul$phi <- phi.tau.grid[ind, 1]
    simul$tausq.rel <- phi.tau.grid[ind, 2]
  }
  ##
  ## sampling sigmasq, conditional on the sampled (phi, tausq.rel)
  ##
  if(post$sigmasq$status == "fixed")
    simul$sigmasq <- rep(post$sigmasq$fixed.value, n)
  else
    simul$sigmasq <- rinvchisq(n, df = post$sigmasq$pars$df,
                               scale = post$sigmasq$pars$S2[ind])
  ##
  ## sampling beta
  ##
  if(post$beta$status == "fixed")
    simul[,1:beta.size] <-
      matrix(rep(post$beta$fixed.value, rep(n, beta.size)), ncol=beta.size)
  else{
    ## bug fix: the original reset beta.size to length(post$beta) here,
    ## which counts list components, not the dimension of beta
    if(beta.size == 1)
      simul$beta <- rnorm(n, mean = post$beta$pars$mean[ind],
                          sd=sqrt(post$beta$pars$var[ind]))
    else{
      require(MASS)
      if(post$phi$status == "fixed" & post$tausq.rel$status == "fixed"){
        simul[,1:beta.size] <-
          mvrnorm(n=n, mu = post$beta$pars$mean,
                  Sigma = matrix(post$beta$pars$var, ncol=beta.size))
      }
      else{
        ## maps a linear index on the (phi, tausq.rel) grid back to
        ## (row, col) and draws the beta vector stored there
        "simula.betavec" <- function(i, nphi){
          nc <- ceiling(i/nphi)
          nr <- i %% nphi
          if(nr == 0) nr <- nphi
          beta.sim <-
            mvrnorm(n=n, mu = post$beta$pars$mean[nr,nc,],
                    Sigma = matrix(post$beta$pars$var[nr,nc,],
                      ncol=beta.size))
          return(beta.sim)
        }
        ## bug fix: the original read t(sapply(ind), simula.betavec, ...)
        ## -- a misplaced parenthesis, so sapply() had no FUN
        simul[,1:beta.size] <- t(sapply(ind, simula.betavec,
                                        nphi = length(phi.discrete)))
      }
    }
  }
  names(simul) <- c(beta.name, c("sigmasq", "phi", "tausq.rel"))
  attr(simul, "Call") <- call.fn
  return(simul) 
}

"statistics.predictive" <-
  function(simuls, mean.var = TRUE, quantile, threshold)
{
  ## Summarises predictive simulations. `simuls` is a matrix with one
  ## row per prediction location and one column per simulation.
  ## Optionally returns row-wise means/variances, quantiles (probs
  ## given in `quantile`) and probabilities P(Y <= threshold).
  results <- list()
  if(missing(quantile)) quantile.estimator <- NULL
  else quantile.estimator <- quantile
  if(missing(threshold)) probability.estimator <- NULL
  else probability.estimator <- threshold
  ##
  ## bug fix: `&` does not short-circuit, so mean.var = NULL used to
  ## produce a length-zero condition and crash the if()
  if(!is.null(mean.var) && mean.var){
    results$mean.simulations <- drop(apply(simuls, 1, mean))
    results$variance.simulations <- drop(apply(simuls, 1, var))
  }
  if(!is.null(quantile.estimator)) {
    ## the formal `quantile` shadows stats::quantile here, but apply()'s
    ## match.fun(..., mode = "function") still resolves the function
    results$quantiles.simulations <-
      drop(apply(simuls, 1, quantile, probs = quantile.estimator))
    if(length(quantile.estimator) > 1) {
      ## bug fix: spelled out in full; the original `results$quantiles`
      ## only worked through `$` partial matching
      results$quantiles.simulations <-
        as.data.frame(t(results$quantiles.simulations))
      names(results$quantiles.simulations) <-
        paste("q", 100 * quantile.estimator, sep = "")
    }
  }
  if(!is.null(probability.estimator)) {
    nsims <- ncol(simuls)
    ## proportion of simulations at each location not exceeding each cut-off
    "prob.cutoff" <- function(x, thres, nsims){
      return(sapply(thres, FUN = function(cut){sum(x <= cut)/nsims}))
    }
    results$probabilities.simulations <-
      drop(apply(simuls, 1, prob.cutoff,
                 thres = probability.estimator, nsims = nsims))
    if(length(threshold) > 1){
      results$probabilities.simulations <-
        as.data.frame(t(results$probabilities.simulations))
      names(results$probabilities.simulations) <-
        paste("threshold", probability.estimator, sep = "")
    }
  }
  return(results)
}

"rMVnorm" <-
  function(cov.values, beta.size)
{
  ##
  ## Draws one sample from a zero-mean multivariate normal distribution.
  ## cov.values: the covariance matrix entries, flattened to a vector
  ## of length beta.size^2.
  ##
  sigma <- matrix(cov.values, ncol = beta.size)
  ## square root of sigma via SVD: u diag(sqrt(d)) u'
  decomp <- svd(sigma)
  sqrt.sigma <- decomp$u %*% (t(decomp$u) * sqrt(decomp$d))
  as.vector(sqrt.sigma %*% rnorm(beta.size))
}

"plot.krige.bayes" <-
  function(x, phi.dist = TRUE, tausq.rel.dist = TRUE, add = FALSE,
           type = c("bars", "h", "l", "b", "o", "p"), thin, ...)
{
  ## Plots prior and posterior discrete distributions of phi and/or
  ## tausq.rel stored in a krige.bayes output. `thin` (one value, or
  ## one per parameter) subsamples the support to unclutter the plot.
  ## bug fix: the class test previously inspected class(krige.bayes)
  ## (the function object) instead of the argument x
  if(!inherits(x, "krige.bayes"))
    stop("object x must be of the class `krige.bayes`")
  if(missing(thin)) thin <- c(1,1)
  if(length(thin) == 1) thin <- rep(thin, 2)
  ##
  type <- match.arg(type)
  ldots <- list(...)
  ## default colours: prior/posterior pair for barplots, black otherwise
  if(is.null(ldots$col)){
    if(type == "bars") col <- 0:1
    else col <- "black"
  }
  else col <- ldots$col
  if(type != "bars"){
    if(is.null(ldots$lty)) lty <- 1
    else lty <- ldots$lty
    if(is.null(ldots$lwd)) lwd <- 1:2
    else lwd <- ldots$lwd
  }
  ##
  if(phi.dist){
    if(x$prior$phi$status == "fixed")
      cat("parameter `phi` is fixed\n")
    else{
      phi.vals <- x$posterior$phi$phi.marginal[,"phi"]
      ## small horizontal offset so prior/posterior spikes do not overlap
      phi.off <- 0.1 * diff(phi.vals[1:2])
      phi.table <- rbind(x$prior$phi$probs, x$posterior$phi$dist)
      colnames(phi.table) <- phi.vals
      ## thinning of the support
      nphi <- length(phi.vals)
      ind <- seq(1, nphi, by = thin[1])
      phi.vals <- phi.vals[ind]
      phi.table <- phi.table[,ind]
      if(is.null(ldots$ylim)) phi.ylim <- c(0, 1.1*max(phi.table))
      else phi.ylim <- ldots$ylim
      if(type == "bars")
        barplot(phi.table, legend.text=c("prior", "posterior"),
                beside=TRUE, col=col, ylim = phi.ylim, 
                xlab = expression(phi), ylab = "density")
      else{
        if(type=="h")
          phi.vals <- cbind(phi.vals - phi.off, 
                            phi.vals + phi.off)
        matplot(phi.vals, t(phi.table), type = type,
                lwd = lwd, lty = lty, ylim = phi.ylim, 
                col = col, xlab = expression(phi),
                ylab = "density", add = add)
      }
    }
  }
  if(tausq.rel.dist){
    if(x$prior$tausq.rel$status == "fixed")
      cat("parameter `tausq.rel` is fixed\n")
    else{
      tausq.rel.vals <- x$posterior$tausq.rel$tausq.rel.marginal[,"tausq.rel"]
      tausq.rel.off <- 0.1 * diff(tausq.rel.vals[1:2])
      tausq.rel.table <- rbind(x$prior$tausq.rel$probs,
                               x$posterior$tausq.rel$dist)
      colnames(tausq.rel.table) <-  tausq.rel.vals
      ## thinning of the support
      ntausq.rel <- length(tausq.rel.vals)
      ind <- seq(1, ntausq.rel, by = thin[2])
      tausq.rel.vals <- tausq.rel.vals[ind]
      tausq.rel.table <- tausq.rel.table[,ind]
      if(is.null(ldots$ylim)) tau.ylim <- c(0, 1.1*max(tausq.rel.table))
      else tau.ylim <- ldots$ylim
      if(type == "bars")
        barplot(tausq.rel.table, legend.text=c("prior", "posterior"),
                beside=TRUE, col=col, ylim = tau.ylim, 
                xlab = expression(tau[rel]^2), ylab = "density")
      else{
        if(type=="h")
          tausq.rel.vals <- cbind(tausq.rel.vals - tausq.rel.off,
                                  tausq.rel.vals + tausq.rel.off)
        matplot(tausq.rel.vals, t(tausq.rel.table), type = type, lwd = lwd, lty = lty,
                col = col, ylim = tau.ylim, 
                xlab = expression(tau[rel]^2), ylab = "density", add = add)
      }
    }
  }
  return(invisible())
}

##"lines.posterior.krige.bayes" <-
##  function(x, parameter = c("beta", "sigmasq", "phi", "tausq.rel"), ...)#
##{
##  if(parameter == "beta"){
##    attach(x$posterior$beta, pos=1)
##    
##  return(invisible())#
##}

  
"print.betavar" <-
  function(x, ...)
{
  ## Prints the lower triangle (including the diagonal) of a beta
  ## covariance matrix stored as a flat vector carrying a "Size"
  ## attribute; the upper triangle is shown blank.
  p <- attr(x, "Size")
  full <- matrix(x, p, p)
  shown <- matrix(NA, p, p)
  keep <- row(shown) >= col(shown)
  shown[keep] <- full[keep]
  if(p > 1){
    nm <- paste("beta", 0:(p-1), sep="")
    dimnames(shown) <- list(nm, nm)
  }
  print(shown, na="")
  return(invisible(full))
}


"hist.krige.bayes" <-
  function(x, pars, density.est = TRUE,
           histogram = TRUE, ...)
{
  ## Histograms and/or kernel density estimates of the posterior
  ## samples stored in a krige.bayes output. Returns (invisibly) a
  ## list holding the "histogram" and density objects per parameter.
  Ldots <- list(...)
  ## default parameter set: every randomly varying parameter;
  ## pars = -1 additionally drops the first beta
  if(missing(pars) | (!missing(pars) && pars == -1)){
    ppars <- names(x$posterior$sample)
    np <- length(ppars) 
    if(x$prior$tausq.rel$status != "random") ppars <- ppars[-np]
    if(x$prior$phi$status != "random") ppars <- ppars[-(np-1)]
    if(x$prior$sigmasq$status != "random") ppars <- ppars[-(np-2)]
    if((!missing(pars) && pars == -1)) ppars <- ppars[-1]
    pars <- ppars
  }
  res <- list(histogram = list(), density.estimation = list())
  for(ipar in pars){
    ## x-axis label for the current parameter
    if(substr(ipar, 1,4) == "beta")
      if(nchar(ipar) == 4) xl <- expression(beta)
      else xl <- substitute(beta[N], list(N=as.numeric(strsplit(ipar, "beta")[[1]][2])))
    if(ipar == "sigmasq") xl <- expression(sigma^2)
    if(ipar == "phi") xl <- expression(phi)
    if(ipar == "tausq.rel") xl <- expression(tau[rel]^2)
    y <- as.vector(x$posterior$sample[[ipar]])
    ymax <- 0
    if(histogram){
      res$histogram[[ipar]] <- plH <- hist(y, prob = FALSE, plot = FALSE, ...)
      ymax <- max(plH$dens)
    }
    if(density.est){
      if(is.null(Ldots$width)){
        ## use MASS's normal-reference bandwidth when available
        if(require(MASS, quietly=TRUE))
          plD <- density(y,width=bandwidth.nrd(y), ...)
        else plD <- density(y, ...)
      }
      else
        ## bug fix: the original did plD <- lines(density(...)), which
        ## draws before any plot exists and stores lines()'s NULL return
        plD <- density(y, width = Ldots$width)
      res$density.estimation[[ipar]] <- plD 
      ymax <- max(c(ymax, plD$y))
    }
    if(histogram){
      plot(plH, ylim =c(0, ymax), freq = FALSE,
           xlab=xl, main= Ldots$main, ...)
      if(density.est) lines(plD)
    }
    else
      if(density.est) plot(plD, xlab=xl, ...)
  }
  return(invisible(res))
}

"lines.variomodel.krige.bayes" <- 
  function(x, summary.posterior, max.dist, uvec,
           posterior = c("variogram", "parameters"),  ...)
{
  ## Adds to the current plot the variogram model implied by a
  ## krige.bayes output: either a summary of posterior simulations of
  ## the variogram itself (posterior = "variogram", summary.posterior
  ## must then be a function) or the variogram evaluated at a
  ## mode/median/mean summary of the posterior parameters.
  my.l <- list()
  ##
  ## Setting the maximum distance to compute the variogram
  ##
  if(missing(max.dist)){
    my.l$max.dist <- x$max.dist
    if (is.null(my.l$max.dist) | !is.numeric(my.l$max.dist)) 
      stop("a numerical value must be provided to the argument max.dist")
  }
  else my.l$max.dist <- max.dist
  ##
  ## picking the correlation model (kappa only where the family uses it)
  ##
  if(is.null(x$call$cov.model))
    my.l$cov.model <- "exponential"
  else {
    my.l$cov.model <- x$call$cov.model
    if(x$call$cov.model == "matern" | x$call$cov.model == "powered.exponential" |
       x$call$cov.model == "cauchy" | x$call$cov.model == "gneiting-matern")
      my.l$kappa <- x$call$kappa
    else my.l$kappa <- NULL
  }
  ##
  posterior <- match.arg(posterior)
  if(is.function(summary.posterior)) spost <- post.fc <- summary.posterior
  else spost <- match.arg(summary.posterior, choices = c("mode", "median", "mean"))
  ##
  if(!is.null(x$posterior$sample) & posterior == "variogram"){
    if(!is.function(spost))
      stop("summary.posterior must be a function when posterior = `variogram`")
    ## bug fix: a user-supplied uvec was previously ignored, leaving
    ## my.l$uvec undefined
    if(missing(uvec)) my.l$uvec <- seq(0, my.l$max.dist, length.out = 51)
    else my.l$uvec <- uvec
    ## variogram implied by one posterior draw (sigmasq, phi, tausq.rel)
    calc.vario <- function(x, info = my.l){
      return((x[1] * (1 + x[3])) -
             cov.spatial(info$uvec, cov.model = my.l$cov.model, kappa = my.l$kappa, cov.pars = x[1:2]))
    }
    post.vario <- apply(x$posterior$sample[c("sigmasq","phi","tausq.rel")], 1, calc.vario)
    gamma.post <- drop(apply(post.vario, 1, post.fc))
    if(is.vector(gamma.post))
      lines(my.l$uvec, gamma.post, ...)
    else
      matplot(my.l$uvec, t(gamma.post), add = TRUE, ...)
  }
  else{
    if(is.function(spost))
      stop("summary.posterior must be one of `mean`, `median` or `mode` when posterior = `parameters`")
    ## the sigmasq summary uses the conditional mode stored as "mode.cond"
    if(spost == "mode")
      spost1 <- "mode.cond"
    else spost1 <- spost
    my.l$cov.pars <- c(x$posterior$sigmasq$summary[spost1],
                       x$posterior$phi$summary[spost])
    names(my.l$cov.pars) <- NULL
    if(is.numeric(x$posterior$tausq.rel$summary))
      nugget <- x$posterior$tausq.rel$summary[spost] * my.l$cov.pars[1]
    else nugget <- 0
    names(nugget) <- NULL
    my.l$sill.total <- nugget + my.l$cov.pars[1]
    ## theoretical variogram: total sill minus the covariance function
    gamma.f <- function(x, my.l)
      {
        return(my.l$sill.total -
               cov.spatial(x, cov.model = my.l$cov.model, kappa = my.l$kappa,
                           cov.pars = my.l$cov.pars))
      }
    curve(gamma.f(x,my.l=my.l), from = 0, to = my.l$max.dist, add=TRUE, ...)
  }
  return(invisible())
}

"print.krige.bayes" <-
  function(x, ...)
{
  ## Print method for krige.bayes output: simply defers to the
  ## default printing of all components.
  print.default(x, ...)
}

"print.posterior.krige.bayes" <-
  function(x, ...)
{
  ## Print method for the $posterior component of a krige.bayes
  ## output: defers to the default printing of all components.
  print.default(x, ...)
}
##
## Basic data manipulation for the geoR package
## --------------------------------------------
##
## Functions for reading data and basic exploratory analysis. 
## These functions include
##    - create objects of the class geodata
##    - methods for this class
##

"is.geodata" <- function (x){
  ## An object qualifies as geodata when it carries the class tag,
  ## a two-column coords matrix and data with one value per location.
  if (!inherits(x, "geodata")) return(FALSE)
  if (is.null(x$coords) || is.null(x$data)) return(FALSE)
  ncol(x$coords) == 2 && (nrow(x$coords) == nrow(as.matrix(x$data)))
}

"read.geodata" <-
  function(file, header = FALSE, coords.col= 1:2, data.col = 3,
           data.names = NULL, covar.col = NULL,
           covar.names = "header", realisations = NULL,
           na.action = c("ifany", "ifdata", "ifcovar", "none"),
           rep.data.action, rep.covar.action, ...)
{
  ## Reads a table from `file` and converts it into a geodata object.
  ## Thin wrapper around read.table() + as.geodata(); see as.geodata()
  ## for the meaning of the column-selection and NA/duplicate options.
  call.fc <- match.call()
  ##
  obj <- read.table(file = file, header = header, ...)
  ## covar.names == "header": take covariate names from the file header
  if(all(covar.names == "header")){
    if(!is.null(covar.col)){
      col.names <- names(obj)
      covar.names <- col.names[covar.col]
    }
    else covar.names <- NULL
  }
  ##
  if(missing(rep.data.action)) rep.data.action <- "none"
  if(!is.function(rep.data.action))
    rep.data.action <- match.arg(rep.data.action, choices = c("none", "first")) 
  if(missing(rep.covar.action)) rep.covar.action <- rep.data.action
  if(!is.function(rep.covar.action))
    rep.covar.action <- match.arg(rep.covar.action, choices = c("none", "first")) 
  ##
  ## bug fix: data.names and na.action were accepted but silently
  ## ignored; they are now forwarded to as.geodata()
  res <- as.geodata(obj = obj, coords.col = coords.col, data.col = data.col,
                    data.names = data.names, covar.col = covar.col,
                    covar.names = covar.names, realisations = realisations,
                    na.action = na.action,
                    rep.data.action = rep.data.action,
                    rep.covar.action = rep.covar.action)
  res$call <- call.fc
  return(res)
}

"as.geodata" <-
  function(obj, coords.col = 1:2, data.col = 3, data.names = NULL, 
           covar.col = NULL, covar.names = "obj.names", realisations = NULL,
           na.action = c("ifany", "ifdata", "ifcovar", "none"),
           rep.data.action, rep.covar.action)
{
  ## Converts a matrix/data.frame (or a grf simulation) into an object
  ## of class "geodata": a list with $coords (2-column matrix), $data,
  ## and optionally $covariate and $realisations.
  ##   coords.col/data.col/covar.col : column indices in obj
  ##   na.action      : which NA's cause row removal
  ##   rep.data.action / rep.covar.action : how to handle data recorded
  ##       at coincident locations ("none", "first", or a function)
  ## converts a simulation generated by grf
  if(class(obj)[1] == "grf"){
    res <- list(coords=obj$coords, data=obj$data)
    class(res) <- "geodata"
    return(res)
  }
  ## checking input
  if(!is.matrix(obj) & !is.data.frame(obj))
    stop("object must be a matrix or data.frame")
  if(!is.null(data.names) & length(data.col) < 2)
    stop("data.names allowed only if there is more than 1 column of data")
  res <- list()
  ##
  ## testing for NA's setting the coordinates of the data locations
  ##
  ## NOTE: rows with NA in ANY column are dropped here, not only rows
  ## with NA coordinates
  if(any(is.na(obj[,coords.col]))){
    warning("NA's not allowed in the coordinates")
    obj <- obj[complete.cases(obj),,drop = FALSE]
    warning("eliminating rows with NA's")
  }
  res$coords <- as.matrix(obj[,coords.col])
  ##
  ## setting the data
  ##
  res$data <- as.matrix(obj[,data.col])
  if(length(data.col) == 1) res$data <- as.vector(res$data)
  else if(!is.null(data.names)) colnames(res$data) <- data.names
  ##
  ## setting the covariates, if the case 
  ##
  if(!is.null(covar.col)){
    res[[3]] <- as.data.frame(obj[,covar.col])
    ## covar.names == "obj.names": reuse the column names of obj
    if(all(covar.names == "obj.names")){
      if(is.matrix(obj))      
        col.names <- dimnames(obj)[2]
      if(is.data.frame(obj))      
        col.names <- names(obj)
    }
    names(res)[3] <- "covariate"
    if(all(covar.names == "obj.names"))
      if(is.null(col.names)) names(res[[3]]) <- paste("covar", 1:length(covar.col), sep="")
      else  names(res[[3]]) <- col.names[covar.col]
    else
      names(res[[3]]) <- covar.names
    covar.names <- names(res[[3]])
  }
  ##
  ## Dealing with NA's
  ##
  na.action <- match.arg(na.action)
  if(na.action != "none"){
    ## na.data / na.covar flag which components trigger row removal
    if(na.action == "ifany")
      na.data <- na.covar <- TRUE
    if(na.action == "ifdata")
      {na.data <- TRUE; na.covar <- FALSE}
    if(na.action == "ifcovar")
      {na.data <- FALSE; na.covar <- TRUE}
    not.na <- function(x) !any(is.na(x))
    if(na.data){
      ind <- apply(as.matrix(res$data), 1, not.na)
      if(!all(ind)){
        ## drop the same rows from coords, data and covariates
        res$coords <- res$coords[ind,]
        res$data <- drop(as.matrix(res$data)[ind,])
        if(!is.null(covar.col))
          res[[3]] <- drop(as.matrix(res[[3]][ind,]))
        cat(paste("as.geodata:", sum(!ind), "points removed due to NA in the data\n")) 
      }
    }
    if(!is.null(covar.col) && na.covar){
      ind <- apply(as.matrix(res[[3]]), 1, not.na)
      if(!all(ind)){
        res$coords <- res$coords[ind,]
        res$data <- drop(as.matrix(res$data)[ind,])
        if(!is.null(covar.col))
          res[[3]] <- drop(res[[3]][ind,])
        cat(paste("as.geodata:", sum(!ind), "points removed due to NA in the covariate(s)\n")) 
      }
    }
  }
  ##
  ## Checking whether there are data from different realisations
  ##
  ## NOTE(review): when realisations is NULL the default factor is
  ## built but not stored in res -- apparently intentional
  if(is.null(realisations)) realisations <- as.factor(rep(1, nrow(res$coords)))
  else{
    ## a single number is read as a column index into obj
    if(is.numeric(realisations) && length(realisations) == 1)
      realisations <- as.factor(obj[,realisations])
    res$realisations <- realisations
  }
  if(length(realisations) != nrow(res$coords))
    stop("realisations and coords have incompatible dimensions")
  ##
  ## Checking whether there are data at coincident locations
  ## and dealing with this according to the value of the argument
  ## rep.data.action 
  ##
  if(missing(rep.data.action)) rep.data.action <- "none"
  if(!is.function(rep.data.action))
    rep.data.action <- match.arg(rep.data.action, choices = c("none", "first")) 
  if(missing(rep.covar.action)) rep.covar.action <- rep.data.action
  if(!is.function(rep.covar.action))
    rep.covar.action <- match.arg(rep.covar.action, choices = c("none", "first")) 
  ## legacy fallback: dist() lived in package mva before it was merged
  ## into stats
  if(! "package:stats" %in% search()) require(mva)
  ## one factor level per distinct coordinate pair
  rep.lev <- as.character(paste("x",res$coords[,1],"y",res$coords[,2], sep=""))
  rep.dup <- duplicated(rep.lev)
  if(sum(rep.dup) > 0)
    cat(paste("as.geodata:", sum(rep.dup), "redundant locations found\n"))
  if(is.function(rep.data.action) || rep.data.action == "first"){
    ## collapse replicates: keep unique coords, estimate the
    ## measurement-error variance from the within-location scatter
    res$coords <- res$coords[!rep.dup,]
    measure.var.f <- function(x) return(summary(lm(x ~ as.factor(rep.lev)))$sigma^2)
    res$m.var <- drop(apply(as.matrix(res$data),2,measure.var.f))
    ## "first" keeps the first record; a function aggregates per location
    rep.action.f <- function(x, rep.action){ 
      if(!is.function(rep.action) && rep.action == "first")
        return(x[!rep.dup])
      else
        return((as.vector(by(x, rep.lev, rep.action))[unclass(factor(rep.lev))])[!rep.dup])
    }
    res$data <- drop(apply(as.matrix(res$data), 2, rep.action.f, rep.action=rep.data.action))
    if(!is.null(covar.col))
      res[[3]] <- drop(apply(res[[3]], 2, rep.action.f, rep.action=rep.covar.action))
    if(!is.null(res$realisations))
      res$realisations <- res$realisations[!rep.dup]
  }
  else{
    ## replicates kept: only warn when coincident locations exist
    ## within a realisation
    check.coincide <- function(x){sum(dist(x) < 1e-12) > 0}
    any.coincide <- lapply(split(as.data.frame(res$coords), realisations), check.coincide)
    any.coincide <- as.vector(unlist(any.coincide))
    if(sum(any.coincide) > 0)
      cat("WARNING: there are data at coincident locations, some of the geoR's functions will not work.\n")
  }
  ##
  if(!is.null(covar.col)){
    ## restore data.frame structure (apply/drop above may have
    ## collapsed it) and reattach the covariate names
    res[[3]] <- as.data.frame(res[[3]])
    names(res[[3]]) <- covar.names
  }
  class(res) <- "geodata"
  return(res)
}

"summary.geodata" <- function(object, ...)
{
  ## Summary method for geodata objects: ranges of the coordinates and
  ## of the inter-point distances, plus summaries of the data and,
  ## when present, of borders, offset variable and covariates.
  x <- object
  res <- list()
  res$coords.summary <- apply(x$coords, 2, range)
  rownames(res$coords.summary) <- c("min", "max")
  if(is.null(colnames(object$coords))) colnames(res$coords.summary) <- c("Coord.X", "Coord.Y")
  ## fix: dropped the obsolete `require(mva)` fallback -- dist() lives
  ## in stats, which is always attached in modern R
  res$distances.summary <- range(dist(x$coords))
  names(res$distances.summary) <- c("min", "max")  
  if(!is.null(x$borders)){
    res$borders.summary <- apply(x$borders, 2, range)
    rownames(res$borders.summary) <- c("min", "max")
  }
  res$data.summary <- drop(apply(as.matrix(x$data), 2, summary))
  if(!is.null(x$units.m))
    res$units.m.summary <- drop(apply(as.matrix(x$units.m), 2, summary))
  if(!is.null(x$covariate))
    res$covariate.summary <- summary(x$covariate)
  return(res)
}

"print.summary.geodata" <- function(x, ...)
{
  ## Pretty-printer for summary.geodata() output: each available
  ## component is printed under a labelled header. Optional sections
  ## are skipped when absent.
  show.section <- function(title, value){
    cat(title, "\n", sep = "")
    print(value)
  }
  show.section("Coordinates summary", x$coords.summary)
  if(!is.null(x$borders.summary))
    show.section("\nBorders summary", x$borders.summary)
  show.section("\nData summary", x$data.summary)
  if(!is.null(x$units.m.summary))
    show.section("\nOffset variable summary", x$units.m.summary)
  if(!is.null(x$covariate.summary))
    show.section("\nCovariates summary", x$covariate.summary)
  return(invisible())
}

"points.geodata" <-
  function (x, coords = x$coords, data = x$data, 
            data.col = 1, borders = NULL,
            pt.divide = c("data.proportional",
              "rank.proportional", "quintiles",
              "quartiles", "deciles", "equal"),
            lambda=1, trend="cte", weights.divide=NULL,
            cex.min, cex.max, pch.seq, col.seq, add.to.plot = FALSE,
            x.leg, y.leg, dig.leg = 2, 
            round.quantiles = FALSE, graph.pars = FALSE, ...) 
{
  ## Plots data locations with point sizes/colours encoding the data
  ## values, grouped according to 'pt.divide' (named rule or a numeric
  ## vector of user break points).  Optionally removes a trend and/or
  ## Box-Cox transforms the data first.  Returns the graphical
  ## parameters used when graph.pars = TRUE, invisible(NULL) otherwise.
  if(missing(x)) x <- list(coords = coords, data = data)
  ## This is for compatibility with the previously used argument pt.sizes
  if(!is.null(list(...)$pt.s)) pt.divide <- list(...)$pt.s
  #
  if(!is.numeric(pt.divide)) pt.divide <- match.arg(pt.divide)
  if(!is.vector(data)) data <- (as.data.frame(data))[,data.col]
  if(nrow(coords) != length(data))
    stop("coords and data have incompatible sizes")
  if (!is.null(weights.divide)) {
    if (length(weights.divide) != length(data)) 
      stop("length of weights.divide must be equals to the length of data")
    data <- data/weights.divide
  }
  ##
  ## data transformation (Box-Cox)
  ##
  if (lambda != 1) data <- BCtransform(data, lambda)$data
  ##
  ## trend removal: the plotted values become the residuals of a linear
  ## model on the trend matrix
  ##
  xmat <- unclass(trend.spatial(trend = trend, geodata = x))
  if (nrow(xmat) != nrow(coords)) 
    stop("coords and trend have incompatible sizes")
  if (trend != "cte") {
    data <- lm(data ~ xmat + 0)$residuals
    names(data) <- NULL
  }
  ##
  ## forces lazy evaluation of 'borders' while the elements of 'x' are
  ## on the search path, so that expressions referring to components of
  ## 'x' (e.g. its own borders) resolve correctly
  attach(x)
  eval(borders)
  detach(x)
  if (!add.to.plot) {
    if(is.null(borders))
      coords.lims <- set.coords.lims(coords=coords)
    else{
      if(ncol(borders) != 2)
        stop("argument borders must be an object with 2 columns with the XY coordinates of the borders of the area")
      coords.lims <- set.coords.lims(coords=rbind(as.matrix(coords), as.matrix(borders)))
    }
    par(pty = "s")
    toplot <- apply(coords, 2, range)
    colnames(toplot) <- c("X Coord", "Y Coord")
    plot(toplot, type = "n",
         xlim = coords.lims[,1], ylim = coords.lims[, 2], ...)
  }
  if(!is.null(borders))
    polygon(borders)
  if (missing(cex.min)) cex.min <- 0.5
  if (missing(cex.max)) cex.max <- 1.5
  graph.list <- list()
  ## quantile-type groupings (also covers user-supplied numeric breaks)
  if(is.numeric(pt.divide) || all(pt.divide == "quintiles") || all(pt.divide == "quartiles") || all(pt.divide == "deciles")) {
    if (all(pt.divide == "quintiles")) {
      n.quant <- 5
      if (missing(col.seq)) 
        col.seq <- c("blue", "green", "yellow", "orange3", "red2")
    }
    if (all(pt.divide == "quartiles")) {
      n.quant <- 4
      if (missing(col.seq)) 
        col.seq <- c("blue", "green", "yellow", "red") 
    }
    if (all(pt.divide == "deciles")) {
      n.quant <- 10
      if (missing(col.seq)) 
        col.seq <- rainbow(13)[10:1]
    }
    if(is.numeric(pt.divide)){
      ## BUG FIX: the original tested length(pt.divide <= length(data)),
      ## i.e. the length of a logical vector, which is always >= 1, so
      ## the validation below could never trigger
      if(length(pt.divide) <= length(data)){
        data.quantile <- pt.divide
        n.quant <- length(pt.divide) - 1
      }
      else
        stop("length of pt.divide cannot be greater than length of the data")
    }
    else
      data.quantile <- quantile(data, probs = seq(0, 1, by = (1/n.quant)))
    if(!missing(col.seq) && all(col.seq == "gray")) col.seq <- gray(seq(1,0, l=n.quant))
    if (missing(pch.seq)) pch.seq <- rep(21, n.quant)
    cex.pt <- seq(cex.min, cex.max, l = n.quant)
    if (round.quantiles == TRUE) {
      data.quantile[1] <- floor(data.quantile[1])
      data.quantile[n.quant + 1] <- ceiling(data.quantile[n.quant + 1])
      data.quantile <- round(data.quantile)
    }
    graph.list$quantiles <- data.quantile
    graph.list$cex <- cex.pt
    graph.list$col <- col.seq
    graph.list$pch <- pch.seq
    ## full argument name (was the partially-matched 'include.l')
    graph.list$data.group <- cut(data, breaks=data.quantile, include.lowest=TRUE)
    if (add.to.plot) 
      points(coords, pch = pch.seq, cex = cex.pt[as.numeric(graph.list$data.group)], bg = col.seq[as.numeric(graph.list$data.group)], ...)
    else
      points(coords, pch = pch.seq, cex = cex.pt[as.numeric(graph.list$data.group)], bg = col.seq[as.numeric(graph.list$data.group)])
    ##
    ## Adding legend: one "a <= y < b" expression per quantile class
    ##
    if(!missing(x.leg) && !missing(y.leg)){
      textleg <- character()
      for (i in 1:(length(graph.list$quantiles)-1))
        textleg <- c(textleg, substitute(a <= y < b, list(a=round(unname(graph.list$quantiles)[i], dig=dig.leg), b=round(unname(graph.list$quantiles[i+1]), dig=dig.leg))))
      legend(x=x.leg, y=y.leg,textleg, pt.bg=graph.list$col, col=graph.list$col, pch=graph.list$pch)
    }
  }
  else {
    ## size-proportional displays: points are drawn in increasing data
    ## order so larger symbols overplot smaller ones
    n <- length(data)
    if (missing(pch.seq)) pch.seq <- 21
    if (missing(col.seq)) col.seq <- 0
    else if(all(col.seq == "gray")) col.seq <- gray(seq(1,0, l=n))
    coords.order <- coords[order(data), ]
    data.order <- data[order(data)]
    if (pt.divide == "rank.proportional") {
      ## symbol size grows linearly with the rank of the datum
      data.quantile <- range(data.order)
      size <- seq(cex.min, cex.max, l = n)
      graph.list$cex <- range(size)
      graph.list$pch <- unique(range(pch.seq))
      graph.list$col <- col.seq
      if (length(col.seq) == 1) col.seq <- rep(col.seq, n)
      else col.seq <- round(seq(1,length(col.seq),length=n))
      for (i in 1:n) {
        if (add.to.plot) 
          points(coords.order[i, , drop = FALSE], cex = size[i], 
                 pch = pch.seq, bg = col.seq[i], ...)
        else points(coords.order[i, , drop = FALSE], 
                    cex = size[i], pch = pch.seq, bg = col.seq[i])
      }
    }
    if (pt.divide == "data.proportional") {
      ## symbol size grows linearly with the data value itself
      r.y <- range(data.order)
      size <- cex.min + ((data.order - r.y[1]) * (cex.max - 
                                                  cex.min))/(r.y[2] - r.y[1])
      graph.list$cex <- c(cex.min, cex.max)
      graph.list$pch <- unique(range(pch.seq))
      graph.list$col <- col.seq
      if (length(col.seq) == 1) col.seq <- rep(col.seq, n)
      else col.seq <- round(seq(1,length(col.seq),length=n))
      for (i in 1:n) {
        if (add.to.plot) 
          points(coords.order[i, , drop = FALSE], cex = size[i], 
                 pch = pch.seq, bg = col.seq[i], ...)
        else points(coords.order[i, , drop = FALSE], 
                    cex = size[i], pch = pch.seq, bg = col.seq[i])
      }
    }
    if (pt.divide == "equal") {
      if (add.to.plot) 
        points(coords, pch = pch.seq, bg = col.seq, cex = cex.max, ...)
      else points(coords, pch = pch.seq, bg = col.seq, cex = cex.max)
    }
    if(!missing(x.leg) && !missing(y.leg)) warning(paste('arguments x.leg and y.leg are ignored when pt.divide = ',pt.divide,'\n'))
  }
  if (graph.pars == TRUE) return(graph.list)
  else return(invisible())
}

plot.geodata <- function (x, coords = x$coords, data = x$data, borders = NULL, 
    trend = "cte", lambda = 1, col.data = 1, weights.divide = NULL, 
    lowess = FALSE, scatter3d = FALSE, ...) 
{
  ## 2x2 exploratory display of a geodata object:
  ##   (1) map of the data locations coded by data quartile,
  ##   (2) data vs Y coordinate, (3) data vs X coordinate,
  ##   (4) histogram of the data (or a 3-d scatter when scatter3d=TRUE).
  ## Optionally divides by an offset, Box-Cox transforms and/or removes
  ## a spatial trend before plotting; always returns invisible(NULL).
  if(missing(x)) x <- list(coords=coords, data = data)
  ## save and restore the graphics state; the non-R branch is
  ## presumably for S-PLUS compatibility -- TODO confirm
  if (is.R()) par.ori <- par(no.readonly = TRUE)
  else par.ori <- par()
  on.exit(par(par.ori))
  coords <- as.matrix(coords)
  data <- as.matrix(data)
  ## select one column of (possibly multivariate) data
  data <- data[, col.data]
  ## forces lazy evaluation of 'borders' while the elements of 'x'
  ## are on the search path, so it may refer to components of 'x'
  attach(x)
  eval(borders)
  detach(x)
  if (!is.null(weights.divide)) {
    if (length(weights.divide) != length(data)) 
      stop("length of weights.divide must be equals to the length of data")
    data <- data/weights.divide
  }
  ## Box-Cox transformation, done inline (log when lambda == 0)
  if (lambda != 1) {
    if (lambda == 0) 
      data <- log(data)
    else data <- ((data^lambda) - 1)/lambda
  }
  ## trend removal: plots show residuals of a linear model on the
  ## trend matrix when a non-constant trend is requested
  xmat <- unclass(trend.spatial(trend = trend, geodata = x))
  if (nrow(xmat) != nrow(coords)) 
    stop("coords and trend have incompatible sizes")
  if (trend != "cte") {
    data <- lm(data ~ xmat + 0)$residuals
    names(data) <- NULL
    data.lab <- "residuals"
  }
  else data.lab <- "data"
  par(mfrow = c(2, 2), mar = c(4, 4, 0, 0.5), mgp=c(2,.8,0))
  data.quantile <- quantile(data)
  ## plotting limits: include the borders, when provided
  if (is.null(borders)) 
    coords.lims <- set.coords.lims(coords = coords)
  else {
    if (ncol(borders) != 2) 
      stop("argument \"borders\" must be a 2 columns object with coordinates of the borders of the study area")
    coords.lims <- set.coords.lims(coords = rbind(as.matrix(coords), as.matrix(borders)))
  }
  ## panel 1: locations, symbol/colour coded by data quartile
  par(pty = "s")
  plot(coords, xlab = "X Coord", ylab = "Y Coord ", type = "n", 
       xlim = coords.lims[, 1], ylim = coords.lims[, 2])
  if (!is.null(borders)) polygon(borders)
  if (is.R()) {
    ## unique() guards against repeated quantiles (ties in the data)
    data.breaks <- unique(quantile(data))
    n.breaks <- length(data.breaks)
    data.cut <- cut(data, breaks = data.breaks, include.l = TRUE, 
                    labels = FALSE)
    points(coords, pch = (1:4)[data.cut], col = c("blue", 
                                            "green", "yellow2", "red")[data.cut])
  }
  else {
    ## non-R (S-PLUS?) fallback: manual quartile classification
    points(coords[(data <= data.quantile[2]), ], pch = 1, 
           cex = 0.6, col = 2)
    points(coords[((data > data.quantile[2]) & (data <= data.quantile[3])), 
                  ], pch = 2, cex = 1.4, col = 4)
    points(coords[((data > data.quantile[3]) & (data <= data.quantile[4])), 
                  ], pch = 3, cex = 1.7, col = 7)
    points(coords[(data > data.quantile[4]), ], pch = 4, 
           cex = 2, col = 8)
  }
  ## panel 2: data against the Y coordinate (optionally with a lowess
  ## curve; note the curve is drawn with axes swapped)
  plot(data, coords[, 2], ylab = "Coord Y", xlab = data.lab, cex = 1, ylim = coords.lims[, 2])
  if(lowess){
    foo <- lowess(data ~ coords[,2])
    lines(foo[[2]], foo[[1]])
  }
  if (!is.R()) 
    par(mar = c(5, 5, 1, 0.5))
  ## panel 3: data against the X coordinate
  plot(coords[, 1], data, xlab = "Coord X", ylab = data.lab, cex = 1, xlim = coords.lims[, 1], )
  if(lowess) lines(lowess(data ~ coords[,1]))
  par(pty = "m")
  if (is.R()) par(mar = c(4, 4, 1, 1))
  else par(mar = c(0, 1, 0, 0.5))
  ## panel 4: 3-d scatter (if scatterplot3d is available) or histogram
  if (scatter3d) {
    if (!require(scatterplot3d)) {
      cat("plot.geodata: the argument scatter3d=TRUE requires the package \"scatterplot3d\" \n              will plot an histogram instead")
      hist(data, xlab= data.lab)
    }
    else scatterplot3d(x = coords[, 1], y = coords[, 2], 
                       z = data, box = FALSE, type = "h", pch = 16, xlab = "Coord X", 
                       ylab = "Coord Y", ...)
  }
  ##  else xyzplot(coords = coords, data = data, ...)
  else hist(data, main="", xlab= data.lab, ...)
  return(invisible())
}
"krige.conv" <-
  function (geodata, coords=geodata$coords, data=geodata$data,
            locations, borders = NULL, krige, output)
{
  ## Spatial prediction by conventional ("plug-in") kriging.
  ## 'krige' and 'output' carry the model and the output options; each
  ## may be given as an object from krige.control()/output.control() or
  ## as a plain named list, in which case it is normalised through the
  ## corresponding control function.  Returns an object of class
  ## "kriging" with predicted values, kriging variances and, when
  ## requested, conditional simulations.
  if(missing(geodata))
    geodata <- list(coords = coords, data = data)
  call.fc <- match.call()
  ## frame of this call, used later as the evaluation environment for
  ## the conditional simulation routine
  base.env <- sys.frame(sys.nframe())
  ##
  ## reading input
  ##
  if(missing(krige))
    krige <- krige.control()
  else{
    ## inherits() replaces the old class()=="krige.geoR" test, which is
    ## invalid as an if() condition when the object has several classes
    if(!inherits(krige, "krige.geoR")){
      if(!is.list(krige))
        stop("krige.conv: the argument krige only takes a list or an output of the function krige.control")
      else{
        krige.names <- c("type.krige","trend.d","trend.l","obj.model",
                         "beta","cov.model", "cov.pars",
                         "kappa","nugget","micro.scale","dist.epsilon",
                         "lambda","aniso.pars")
        ## match the (possibly abbreviated) names given by the user
        krige.user <- krige
        krige <- list()
        if(length(krige.user) > 0){
          for(i in 1:length(krige.user)){
            n.match <- match.arg(names(krige.user)[i], krige.names)
            krige[[n.match]] <- krige.user[[i]]
          }
        }
        ## fill in the defaults of krige.control() for unset elements
        if(is.null(krige$type.krige)) krige$type.krige <- "ok"  
        if(is.null(krige$trend.d)) krige$trend.d <-  "cte"
        if(is.null(krige$trend.l)) krige$trend.l <-  "cte"
        if(is.null(krige$obj.model)) krige$obj.model <-  NULL
        if(is.null(krige$beta)) krige$beta <- NULL 
        if(is.null(krige$cov.model)) krige$cov.model <- "matern"  
        if(is.null(krige$cov.pars))
          stop("covariance parameters (sigmasq and phi) should be provided in cov.pars")
        if(is.null(krige$kappa)) krige$kappa <-  0.5
        if(is.null(krige$nugget)) krige$nugget <-  0
        if(is.null(krige$micro.scale)) krige$micro.scale <- 0  
        if(is.null(krige$dist.epsilon)) krige$dist.epsilon <-  1e-10
        if(is.null(krige$aniso.pars)) krige$aniso.pars <- NULL  
        if(is.null(krige$lambda)) krige$lambda <- 1 
        krige <- krige.control(type.krige = krige$type.krige,
                               trend.d = krige$trend.d,
                               trend.l = krige$trend.l,
                               obj.model = krige$obj.model,
                               beta = krige$beta,
                               cov.model = krige$cov.model,
                               cov.pars = krige$cov.pars,
                               kappa = krige$kappa,
                               nugget = krige$nugget,
                               micro.scale = krige$micro.scale,
                               dist.epsilon = krige$dist.epsilon, 
                               aniso.pars = krige$aniso.pars,
                               lambda = krige$lambda)
        
      }
    }
  }
  cov.model <- krige$cov.model
  kappa <- krige$kappa
  lambda <- krige$lambda
  beta <- krige$beta
  cov.pars <- krige$cov.pars
  nugget <- krige$nugget
  micro.scale <- krige$micro.scale
  aniso.pars <- krige$aniso.pars
  ##
  ## reading output options
  ##
  if(missing(output))
    output <- output.control()
  else{
    ## BUG FIX: the original tested class(krige) here instead of
    ## class(output), so a valid "output.geoR" object was needlessly
    ## (and sometimes wrongly) rebuilt; inherits() also avoids an
    ## invalid multi-element if() condition
    if(!inherits(output, "output.geoR")){
      if(!is.list(output))
        stop("krige.conv: the argument output can take only a list or an output of the function output.control")
      else{
        output.names <- c("n.posterior","n.predictive","moments","n.back.moments","simulations.predictive",
                          "mean.var","quantile","threshold","signal","messages.screen")
        output.user <- output
        output <- list()
        if(length(output.user) > 0){
          for(i in 1:length(output.user)){
            n.match <- match.arg(names(output.user)[i], output.names)
            output[[n.match]] <- output.user[[i]]
          }
        }
        ## fill in the defaults of output.control() for unset elements
        if(is.null(output$n.posterior)) output$n.posterior <- 1000 
        if(is.null(output$n.predictive)) output$n.predictive <- NULL
        if(is.null(output$moments)) output$moments <- TRUE
        if(is.null(output$n.back.moments)) output$n.back.moments <- 1000 
        if(is.null(output$simulations.predictive)){
          if(is.null(output$n.predictive)) output$simulations.predictive <- NULL
          else
            output$simulations.predictive <- ifelse(output$n.predictive > 0, TRUE, FALSE)
        }
        if(is.null(output$mean.var)) output$mean.var <- NULL
        if(is.null(output$quantile)) output$quantile <- NULL
        if(is.null(output$threshold)) output$threshold <- NULL
        if(is.null(output$signal)) output$signal <- NULL
        if(is.null(output$messages.screen)) output$messages.screen <- TRUE
        output <- output.control(n.posterior = output$n.posterior,
                                 n.predictive = output$n.predictive,
                                 moments = output$moments,
                                 n.back.moments = output$n.back.moments, 
                                 simulations.predictive = output$simulations.predictive,
                                 mean.var = output$mean.var,
                                 quantile = output$quantile,
                                 threshold = output$threshold,
                                 signal = output$signal,
                                 messages = output$messages.screen)
      }
    }
  }
  signal <- ifelse(is.null(output$signal), FALSE, output$signal)
  messages.screen <- output$messages.screen
  n.predictive <- output$n.predictive
  n.back.moments <- output$n.back.moments
  ##
  n.predictive <- ifelse(is.null(n.predictive), 0, n.predictive)
  simulations.predictive <- ifelse(is.null(output$simulations.predictive), FALSE, TRUE)
  ## NOTE(review): the value of output$keep.simulations is ignored here
  ## (any non-NULL value yields FALSE); looks like a bug, but
  ## keep.simulations is not used later in this function -- confirm
  keep.simulations <- ifelse(is.null(output$keep.simulations), TRUE, FALSE)
  mean.estimator <- output$mean.estimator
  if(is.null(mean.estimator) && simulations.predictive) mean.estimator <- TRUE
  quantile.estimator <- output$quantile.estimator
  probability.estimator <- output$probability.estimator
  if(!is.null(probability.estimator)){
    if(length(probability.estimator) > 1 &&
       length(probability.estimator) != nrow(locations))
      stop("krige.conv: probability.estimator must either have length 1, or have length = nrow(locations)\n")
  }
  if(simulations.predictive && n.predictive == 0) n.predictive <- 1000
  ##
  ## checking input
  ##
  if(krige$type.krige == "ok") beta.prior <- "flat"
  if(krige$type.krige == "sk") beta.prior <- "deg"
  ##
  ## 1-D data are handled by padding a zero second coordinate
  if(is.vector(coords)){
    coords <- cbind(coords, 0)
    warning("krige.conv: coordinates provided as a vector, assuming one spatial dimension")
  }
  coords <- as.matrix(coords)
  if(is.vector(locations)) {
    if(length(locations) == 2) {
      locations <- t(as.matrix(locations))
      if(messages.screen) 
        warning("krige.conv: assuming that there is only 1 prediction point")
    }
    else{
      warning("krige.conv: locations provided as a vector, assuming one spatial dimension")
      locations <- as.matrix(cbind(locations, 0))
    }
  }
  else locations <- as.matrix(locations)
  ##
  ## selecting locations inside the borders 
  ##
  if(!is.null(borders)){
    locations <- locations.inside(locations, borders)
    if(nrow(locations) == 0)
      stop("\nkrige.conv: there are no prediction locations inside the borders")
    if(messages.screen)
      cat("krige.conv: results will be returned only for prediction locations inside the borders\n")
  }
  dimnames(coords) <- list(NULL, NULL)
  dimnames(locations) <- list(NULL, NULL)
  ##
  ## Checking for 1D prediction 
  ##
  if(length(unique(locations[,1])) == 1 || length(unique(locations[,2])) == 1)
    krige1d <- TRUE
  else krige1d <- FALSE
  ##
  ## building the trend matrix
  ##
  if(messages.screen){
    if(is.numeric(krige$trend.d))
      cat("krige.conv: model with covariates matrix provided by the user")
    else
      cat(switch(as.character(krige$trend.d)[1],
                 "cte" = "krige.conv: model with constant mean",
                 "1st" = "krige.conv: model with mean given by a 1st order polynomial on the coordinates",
                 "2nd" = "krige.conv: model with mean given by a 2nd order polynomial on the coordinates",
                 "krige.conv: model with mean defined by covariates provided by the user"))
    cat("\n")
  }
  trend.d <- unclass(trend.spatial(trend=krige$trend.d, geodata = geodata))
  if (nrow(trend.d) != nrow(coords)) 
      stop("coords and trend.d have incompatible sizes")
  beta.size <- ncol(trend.d)
  if(beta.prior == "deg")
    if(beta.size != length(beta))
      stop("size of mean vector is incompatible with trend specified") 
  trend.l <- unclass(trend.spatial(trend=krige$trend.l, geodata = list(coords = locations)))
  if (nrow(trend.l) != nrow(locations)) 
    stop("locations and trend.l have incompatible sizes")
  if(beta.size > 1)
    beta.names <- paste("beta", (0:(beta.size-1)), sep="")
  else beta.names <- "beta"
  ##
  ## Anisotropy correction (this should be placed AFTER trend.d/trend.l
  ##
  if(!is.null(aniso.pars)) {
    ## only correct when the anisotropy ratio is not (numerically) 1
    if(abs(aniso.pars[2] - 1) > 0.0001){
      if(messages.screen)
        cat("krige.conv: anisotropy correction performed\n")
      coords <- coords.aniso(coords = coords, aniso.pars = aniso.pars)
      locations <- coords.aniso(coords = locations, aniso.pars = aniso.pars)
    }
  }
  ##
  ## Box-Cox transformation
  ##
  if(abs(lambda - 1) > 0.001) {
    if(messages.screen) cat("krige.conv: performing the Box-Cox data transformation\n")
    data <- BCtransform(data, lambda = lambda)$data
  }
  ## 
  ## setting covariance parameters; computations below use a unit-sill
  ## correlation ('cpars') and rescale by 'sill.partial' at the end
  ##
  if(is.vector(cov.pars)) {
    sigmasq <- cov.pars[1]
    phi <- cov.pars[2]
    cpars <- c(1, phi)
  }
  else {
    stop("current version of krige.conv does not accept nested covariance models\n") 
    ##    sigmasq <- cov.pars[, 1]
    ##    phi <- cov.pars[, 2]
    ##    cpars <- cbind(1, phi)
  }
  ##  sill.partial <- micro.scale + sum(sigmasq)
  sill.partial <- sum(sigmasq)
  tausq.rel <- nugget/sum(sigmasq)
  tausq.rel.micro <- micro.scale/sum(sigmasq)
  n <- length(data)
  ni <- nrow(trend.l)
  ##
  ## starting kriging calculations
  ##
  kc <- list()
  ## inverse data covariance matrix, stored as lower triangle + diagonal
  invcov <- varcov.spatial(coords = coords, cov.model = cov.model, 
                           kappa = kappa, nugget = tausq.rel,
                           cov.pars = cpars, inv = TRUE,
                           only.inv.lower.diag = TRUE)
  ## (t(T) V^{-1} T)^{-1}, the GLS information for the trend coefficients
  ittivtt <- solve.geoR(bilinearformXAY(X = as.vector(trend.d),
                                        lowerA = as.vector(invcov$lower.inverse),
                                        diagA = as.vector(invcov$diag.inverse), 
                                        Y = as.vector(trend.d)))
  if(beta.prior == "flat"){
    ## GLS estimate of beta (ordinary kriging)
    beta.flat <- drop(ittivtt %*% bilinearformXAY(X = as.vector(trend.d),
                                                  lowerA = as.vector(invcov$lower.inverse),
                                                  diagA = as.vector(invcov$diag.inverse), 
                                                  Y = as.vector(data)))
  }
  ## distances between data points and prediction locations
  v0 <- loccoords(coords = coords, locations = locations)
  if(n.predictive > 0){
    ## checking if there are data points coincident with prediction locations
    loc.coincide <- apply(v0, 2, function(x, min.dist){any(x < min.dist)},
                          min.dist=krige$dist.epsilon)
    if(any(loc.coincide)) loc.coincide <- (1:ni)[loc.coincide]
    else loc.coincide <- NULL
    if(!is.null(loc.coincide)){
      temp.f <- function(x, data, dist.eps){return(data[x < dist.eps])}
      data.coincide <- apply(v0[, loc.coincide, drop=FALSE], 2, temp.f, data=data, dist.eps=krige$dist.epsilon)
    }
    else data.coincide <- NULL
  }
  else remove("locations")
  ## using nugget interpreted as microscale variation or measurement error
  nug.factor <- ifelse(signal, tausq.rel.micro, tausq.rel)
  ## covariances between data and prediction locations
  v0 <- ifelse(v0 < krige$dist.epsilon, 1+nug.factor,
               cov.spatial(obj = v0, cov.model = cov.model, 
                           kappa = kappa, cov.pars = cpars))
  tv0ivv0 <- diagquadraticformXAX(X = as.vector(v0),
                                  lowerA = invcov$lower.inverse,
                                  diagA = invcov$diag.inverse)
  ## first row of b: t(v0) V^{-1} data; remaining rows: t(v0) V^{-1} T
  b <- bilinearformXAY(X = as.vector(cbind(data,trend.d)),
                       lowerA = as.vector(invcov$lower.inverse),
                       diagA = as.vector(invcov$diag.inverse), 
                       Y = as.vector(v0))
  if(n.predictive == 0) remove("v0","invcov")
  tv0ivdata <- drop(b[1,])
  b <- t(trend.l) -  b[-1,, drop=FALSE]
  if(beta.prior == "deg") {
    ## simple kriging: mean known, no uncertainty on beta
    kc$predict <- tv0ivdata + drop(crossprod(b,beta))
    kc$krige.var <- sill.partial * drop(1+nug.factor - tv0ivv0)
    beta.est <- "Simple kriging performed (beta provided by user)"
  }
  if(beta.prior == "flat"){
    ## ordinary kriging: variance includes the extra term from
    ## estimating beta
    kc$predict <- tv0ivdata + drop(crossprod(b,beta.flat))
    if(beta.size == 1)
      bitb <- drop(b^2) * drop(ittivtt)
    else
      bitb <- diagquadraticformXAX(X = b,
                                   lowerA = (ittivtt[lower.tri(ittivtt)]),
                                   diagA = diag(ittivtt))
    kc$krige.var <- sill.partial * drop(1+nug.factor - tv0ivv0 + bitb)
    kc$beta.est <- beta.flat
    names(kc$beta.est) <- beta.names
    remove("bitb")
  }
  remove("tv0ivv0", "tv0ivdata")
  if(n.predictive == 0) remove("b")
  kc$distribution <- "normal"
  if(any(round(kc$krige.var, dig=12) < 0))
    cat("krige.conv: negative kriging variance found! Investigate why this is happening.\n")
  ##
  ## ########### Sampling from the resulting distribution ###
  ##
  if(n.predictive > 0) {
    ## record the RNG state so simulations can be reproduced
    seed <- get(".Random.seed", envir=.GlobalEnv, inherits = FALSE)
    if(messages.screen)
      cat("krige.conv: sampling from the predictive distribution (conditional simulations)\n")
    if(length(cov.pars) > 2){
      ## (dead branch at present: nested models are rejected above)
      reduce.var <-
        bilinearformXAY(X = as.vector(v0),
                        lowerA = as.vector(invcov$lower.inverse),
                        diagA = as.vector(invcov$diag.inverse), 
                        Y = as.vector(v0))
      remove("v0")
      attr(reduce.var, "dim") <- c(ni, ni)
      if(beta.prior == "flat"){
        if(beta.size == 1)
          ok.add.var <- outer(as.vector(b),as.vector(b)) * as.vector(ittivtt)
        else
          ok.add.var <-
            bilinearformXAY(X = as.vector(b),
                            lowerA = as.vector(ittivtt[lower.tri(ittivtt)]),
                            diagA = as.vector(diag(ittivtt)), 
                            Y = as.vector(b))
        reduce.var <- reduce.var + ok.add.var
      }
      varcov <- (varcov.spatial(coords = locations,
                                cov.model = cov.model,
                                cov.pars = cov.pars,
                                kappa = kappa, nugget = nugget)$varcov) -
                                  reduce.var
      remove("reduce.var")
      if(is.R()) gc(verbose=FALSE)
      kc$simulations <-  kc$predict +
        crossprod(chol(varcov), matrix(rnorm(ni * n.predictive),
                                       ncol=n.predictive))
    }
    else{
      ## at coincident locations (no nugget, or predicting the signal)
      ## the simulated value is the datum itself
      coincide.cond <- (((round(1e12 * nugget) == 0) | !signal) & (!is.null(loc.coincide)))
      if(coincide.cond){
        nloc <- ni - length(loc.coincide)
        ind.not.coincide <- -(loc.coincide) 
        v0 <- v0[,ind.not.coincide, drop=FALSE]
        b <- b[,ind.not.coincide, drop=FALSE]
      }
      else{
        nloc <- ni
        ind.not.coincide <- TRUE
      }
      Dval <- 1.0 + nug.factor
      if(beta.prior == "deg")
        vbetai <- matrix(0, ncol = beta.size, nrow = beta.size)
      else
        vbetai <- matrix(ittivtt, ncol = beta.size, nrow = beta.size)
      df.model <- ifelse(beta.prior == "deg", n, n-beta.size)
      kc$simulations <- matrix(NA, nrow=ni, ncol=n.predictive)
      if(nloc > 0)
        kc$simulations[ind.not.coincide,] <- 
          cond.sim(env.loc = base.env, env.iter = base.env,
                   loc.coincide = loc.coincide,
                   coincide.cond = coincide.cond, 
                   tmean = kc$predict[ind.not.coincide],
                   Rinv = invcov,
                   mod = list(beta.size = beta.size, nloc = nloc,
                     Nsims = n.predictive, n = n, Dval = Dval,
                     df.model = df.model, s2 = sill.partial,
                     cov.model.number = cor.number(cov.model),
                     phi = phi, kappa = kappa),
                   vbetai = vbetai,
                   fixed.sigmasq = TRUE)
      remove("v0", "b", "locations", "invcov")
      if(is.R()) gc(verbose = FALSE)
      if(coincide.cond)
        kc$simulations[loc.coincide,] <- rep(data.coincide, n.predictive)
    }
    ##
    ## Backtransforming simulations
    ##
    if(abs(lambda - 1) > 0.001){
      if(messages.screen)
        cat("krige.conv: back-transforming the simulated values\n")
      if(any(kc$simulations < -1/lambda))
        warning("Truncation in the back-transformation: there are simulated values less than (- 1/lambda) in the normal scale.")
      kc$simulations <-
        BCtransform(kc$simulations, lambda, inv=TRUE)$data
    }
    ##
    ## mean/quantiles/probabilities estimators from simulations
    ##
    if(!is.null(mean.estimator) || !is.null(quantile.estimator) ||
       !is.null(probability.estimator)){
      kc <- c(kc, statistics.predictive(simuls= kc$simulations,
                                        mean.var = mean.estimator,
                                        quantile = quantile.estimator,
                                        threshold = probability.estimator))
    }
    kc$.Random.seed <- seed
  }
  ##
  ## Backtransforming moments of the prediction distribution
  ## NOTE: this must be placed here, AFTER the simulations
  ##
  if(abs(lambda-1) > 0.001){
    if(messages.screen){
      cat("krige.conv: back-transforming the predicted mean and variance\n")
      if((abs(lambda) > 0.001) & (abs(lambda-0.5) > 0.001))
        cat("krige.conv: back-transforming by simulating from the predictive.\n           (run the function a few times and check stability of the results.\n")
    }
    kc[c("predict", "krige.var")] <-
      backtransform.moments(lambda = lambda,
                            mean = kc$predict,
                            variance = kc$krige.var,
                            distribution = "normal",
                            n.simul = n.back.moments)[c("mean", "variance")]
  }
  ##
  message <- "krige.conv: Kriging performed using global neighbourhood"
  if(messages.screen) cat(paste(message, "\n"))
  ##
  kc$message <-  message
  kc$call <- call.fc
  ##
  ## Setting classes and attributes 
  ##
  attr(kc, 'sp.dim') <- ifelse(krige1d, "1d", "2d")
  ## stored as unevaluated calls so plotting methods can re-evaluate
  attr(kc, "prediction.locations") <- call.fc$locations
  if(!is.null(call.fc$borders))
    attr(kc, "borders") <- call.fc$borders
  class(kc) <- "kriging"
  return(kc)
}

"krige.control" <-
  function(type.krige = "ok",
           trend.d = "cte", trend.l = "cte",
           obj.model = NULL,
           beta, cov.model, cov.pars, kappa,
           nugget, micro.scale = 0, dist.epsilon = 1e-10, 
           aniso.pars, lambda)
{
  ## Assembles and validates the list of model/algorithm options used
  ## by krige.conv().  type.krige is "ok" (ordinary) or "sk" (simple
  ## kriging); upper-case and dotted spellings are accepted and
  ## normalised.  Returns an object of class "krige.geoR".
  ok.names <- c("ok", "OK", "o.k.", "O.K.")
  sk.names <- c("sk", "SK", "s.k.", "S.K.")
  if(!(type.krige %in% c(ok.names, sk.names)))
    stop("krige.conv: wrong option in the argument type.krige. It should be \"sk\" or \"ok\"(if ordinary or simple kriging is to be performed)")
  type.krige <- ifelse(type.krige %in% ok.names, "ok", "sk")
  ##
  ## when a fitted model object is given, its components are used as
  ## defaults for anything not explicitly supplied
  if(!is.null(obj.model)){
    if(missing(beta)) beta <- obj.model$beta
    if(missing(cov.model)) cov.model <- obj.model$cov.model
    if(missing(cov.pars)) cov.pars <- obj.model$cov.pars
    if(missing(kappa)) kappa <- obj.model$kappa
    if(missing(nugget)) nugget <- obj.model$nugget
    if(missing(lambda)) lambda <- obj.model$lambda
    if(missing(aniso.pars)) aniso.pars <- obj.model$aniso.pars
  }
  else{
    if(missing(beta)) beta <- NULL
    if(missing(cov.model)) cov.model <- "matern"
    if(missing(cov.pars))
      stop("covariance parameters (sigmasq and phi) should be provided")
    if(missing(kappa)) kappa <- 0.5
    if(missing(nugget)) nugget <- 0
    if(missing(lambda)) lambda <- 1
    if(missing(aniso.pars)) aniso.pars <- NULL
  }
  ##
  ## simple kriging requires a known, numeric mean
  if(type.krige == "sk")
    if(is.null(beta) || !is.numeric(beta))
      stop("\nkrige.conv: argument beta must be provided in order to perform simple kriging")
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic",
                           "wave", "linear", "power",
                           "powered.exponential", "cauchy", "gneiting",
                           "gneiting.matern", "pure.nugget"))
  if(micro.scale > nugget)
    stop("krige.control: micro.scale must be in the interval [0, nugget]")
  ##
  if(!is.null(aniso.pars))
    if(length(aniso.pars) != 2 || !is.numeric(aniso.pars))
      stop("krige.control: anisotropy parameters must be provided as a numeric vector with two elements: the rotation angle (in radians) and the anisotropy ratio (a number greater than 1)")
  ##
  ## trend.d and trend.l must be specified the same way: both formulae,
  ## both "trend.spatial" matrices (with equal number of columns), or
  ## equal character/numeric specifications
  if(inherits(trend.d, "formula") || inherits(trend.l, "formula")){
    if((inherits(trend.d, "formula") == FALSE) || (inherits(trend.l, "formula") == FALSE))
      stop("krige.control: trend.d and trend.l must have similar specification")
  }
  else{
    ## inherits() replaces the old class()=="trend.spatial" test, which
    ## is invalid as an if() condition when objects carry several classes
    if(inherits(trend.d, "trend.spatial") && inherits(trend.l, "trend.spatial")){
      if(ncol(trend.d) != ncol(trend.l))
        ## message prefix fixed: this is krige.control, not krige.bayes
        stop("krige.control: trend.d and trend.l do not have the same number of columns")
    }
    else
      if(trend.d != trend.l)
        stop("krige.control: trend.l is different from trend.d")
  }
  ##
  res <- list(type.krige = type.krige,
              trend = trend.d, trend.d = trend.d, trend.l = trend.l, 
              beta = beta,
              cov.model = cov.model, 
              cov.pars = cov.pars, kappa = kappa,
              nugget = nugget,
              micro.scale = micro.scale, dist.epsilon = dist.epsilon, 
              aniso.pars = aniso.pars, lambda = lambda)
  class(res) <- "krige.geoR"
  return(res)
}

"image.kriging" <-
  function (x, locations, borders, values = x$predict,
            coords.data, xlim, ylim, x.leg, y.leg, ...) 
{
  ## Image plot of kriging predictions.
  ##
  ## x           : object of class "kriging" (output of krige.conv/ksline).
  ## locations   : prediction locations (2-column matrix); taken from the
  ##               "prediction.locations" attribute of x when missing.
  ## borders     : polygon delimiting the prediction region (optional).
  ## values      : values to be plotted; defaults to the predictions.
  ## coords.data : data coordinates to be added as points (optional).
  ## xlim, ylim  : plotting limits (optional).
  ## x.leg, y.leg: coordinates for a colour-scale legend (both needed).
  ## ...         : further arguments passed to image() and legend.krige().
  pty.prev <- par()$pty
  ldots <- match.call(expand.dots = FALSE)$...
  ## Remove "offset.leg" (only meaningful for legend.krige) before the
  ## remaining "..." arguments are forwarded to image().
  ## Bug fix: the original used ldots[[match(names(ldots), "offset.leg")]]
  ## with match() arguments swapped, which errored whenever more than one
  ## argument was passed through "...". Exact removal by name is a no-op
  ## when the element is absent.
  ldots[["offset.leg"]] <- NULL
  ## Default axis labels unless explicitly supplied by the caller.
  if(!"xlab" %in% names(ldots)) ldots$xlab <- "X Coord"
  if(!"ylab" %in% names(ldots)) ldots$ylab <- "Y Coord"
  if(missing(x)) x <- NULL
  ## NOTE(review): attach() is discouraged; kept so that expressions stored
  ## in the object's attributes evaluate as in the original implementation.
  attach(x)
  on.exit(detach(x))
  if(missing(locations)) locations <- eval(attr(x, "prediction.locations"))
  if(is.null(locations)) stop("prediction locations must be provided")
  if(ncol(locations) != 2)
    stop("locations must be a matrix or data-frame with two columns")
  ## borders.arg records what the *user* asked for (controls whether the
  ## border polygon is drawn); borders is what is actually used for masking.
  if(missing(borders)){
    if(!is.null(attr(x, "borders")))
      borders.arg <- borders <- eval(attr(x, "borders"))
    else borders.arg <- borders <- NULL
  }
  else{
    borders.arg <- borders
    if(is.null(borders)) borders <- eval(attr(x, "borders"))
  }
  if(missing(coords.data)) coords.data <- NULL
  if(missing(xlim)) xlim <- NULL
  if(missing(ylim)) ylim <- NULL
  if(missing(x.leg)) x.leg <- NULL
  if(missing(y.leg)) y.leg <- NULL
  ##
  ## Plotting 1D or 2D
  ##
  if(!is.null(attr(x, 'sp.dim')) && attr(x, 'sp.dim') == '1D')
    plot.1d(values, xlim = xlim, ylim = ylim,
            x1vals = unique(round(locations[,1], digits = 12)), ...)
  else{
    ## Reshape locations/values into the x, y, z components expected by
    ## image(), masking values outside the borders with NA.
    locations <- prepare.graph.kriging(locations = locations,
                                       borders = borders,
                                       borders.obj = eval(attr(x, "borders")),
                                       values = values,
                                       xlim = xlim, ylim = ylim) 
    par(pty = "s")
    do.call("image", c(list(x = locations$x, y = locations$y,
                            z = locations$values,
                            xlim = locations$coords.lims[,1],
                            ylim = locations$coords.lims[,2]),
                       ldots))
    ##
    ## adding points at data locations
    ##
    if(!is.null(coords.data)) points(coords.data, pch = 20)
    ##
    ## adding borders
    ##
    if(!is.null(borders.arg)) polygon(borders, lwd = 2)
    ##
    ## adding the legend (only when both legend coordinates are given)
    ##
    if(!is.null(x.leg) & !is.null(y.leg)){
      legend.krige(x.leg = x.leg, y.leg = y.leg,
                   values = locations$values[!is.na(locations$values)], ...)
    }
  }
  par(pty = pty.prev)
  return(invisible())
}

"contour.kriging" <-
  function (x, locations, borders, values = x$predict, coords.data,
            xlim, ylim, filled = FALSE, ...) 
{
  ## Contour plot of kriging predictions.
  ##
  ## x           : object of class "kriging" (output of krige.conv/ksline).
  ## locations   : prediction locations (2-column matrix); taken from the
  ##               "prediction.locations" attribute of x when missing.
  ## borders     : polygon delimiting the prediction region (optional).
  ## values      : values to be plotted; defaults to the predictions.
  ## coords.data : data coordinates to be added as points (optional).
  ## xlim, ylim  : plotting limits (optional).
  ## filled      : if TRUE use filled.contour() instead of contour().
  ## ...         : further arguments passed to the contour function.
  pty.prev <- par()$pty
  ldots <- match.call(expand.dots = FALSE)$...
  ## Remove "offset.leg" (only meaningful for legend.krige) before the
  ## remaining "..." arguments are forwarded to contour().
  ## Bug fix: the original used ldots[[match(names(ldots), "offset.leg")]]
  ## with match() arguments swapped, which errored whenever more than one
  ## argument was passed through "...". Exact removal by name is a no-op
  ## when the element is absent.
  ldots[["offset.leg"]] <- NULL
  ## Default axis labels unless explicitly supplied by the caller.
  if(!"xlab" %in% names(ldots)) ldots$xlab <- "X Coord"
  if(!"ylab" %in% names(ldots)) ldots$ylab <- "Y Coord"
  if(missing(x)) x <- NULL
  ## NOTE(review): attach() is discouraged; kept so that expressions stored
  ## in the object's attributes evaluate as in the original implementation.
  attach(x)
  on.exit(detach(x))
  if(missing(locations)) locations <- eval(attr(x, "prediction.locations"))
  if(is.null(locations)) stop("prediction locations must be provided")
  if(ncol(locations) != 2)
    stop("locations must be a matrix or data-frame with two columns")
  ## borders.arg records what the *user* asked for (controls whether the
  ## border polygon is drawn); borders is what is actually used for masking.
  if(missing(borders)){
    if(!is.null(attr(x, "borders"))) borders.arg <- borders <- eval(attr(x, "borders"))
    else borders.arg <- borders <- NULL
  }
  else{
    borders.arg <- borders
    if(is.null(borders)) borders <- eval(attr(x, "borders"))
  }
  if(missing(coords.data)) coords.data <- NULL
  if(missing(xlim)) xlim <- NULL
  if(missing(ylim)) ylim <- NULL
  ##
  ## Plotting 1D or 2D
  ##
  if(!is.null(attr(x, 'sp.dim')) && attr(x, 'sp.dim') == '1D')
    plot.1d(values, xlim = xlim, ylim = ylim,
            x1vals = unique(round(locations[,1], digits = 12)), ...)
  else{
    ## Reshape locations/values into the x, y, z components expected by
    ## contour(), masking values outside the borders with NA.
    locations <- prepare.graph.kriging(locations = locations,
                                       borders = borders,
                                       borders.obj = eval(attr(x, "borders")),
                                       values = values,
                                       xlim = xlim, ylim = ylim) 
    par(pty = "s")
    if(filled){
      ## Axes, data points and borders drawn inside the plotting region of
      ## filled.contour().
      temp.contour <- function(){
        axis(1)
        axis(2)
        if(!is.null(coords.data)) points(coords.data, pch = 20)
        if(!is.null(borders)) polygon(borders, lwd = 2)
      }
      ## NOTE(review): plot.axes = {temp.contour()} is evaluated when the
      ## list is built, not lazily inside filled.contour() — confirm this
      ## draws the annotation at the intended time.
      do.call("filled.contour", c(list(x = locations$x,
                                       y = locations$y,
                                       z = locations$values,
                                       xlim = locations$coords.lims[,1],
                                       ylim = locations$coords.lims[,2],
                                       plot.axes = {temp.contour()}),
                                  ldots))
    }
    else{
      do.call("contour", c(list(x = locations$x, y = locations$y,
                                z = locations$values,
                                xlim = locations$coords.lims[,1],
                                ylim = locations$coords.lims[,2]),
                           ldots))
      ##
      ## adding borders
      ##
      if(!is.null(borders.arg)) polygon(borders, lwd = 2)
    }
  }
  par(pty = pty.prev)
  return(invisible())
}

"persp.kriging" <-
  function(x, locations, borders, values = x$predict, ...)
{
  ## Perspective (3-D surface) plot of kriging predictions.
  ##
  ## x         : object of class "kriging" (output of krige.conv/ksline).
  ## locations : prediction locations (2-column matrix); taken from the
  ##             "prediction.locations" attribute of x when missing.
  ## borders   : polygon delimiting the prediction region (optional).
  ## values    : values to be plotted; defaults to the predictions.
  ## ...       : further arguments passed to persp() (or plot.1d for 1-D).
  if(missing(x)) x <- NULL
  ## NOTE(review): attach() is discouraged; kept so that expressions stored
  ## in the object's attributes evaluate as in the original implementation.
  attach(x)
  on.exit(detach(x))
  if(missing(locations)) locations <- eval(attr(x, "prediction.locations"))
  if(is.null(locations)) stop("prediction locations must be provided")
  if(ncol(locations) != 2)
    stop("locations must be a matrix or data-frame with two columns")
  if(missing(borders)) borders <- NULL
  ## Bug fix: xlim and ylim were referenced in the 1-D branch below but were
  ## never defined (neither arguments nor locals), causing an
  ## "object 'xlim' not found" error. Default them to NULL, matching
  ## image.kriging/contour.kriging.
  xlim <- NULL
  ylim <- NULL
  ##
  ## Plotting 1D or 2D
  ##
  if(!is.null(attr(x, 'sp.dim')) && attr(x, 'sp.dim') == '1D')
    plot.1d(values, xlim = xlim, ylim = ylim,
            x1vals = unique(round(locations[,1], digits = 12)), ...)
  else{
    ## Reshape locations/values into the x, y, z components expected by
    ## persp(), masking values outside the borders with NA.
    locations <- prepare.graph.kriging(locations = locations,
                                       borders = borders,
                                       borders.obj = eval(attr(x, "borders")),
                                       values = values)
    persp(locations$x, locations$y, locations$values, ...)
  }
  return(invisible())
}

"prepare.graph.kriging" <-
  function (locations, borders, borders.obj = NULL, values, xlim, ylim) 
{
  ## Reshapes prediction locations and values into the (x, y, z) components
  ## expected by image()/contour()/persp(), masking values outside the
  ## border polygon(s) with NA, and computes padded plotting limits.
  ##
  ## locations   : prediction locations (2-column matrix), assumed to form
  ##               a regular grid.
  ## borders     : border polygon requested in the plotting call (optional).
  ## borders.obj : border polygon stored with the kriging object (optional).
  ## values      : predicted values, in the original ordering of locations.
  ## xlim, ylim  : optional axis limits; default to the border ranges.
  ##
  ## Returns a list with elements x, y (grid coordinates), values (matrix
  ## with NA outside the borders) and coords.lims (2x2 matrix of limits).
  ##
  ## Sort row-major (by y, then x) so values map onto the grid by column.
  locations <- locations[order(locations[, 2], locations[, 1]), ]
  ## Unique grid coordinates; rounding guards against floating-point noise.
  xx <- as.numeric(levels(as.factor(round(locations[, 1], digits = 8))))
  yy <- as.numeric(levels(as.factor(round(locations[, 2], digits = 8))))
  ny <- length(yy)
  ## When values were computed only inside borders.obj, spread them over
  ## the full grid, leaving NA outside.
  values.loc <- rep(NA, nrow(locations))
  if(length(values.loc) == length(values)) values.loc <- values
  if(!is.null(borders.obj)){
    borders.obj <- as.matrix(as.data.frame(borders.obj))
    dimnames(borders.obj) <- list(NULL, NULL)
    ## splancs is an optional dependency used only for point-in-polygon.
    if(requireNamespace("splancs", quietly = TRUE))
      inout.vec <- as.vector(splancs::inout(pts = locations, poly = borders.obj))
    else
      stop("argument borders requires the package splancs - please install it")
    values.loc[inout.vec] <- values
    rm("inout.vec")
  }
  ## Apply the (possibly different) border requested in the plotting call,
  ## unless it is identical to borders.obj (already applied above).
  if (!is.null(borders)){
    borders <- as.matrix(as.data.frame(borders))
    dimnames(borders) <- list(NULL, NULL)
    if(!(!is.null(borders.obj) && identical(borders, borders.obj))){
      if(requireNamespace("splancs", quietly = TRUE))
        inout.vec <- as.vector(splancs::inout(pts = locations, poly = borders))
      else
        stop("argument borders requires the package splancs - please install it")
      if(length(values.loc[inout.vec]) == length(values))
        values.loc[inout.vec] <- values
      values.loc[!inout.vec] <- NA
      rm("inout.vec")
    }
  }
  ## Default the axis limits to the border ranges when not supplied.
  if (missing(xlim) || is.null(xlim))
    if(is.null(borders)) xlim <- NULL
    else xlim <- range(borders[,1]) 
  if (missing(ylim) || is.null(ylim))
    if(is.null(borders)) ylim <- NULL
    else ylim <- range(borders[,2])
  coords.lims <- set.coords.lims(coords = locations,
                                 xlim = xlim, 
                                 ylim = ylim)
  ## Expand the limits by 2.5% on each side so the image is not clipped.
  coords.lims[, 1] <- coords.lims[, 1] + c(-0.025, 0.025) * 
    diff(coords.lims[, 1])
  coords.lims[, 2] <- coords.lims[, 2] + c(-0.025, 0.025) * 
    diff(coords.lims[, 2])
  return(list(x = xx, y = yy,
              values = matrix(values.loc, ncol = ny), 
              coords.lims = coords.lims))
}

"legend.krige" <-
  function(x.leg, y.leg, values, scale.vals, vertical = FALSE,
           offset.leg = 1, ...)
{
  ## Adds a colour-scale legend to an existing image plot of kriging output.
  ##
  ## x.leg, y.leg : 2-element vectors with the x and y limits of the
  ##                rectangle occupied by the legend.
  ## values       : values used to build the colour scale.
  ## scale.vals   : values annotated along the legend; computed with
  ##                pretty() when missing.
  ## vertical     : if TRUE the legend is drawn vertically.
  ## offset.leg   : offset of the annotation text from the legend box.
  ## ...          : graphical arguments; "col", "zlim", "breaks" and "cex"
  ##                are recognised explicitly.
  values <- values[!is.na(values)]
  if(length(x.leg) != 2 | length(y.leg) != 2)
    stop("x.leg and y.leg require a vector with 2 elements")
  v.r <- range(values[is.finite(values)], na.rm = TRUE)
  ## Midpoints of nl equal-width cells spanning the interval xs.
  lags.x <- function(xs, nl){
    xs.r <- 0.5 * diff(xs/(nl-1))
    return(seq(xs[1]+xs.r, xs[2]-xs.r, l=nl))
  }
  leg.l <- list(...)
  ## Bug fix: the original accessed leg.l$br and leg.l$zl, silently relying
  ## on "$" partial matching to reach "breaks" and "zlim"; the full names
  ## are now used throughout.
  ## Number of colour cells: from breaks if given, else from col, else 12.
  if(is.null(leg.l$breaks))
    nc <- if(is.null(leg.l$col)) 12 else length(leg.l$col)
  else
    nc <- length(leg.l$breaks) - 1
  if(is.null(leg.l$col)) leg.l$col <- heat.colors(nc)
  if(is.null(leg.l$zlim)) leg.l$zlim <- c(v.r[1], v.r[2])
  ## Draw the colour strip with image(add = TRUE), vertically or
  ## horizontally.
  if(vertical){
    xy <- list(x=x.leg, y=lags.x(xs=y.leg, nl=nc))
    if(is.null(leg.l$breaks))
      image(x=xy$x, y=xy$y,
            z=matrix(seq(leg.l$zlim[1], leg.l$zlim[2], l=nc), nrow=1),
            add=TRUE, xaxs = "i", yaxs = "i", xlab="", ylab="",
            zlim = leg.l$zlim, col=leg.l$col)
    else
      image(x=xy$x, y=xy$y,
            z=matrix(seq(leg.l$zlim[1], leg.l$zlim[2], l=nc), nrow=1),
            add=TRUE, xaxs = "i", yaxs = "i", xlab="", ylab="",
            zlim = leg.l$zlim, col=leg.l$col, breaks = leg.l$breaks)
  }
  else{
    xy <- list(x=lags.x(xs=x.leg, nl=nc), y=y.leg)
    if(is.null(leg.l$breaks))
      image(x=xy$x, y=xy$y,
            z=matrix(seq(leg.l$zlim[1], leg.l$zlim[2], l=nc), ncol=1),
            add=TRUE, xaxs = "i", yaxs = "i", xlab="", ylab="",
            zlim = leg.l$zlim, col=leg.l$col)
    else
      image(x=xy$x, y=xy$y,
            z=matrix(seq(leg.l$zlim[1], leg.l$zlim[2], l=nc), ncol=1),
            add=TRUE, xaxs = "i", yaxs = "i", xlab="", ylab="",
            zlim = leg.l$zlim, col=leg.l$col, breaks = leg.l$breaks)
  }
  ## Outline the legend box.
  leg.poly <- rbind(c(x.leg[1], y.leg[1]), c(x.leg[2], y.leg[1]),
                    c(x.leg[2], y.leg[2]), c(x.leg[1], y.leg[2]),
                    c(x.leg[1], y.leg[1]))
  polygon(leg.poly)
  if(is.null(leg.l$cex)) leg.l$cex <- 0.8
  ## Annotation values, restricted to the interior of zlim.
  if(missing(scale.vals))
    scale.vals <- pretty(c(values, leg.l$zlim), n=5, min.n=4)
  scale.vals <- scale.vals[scale.vals > leg.l$zlim[1] &
                           scale.vals < leg.l$zlim[2]]
  ## Place the text labels beside (vertical) or above (horizontal) the box,
  ## linearly mapping scale.vals onto the strip extent.
  if(vertical){
    y.r <- range(lags.x(xs=y.leg, nl=nc))
    y.text <- y.r[1] + ((scale.vals - leg.l$zlim[1]) * diff(y.r))/diff(leg.l$zlim)
    text((max(x.leg)+ offset.leg * diff(x.leg)), y.text,
         lab=scale.vals, col=1, cex=leg.l$cex)
  }
  else{
    x.r <- range(lags.x(xs=x.leg, nl=nc))
    x.text <- x.r[1] + ((scale.vals - leg.l$zlim[1]) * diff(x.r))/diff(leg.l$zlim)
    text(x.text, (max(y.leg)+ offset.leg * (diff(y.leg)/2)), lab=scale.vals, col=1, cex=leg.l$cex)
  }
  return(invisible())
}

## Spatial prediction by kriging over a set of locations.
## Supports simple kriging with a known mean (numeric m0) or the data
## average (m0 = "av"), ordinary kriging (m0 = "ok"), kriging with a
## polynomial trend (m0 = "kt") and kriging with an external trend
## (m0 = "kte"), using either all data (nwin = "full") or a moving window
## of the nwin nearest data points (via ksline.aux.1).
## Returns (invisibly) an object of class "kriging" with predicted values,
## kriging variances and auxiliary summaries.
"ksline" <-
  function (geodata, coords=geodata$coords, data=geodata$data,
            locations, borders = NULL, 
            cov.model = "matern",
            cov.pars = stop("covariance parameters (sigmasq and phi) needed"), 
            kappa = 0.5, nugget = 0, micro.scale = 0,
            lambda = 1, m0 = "ok", nwin = "full", 
            n.samples.backtransform = 500, 
            trend = 1, d = 2, ktedata = NULL, ktelocations = NULL,
            aniso.pars = NULL,  signal = FALSE,  dist.epsilon = 1e-10,
            messages) 
{
  ## Decide whether progress messages are printed; the explicit argument
  ## overrides the global option "geoR.messages".
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  ##
  ## selecting locations inside the borders 
  ##
  if(!is.null(borders)){
    locations <- locations.inside(locations, borders)
    if(messages.screen)
      cat("ksline: results will be returned only for prediction locations inside the borders\n")
  }
  ## Legacy compatibility: very old R versions shipped these functions in
  ## package "mva" rather than "stats".
  if(! "package:stats" %in% search()) require(mva)
  call.fc <- match.call()
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave", "power",
                           "powered.exponential", "cauchy", "gneiting",
                           "gneiting.matern", "pure.nugget"))
  ## Box-Cox transform the data when lambda != 1 (|lambda| < 0.001 is
  ## treated as the log transformation).
  if(abs(lambda-1) > 0.001) {
    if(messages.screen)
      cat("ksline: Box-Cox data transformation performed.\n")
    if(abs(lambda) < 0.001)
      data <- log(data)
    else data <- ((data^lambda) - 1)/lambda
  }
  coords <- as.matrix(coords)
  locations <- as.matrix(locations)
  dimnames(coords) <- list(NULL, NULL)
  dimnames(locations) <- list(NULL, NULL)
  ## Covariates supplied without m0 = "kte": switch to external-trend
  ## kriging automatically.
  ## NOTE(review): "ste" in the message below is a typo for "set"
  ## (runtime string, deliberately left unchanged here).
  if(!is.null(ktedata) & !is.null(ktelocations) & m0 != "kte"){
    cat("ksline: external variable (covariate) provided. Kriging ste to KTE\n")
    m0 <- "kte"
  }
  ##
  ## anisotropy correction
  ##
  ## Transform both data and prediction coordinates into the isotropic
  ## space; the ".c" suffix denotes corrected coordinates.
  if(!is.null(aniso.pars)) {
    if(length(aniso.pars) != 2 | !is.numeric(aniso.pars))
      stop("anisotropy parameters must be provided as a numeric vector with two elements: the rotation angle (in radians) and the anisotropy ratio (a number greater than 1)")
    if(messages.screen)
      cat("ksline: anisotropy correction performed\n")
    coords.c <- coords.aniso(coords = coords, aniso.pars = aniso.pars)
    locations.c <- coords.aniso(coords = locations, aniso.pars = aniso.pars)
  }
  else {
    coords.c <- coords
    locations.c <- locations
  }
  ## 2. Preparing KTE matrices #####
  ##  
  if(m0 == "kte") {
    ktedata <- as.matrix(ktedata)
    ktelocations <- as.matrix(ktelocations)
    dimnames(ktedata) <- list(NULL, NULL)
    dimnames(ktelocations) <- list(NULL, NULL)
  }
  n <- length(data)
  ni <- length(locations[, 1])
  tausq <- nugget
  sigmasq <- cov.pars[1]
  phi <- cov.pars[2]
  ## ------------------------------------------------------------------
  ## Global-neighbourhood kriging: one inverse covariance matrix for the
  ## full data set, then a loop over prediction locations.
  ## ------------------------------------------------------------------
  if(nwin == "full") {
    est <- rep(0, ni)
    dif <- rep(0, ni)
    kvar <- rep(0, ni)
    sumw <- rep(0, ni)
    wofmean <- rep(0, ni)
    ## Inverse of the data covariance matrix (Cholesky-based).
    iv <- varcov.spatial(coords = coords.c, cov.model = cov.model, 
                         kappa = kappa, nugget = nugget, cov.pars = cov.pars, 
                         inv = TRUE, det = FALSE, func.inv = "cholesky")$inverse
    av <- mean(data)
    sd <- sqrt(var(data))
    one <- rep(1, n)
    tone <- t(one)
    toneiv <- crossprod(one, iv)
    ## Generalised least squares estimate of a constant mean (ml) and its
    ## standard deviation (kmsd).
    den <- solve(toneiv %*% one)
    ml <- den %*% toneiv %*% data
    kmsd <- sqrt(den)
    means <- c(average = av, stdev = sd, kmean = ml, kmsd = kmsd)
    ## Trend model matrices: xmat for the data, xmati for the prediction
    ## locations; beta holds the GLS trend coefficients.
    if(m0 != "kt") {
      mktlocations <- "Constant trend"
      beta <- ml
    }
    else {
      mktlocations <- rep(0, ni)
      ## First-degree polynomial trend (in 1 or 2 dimensions).
      if(m0 == "kt" & trend == 1) {
        if(d == 1) {
          xmat <- cbind(rep(1, n), coords[, 2])
          xmati <- cbind(rep(1, ni), locations[, 2])
        }
        else {
          xmat <- cbind(rep(1, n), coords[, 1], coords[, 2])
          xmati <- cbind(rep(1, ni), locations[, 1], locations[, 
                                                               2])
        }
        iviv <- solve(crossprod(xmat,iv) %*% xmat)
        txiv <- crossprod(xmat,iv)
        beta <- iviv %*% txiv %*% data
        mkt <- xmat %*% beta
      }
      ## Second-degree polynomial trend (in 1 or 2 dimensions).
      if(m0 == "kt" & trend == 2) {
        if(d == 1) {
          xmat <- cbind(rep(1, n), coords[, 2], (coords[, 2])^2)
          xmati <- cbind(rep(1, ni), locations[, 2], (locations[, 
                                                                2])^2)
        }
        else {
          xmat <- cbind(rep(1, n), coords[, 1], coords[, 2], 
                        (coords[, 1])^2, (coords[, 2])^2, coords[, 1] * coords[, 
                                                                               2])
          xmati <- cbind(rep(1, ni), locations[, 1], locations[, 
                                                               2], (locations[, 1])^2, (locations[, 2])^2, locations[, 
                                                                                                                     1] * locations[, 2])
        }
        iviv <- solve(crossprod(xmat,iv) %*% xmat)
        txiv <- crossprod(xmat,iv)
        beta <- iviv %*% txiv %*% data
        mkt <- xmat %*% beta
      }
    }
    ## External-trend matrices: covariates at data and prediction points.
    if(m0 != "kte") 
      mktelocations <- "No external trend"
    else {
      if(m0 == "kte") {
        mktelocations <- rep(0, ni)
        xmat <- cbind(rep(1, n), ktedata)
        xmati <- cbind(rep(1, ni), ktelocations)
        iviv <- solve(crossprod(xmat,iv) %*% xmat)
        txiv <- crossprod(xmat,iv)
        beta <- iviv %*% txiv %*% data
        mkte <- xmat %*% beta
      }
    }
    ## Loop over the ni prediction locations.
    for (i in 1:ni) {
      ## Progress reporting, thinned for larger problems.
      if(messages.screen) {
        if(ni < 11) 
          cat(paste("ksline: kriging location: ", i, "out of", 
                    ni, "\n"))
        else {
          if(ni < 101 & (i%%10 == 1)) 
            cat(paste("ksline: kriging location: ", i, "out of", 
                      ni, "\n"))
          if(ni > 100 & i%%100 == 1) 
            cat(paste("ksline: kriging location: ", i, "out of", 
                      ni, "\n"))
          if(i == ni) 
            cat(paste("ksline: kriging location: ", i, "out of", 
                      ni, "\n"))
        }
      }
      ## Covariances v0 between the data and the current prediction point;
      ## coincident points get micro.scale + sigmasq.
      coords0 <- cbind((coords.c[, 1] - locations.c[i, 1]), (coords.c[, 2] - locations.c[i, 
                                                                                 2]))
      dm0 <- sqrt(coords0[, 1]^2 + coords0[, 2]^2)
      v0 <- cov.spatial(obj = dm0, cov.model = cov.model, 
                        kappa = kappa, cov.pars = cov.pars)
      v0[dm0 < dist.epsilon] <- micro.scale + sigmasq
      tv0 <- t(v0)
      v0iv <- crossprod(v0, iv)
      v0ivv0 <- v0iv %*% v0
      ## skw: simple kriging weights; wofmean: weight given to the mean.
      skw <- crossprod(v0,iv)
      wofmean[i] <- 1 - sum(skw)
      ##
      ## 4.2.1 Simple kriging with known mean
      ##
      if(is.numeric(m0) == TRUE) {
        dif[i] <- skw %*% (data - m0)
        est[i] <- m0 + dif[i]
        if(signal == TRUE) 
          kvar[i] <- sigmasq - v0ivv0
        else kvar[i] <- tausq + sigmasq - v0ivv0
        sumw[i] <- sum(skw)
      }
      ##
      ## 4.2.2 Simple kriging with data average mean
      ##
      if(m0 == "av") {
        dif[i] <- skw %*% (data - av)
        est[i] <- av + dif[i]
        if(signal == TRUE) 
          kvar[i] <- sigmasq - v0ivv0
        else kvar[i] <- tausq + sigmasq - v0ivv0
        sumw[i] <- sum(((tone/n) + skw - ((skw %*% one %*% 
                                           tone)/n)))
      }
      ##
      ## 4.2.3 Ordinary kriging (or SK with G.L.S. mean)
      ##
      ## "redu" carries the extra uncertainty from estimating the mean.
      if(m0 == "ok") {
        dif[i] <- skw %*% (data - ml)
        est[i] <- ml + dif[i]
        redu <- as.vector(1 - toneiv %*% v0)
        if(signal == TRUE)
          kvar[i] <- sigmasq - v0ivv0 + (redu %*%
                                         den %*% redu)
        else kvar[i] <- tausq + sigmasq - v0ivv0 + (
                                                    redu %*% den %*% redu)
        sumw[i] <- sum((den %*% one + tv0 - v0iv %*% 
                        one %*% den %*% tone) %*% iv)
      }
      ##
      ## 4.2.4 Universal Kriging (or Kriging with trend model) 
      ##
      if(m0 == "kt") {
        dif[i] <- skw %*% (data - mkt)
        est[i] <- xmati[i,  ] %*% beta + dif[i]
        redu <- as.vector(xmati[i,  ]) - as.vector(
                                                   txiv %*% v0)
        if(signal == TRUE)
          kvar[i] <- sigmasq - v0ivv0 + (redu %*%
                                         iviv %*% redu)
        else kvar[i] <- tausq + sigmasq - v0ivv0 + (
                                                    redu %*% iviv %*% redu)
        sumw[i] <- sum(skw + xmati[i,  ] %*% iviv %*%
                       txiv - skw %*% xmat %*% iviv %*% txiv)
        mktlocations[i] <- xmati[i,  ] %*% beta
      }
      ##
      ## 4.2.5 Kriging with external trend 
      ##
      if(m0 == "kte") {
        dif[i] <- skw %*% (data - mkte)
        est[i] <- xmati[i,  ] %*% beta + dif[i]
        redu <- as.vector(xmati[i,  ]) - as.vector(
                                                   txiv %*% v0)
        ## NOTE(review): the signal branch below SUBTRACTS the trend
        ## uncertainty term, while every other predictor type (and the
        ## non-signal branch just below) ADDS it — this looks like a sign
        ## error; confirm against the kriging variance formula.
        if(signal == TRUE)
          kvar[i] <- sigmasq - v0ivv0 - (redu %*%
                                         iviv %*% redu)
        else kvar[i] <- tausq + sigmasq - v0ivv0 + (
                                                    redu %*% iviv %*% redu)
        sumw[i] <- sum(skw + xmati[i,  ] %*% iviv %*%
                       txiv - skw %*% xmat %*% iviv %*% txiv)
        mktelocations[i] <- xmati[i,  ] %*% beta
      }
      NULL
    }
    message <- "Kriging performed using global neighbourhood"
    if(messages.screen) 
      cat(paste(message,"\n"))
    results <- list(predict = est, krige.var = kvar, dif = dif, summary = means, 
                    ktrend = mktlocations, ktetrend = mktelocations, beta = beta, 
                    wofmean = wofmean)
  }
  ## ------------------------------------------------------------------
  ## Moving-window kriging: each location is predicted from its nwin
  ## nearest data points, delegating the per-location work to
  ## ksline.aux.1().
  ## ------------------------------------------------------------------
  else {
    nwin <- min(n, nwin)
    avwin <- rep(0, ni)
    sdwin <- rep(0, ni)
    mlwin <- rep(0, ni)
    kmsdwin <- rep(0, ni)
    estwin <- rep(0, ni)
    difwin <- rep(0, ni)
    kvarwin <- rep(0, ni)
    sumwwin <- rep(0, ni)
    wofmean <- rep(0, ni)
    if(m0 != "kt") 
      mkt <- "Constant position trend"
    else mkt <- rep(0, ni)
    if(m0 != "kte") 
      mkte <- "No external trend"
    else mkte <- rep(0, ni)
    if(m0 != "kt" & m0 != "kte") 
      betawin <- "No polynomial or external trend"
    ## Trend matrix at the prediction locations (xmati) and container for
    ## the per-window trend coefficients (betawin).
    if(m0 == "kt") {
      if(trend == 1) {
        if(d == 1) 
          xmati <- cbind(rep(1, ni), locations[, 2])
        else xmati <- cbind(rep(1, ni), locations[, 1], locations[, 
                                                                  2])
      }
      if(trend == 2) {
        if(d == 1) 
          xmati <- cbind(rep(1, ni), locations[, 2], locations[, 
                                                               2]^2)
        else xmati <- cbind(rep(1, ni), locations[, 1], locations[, 
                                                                  2], (locations[, 1])^2, (locations[, 2])^2, locations[, 1] * 
                            locations[, 2])
      }
      betawin <- matrix(0, nrow = (ncol(xmati) * ni), ncol = ncol(xmati))
    }
    if(m0 == "kte") {
      xmati <- cbind(rep(1, ni), ktelocations)
      if(is.vector(ktedata) == TRUE) 
        betawin <- matrix(0, nrow = (2 * ni), ncol = 2)
      else betawin <- matrix(0, nrow = ((ncol(ktedata) + 
                                         1) * ni), ncol = (ncol(ktedata) + 1))
    }
    ## NOTE(review): xmati is only created for m0 "kt"/"kte" above, yet it
    ## is indexed unconditionally in the call below — for m0 = "ok"/"av"/
    ## numeric this branch appears to fail with "object 'xmati' not found";
    ## confirm intended usage.
    for (i in 1:ni) {
      temp.win <- ksline.aux.1(coords = coords, coords.c = coords.c,
                               data = data, n = n,
                               locations = locations[i,  ],
                               locations.c = locations.c[i,  ],
                               cov.pars = cov.pars, nugget = nugget,
                               cov.model = cov.model, kappa = kappa, m0 = m0,
                               nwin = nwin, trend = trend, d = d, ktedata = 
                               ktedata, ktelocations = ktelocations,
                               micro.scale = micro.scale, 
                               location.number = i, xmati = xmati[i,  ],
                               mkte = NULL, mkt = NULL, betawin = NULL,
                               signal = signal, dist.epsilon = dist.epsilon)
      ## Collect the per-window results into the output vectors.
      avwin[i] <- temp.win$avwin
      sdwin[i] <- temp.win$sdwin
      mlwin[i] <- temp.win$mlwin
      kmsdwin[i] <- temp.win$kmsdwin
      estwin[i] <- temp.win$estwin
      difwin[i] <- temp.win$difwin
      kvarwin[i] <- temp.win$kvarwin
      sumwwin[i] <- temp.win$sumwwin
      wofmean[i] <- temp.win$wofmean
      if(m0 == "kt") 
        mkt[i] <- temp.win$mkt
      if(m0 == "kte") 
        mkte[i] <- temp.win$mkte
      if(m0 == "kt" | m0 == "kte") 
        betawin[i, ] <- temp.win$betawin
      ## Progress reporting, thinned for larger problems.
      if(messages.screen) {
        if(ni < 11) 
          cat(paste("ksline: kriging location: ", i, "out of", 
                    ni, "\n"))
        else {
          if(ni < 101 & (i%%10 == 1)) 
            cat(paste("ksline: kriging location: ", i, "out of", 
                      ni, "\n"))
          if(ni > 100 & i%%100 == 1) 
            cat(paste("ksline: kriging location: ", i, "out of", 
                      ni, "\n"))
          if(i == ni) 
            cat(paste("ksline: kriging location: ", i, "out of", 
                      ni, "\n"))
        }
      }
    }
    message <- "kriging performed in moving neighbourhood"
    if(messages.screen) 
      cat(paste(message,"\n"))
    results <- list(predict = estwin, krige.var = kvarwin, dif = difwin, 
                    avtrend = avwin, sd = sdwin, oktrend = mlwin, oksd = kmsdwin, 
                    ktrend = mkt, ktetrend = mkte, beta = betawin, sumw = sumwwin, 
                    wofmean = wofmean)
  }  
  ## Back-transform predictions to the original data scale when a Box-Cox
  ## transformation was applied above.
  if(abs(lambda - 1) > 0.001) {
    if(messages.screen)
      cat("Back-transforming the predicted mean and variance.\n")
    ## Log case: closed-form lognormal-kriging back-transformation.
    if(abs(lambda) < 0.001) {
      predict.transf <- results$predict
      results$predict <- exp(predict.transf) - 0.5 * results$krige.var
      results$krige.var <- (exp(2 * predict.transf - results$krige.var)) * (exp(results$krige.var) - 1)
    }
    ## Positive lambda: back-transform by Monte Carlo simulation from the
    ## normal predictive distribution (warnings from rnorm with sd = 0 or
    ## NA are temporarily silenced and those entries patched afterwards).
    if(lambda > 0.001) {
      if(messages.screen)
        cat("Back-transformation by simulating from the normal predictive distribution\n")
      ap.warn <- options()$warn
      options(warn = -1)
      temp.data <- matrix(rnorm(ni * n.samples.backtransform,
                                mean = results$predict,
                                sd = sqrt(results$krige.var)),
                          nrow = ni)
      options(warn = ap.warn)
      temp.data[(results$krige.var == 0),  ] <- results$predict[(results$krige.var == 0)]
      ## Truncate draws below the support of the inverse Box-Cox transform.
      temp.data[temp.data < -1/lambda] <- -1/lambda     
      temp.data <- ((temp.data * lambda) + 1)^(1/lambda)
      results$predict <- as.vector(apply(temp.data, 1, mean))
      results$krige.var <- as.vector(apply(temp.data, 1, var))
    }
    if(lambda < -0.001) {
      cat("Resulting distribution has no mean for lambda < 0 - back transformation not performed\n"
          )
    }
  }
  results$locations <- locations
  results$message <- message
  results$call <- call.fc
  class(results) <- c("kriging")
  return(invisible(results))
}

"ksline.aux.1" <-
  function (coords, coords.c, data, n, locations, locations.c, cov.pars,
            nugget, cov.model, kappa, 
            m0, nwin, trend, d, ktedata, ktelocations, mbased,
            micro.scale, location.number, 
            xmati, mkte, mkt, betawin, signal, dist.epsilon) 
{
  if(! "package:stats" %in% search()) require(mva)
  i <- location.number
  sigmasq <- cov.pars[1]
  phi <- cov.pars[2]
  tausq <- nugget
  coords0 <- cbind((coords.c[, 1] - locations.c[1]), (coords.c[, 2] -
                                                      locations.c[2]))
  dm0 <- sqrt(coords0[, 1]^2 + coords0[, 2]^2)
  coordswin <- coords[order(dm0)[1:nwin],  ]
  coordswin.c <- coords.c[order(dm0)[1:nwin],  ]
  datawin <- data[order(dm0)[1:nwin]]
  ivwin <- varcov.spatial(coords = coordswin.c, cov.model = cov.model,
                          kappa = kappa, nugget = nugget, cov.pars = cov.pars, inv = TRUE,
                          det = FALSE, func.inv = "cholesky", only.decomp = FALSE)$inverse
  avwin <- mean(datawin)
  sdwin <- sqrt(var(datawin))
  onewin <- rep(1, nwin)
  toneivwin <- crossprod(onewin, ivwin)
  denwin <- solve(toneivwin %*% onewin)
  mlwin <- denwin %*% toneivwin %*% datawin
  kmsdwin <- sqrt(denwin)
  coords0win <- cbind((coordswin[, 1] - locations[1]), (coordswin[, 2] -
                                                        locations[2]))
  coords0win.c <- cbind((coordswin.c[, 1] - locations.c[1]), (coordswin.c[
                                                                          , 2] - locations.c[2]))
  dm0win <- sqrt(coords0win.c[, 1]^2 + coords0win.c[, 2]^2)
  v0win <- cov.spatial(obj = dm0win, cov.model = cov.model, kappa = kappa,
                       cov.pars = cov.pars)
  v0win[dm0win < dist.epsilon] <- micro.scale + sigmasq
  skwwin <- crossprod(v0win, ivwin)
  wofmean <- 1 - sum(skwwin)
  if(m0 == "kt" & trend == 1) {
    if(d == 1)
      xmatwin <- cbind(rep(1, nwin), coordswin[, 2])
    else xmatwin <- cbind(rep(1, nwin), coordswin[, 1], coordswin[
                                                                  , 2])
    txivwin <- crossprod(xmatwin, ivwin)
    ivivwin <- solve(txivwin %*% xmatwin)
    betawin <- ivivwin %*% txivwin %*% datawin
    mktwin <- xmatwin %*% betawin
    mkt <- xmati %*% betawin
  }
  if(m0 == "kt" & trend == 2) {
    if(d == 1)
      xmatwin <- cbind(rep(1, nwin), coordswin[, 2], (
                                                      coordswin[, 2])^2)
    else xmatwin <- cbind(rep(1, nwin), coordswin[, 1], (coordswin[
                                                                   , 1])^2, coordswin[, 2], (coordswin[, 2])^
                          2, coordswin[, 1] * coordswin[, 2])
    xmatwin.cent <- xmatwin
    xmatwin.cent[, 2] <- xmatwin.cent[, 2] - mean(xmatwin[, 2])
    xmatwin.cent[, 3] <- xmatwin.cent[, 3] - mean(xmatwin[, 3])
    ivivwin <- solve(crossprod(xmatwin.cent, ivwin) %*% 
                     xmatwin.cent)
    txivwin <- crossprod(xmatwin.cent, ivwin)
    betawin <- ivivwin %*% txivwin %*% datawin
    betawin <- mean(datawin) - crossprod(betawin, c(0, mean(xmatwin[
                                                                    , 2]), mean(xmatwin[, 3])))
    mktwin <- xmatwin %*% betawin
    mkt <- xmati %*% betawin
  }
  if(m0 == "kte") {
    if(is.vector(ktedata))
      ktedatawin <- ktedata[order(dm0)[1:nwin]]
    else ktedatawin <- ktedata[order(dm0)[1:nwin],  ]
    xmatwin <- cbind(rep(1, nwin), ktedatawin)
    ivivwin <- solve(crossprod(xmatwin, ivwin) %*% xmatwin)
    txivwin <- crossprod(xmatwin, ivwin)
    betawin <- ivivwin %*% txivwin %*% datawin
    mktewin <- xmatwin %*% betawin
    mkte <- xmati %*% betawin
  }
  ##
  ##  Simple kriging with know mean
  ##
  if(is.numeric(m0)) {
    difwin <- skwwin %*% (data - m0)
    estwin <- m0win + difwin
    if(signal)
      kvarwin <- sigmasq - crossprod(v0win, ivwin) %*% v0win
    else kvarwin <- tausq + sigmasq - crossprod(v0win, ivwin) %*%
      v0win
    sumwwin <- sum(skwwin)
  }
  ##
  ## 4.2.2 Simple kriging with data average mean
  ##
  if(m0 == "av") {
    difwin <- skwwin %*% (datawin - avwin)
    estwin <- avwin + difwin
    if(signal)
      kvarwin <- sigmasq - crossprod(v0win, ivwin) %*% v0win
    else kvarwin <- tausq + sigmasq - crossprod(v0win, ivwin) %*%
      v0win
    sumwwin <- sum(((t(onewin)/nwin) + skwwin - ((skwwin %*% onewin %*%
                                                  t(onewin))/n)))
  }
  ##
  ## Ordinary kriging (or SK with G.L.S. mean)
  ##
  if(m0 == "ok") {
    difwin <- skwwin %*% (datawin - mlwin)
    estwin <- mlwin + difwin
    redu <- as.vector(1 - toneivwin %*% v0win)
    if(signal)
      kvarwin <- sigmasq - v0win %*% ivwin %*% v0win + (
                                                        redu %*% denwin %*% redu)
    else kvarwin <- tausq + sigmasq - v0win %*% ivwin %*% v0win +
      (redu %*% denwin %*% redu)
    sumwwin <- sum((denwin %*% onewin + t(v0win) - crossprod(v0win,
                                                             ivwin) %*% onewin %*% denwin %*% t(onewin)) %*% ivwin)
  }
  ##
  ## Universal Kriging (or Kriging with trend model) 
  ##
  if(m0 == "kt") {
    difwin <- skwwin %*% (datawin - mktwin)
    estwin <- mkt + difwin
    xmati <- as.vector(xmati)
    redu <- as.vector(xmati) - as.vector(txivwin %*% v0win)
    if(signal)
      kvarwin <- sigmasq - (v0win %*% ivwin %*% v0win) + (redu %*% ivivwin %*% redu)
    else kvarwin <- tausq + sigmasq - (v0win %*% ivwin %*% v0win) + (redu %*% ivivwin %*% redu)
    sumwwin <- sum(skwwin + xmati %*% ivivwin %*% txivwin - skwwin %*%
                   xmatwin %*% ivivwin %*% txivwin)
  }
  ##
  ## Kriging with external trend 
  ##
  if(m0 == "kte") {
    difwin <- skwwin %*% (datawin - mktewin)
    estwin <- mkte + difwin
    xmati <- as.vector(xmati)
    redu <- as.vector(xmati) - as.vector(txivwin %*% v0win)
    if(signal)
      kvarwin <- sigmasq - (v0win %*% ivwin %*% v0win) + (redu %*% ivivwin %*% redu)
    else kvarwin <- tausq + sigmasq - (v0win %*% ivwin %*% v0win) + (redu %*% ivivwin %*% redu)
    sumwwin <- sum(skwwin + xmati %*% ivivwin %*% txivwin - skwwin %*%
                   xmatwin %*% ivivwin %*% txivwin)
  }
  ##
  ##
  ##  
  results <- list(avwin = avwin, sdwin = sdwin, mlwin = mlwin, kmsdwin = 
                  kmsdwin, wofmean = wofmean, betawin = betawin, mkt = mkt, mkte
                  = mkte, difwin = difwin, estwin = estwin, kvarwin = kvarwin,
                  sumwwin = sumwwin)
  return(results)
}












##
## Functions for likelihood based inference in the geoR package
## ------------------------------------------------------------
##
## Includes:
##    - the main function for ML estimation - likfit()
##    - methods for class likGRF
##    - some other functions

"likfit" <-
  function (geodata, coords=geodata$coords, data=geodata$data,
            trend = "cte", ini.cov.pars,
            fix.nugget = FALSE, nugget = 0, 
            fix.kappa = TRUE, kappa = 0.5, 
            fix.lambda = TRUE, lambda = 1, 
            fix.psiA = TRUE, psiA = 0, 
            fix.psiR = TRUE, psiR = 1, 
            cov.model = "matern", realisations,
            method.lik = "ML",
            components = FALSE, nospatial = TRUE,
            limits = pars.limits(), 
            print.pars = FALSE, messages, ...) 
{
  if(is.R()) if(! "package:stats" %in% search()) require(mva)
  ##
  ## Checking input
  ##
  call.fc <- match.call()
  temp.list <- list()
  temp.list$print.pars <- print.pars
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  ##
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave", "power",
                           "powered.exponential", "cauchy", "gneiting",
                           "gneiting.matern", "pure.nugget"))
  if(cov.model == "power") stop("parameter estimation for power model is not implemented")
  if(cov.model == "gneiting.matern") stop("parameter estimation for gneiting.matern model is not yet implemented")
  if(fix.kappa & !is.null(kappa))
    if(cov.model == "matern" & kappa == 0.5)
      cov.model <- "exponential"
  temp.list$cov.model <- cov.model
  if(cov.model == "powered.exponential")
    if(limits$kappa["upper"] > 2) limits$kappa["upper"] <- 2
  ##
  ## Likelihood method
  ##
  if(method.lik == "REML" | method.lik == "reml" | method.lik == "rml")  method.lik <- "RML"
  if(method.lik == "ML" | method.lik == "ml") method.lik <- "ML"
  if(method.lik == "ML" & cov.model == "power")
    stop("\n\"power\" model can only be used with method.lik=\"RML\".\nBe sure that what you want is not \"powered.exponential\"")
  temp.list$method.lik <- method.lik
  ##
  ## setting coordinates, data and covariate matrices
  ##
  coords <- as.matrix(coords)
  data <- as.vector(data)
  n <- length(data)
  if((nrow(coords) != n) | (2*n) != length(coords))
    stop("\nnumber of locations does not match with number of data")
  if(missing(geodata)) xmat <- trend.spatial(trend=trend, geodata=list(coords = coords, data = data))
  else xmat <- unclass(trend.spatial(trend=trend, geodata=geodata))
  xmat.contrasts  <- attr(xmat,"contrasts")
  xmat <- unclass(xmat)
  if(nrow(xmat) != n)
    stop("trend matrix has dimension incompatible with the data")
  test.xmat <- solve.geoR(crossprod(xmat))
  test.xmat <- NULL
  beta.size <- temp.list$beta.size <- dim(xmat)[2]
  ##
  ## setting a factor to indicate different realisations
  ##
  if(missing(realisations))
    realisations <- as.factor(rep(1, n))
  else{
    if(!missing(geodata)){
      real.name <- deparse(substitute(realisations))
      if(!is.null(geodata[[real.name]]))
        realisations <- geodata$realisations
    }
    if(length(realisations) != n)
      stop("realisations must be a vector with the same length of the data")
    realisations <- as.factor(realisations)
  }
  temp.list$realisations <- realisations
  nrep <- temp.list$nrep <- length(levels(realisations))
  ind.rep <- split(1:n, realisations)
  vecdist <- function(x){as.vector(dist(x))}
  ##
  ## Initial values for parameters
  ##
  if(is.matrix(ini.cov.pars) | is.data.frame(ini.cov.pars)){
    ini.cov.pars <- as.matrix(ini.cov.pars)
    if(nrow(ini.cov.pars) == 1)
      ini.cov.pars <- as.vector(ini.cov.pars)
    else{
      if((cov.model != "pure.nugget") & (ncol(ini.cov.pars) != 2))
        stop("\nini.cov.pars must be a matrix or data.frame with 2 components: \ninitial values for sigmasq and phi")
    }
  }
  if(is.vector(ini.cov.pars)){
    if((cov.model != "pure.nugget") & (length(ini.cov.pars) != 2))
      stop("\nini.cov.pars must be a vector with 2 components: \ninitial values for sigmasq and phi")
  }
  ##
  ## Checking for multiple initial values for preliminar search of   
  ## best initial value
  ##
  if(is.matrix(ini.cov.pars) | (length(nugget) > 1) | (length(kappa) > 1) | (length(lambda) > 1) | (length(psiR) > 1) | (length(psiA) > 1)){
    if(messages.screen) cat("likfit: searching for best initial value ...")
    ini.temp <- matrix(ini.cov.pars, ncol=2)
    grid.ini <- as.matrix(expand.grid(sigmasq=unique(ini.temp[,1]), phi=unique(ini.temp[,2]), tausq=unique(nugget), kappa=unique(kappa), lambda=unique(lambda), psiR=unique(psiR), psiA=unique(psiA)))
    .likGRF.dists.vec <<- lapply(split(as.data.frame(coords), realisations), vecdist)
    temp.f <- function(parms, coords, data, temp.list)
      return(loglik.GRF(geodata = geodata,
                        coords = coords, data = as.vector(data),
                        cov.model=temp.list$cov.model,
                        cov.pars=parms[1:2],
                        nugget=parms["tausq"], kappa=parms["kappa"],
                        lambda=parms["lambda"], psiR=parms["psiR"],
                        psiA=parms["psiA"], trend= trend,
                        method.lik=temp.list$method.lik,
                        compute.dists=FALSE,
                        realisations = realisations))
    grid.lik <- apply(grid.ini, 1, temp.f, coords = coords,
                      data = data, temp.list = temp.list)
    grid.ini <- grid.ini[(grid.lik != Inf) & (grid.lik != -Inf) & !is.na(grid.lik) & !is.nan(grid.lik),, drop=FALSE] 
    grid.lik <- grid.lik[(grid.lik != Inf) & (grid.lik != -Inf) & !is.na(grid.lik) & !is.nan(grid.lik)] 
    ini.temp <- grid.ini[which(grid.lik == max(grid.lik)),, drop=FALSE]
    if(all(ini.temp[,"phi"] == 0)) ini.temp <- ini.temp[1,, drop=FALSE]
    rownames(ini.temp) <- "initial.value"
    if(messages.screen){
      cat(" selected values:\n")
      print(rbind(format(ini.temp, dig=2), status=ifelse(c(FALSE, FALSE, fix.nugget, fix.kappa, fix.lambda, fix.psiR, fix.psiA), "fix", "est")))
      cat(paste("likelihood value:", max(grid.lik), "\n"))
    }
    dimnames(ini.temp) <- NULL
    ini.cov.pars <- ini.temp[1:2]
    nugget <- ini.temp[3]
    kappa <- ini.temp[4]
    lambda <- ini.temp[5]
    psiR <- ini.temp[6]
    psiA <- ini.temp[7]
    grid.ini <- NULL
    if(is.R()) remove(".likGRF.dists.vec", pos=1)
    else remove(".likGRF.dists.vec", where=1)    
  }
  ##
  tausq <- nugget
  ##
  ## Box-Cox transformation for fixed lambda
  ##
  if(fix.lambda) {
    if(abs(lambda - 1) < 0.0001) {
      temp.list$log.jacobian <- 0
      temp.list$z <- as.vector(data)
    }
    else {
      if(any(data <= 0))
        stop("Transformation option not allowed when there are zeros or negative data")
      Jdata <- data^(lambda - 1)
      if(any(Jdata <= 0))
        temp.list$log.jacobian <- log(prod(Jdata))
      else temp.list$log.jacobian <- sum(log(Jdata))
      Jdata <- NULL
      if(abs(lambda) < 0.0001)
        temp.list$z <- log(data)
      else temp.list$z <- ((data^lambda) - 1)/lambda
    }
  }
  else{
    temp.list$z <- as.vector(data)
    temp.list$log.jacobian <- NULL
  }
  ##
  ## Coordinates transformation for fixed anisotropy parameters
  ##
  if(fix.psiR & fix.psiA){
    if(psiR != 1 | psiA != 0)
      coords <- coords.aniso(coords, aniso.pars=c(psiA, psiR))
    if(is.R())
      assign(".likGRF.dists.vec", lapply(split(as.data.frame(coords), realisations), vecdist), pos=1)
    else
      assign(".likGRF.dists.vec", lapply(split(as.data.frame(coords), realisations), vecdist), where=1)
    range.dist <- range(.likGRF.dists.vec)
    max.dist <- max(range.dist)
    min.dist <- min(range.dist)
  }
  ##
  ##
  ##
  ini <- ini.cov.pars[2]
  ##  fixed.pars <- NULL
  lower.optim <- c(limits$phi["lower"])
  upper.optim <- c(limits$phi["upper"])
  fixed.values <- list()
  if(fix.nugget) {
    ##    fixed.pars <- c(fixed.pars, 0)
    fixed.values$tausq <- nugget
  }
  else {
    ini <- c(ini, nugget/ini.cov.pars[1])
    lower.optim <- c(lower.optim, limits$tausq.rel["lower"])
    upper.optim <- c(upper.optim, limits$tausq.rel["upper"])
  }
  if(fix.kappa){
    ##    fixed.kappa <- c(fixed.pars, kappa)
    fixed.values$kappa <- kappa
  }
  else {
    ini <- c(ini, kappa)
    lower.optim <- c(lower.optim, limits$kappa["lower"])
    upper.optim <- c(upper.optim, limits$kappa["upper"])
  }
  if(fix.lambda){
    ##    fixed.pars <- c(fixed.pars, lambda)
    fixed.values$lambda <- lambda
  }
  else {
    ini <- c(ini, lambda)
    lower.optim <- c(lower.optim, limits$lambda["lower"])
    upper.optim <- c(upper.optim, limits$lambda["upper"])
  }
  if(fix.psiR){
    ##    fixed.pars <- c(fixed.pars, psiR)
    fixed.values$psiR <- psiR
  }
  else {
    ini <- c(ini, psiR)
    lower.optim <- c(lower.optim, limits$psiR["lower"])
    upper.optim <- c(upper.optim, limits$psiR["upper"])
  }
  if(fix.psiA){
    ##    fixed.pars <- c(fixed.pars, psiA)
    fixed.values$psiA <- psiA
  }
  else {
    ini <- c(ini, psiA)
    lower.optim <- c(lower.optim, limits$psiA["lower"])
    upper.optim <- c(upper.optim, limits$psiA["upper"])
  }
  ## This must be here, after the previous ones:
  if(fix.nugget & nugget > 0){
    ## Warning: Inverting order here, ini will be now: c(phi,sigmasg)
    ini <- c(ini, ini.cov.pars[1])
    lower.optim <- c(lower.optim, limits$sigmasq["lower"])
    upper.optim <- c(upper.optim, limits$sigmasq["upper"])
    ##    fixed.pars <- c(fixed.pars, ini.cov.pars[1])
    ##    fixed.values$sigmasq <- 0
  }
  ##
  names(ini) <- NULL
  if(length(ini) == 1) justone <- TRUE
  else justone <- FALSE
  ##
  ip <- list(f.tausq = fix.nugget, f.kappa = fix.kappa, f.lambda = fix.lambda,
             f.psiR = fix.psiR, f.psiA = fix.psiA)
  ##
  npars <- beta.size + 2 + sum(unlist(ip)==FALSE)
  temp.list$coords <- coords
  temp.list$xmat <- split(as.data.frame(unclass(xmat)), realisations)
  temp.list$xmat <- lapply(temp.list$xmat, as.matrix)
  temp.list$n <- as.vector(unlist(lapply(temp.list$xmat, nrow)))
  ##
  ## Constant term in the likelihood
  ##
  temp.list$loglik.cte <- rep(0, nrep)
  for(i in 1:nrep){
    if(method.lik == "ML"){
      if(ip$f.tausq & (tausq > 0))
        temp.list$loglik.cte[i] <-  (temp.list$n[i]/2)*(-log(2*pi))
      else
        temp.list$loglik.cte[i] <-  (temp.list$n[i]/2)*(-log(2*pi) +
                                                        log(temp.list$n[i]) -1)
    }
    if(method.lik == "RML"){
      xx.eigen <- eigen(crossprod(temp.list$xmat[[i]]),
                        symmetric = TRUE, only.values = TRUE)
      if(ip$f.tausq & (tausq > 0))
        temp.list$loglik.cte[i] <- - ((temp.list$n[i]-beta.size)/2)*(log(2*pi)) +
          0.5 * sum(log(xx.eigen$values))
      else
        temp.list$loglik.cte[i] <-  - ((temp.list$n[i]-beta.size)/2)*(log(2*pi)) +
          ((temp.list$n[i]-beta.size)/2)*(log(temp.list$n[i]-beta.size)) -
            ((temp.list$n[i]-beta.size)/2) + 0.5 * sum(log(xx.eigen$values))
    }
  }
  ##  
  if(messages.screen) {
    cat("---------------------------------------------------------------\n")
    cat("likfit: likelihood maximisation using the function ")
    if(is.R()){if(justone) cat("optimize.\n") else cat("optim.\n")} else cat("nlminb.\n")
    cat("likfit: Use control() to pass additional\n         arguments for the maximisation function.")
    cat("\n        For further details see documentation for ")
    if(is.R()){if(justone) cat("optimize.\n") else cat("optim.\n")} else cat("nlminb.\n")        
    cat("likfit: It is highly advisable to run this function several\n        times with different initial values for the parameters.\n")
    cat("likfit: WARNING: This step can be time demanding!\n")
    cat("---------------------------------------------------------------\n")
  }
  ##
  ## Numerical minimization of the -loglikelihood
  ##
  if(is.R()){
    if(length(ini) == 1){
      if(upper.optim == Inf) upper.optim <- 1000*max.dist
      lik.minim <- optimize(negloglik.GRF,lower=lower.optim,upper=upper.optim,fp=fixed.values, ip=ip,temp.list = temp.list, ...)
      lik.minim <- list(par = lik.minim$minimum, value = lik.minim$objective, convergence = 0, message = "function optimize used")      
    }
    else
      lik.minim <- optim(par = ini, fn = negloglik.GRF, method="L-BFGS-B",
                         lower=lower.optim, upper=upper.optim,
                         fp=fixed.values, ip=ip, temp.list = temp.list, ...)
  }
  else{
    lik.minim <- nlminb(ini, negloglik.GRF,
                        lower=lower.optim, upper=upper.optim,
                        fp=fixed.values, ip=ip, temp.list = temp.list, ...)
  }
  ##
  if(messages.screen) cat("likfit: end of numerical maximisation.\n")
  par.est <- lik.minim$par
  if(any(par.est < 0)) par.est <- round(par.est, dig=12)
  phi <- par.est[1]
  ##
  ## Values of the maximised likelihood
  ##
  if(is.R())
    loglik.max <- - lik.minim$value
  else
    loglik.max <- - lik.minim$objective
  ##
  ## Assigning values for estimated parameters
  ##
  if(ip$f.tausq & ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    psiA <- par.est[2]
  }
  if(ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    psiR <- par.est[2]
  }
  if(ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    psiR <- par.est[2]
    psiA <- par.est[3]
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    lambda  <- par.est[2]
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    lambda  <- par.est[2]
    psiA <- par.est[3]
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    lambda  <- par.est[2]
    psiR <- par.est[3]
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    lambda  <- par.est[2]
    psiR <- par.est[3]
    psiA <- par.est[4]
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & ip$f.psiA){
    kappa  <-  par.est[2]
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    kappa  <-  par.est[2]
    psiA <- par.est[3]
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    kappa  <-  par.est[2]
    psiR <- par.est[3]
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    kappa  <-  par.est[2]
    psiR <- par.est[3]
    psiA <- par.est[4]
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    kappa <-  par.est[2]
    lambda <- par.est[3]
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    kappa <-  par.est[2]
    lambda <- par.est[3]
    psiA <- par.est[4]
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    kappa <-  par.est[2]
    lambda <- par.est[3]
    psiR<- par.est[4]
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    kappa <-  par.est[2]
    lambda <- par.est[3]
    psiR<- par.est[4]
    psiA<- par.est[5]
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    psiA<- par.est[3]
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
    psiR<- par.est[3]
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    psiR<- par.est[3]
    psiA<- par.est[4]
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
    lambda <- par.est[3]
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    lambda <- par.est[3]
    psiA <- par.est[4]
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
    lambda <- par.est[3]
    psiR <- par.est[4]
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    lambda <- par.est[3]
    psiR <- par.est[4]
    psiA <- par.est[5]
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
    psiA <- par.est[4]
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
    psiR <- par.est[4]
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
    psiR <- par.est[4]
    psiA <- par.est[5]
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
    lambda <- par.est[4]
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
    lambda <- par.est[4]
    psiA <- par.est[5]
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
    lambda <- par.est[4]
    psiR <- par.est[5]
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- par.est[2]
    kappa <-  par.est[3]
    lambda <- par.est[4]
    psiR <- par.est[5]
    psiA <- par.est[6]
  }
  ##
  if(fix.nugget & nugget > 0){
    sigmasq <- par.est[length(par.est)]
    if(sigmasq > 1e-12) tausq <- nugget/sigmasq
    check.sigmasq <- TRUE
  }
  else check.sigmasq <- FALSE
  ##
  ##
  ## Transforming data acccording to the estimated lambda (Box-Cox) parameter
  ##
  if(!fix.lambda) {
    if(abs(lambda - 1) < 0.0001) {
      log.jacobian.max <- 0
    }
    else {
      if(any(data^(lambda - 1) <= 0))
        log.jacobian.max <- log(prod(data^(lambda - 1)))
      else log.jacobian.max <- sum(log(data^(lambda - 1)))
      temp.list$z <- ((data^lambda)-1)/lambda
    }
  }
  else{
    log.jacobian.max <- temp.list$log.jacobian
  }
  data.rep <- split(temp.list$z, realisations)
  coords.rep <- split(as.data.frame(coords), realisations)
  coords.rep <- lapply(coords.rep, as.matrix)
  ##
  ## Transforming coords for estimated anisotropy (if the case)
  ##
  if(fix.psiR & fix.psiA){
    if(is.R()) remove(".likGRF.dists.vec", pos=1)
    else remove(".likGRF.dists.vec", where=1)
  }
  else{
    if(round(psiR, dig=6) != 1 | round(psiA, dig=6) != 0)
      coords <- coords.aniso(coords, aniso.pars=c(psiA, psiR))
    rangevecdist <- function(x){range(as.vector(dist(x)))}
    range.dist <- lapply(split(as.data.frame(coords), realisations), rangevecdist)
    range.dist <- range(as.vector(unlist(range.dist)))
    max.dist <- max(range.dist)
    min.dist <- min(range.dist)
  }      
  if(is.R()) gc(verbose=FALSE)
  ##
  ## Computing estimated beta and tausq/sigmasq (if the case)
  ##
  xivx <- matrix(0, ncol=beta.size, nrow=beta.size)
  xivy <- matrix(0, ncol=1, nrow=beta.size)
  yivy <- 0
  for(i in 1:nrep){
    ni <- temp.list$n[i]
    xmati <- temp.list$xmat[[i]]
    if((phi < 1e-12))
      siv <- diag(x=1/sqrt((1+tausq)), ni)
    else{
      if(check.sigmasq){
        if(sigmasq < 1e-12){
          if(!fix.nugget)
            siv <- diag(x=1/sqrt((1+tausq)), ni)
          else
            siv <- diag(x=1/sqrt((tausq)), ni)          
        }
        else
          siv <- varcov.spatial(coords = coords.rep[[i]], cov.model = cov.model,
                                kappa = kappa,
                                nugget = tausq, cov.pars = c(1, phi),
                                inv=TRUE, sqrt.inv = TRUE,
                                det = FALSE)$sqrt.inverse
      }
      else
        siv <- varcov.spatial(coords = coords.rep[[i]], cov.model = cov.model,
                              kappa = kappa,
                              nugget = tausq, cov.pars = c(1, phi),
                              inv=TRUE, sqrt.inv = TRUE,
                              det = FALSE)$sqrt.inverse
    }
    sivx <- crossprod(siv, temp.list$xmat[[i]])
    xivx <- xivx + crossprod(sivx)
    sivy <- crossprod(siv, data.rep[[i]])
    xivy <- xivy + crossprod(sivx, sivy)
    yivy <- yivy + crossprod(sivy)
  }
  betahat <- solve.geoR(xivx, xivy)
  res <- as.vector(temp.list$z - xmat %*% betahat)
  if(!fix.nugget | (nugget < 1e-12)){
    ssres <- as.vector(yivy - 2*crossprod(betahat,xivy) +
                       crossprod(betahat,xivx) %*% betahat)  
    if(method.lik == "ML")
      sigmasq <- ssres/n
    else
      sigmasq <- ssres/(n - beta.size)
  }
  if(fix.nugget){
    if(nugget > 0)
      tausq <- nugget
  }
  else tausq <- tausq * sigmasq
  betahat.var <- solve.geoR(xivx)
  if(sigmasq > 1e-12) betahat.var <- sigmasq * betahat.var
#  if(!fix.nugget & phi < 1e-16){
#    tausq <- sigmasq + tausq
#    sigmasq <- 0
#  }
  ##
  ## Preparing output
  ##
  if((phi < 0.001*min.dist)){
    tausq <- tausq + sigmasq
    sigmasq <- 0
  }
  if((sigmasq < 1e-12)) phi <- 0
  ##
  n.model.pars <- beta.size + 7
  par.su <- data.frame(status=rep(-9,n.model.pars))
  ind.par.su <- c(rep(0, beta.size), ip$f.tausq, 0, 0, ip$f.kappa,
                  ip$f.psiR, ip$f.psiA,ip$f.lambda)
  par.su$status <- ifelse(ind.par.su,"fixed", "estimated")
  par.su$values <- round(c(betahat, tausq, sigmasq, phi, kappa, psiR, psiA, lambda), dig=4)
  if(beta.size == 1) beta.name <- "beta"
  else beta.name <- paste("beta", 0:(beta.size-1), sep="")
  row.names(par.su) <- c(beta.name, "tausq", "sigmasq", "phi", "kappa",
                             "psiR", "psiA", "lambda")
  par.su <- par.su[c((1:(n.model.pars-3)), n.model.pars-1, n.model.pars-2, n.model.pars),] 
  ##
  lik.results <- list(cov.model = cov.model,
                      nugget = tausq,
                      cov.pars=c(sigmasq, phi),
                      sigmasq = sigmasq,
                      phi = phi,
                      kappa = kappa,
                      beta = as.vector(betahat),
                      beta.var = betahat.var,
                      lambda = lambda,
                      aniso.pars = c(psiA = psiA, psiR = psiR),
                      method.lik = method.lik, trend = trend,
                      loglik = loglik.max,
                      npars = npars,
                      AIC = -2 * (loglik.max - npars),
                      BIC = -2 * (loglik.max - 0.5 * log(n) * npars),
#                      residuals = res,
                      parameters.summary = par.su,
                      info.minimisation.function = lik.minim,
                      max.dist = max.dist,
                      trend = trend,
                      trend.matrix= xmat,
                      transform.info = list(fix.lambda = fix.lambda,
                        log.jacobian = log.jacobian.max))
  ##
  ## Likelihood results for the model without spatial correlation
  ##
  if(nospatial){
    if(fix.lambda){
      beta.ns <- solve.geoR(crossprod(xmat), crossprod(xmat, temp.list$z))
      ss.ns <- sum((as.vector(temp.list$z - xmat %*% beta.ns))^2)
      if(method.lik == "ML"){
        nugget.ns <- ss.ns/n
        loglik.ns <- (n/2)*((-log(2*pi)) - log(nugget.ns) - 1) + temp.list$log.jacobian
      }
      if(method.lik == "RML"){
        nugget.ns <- ss.ns/(n-beta.size)
        loglik.ns <- ((n-beta.size)/2)*((-log(2*pi)) - log(nugget.ns) -1) +
          temp.list$log.jacobian
      }
      npars.ns <- beta.size + 1 + fix.lambda
      lambda.ns <- lambda
    }
    else{
      if(is.R())
        lik.lambda.ns <- optim(par=1, fn = boxcox.negloglik,
                               method = "L-BFGS-B",
                               lower = limits$lambda["lower"],
                               upper = limits$lambda["upper"],
                               data = data, xmat = xmat,
                               lik.method = method.lik)
      else
        lik.lambda.ns <- nlminb(par=1, fn = boxcox.negloglik,
                                lower=limits$lambda["lower"],
                                upper=limits$lambda["upper"],
                                data = data, xmat = xmat,
                                lik.method = method.lik)
      lambda.ns <- lik.lambda.ns$par
      if(abs(lambda) < 0.0001) tdata.ns <- log(data)
      else tdata.ns <- ((data^lambda.ns)-1)/lambda.ns
      beta.ns <- solve.geoR(crossprod(xmat),crossprod(xmat,tdata.ns))
      ss.ns <- sum((as.vector(tdata.ns - xmat %*% beta.ns))^2)
      if(is.R())
        value.min.ns <- lik.lambda.ns$value
      else
        value.min.ns <- lik.lambda.ns$objective
      if(method.lik == "ML"){
        loglik.ns <- (- value.min.ns)+ (n/2)*((-log(2*pi)) + log(n) - 1)
        nugget.ns <- ss.ns/n
      }
      if(method.lik == "RML"){
        nugget.ns <- ss.ns/(n-beta.size)
        loglik.ns <- (- value.min.ns)+ ((n-beta.size)/2)*((-log(2*pi)) +
                                                          log(n-beta.size) - 1)
      }      
      npars.ns <- beta.size + 1 + fix.lambda
    }
    lik.results$nospatial <- list(beta.ns = beta.ns, variance.ns = nugget.ns,
                                  loglik.ns = loglik.ns, npars.ns = npars.ns,
                                  lambda.ns = lambda.ns)
  }
  ##
  ## Assigning names to the components of the mean vector beta
  ##
  if(length(lik.results$beta.var) == 1)
    lik.results$beta.var <- as.vector(lik.results$beta.var)
  if(length(lik.results$beta) > 1){
    ##    if(inherits(trend, "formula") || (!is.null(class(trend)) && any(class(trend) == "trend.spatial")))
    if(inherits(trend, "formula") || (length(class(trend)) > 0 && any(class(trend) == "trend.spatial")))
      beta.names <- c("intercept", paste("covar", 1:(ncol(xmat)-1), sep = ""))
    else
      if(trend == "1st")
        beta.names <- c("intercept", "x", "y")
      else
        if(trend == "2nd")
          beta.names <- c("intercept", "x", "y", "x2", "xy", "y2")
    names(lik.results$beta) <- beta.names
  }
  ##
  ## Computing residuals and predicted values
  ## (isolated components of the model)
  ##
  if(components) {
    if(!fix.psiR & !fix.psiA)
      if(psiR != 1 | psiA != 0)
        coords <- coords.aniso(coords, aniso.pars=c(psiA, psiR))
    coords.rep <- split(as.data.frame(coords), realisations)
    res.rep <- split(res, realisations)
    trend.comp <- temp.list$z - res
    spatial.comp <- list()
    for(i in 1:nrep){
#      invcov <- varcov.spatial(coords = coords[ind.rep[[i]],], cov.model = cov.model, 
#                               kappa = kappa, nugget = tausq,
#                               cov.pars = c(sigmasq, phi), inv=TRUE)$inverse 
#      covmat.signal <- varcov.spatial(coords = coords[ind.rep[[i]],],
#                                      cov.model = cov.model, 
#                                      kappa = kappa, nugget = 0,
#                                      cov.pars = c(sigmasq, phi))$varcov
      spatial.comp[[i]] <- as.vector(varcov.spatial(coords = coords[ind.rep[[i]],],
                                                    cov.model = cov.model, 
                                                    kappa = kappa, nugget = 0,
                                                    cov.pars = c(sigmasq, phi))$varcov %*%
                                     varcov.spatial(coords = coords[ind.rep[[i]],],
                                                    cov.model = cov.model, 
                                                    kappa = kappa, nugget = tausq,
                                                    cov.pars = c(sigmasq, phi), inv=TRUE)$inverse %*%
                                     res[ind.rep[[i]]]) 
    }
    spatial.comp <- as.vector(unlist(spatial.comp))[as.vector(unlist(ind.rep))]
    predict.comp <- trend.comp + spatial.comp
    residual.comp <- as.vector(temp.list$z - predict.comp)
#    residual.std <- as.vector(invcov %*% residual.comp)
#    residual.trend.std <- as.vector(invcov %*% res)
    lik.results$model.components <-
      data.frame(trend = trend.comp, spatial = spatial.comp, residuals = residual.comp)
#    lik.results$s2.random <- (crossprod(res,invcov) %*% res)/(n - beta.size)
#    lik.results$s2 <- (crossprod(residual.comp,invcov) %*% residual.comp)/(n - beta.size)
  }
  ##
  lik.results$contrasts <- xmat.contrasts
  lik.results$call <- call.fc
  ##
  ## Assigning classes
  ##
  if(is.R()) class(lik.results) <- c("likGRF", "variomodel")
  else{
    if(version$major <= 4) class(lik.results) <- c("likGRF", "variomodel")
    else oldClass(lik.results) <- c("likGRF", "variomodel")
  }
  ##
  ## Some warning messages about particular possible results
  ##
  if(messages.screen){
    if((lik.results$cov.pars[1] < (0.01 * (lik.results$nugget + lik.results$cov.pars[1])))& lik.results$cov.pars[2] > 0)
      cat("\nWARNING: estimated sill is less than 1 hundredth of the total variance. Consider re-examine the model excluding spatial dependence\n" )      
    if((lik.results$cov.pars[2] > (10 * max.dist)) & lik.results$cov.pars[1] > 0 )
      cat("\nWARNING: estimated range is more than 10 times bigger than the biggest distance between two points. Consider re-examine the model:\n 1) excluding spatial dependence if estimated sill is too low and/or \n 2) taking trends (covariates) into account\n" ) 
    if(((lik.results$cov.pars[2] < (0.1 * min.dist)) & (lik.results$cov.pars[1] > 0)) & lik.results$cov.pars[2] > 0)
      cat("\nWARNING: estimated range is less than 1 tenth of the minimum distance between two points. Consider re-examine the model excluding spatial dependence\n" ) 
  }
  ##
  return(lik.results)
}

"negloglik.GRF" <-
  function(pars, fp, ip, temp.list)
### pars : values for the parameters to be estimated
  ## sequence is c(phi, tausq, kappa, lambda, psiR, psiA, sigmasq)
### fixed pars: parameters considered fixed
### ind.pars : list indicating which are fixed and which are to be estimated
  ##
  ## Warning:
  ##  if fix.nugget = TRUE and nugget > 0 ,
  ## sigmasq should be passed and fp$nugget is the value of the nugget
  ## otherwise the RELATIVE nugget should be passed
{
  p <- temp.list$beta.size
  log.jacobian <- temp.list$log.jacobian
  ## Obligatory parameter:
  phi <- pars[1]
  ## Others
  if(ip$f.tausq){
    if(fp$tausq > 0){
      npars.min <- length(pars)
      sigmasq <- pars[npars.min]
    }
    else sigmasq <- 1
  }
  else sigmasq <- 1
  if(ip$f.tausq & ip$f.kappa & ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(ip$f.tausq & ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- pars[2]
  }
  if(ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- pars[2]
    psiA <- fp$psiA
  }
  if(ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- pars[2]
    psiA <- pars[3]
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- pars[2]
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- pars[2]
    psiR <- fp$psiR
    psiA <- pars[3]
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- pars[2]
    psiR <- pars[3]
    psiA <- fp$psiA
  }
  if(ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- fp$kappa
    lambda <- pars[2]
    psiR <- pars[3]
    psiA <- pars[4]
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- pars[3]
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- fp$lambda
    psiR <- pars[3]
    psiA <- fp$psiA
  }
  if(ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- fp$lambda
    psiR <- pars[3]
    psiA <- pars[4]
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- pars[3]
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- pars[3]
    psiR <- fp$psiR
    psiA <- pars[4]
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- pars[3]
    psiR <- pars[4]
    psiA <- fp$psiA
  }
  if(ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- fp$tausq
    kappa <- pars[2]
    lambda <- pars[3]
    psiR <- pars[4]
    psiA <- pars[5]
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- pars[3]
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- pars[3]
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- fp$lambda
    psiR <- pars[3]
    psiA <- pars[4]
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- pars[3]
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- pars[3]
    psiR <- fp$psiR
    psiA <- pars[4]
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- pars[3]
    psiR <- pars[4]
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- fp$kappa
    lambda <- pars[3]
    psiR <- pars[4]
    psiA <- pars[5]
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- fp$lambda
    psiR <- fp$psiR
    psiA <- pars[4]
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- fp$lambda
    psiR <- pars[4]
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & !ip$f.kappa & ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- fp$lambda
    psiR <- pars[4]
    psiA <- pars[5]
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- pars[4]
    psiR <- fp$psiR
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- pars[4]
    psiR <- fp$psiR
    psiA <- pars[5]
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- pars[4]
    psiR <- pars[5]
    psiA <- fp$psiA
  }
  if(!ip$f.tausq & !ip$f.kappa & !ip$f.lambda & !ip$f.psiR & !ip$f.psiA){
    tausq <- pars[2]
    kappa <- pars[3]
    lambda <- pars[4]
    psiR <- pars[5]
    psiA <- pars[6]
  }
  ##
  if(temp.list$print.pars){
    running.pars <- c(phi = phi, tausq = tausq, kappa =kappa, psiA = psiA, psiR = psiR, lambda = lambda)
    if(ip$f.tausq && fp$tausq > 0)
      running.pars <- c(sigmasq=sigmasq, running.pars)
    print(running.pars)
  }
  ##
  ## Absurd values
  ##
  if(kappa < 1e-04) return(.Machine$double.xmax^0.5)
  if((tausq+sigmasq) < (.Machine$double.eps^0.5)) return(.Machine$double.xmax^0.5)
  ##
  ## Anisotropy
  ##
  if(!ip$f.psiR | !ip$f.psiA){
    coords.c <- coords.aniso(temp.list$coords, aniso.pars=c(psiA, psiR))
    vecdist <- function(x){as.vector(dist(x))}
    if(is.R())
      assign(".likGRF.dists.vec", lapply(split(as.data.frame(coords.c),
                                               temp.list$realisations), vecdist), pos=1)
    else
      assign(".likGRF.dists.vec", lapply(split(as.data.frame(coords.c),
                                               temp.list$realisations), vecdist), where=1)
  }
  ##
  ## Box-Cox transformation
  ##
  if(!ip$f.lambda){
    if(abs(lambda - 1) < 0.0001) {
      log.jacobian <- 0
    }
    else {
      if(any(temp.list$z <= 0))
        stop("Transformation not allowed for zero or negative data")
      data <- temp.list$z^(lambda - 1)
      if(any(data <= 0)) log.jacobian <- log(prod(data))
      else log.jacobian <- sum(log(data))
      data <- NULL
    }
    if(abs(lambda) < 0.0001)
      data <- log(temp.list$z)
    else data <- ((temp.list$z^lambda) - 1)/lambda
  }
  else data <- temp.list$z
  data <- split(data, as.factor(temp.list$realisations))
  ##
  ## Computing likelihood
  ##
  sumnegloglik <- 0
  for(i in 1:temp.list$nrep){
    ## NOTE: Likelihood for Independent observations 
    ##       arbitrary criteria used here:
    ##       (phi < 1-e16) or (sigmasq < 1-e16)  ==> independence
    ##
    n <- temp.list$n[i]
    xmat <- temp.list$xmat[[i]]
    z <- data[[i]]
    if((phi < 1e-16) | (sigmasq < 1e-16)){
      if(ip$f.tausq)
        iv <- list(sqrt.inverse = diag(x=1/sqrt((tausq+sigmasq)), n),
                   log.det.to.half = (n/2) * log(tausq+sigmasq))
      else
        iv <- list(sqrt.inverse = diag(x=1/sqrt((1+tausq)), n),
                   log.det.to.half = (n/2) * log(1+tausq))
    }
    else{
      iv <- varcov.spatial(dists.lowertri = .likGRF.dists.vec[[i]],
                           cov.model = temp.list$cov.model, kappa=kappa,
                           nugget = tausq, cov.pars=c(sigmasq, phi),
                           sqrt.inv = TRUE, det = TRUE)
    }
    if(!is.null(iv$crash.parms)) return(.Machine$double.xmax^0.5)
    sivx <- crossprod(iv$sqrt.inverse, xmat)
    xivx <- crossprod(sivx)
    sivy <- crossprod(iv$sqrt.inverse, z)
    xivy <- crossprod(sivx, sivy)
    betahat <- solve.geoR(xivx, xivy)
    if(inherits(betahat, "try-error")){
      t.ei <- eigen(xivx, symmetric = TRUE)
      require(methods)
      if(exists("trySilent"))
        betahat <- trySilent(t.ei$vec %*% diag(t.ei$val^(-1)) %*% t(t.ei$vec) %*% xivy)
      else{
        error.now <- options()$show.error.message
        options(show.error.messages = FALSE)
        betahat <- try(t.ei$vec %*% diag(t.ei$val^(-1)) %*% t(t.ei$vec) %*% xivy)
        if(is.null(error.now) || error.now == TRUE) options(show.error.messages = TRUE)        
      }
    }
    if(inherits(betahat, "try-error"))
      stop("Covariates have very different orders of magnitude. Try to multiply and/or divide them to bring them to similar orders of magnitude") 
    res <- z - xmat %*% betahat
    ssres <- as.vector(crossprod(crossprod(iv$sqrt.inverse,res)))
    if(temp.list$method.lik == "ML"){
      if(ip$f.tausq & (tausq > 0))
        negloglik <- iv$log.det.to.half +  0.5 * ssres
      else
        negloglik <- (n/2) * log(ssres) +  iv$log.det.to.half
    }
    if(temp.list$method.lik == "RML"){
      if(length(as.vector(xivx)) == 1) {
        choldet <- 0.5 * log(xivx)
      }
      else {
        chol.xivx <- chol(xivx)
        choldet <- sum(log(diag(chol.xivx)))
      }
      if(ip$f.tausq & (tausq > 0))
        negloglik <- iv$log.det.to.half +  0.5 * ssres + choldet
      else
        negloglik <- ((n-p)/2) * log(ssres) +  iv$log.det.to.half + choldet
    }  
    negloglik <- negloglik - temp.list$loglik.cte[i]
    sumnegloglik <- sumnegloglik + negloglik
  }
  sumnegloglik <- sumnegloglik - log.jacobian
  if(sumnegloglik > (.Machine$double.xmax^0.5) | sumnegloglik == Inf | sumnegloglik == -Inf)
    sumnegloglik <- .Machine$double.xmax^0.5
  if(temp.list$print.pars)
    cat(paste("log-likelihood = ", -sumnegloglik, "\n"))
  return(sumnegloglik) 
}

"pars.limits" <-
  function(phi = c(lower=0, upper=+Inf),
           sigmasq = c(lower=0, upper=+Inf),
           nugget.rel = c(lower=0, upper=+Inf),
           kappa = c(lower=0, upper=+Inf),
           lambda = c(lower=-3, upper=3),
           psiR = c(lower=1, upper=+Inf),
           psiA = c(lower=0, upper=2*pi),
           tausq.rel = nugget.rel
           )
{
  if(length(phi) != 2)
    stop("phi must be a 2 components vector with lower and upper limits for the parameter phi") 
  if(length(sigmasq) != 2)
    stop("phi must be a 2 components vector with lower and upper limits for the parameter phi") 
  if(length(tausq.rel) != 2)
    stop("phi must be a 2 components vector with lower and upper limits for the parameter phi") 
  if(length(kappa) != 2)
    stop("phi must be a 2 components vector with lower and upper limits for the parameter phi") 
  if(length(lambda) != 2)
    stop("phi must be a 2 components vector with lower and upper limits for the parameter phi")
  if(length(psiR) != 2)
    stop("phi must be a 2 components vector with lower and upper limits for the parameter phi") 
  if(length(psiA) != 2)
    stop("phi must be a 2 components vector with lower and upper limits for the parameter phi") 
  if(phi[1] >= phi[2])
    stop("parameter phi: lower limit greater or equal upper limit")
  if(sigmasq[1] >= sigmasq[2])
    stop("parameter sigmasq: lower limit greater or equal upper limit")
  if(tausq.rel[1] >= tausq.rel[2])
    stop("parameter tausq.rel: lower limit greater or equal upper limit")
  if(kappa[1] >= kappa[2])
    stop("parameter kappa: lower limit greater or equal upper limit")
  if(lambda[1] >= lambda[2])
    stop("parameter lambda: lower limit greater or equal upper limit")
  if(psiR[1] >= psiR[2])
    stop("parameter psiR: lower limit greater or equal upper limit")
  if(psiA[1] >= psiA[2])
    stop("parameter psiA: lower limit greater or equal upper limit")
  names(phi) <- c("lower", "upper")
  names(sigmasq) <- c("lower", "upper")
  names(tausq.rel) <- c("lower", "upper")
  names(kappa) <- c("lower", "upper")
  names(lambda) <- c("lower", "upper")
  names(psiR) <- c("lower", "upper")
  names(psiA) <- c("lower", "upper")
  return(list(phi = phi, sigmasq = sigmasq,
              tausq.rel = tausq.rel, kappa = kappa,
              lambda = lambda, psiR = psiR, psiA = psiA))
}

"likfit.limits" <- pars.limits

"print.likGRF" <-
  function (x, digits = max(3, getOption("digits") - 3), ...)
{
  est.pars <- as.vector(x$parameters.summary[x$parameters.summary[,1] == "estimated",2])
  names.est.pars <- dimnames(x$parameters.summary[x$parameters.summary[,1] == "estimated",])[[1]]
  names(est.pars) <- names.est.pars
  cat("likfit: estimated model parameters:\n")
  print.default(format(est.pars, digits = digits), ...)
  ##  print(round(est.pars, digits=digits))
  cat("\nlikfit: maximised log-likelihood = ")
  cat(format(x$loglik, digits = digits))
  ##  cat(round(x$loglik, digits=digits))
  cat("\n")
  return(invisible())
}  

"summary.likGRF" <-
  function(object, ...)
{
  names.pars <- dimnames(object$parameters.summary)[[1]]
  summ.lik <- list()
  if(object$method.lik == "ML")
    summ.lik$method.lik <- "maximum likelihood"
  if(object$method.lik == "RML")
    summ.lik$method.lik <- "restricted maximum likelihood"
  summ.lik$mean.component <- object$beta
  names(summ.lik$mean.component) <- names.pars[1:length(object$beta)]
  summ.lik$cov.model <- object$cov.model
  summ.lik$spatial.component <- object$parameters.summary[c("sigmasq", "phi"),]
  summ.lik$spatial.component.extra <- object$parameters.summary[c("kappa", "psiA", "psiR"),]
  summ.lik$nugget.component <- object$parameters.summary[c("tausq"),, drop=FALSE]
  summ.lik$transformation  <- object$parameters.summary[c("lambda"),, drop=FALSE]
  summ.lik$likelihood <- list(log.L = object$loglik, n.params = as.integer(object$npars),
                               AIC = object$AIC, BIC = object$BIC)
  summ.lik$estimated.pars <- dimnames(object$parameters.summary[object$parameters.summary[,1] == "estimated",])[[1]]
  summ.lik$call <- object$call
  class(summ.lik) <- "summary.likGRF"
  return(summ.lik)
}

"print.summary.likGRF" <-
  function (x, digits = max(3, getOption("digits") - 3), ...)
{
  if(length(class(x)) == 0 || all(class(x) != "summary.likGRF"))
    stop("object is not of the class \"summary.likGRF\"")
  cat("Summary of the parameter estimation\n")
  cat("-----------------------------------\n")
  cat(paste("Estimation method:", x$method.lik, "\n"))
  cat("\n")
  ##
  ## Estimates of the model components
  ## Model: Y(x) = X\beta + S(x) + e 
  ##
  cat("Parameters of the mean component (trend):")
  cat("\n")
  print.default(round(x$mean.component, digits = digits), ...)
  ##  print(round(x$mean.component, digits=digits))
  cat("\n")
  ##
  cat("Parameters of the spatial component:")
  cat("\n")
  cat(paste("   correlation function:", x$cov.model))
  cat(paste("\n      (estimated) variance parameter sigmasq (partial sill) = ", format(x$spatial.component[1,2], dig=digits)))
  cat(paste("\n      (estimated) cor. fct. parameter phi (range parameter)  = ", format(x$spatial.component[2,2], dig=digits)))
  if(x$cov.model == "matern" | x$cov.model == "powered.exponential" |
     x$cov.model == "cauchy" | x$cov.model == "gneiting.matern"){
    kappa <- x$spatial.component.extra["kappa",2]
    if(x$spatial.component.extra["kappa",1] == "estimated")
      cat(paste("\n      (estimated) extra parameter kappa =", format(kappa, digits=digits)))
    else{
      cat(paste("\n      (fixed) extra parameter kappa = ", kappa))
      if(x$cov.model == "matern" & (format(kappa, digits=digits)  == 0.5))
      cat(" (exponential)")
    }
  }
  cat("\n")
  ##
  aniso <-  x$spatial.component.extra[c("psiA", "psiR"),]
  psiApsiR <- x$spatial.component.extra[c("psiA", "psiR"),2]
  cat("   anisotropy parameters:")
  if(aniso["psiA",1] == "estimated")
    cat(paste("\n      (estimated) anisotropy angle =",
              format(psiApsiR[1], digits=digits),
              " (",format((psiApsiR[1]*360)/(2*pi), dig=1), "degrees )"))
  else
    cat(paste("\n      (fixed) anisotropy angle =", psiApsiR[1],
              " (",(psiApsiR[1]*360)/(2*pi), "degrees )"))
  if(aniso["psiR",1] == "estimated")
    cat(paste("\n      (estimated) anisotropy ratio =",
              format(psiApsiR[2], digits=digits)))
  else
    cat(paste("\n      (fixed) anisotropy ratio =", psiApsiR[2]))
  cat("\n")
  cat("\n")  
  cat("Parameter of the error component:")
  if(x$nugget.component[,1] == "estimated")
    cat(paste("\n      (estimated) nugget = ", format(x$nugget.component[,2], dig=digits)))
  else
    cat(paste("\n      (fixed) nugget =", x$nugget.component[,2]))
  cat("\n")
  cat("\n")
  cat("Transformation parameter:")
  cat("\n")
  lambda <- x$transformation[,2]
  if(x$transformation[,1] == "estimated")
    cat(paste("      (estimated) Box-Cox parameter =", format(lambda, dig=digits)))
  else{
    cat(paste("      (fixed) Box-Cox parameter =", lambda))
    if(abs(lambda - 1) <  0.0001) cat(" (no transformation)")
    if(abs(lambda) < 0.0001) cat(" (log-transformation)")
  }
  cat("\n")
  cat("\n")
  cat("Maximised Likelihood:")
  cat("\n")
  print(format(x$likelihood, digits=digits), ...)
  cat("\n")
  cat("Call:")
  cat("\n")
  print(x$call)
  cat("\n")
  invisible(x)
}

"loglik.GRF" <-
  function(geodata, coords=geodata$coords, data=geodata$data,
           obj.model = NULL,
           cov.model="exp", cov.pars,
           nugget=0, kappa=0.5, lambda=1, psiR=1, psiA=0,
           trend="cte", method.lik="ML",
           compute.dists = TRUE, realisations = NULL)
{
  if(!is.null(obj.model)){
    if(!is.null(obj.model$cov.model)) cov.model <- obj.model$cov.model
    if(!is.null(obj.model$cov.pars)) cov.pars <- obj.model$cov.pars
    if(!is.null(obj.model$nugget)) nugget <- obj.model$nugget
    if(!is.null(obj.model$kappa)) kappa <- obj.model$kappa
    if(!is.null(obj.model$lambda)) lambda <- obj.model$lambda
    if(!is.null(obj.model$psiR)) psiR <- obj.model$psiR
    if(!is.null(obj.model$psiA)) psiA <- obj.model$psiA      
   if(!is.null(obj.model$trend)) trend <- eval(obj.model$trend)
  ## a resolver: problema em passando  trend
  }
  sigmasq <- cov.pars[1]
  phi <- cov.pars[2]
  if(method.lik == "REML" | method.lik == "reml" | method.lik == "rml") 
    method.lik <- "RML"
  if(method.lik == "ML" | method.lik == "ml")
    method.lik <- "ML"
  if(is.null(realisations))
    realisations <- as.factor(rep(1, length(data)))
  else
    realisations <- as.factor(realisations)
  nrep <- length(levels(realisations))
  ##
  ## Absurd values
  ##
  if(kappa < 1e-04) return(-(.Machine$double.xmax^0.5))
  if((nugget+sigmasq) < 1e-16) return(-(.Machine$double.xmax^0.5))
  ##
  ## Trend matrix
  ##
  if(missing(geodata))
    xmat <- unclass(trend.spatial(trend=trend, geodata = list(coords = coords, data = data)))
  else
    xmat <- unclass(trend.spatial(trend=trend, geodata = geodata))
  if (nrow(xmat) != nrow(coords)) 
    stop("coords and trend have incompatible sizes")
  beta.size <- ncol(xmat)
  xmat <- split(as.data.frame(unclass(xmat)), realisations)
  xmat <- lapply(xmat, as.matrix)
  ##
  ## Anisotropy
  ##
  vecdist <- function(x){as.vector(dist(x))}
  if(psiR != 1 | psiA != 0){
    coords.c <- coords.aniso(coords, aniso.pars=c(psiA, psiR))
    .likGRF.dists.vec <- lapply(split(as.data.frame(coords.c),
                                      as.factor(realisations)), vecdist)
  }
  else if(compute.dists) .likGRF.dists.vec <- lapply(split(as.data.frame(coords),
                                                           as.factor(realisations)), vecdist)
  ##
  ## Box-Cox transformation
  ##
  z <- data
  if(abs(lambda - 1) < 0.0001)
    log.jacobian <- 0
  else {
    if(any(z <= 0))
      stop("Transformation not allowed for zero or negative data")
    data <- z^(lambda - 1)
    if(any(data <= 0)) log.jacobian <- log(prod(data))
    else log.jacobian <- sum(log(data))
    data <- NULL
    if(abs(lambda) < 0.0001)
      data <- log(z)
    else data <- ((z^lambda) - 1)/lambda
  }
  data <- split(data, as.factor(realisations))
  ##
  ## Computing likelihood
  ##
  sumnegloglik <- 0
  for(i in 1:nrep){
    ## NOTE: Likelihood for Independent observations 
    ##       arbitrary criteria used here:
    ##       (phi < 1-e16) or (sigmasq < 1-e16)  ==> independence
    ##
    n <- length(data[[1]])
    if((phi < 1e-16) | (sigmasq < 1e-16)){
      iv <- list(sqrt.inverse = diag(x=1/sqrt((nugget+sigmasq)), n),
                 log.det.to.half = (n/2) * log(nugget+sigmasq))
    }
    else{
      iv <- varcov.spatial(dists.lowertri = .likGRF.dists.vec[[i]],
                           cov.model = cov.model, kappa=kappa,
                           nugget = nugget, cov.pars=c(sigmasq, phi),
                           sqrt.inv = TRUE, det = TRUE)
    }
    if(!is.null(iv$crash.parms)){
      cat("varcov.spatial: improper matrix for following the given parameters:")
      print(iv$crash.parms)
      stop()
    }
    sivx <- crossprod(iv$sqrt.inverse, xmat[[i]])
    xivx <- crossprod(sivx)
    sivy <- crossprod(iv$sqrt.inverse, data[[i]])
    xivy <- crossprod(sivx, sivy)  
    betahat <- solve.geoR(xivx, xivy)
    res <- data[[i]] - xmat[[i]] %*% betahat
    ssres <- as.vector(crossprod(crossprod(iv$sqrt.inverse,res)))
    if(method.lik == "ML"){
      negloglik <- (n/2)*(log(2*pi)) + iv$log.det.to.half +  0.5 * ssres
    }
    if(method.lik == "RML"){
      if(length(as.vector(xivx)) == 1) {
        choldet <- 0.5 * log(xivx)
      }
      else {
        chol.xivx <- chol(xivx)
        choldet <- sum(log(diag(chol.xivx)))
      }
      negloglik <- iv$log.det.to.half +  0.5 * ssres + choldet
      xx.eigen <- eigen(crossprod(xmat[[i]]), symmetric = TRUE, only.values = TRUE)
      negloglik <- negloglik + ((n-beta.size)/2)*(log(2*pi)) - 0.5 * sum(log(xx.eigen$values))
    }
    sumnegloglik <- sumnegloglik + negloglik 
  }
  sumnegloglik <- sumnegloglik - log.jacobian
  if(sumnegloglik > (.Machine$double.xmax^0.5))
    sumnegloglik <- .Machine$double.xmax^0.5
  return(as.vector(-sumnegloglik))
}


##
## ----------------
## Other functions
## ----------------
##
##

"likfit.nospatial" <-
  function(temp.list, ...)
{
  results <- list()
  z <- temp.list$z
  n <- temp.list$n
  beta.size <- temp.list$beta.size
  xmat <- temp.list$xmat
  txmat <- temp.list$txmat
  ixx <- solve(crossprod(xmat))
  if(temp.list$fix.lambda == FALSE){
    if (temp.list$minimisation.function == "nlm"){
      assign(".temp.lower.lambda",-2, pos=1)
      assign(".temp.upper.lambda", 2, pos=1)
      results <- nlm(proflik.lambda, 1, ...)
      if(exists(".temp.lambda")){
        results$lambda <- .temp.lambda
        remove(".temp.lambda", pos=1, inherits = TRUE)
      }
      else{
        results$lambda <- results$estimate
      }
      rm(.temp.lower.lambda, .temp.upper.lambda, inherits = TRUE, pos=1)
    }
    if (temp.list$minimisation.function == "nlmP"){
      results <- nlmP(proflik.lambda, 1, lower=-2, upper=2,...)  
      results$lambda <- results$estimate
    }
    if (temp.list$minimisation.function == "optim"){
      results <- optim(1, proflik.lambda, method="L-BFGS-B", lower=-2, upper=2,...)
      results$minimum <- results$value
      results$lambda <- results$par
    }
    if(results$lambda == 1) {
      temp.list$log.jacobian <- 0
    }
    else {
      if(any(z <= 0))
        stop("Transformation option not allowed when there are zeros or negative data")
      if(any(z^(results$lambda - 1) <= 0))
        temp.list$log.jacobian <- log(prod(z^(results$lambda - 1)))
      else temp.list$log.jacobian <- sum(log(z^(results$lambda - 1)))
      if(results$lambda == 0)
        z <- log(z)
      else z <- ((z^results$lambda) - 1)/results$lambda
    }
  }
  else{
    results$lambda <- temp.list$lambda
    results$code <- 1
    if (temp.list$minimisation.function == "optim") results$convergence <- 0
  }
  ssres <- (z %*% (diag(n) - xmat %*%
                   solve(crossprod(xmat)) %*% txmat) %*% z)
  if(temp.list$method == "ML"){
    results$tausqhat <- ssres/n
    if(temp.list$fix.lambda)
      results$minimum <- as.vector(((n/2) * log(2 * pi) +
                          (n/2) * log(results$tausqhat) +
                          (n/2)  -
                          temp.list$log.jacobian))
  }
  if(temp.list$method == "RML") {
    results$tausqhat  <- (ssres/(n-beta.size))
    if(temp.list$fix.lambda){
      results$minimum <- as.vector((((n - beta.size)/2) * log(2 * pi) +
                          ((n - beta.size)/2) * log(results$tausqhat) +
                          (n/2) -
                          temp.list$log.jacobian
                          ))
    }
  }
  if (temp.list$minimisation.function == "optim") results$value <- results$minimum    
  return(results)
}

"loglik.spatial" <- function(pars)
{
  tausq <- pars[1]
  sigmasq <- pars[2]
  sill.total <- tausq + sigmasq
  phi <- pars[3]
  lambda <- pars[4]
  z <- .temp.list$z
  n <- .temp.list$n
  if(.temp.list$fix.lambda == FALSE) {
    if(lambda == 1) {
      .temp.list$log.jacobian <- 0
    }
    else {
      if(any(z < 0))
        stop("Transformation option not allowed when there are zeros or negative data"
             )
      if(any(z^(lambda - 1) <= 0))
        .temp.list$log.jacobian <- log(prod(z^(lambda -
                                               1)))
      else .temp.list$log.jacobian <- sum(log(z^(lambda - 1)))
      if(lambda == 0)
        z <- log(z)
      else z <- ((z^lambda) - 1)/lambda
    }
  }
  beta.size <- .temp.list$beta.size
  kappa <- .temp.list$kappa
  covinf <- varcov.spatial(dists.lowertri = .temp.list$
                           dists.lowertri, cov.model = .temp.list$cov.model,
                           kappa = kappa, nugget = tausq,
                           cov.pars = c(sigmasq, phi), scaled = FALSE,
                           inv = TRUE, det = TRUE,
                           only.inv.lower.diag = TRUE)
  xix <- as.double(rep(0, beta.size*beta.size))
  xix <- .C("bilinearform_XAY",
            as.double(covinf$lower.inverse),
            as.double(covinf$diag.inverse),
            as.double(as.vector(.temp.list$xmat)),
            as.double(as.vector(.temp.list$xmat)),
            as.integer(beta.size),
            as.integer(beta.size),
            as.integer(n),
            res = xix, PACKAGE = "geoR")$res
  attr(xix, "dim") <- c(beta.size, beta.size)
  if(length(as.vector(xix)) == 1) {
    ixix <- 1/xix
    choldet <- 0.5 * log(xix)
  }
  else {
    chol.xix <- chol(xix)
    ixix <- chol2inv(chol.xix)
    choldet <- sum(log(diag(chol.xix)))
  }
  xiy <- as.double(rep(0, beta.size))
  xiy <- .C("bilinearform_XAY",
            as.double(covinf$lower.inverse),
            as.double(covinf$diag.inverse),
            as.double(as.vector(.temp.list$xmat)),
            as.double(as.vector(z)),
            as.integer(beta.size),
            as.integer(1),
            as.integer(n),
            res = xiy, PACKAGE = "geoR")$res
  beta.hat <- as.vector(ixix %*% xiy)
  yiy <- as.double(0.0)
  yiy <- .C("bilinearform_XAY",
            as.double(covinf$lower.inverse),
            as.double(covinf$diag.inverse),
            as.double(as.vector(z)),
            as.double(as.vector(z)),
            as.integer(1),
            as.integer(1),
            as.integer(n),
            res = yiy, PACKAGE = "geoR")$res
  ssresmat <- as.vector(yiy - 2*crossprod(beta.hat,xiy) +  beta.hat %*% xix %*% beta.hat)
  if(.temp.list$method == "ML") {
    loglik <- ( - (n/2) * log(2 * pi) -
               covinf$log.det.to.half -
               0.5 * ssresmat + 
               .temp.list$log.jacobian)
  }
  if(.temp.list$method == "RML") {
    xx.eigen <- eigen(crossprod(.temp.list$xmat), symmetric = TRUE, only.values = TRUE)
    loglik <- ( - ((n - beta.size)/2) * log(2 * pi) +
               0.5 * sum(log(xx.eigen$values)) -
               covinf$log.det.to.half -
               (0.5) * ssresmat -
               choldet +
               .temp.list$log.jacobian)
  }
  return(as.vector(loglik))
}

"likfit.old" <-
  function (geodata, coords=geodata$coords, data=geodata$data, trend = "cte",
            ini, fix.nugget = FALSE, nugget = 0, 
            cov.model = "matern",
            kappa = 0.5, fix.lambda = TRUE, lambda = 1, method = "ML", 
            predicted = FALSE, residuals = FALSE, 
            minimisation.function = c("optim","nlmP", "nlm"),
            automatic.refit = FALSE, range.limits,
            messages = TRUE, ...) 
{
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  if(missing(geodata))
    geodata <- list(coords = coords, data = data)
  call.fc <- match.call()
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave", "power",
                           "powered.exponential", "cauchy", "gneiting",
                           "gneiting.matern", "pure.nugget"))
  if (cov.model=="pure.nugget"){
    if(fix.nugget == TRUE) ini <- rep(0,2)
    else
      if(fix.nugget == TRUE) ini <- rep(0,3)
  }
  if(!is.null(kappa))
    if(cov.model == "matern" & kappa == 0.5)
      cov.model <- "exponential"
  minimisation.function <- match.arg(minimisation.function)
  if(! "package:stats" %in% search()) require(mva)
  ftau <- nugget
  fixtau <- fix.nugget
  coords <- as.matrix(coords)
  dists.vec <- as.vector(dist(coords))
  range.dist <- range(dists.vec)
  max.dist <- max(range.dist)
  min.dist <- min(range.dist)
  if(missing(range.limits)){
    lower.phi <- 0
    upper.phi <- +Inf
  }
  else{
    lower.phi <- range.limits[1]
    upper.phi <- range.limits[2]
  }
  z <- as.vector(data)
  if(fix.lambda) {
    if(lambda == 1) {
      log.jacobian <- 0
    }
    else {
      if(any(z <= 0))
        stop("Transformation option not allowed when there are zeros or negative data"
             )
      if(any(z^(lambda - 1) <= 0))
        log.jacobian <- log(prod(z^(lambda - 1)))
      else log.jacobian <- sum(log(z^(lambda - 1)))
      if(lambda == 0)
        z <- log(z)
      else z <- ((z^lambda) - 1)/lambda
    }
  }
  n <- length(z)
  if ((2*n) != length(coords))
    stop("Number of locations does not match with number of data")
  reduce.pars <- 0
  if (method == "REML" | method == "reml" | method == "rml") 
    method <- "RML"
  if(method == "ML" | method == "ml")
    method <- "ML"
  if(method == "ML" & cov.model == "power")
    stop("\n\"power\" model can only be used with method=\"RML\".\nBe sure that what you want is not \"powered.exponential\"")
  xmat <- unclass(trend.spatial(trend=trend, geodata=geodata))
  if (nrow(xmat) != nrow(coords)) 
    stop("coords and trend have incompatible sizes")
  fit.ols <- lm(z ~ xmat + 0)
  trend.ols <- list(coefficients = fit.ols$coefficients)
  var.z <- sum((fit.ols$residual)^2)/(n-length(fit.ols$coefficients))
  dimnames(xmat) <- list(NULL, NULL)
  txmat <- t(xmat)
  beta.size <- dim(xmat)[2]  
  if(missing(ini) | ini=="default"){
    cat("likfit: no initial values for the parameters was provided. Default initial values will be used\n")
    if(fix.nugget==FALSE) ini <- c(.2*var.z, 0.8*var.z, max.dist/5)
    else ini <- c(0.8*var.z, max.dist/5)
  }    
  if(all(ini==0)){
    cov.model <- "pure.nugget"
    cat("likfit: all initial values equal to zero. Model without spatial correlation will be fitted\n")
  }
  else{
    if(is.matrix(ini)) {
      inilength <- dim(ini)[2]
      if(fixtau == FALSE & inilength != 3)
        stop("wrong number of columns for ini (must be 3)")
      if(fixtau == TRUE & inilength != 2)
        stop("wrong number of columns for ini (must be 2)")
    }
    else {
      inilength <- length(ini)
      if (fixtau == FALSE & inilength != 3) 
        stop("wrong length for ini (must be 3)")
      if (fixtau == TRUE & inilength != 2) 
        stop("wrong length for ini (must be 2)")
    }
  }
  assign(".temp.list", list(z = z, xmat = xmat,  txmat = txmat, fixtau = fixtau, 
                            ftau = ftau, method = method, kappa = kappa,
                            cov.model = cov.model, beta.size = beta.size,
                            lower.phi = lower.phi, 
                            dists.lowertri = dists.vec, var.z = var.z,
                            fix.lambda = fix.lambda, n = n,
                            minimisation.function=minimisation.function), pos=1)
  if(fix.lambda == TRUE) {
    .temp.list$lambda <<- lambda
    .temp.list$log.jacobian <<- log.jacobian
  }
  if ((cov.model == "pure.nugget") | all(ini==0) ){  
    if(messages.screen == TRUE)
      cat("likfit: fitting model without spatial correlation\n")
    lik.results <- likfit.nospatial(.temp.list, ...)
    if (fix.nugget == FALSE)
      temp.pars <- c(lik.results$tausqhat, 0, 0)
    else
      temp.pars <- c(ftau, (lik.results$tausqhat - ftau), 0)        
    lambda <- lik.results$lambda
  }
  else{
    if(is.matrix(ini) | is.data.frame(ini)) {
      ini <- as.matrix(ini)
      if(messages.screen == TRUE)
        cat("likfit: searching for the best initial value\n")
      ini.search <- ini
      if(fix.nugget == TRUE)
        ini.search <- cbind(nugget, ini.search)
      if(length(lambda) == 1)
        ini.search <- cbind(ini.search, lambda)
      else {
        temp <- ini.search
        for(i in 1:(length(lambda) - 1)) {
          ini.search <- rbind(ini.search, temp)
        }
        ini.search <- cbind(ini.search, rep(lambda, each = dim(
                                                      temp)[1]))
      }
      dimnames(ini.search) <- list(NULL, NULL)
      loglik.ini <- round(100000000. * apply(ini.search, 1, 
                                             loglik.spatial))
      ini.max <- as.vector(ini.search[loglik.ini == max(loglik.ini),
                                      ])
      if(fixtau == TRUE) {
        ini <- as.vector(ini.max[2:3])
        if(minimisation.function == "nlmP" & ini[2] == 0)
          ini[2] <- min(ini.search[(ini.search[,3] != 0),3])
      }
      else {
        ini <- as.vector(ini.max[1:3])
        if(minimisation.function == "nlmP" & ini[3] == 0)
          ini[3] <- min(ini.search[(ini.search[,3] != 0),3])
        
      }
      if(messages.screen == TRUE) {
        cat("likfit: best initial value:\n")
        names(ini.max) <- c("nugget", "sill", "range", "lambda"
                            )
        print(ini.max)
      }
      if(fix.lambda == FALSE)
        lambda <- as.vector(ini.max[4])
    }
    if (messages.screen == TRUE) {
      cat(paste("likfit: Initialising likelihood maximisation using the function", minimisation.function, "\n"))
      cat("------------------------------------------------------------\n")
      cat("likfit: consider providing additional (non-default) arguments for the minimisation function.\n")
      if(minimisation.function == "nlm" | minimisation.function == "nlmP"){
        cat("likfit: some relevant arguments are: iterlim, steptol, stepmax, print.level, ndigit. For more details see documentation for the function nlm.\n")
      }
      if(minimisation.function == "optim"){
      cat("likfit: parameters can be passed to the function optim using the argument control(). For more details see documentation for the function optim.\n")
    }
      cat("likfit: it is highly advisable to run the function several times with different initial values for the parameters (argument ini).\n")
      cat("------------------------------------------------------------\n")
      ## Truncate `x` to `decimal` significant digits (or, when
      ## `fixed = TRUE`, to `decimal` digits relative to 10^0).
      ## Entries that are zero or non-finite pass through unchanged.
      "nice" <-
        function (x, decimal = 2, fixed = FALSE) 
          {
            out <- x
            keep <- (x != 0) & is.finite(x)
            ## log(.)/log(10) kept (rather than log10) so that floor()
            ## behaves identically at powers of ten.
            if (fixed) {
              mag <- 0
            } else {
              mag <- floor(log(abs(x[keep]))/log(10))
            }
            scale.fac <- 10^(mag - decimal)
            out[keep] <- trunc(x[keep]/scale.fac) * scale.fac
            return(out)
          }
      cat(paste("likfit: range of values allowed for the parameter:", nice(lower.phi), "to", nice(upper.phi), "\n"))
      cat("likfit: WARNING: This step can be time demanding!\n")
      cat("\n")
    }
    if (fixtau == FALSE | ftau != 0) {
      if (fixtau == TRUE & ftau != 0) {
                                        #        if (messages.screen == TRUE)
                                        #          print("covariance parameters used in the minimization function are $\sigma^2$ and $\phi$")
        if(minimisation.function == "nlm") assign(".temp.lower", c(0, lower.phi), pos=1)
        if(fix.lambda == TRUE) {
          assign(".temp.lower", c(0, lower.phi), pos=1)
          if(minimisation.function == "nlm"){
            lik.results <- nlm(proflik.ftau, ini, ...)
            if(exists(".temp.sill")){
              lik.results$estimate[1] <- .temp.sill
              remove(".temp.sill", pos=1)
            }
            if(exists(".temp.phi")){
              lik.results$estimate[2] <- .temp.phi
              remove(".temp.phi", pos=1)
            }
            rm(.temp.lower, inherits = TRUE, pos=1)
          }
          if(minimisation.function == "nlmP"){
            assign(".ind.prof.phi", 2, pos=1)
            lik.results <- nlmP(proflik.ftau, ini, lower=c(0, lower.phi), upper=c(10000*var.z, upper.phi), ...)
          }            
          if(minimisation.function == "optim"){
            lik.results <- optim(ini, proflik.ftau, method="L-BFGS-B", lower=c(0, lower.phi), upper=c(10000*var.z, upper.phi), ...)
            lik.results$estimate <- lik.results$par
          }            
        }
        else{
          if(minimisation.function == "nlm"){
            assign(".temp.lower", c(0, lower.phi), pos=1)
            assign(".temp.lower.lambda", -2, pos=1)
            assign(".temp.upper.lambda", 2, pos=1)
            lik.results <- nlm(proflik.ftau, c(ini,lambda), ...)
            if(exists(".temp.sill")){
              lik.results$estimate[1] <- .temp.sill
              remove(".temp.sill", pos=1)
            }
            if(exists(".temp.phi")){
              lik.results$estimate[2] <- .temp.phi
              remove(".temp.phi", pos=1)
            }
            if(exists(".temp.lambda")){
              lambda <- .temp.lambda
              remove(".temp.lambda", pos=1)
            }
            else{
              lambda <- lik.results$estimate[3]
            }
            rm(.temp.lower, .temp.lower.lambda, .temp.upper.lambda, inherits = TRUE, pos=1)
          }
          if(minimisation.function == "nlmP"){
            assign(".ind.prof.phi", 2, pos=1)
            lik.results <- nlmP(proflik.ftau, c(ini,lambda), lower = c(0, lower.phi, -2), upper = c(10000*var.z, upper.phi, 2), ...)
            lambda <- lik.results$estimate[3]
          }
          if(minimisation.function == "optim"){
            lik.results <- optim(c(ini,lambda), proflik.ftau, method="L-BFGS-B", lower = c(0, lower.phi, -2), upper = c(10000*var.z, upper.phi, 2), ...)
            lik.results$estimate <- lik.results$par              
            lambda <- lik.results$estimate[3]
          }
          lik.results$estimate <- as.vector(lik.results$estimate[1:2])
          if(lambda == 0)
            z <- log(as.vector(data))
          else z <- (((as.vector(data))^lambda) - 1)/
            lambda
        }        
        lik.results$estimate <- temp.pars <- as.vector(c(ftau, lik.results$estimate))
      }
      if (fixtau == FALSE) {
                                        #        if (messages.screen == TRUE) 
                                        #          print("parameters used in the minimization function are the ratio (tau^2/sigma^2) and $\phi$")
        ini.m <- c(ini[1]/ini[2], ini[3])
        if(fix.lambda == TRUE) {
          if (minimisation.function=="nlm"){
            assign(".temp.lower", c(0, lower.phi), pos=1)
            lik.results <- nlm(proflik.nug, ini.m, ...) 
            if(exists(".temp.nugget")){
              lik.results$estimate[1] <- .temp.nugget
              remove(".temp.nugget", pos=1)
            }
            if(exists(".temp.phi")){
              lik.results$estimate[2] <- .temp.phi
              remove(".temp.phi", pos=1)
            }
            rm(.temp.lower, inherits = TRUE, pos=1)
          }
          if (minimisation.function=="nlmP"){
            if(ini.m[1] == 0) ini.m[1] <- 0.05
            assign(".ind.prof.phi", 2, pos=1)
            lik.results <- nlmP(proflik.nug, ini.m, lower=c(0, lower.phi), upper=c(100, upper.phi),...) 
          }
          if (minimisation.function=="optim"){
            lik.results <- optim(ini.m, proflik.nug, method="L-BFGS-B", lower=c(0, lower.phi), upper=c(100, upper.phi),...) 
            lik.results$estimate <- lik.results$par              
          }
        }
        else{
          if (minimisation.function=="nlm"){
            assign(".temp.lower", c(0, lower.phi), pos=1)
            assign(".temp.lower.lambda", -2, pos=1)
            assign(".temp.upper.lambda", 2, pos=1)
            lik.results <- nlm(proflik.nug, c(ini.m, lambda), ...)
            if(exists(".temp.nugget")){
              lik.results$estimate[1] <- .temp.nugget
              remove(".temp.nugget", pos=1)
            }
            if(exists(".temp.phi")){
              lik.results$estimate[2] <- .temp.phi
              remove(".temp.phi", pos=1)
            }
            if(exists(".temp.lambda")){
              lambda <- .temp.lambda
              remove(".temp.lambda", pos=1)
            }
            else{
              lambda <- lik.results$estimate[3]
            }
            rm(.temp.lower, .temp.lower.lambda,  .temp.upper.lambda, inherits = TRUE, pos=1)
          }
          if (minimisation.function=="nlmP"){
            assign(".ind.prof.phi", 2, pos=1)
            lik.results <- nlmP(proflik.nug, c(ini.m, lambda), lower=c(0, lower.phi, -2), upper=c(100, upper.phi, 2),...)
            lambda <- lik.results$estimate[3]
          }
          if (minimisation.function=="optim"){
            lik.results <- optim(c(ini.m, lambda), proflik.nug, method="L-BFGS-B", lower=c(0, lower.phi, -2), upper=c(100, upper.phi, 2),...)
            lik.results$estimate <- lik.results$par              
            lambda <- lik.results$estimate[3]
          }            
          lik.results$estimate <- as.vector(lik.results$estimate[1:2])
          if(lambda == 0)
            z <- log(as.vector(data))
          else z <- (((as.vector(data))^lambda) - 1)/
            lambda
        }
        if(messages.screen == TRUE) {
          if(minimisation.function == "nlm" | minimisation.function == "nlmP") 
          if(minimisation.function == "optim") cat(paste("likfit: optim convergence code: ",lik.results$convergence, "\n"))
        }
        if(automatic.refit == TRUE & (lik.results$estimate[1] < 0.01)) {
          if (messages.screen == TRUE)
            cat(paste("likfit: WARNING: ratio of estimates tau^2/sigma^2 < 0.01 (",round(lik.results$estimate[1], dig = 4), ")", sep = ""))
          cat("\n")
          reduce.pars <- 1
          .temp.list$ftau <<- 0
          .temp.list$fixtau <<- TRUE
          if(fix.lambda == TRUE) {
            if (minimisation.function=="nlm"){
              assign(".temp.lower.phi", lower.phi, pos=1)
              lik.results <- nlm(proflik.phi, ini[3],  ...)
              if(exists(".temp.phi")){
                lik.results$estimate <- .temp.phi
                remove(".temp.phi", pos=1)
              }
              rm(.temp.lower, inherits = TRUE, pos=1)
            }
            if (minimisation.function=="nlmP"){
              assign(".ind.prof.phi", 1, pos=1)
              lik.results <- nlmP(proflik.phi, ini[3],  lower=lower.phi, upper=upper.phi,...)
            }
            if (minimisation.function=="optim"){
              lik.results <- optim(ini[3], proflik.phi, method="L-BFGS-B",  lower=lower.phi, upper=upper.phi,...)
              lik.results$estimate <- lik.results$par  
            }
          }
          else {
            if (minimisation.function=="nlm"){
              assign(".temp.lower.phi", lower.phi, pos=1)
              assign(".temp.lower.lambda", -2, pos=1)
              assign(".temp.upper.lambda", 2, pos=1)
              lik.results <- nlm(proflik.phi, c(ini[3], lambda), ...)
              if(exists(".temp.lambda")){
                lambda <- .temp.lambda
                remove(".temp.lambda", pos=1)
              }
              else{
                lambda <- lik.results$estimate[2]
              }
              if(exists(".temp.phi")){
                lik.results$estimate <- .temp.phi
                remove(".temp.phi", pos=1)
              }
              else{
                lik.results$estimate <- as.vector(lik.results$estimate[1])
              }
              rm(.temp.lower.phi, .temp.lower.lambda, .temp.upper.lambda, inherits = TRUE, pos=1)
            }
            if (minimisation.function=="nlmP"){
              lik.results <- nlmP(proflik.phi, c(ini[3], lambda), lower=c(lower.phi, -2), upper=c(upper.phi, 2),...)
              lambda <- lik.results$estimate[2]
              lik.results$estimate <- lik.results$estimate[1]
            }
            if (minimisation.function=="optim"){
              lik.results <- optim(c(ini[3], lambda), proflik.phi, method="L-BFGS-B", lower=c(lower.phi, -2), upper=c(upper.phi, 2),...)
              lik.results$estimate <- lik.results$par  
              lambda <- lik.results$estimate[2]
              lik.results$estimate <- lik.results$estimate[1]
            }
            if(lambda == 0)
              z <- log(as.vector(data))
            else z <- (((as.vector(data))^lambda) -
                       1)/lambda
          }
          if (messages.screen == TRUE)        
            cat("likfit: model re-fitted without nugget effect (tausq = 0)\n")
          lik.results$estimate <- as.vector(c(0, lik.results$estimate))
          if(messages.screen == TRUE) {
            if(minimisation.function == "nlm" | minimisation.function == "nlmP") cat(paste("likfit: nlm optimisation code: ",lik.results$code,"\n"))
            if(minimisation.function == "optim") cat(paste("likfit: optim convergence code: ",lik.results$convergence,"\n"))
          }
        }          
        nugget.rel <- lik.results$estimate[1]
        if (lik.results$estimate[2] < 1e-08)
          icovhat <- diag(n)
        else
          icovhat <- varcov.spatial(coords = coords, cov.model = 
                                    cov.model, kappa = kappa, nugget = nugget.rel,
                                    cov.pars = c(1, lik.results$estimate[
                                      2]), inv = TRUE, det = FALSE)$inverse
        txiv <- crossprod(xmat, icovhat)
        sigmasqhat <- (z %*% (icovhat - crossprod(txiv,solve(txiv %*% xmat)) %*% txiv) %*% z)/n
        if(method == "RML") sigmasqhat <- sigmasqhat * n/(n-beta.size)
        nuggethat <- lik.results$estimate[1] * sigmasqhat
        lik.results$estimate <- temp.pars <- as.vector(c(nuggethat, sigmasqhat, lik.results$estimate[2]))
      }
      lik.results$estimate <- as.vector(lik.results$estimate)
      if((automatic.refit == TRUE & (lik.results$estimate[3] <= lower.phi)) | lik.results$estimate[3] < 1e-12) {
        if (messages.screen == TRUE){
          cat("likfit: WARNING: phi estimate < minimum value allowed\n")
          cat("likfit: model re-fitted without spatial correlation (phi=0)\n")
        }
        reduce.pars <- 2
        lik.results <- likfit.nospatial(.temp.list, ...)
        lambda <- lik.results$lambda
        if(fix.nugget == TRUE) {
          lik.results$parameters <- temp.pars <-
            as.vector(c(ftau, (lik.results$tausqhat - ftau), 0))
        }
        else {
          lik.results$parameters <- temp.pars <-
            as.vector(c(lik.results$tausqhat, 0, 0))
        }
      }
    }
    else {
                                        # case 3: parameters = $(\sigma^2, \phi)$ ; fixed nugget: tau^2= 0$
      ini.m <- ini[2]
      if(fix.lambda == TRUE) {
        if (minimisation.function=="nlm"){
          assign(".temp.lower.phi", lower.phi, pos=1)
          lik.results <- nlm(proflik.phi,ini.m,   ...)
          if(exists(".temp.phi")){
            lik.results$estimate <- .temp.phi
            remove(".temp.phi", pos=1)
          }
          rm(.temp.lower, inherits = TRUE, pos=1)
        }
        if (minimisation.function=="nlmP"){
          assign(".ind.prof.phi", 1, pos=1)
          lik.results <- nlmP(proflik.phi,ini.m, lower=lower.phi, upper=upper.phi,...)
        }
        if (minimisation.function=="optim"){
          lik.results <- optim(ini.m, proflik.phi, method="L-BFGS-B", lower=lower.phi, upper=upper.phi,...)
          lik.results$estimate <- lik.results$par
        }
      }
      else {
        if (minimisation.function=="nlm"){
          assign(".temp.lower.phi", lower.phi, pos=1)
          assign(".temp.lower.lambda", -2, pos=1)
          assign(".temp.upper.lambda", 2, pos=1)
          lik.results <- nlm(proflik.phi, c(ini.m, lambda), ...)
          if(exists(".temp.lambda")){
            lambda <- .temp.lambda
            remove(".temp.lambda", pos=1)
          }
          else{
            lambda <- lik.results$estimate[2]
          }
          if(exists(".temp.phi")){
            lik.results$estimate <- .temp.phi
            remove(".temp.phi", pos=1)
          }
          else{
            lik.results$estimate <- as.vector(lik.results$estimate[1])
          }
          rm(.temp.lower.phi, .temp.lower.lambda, .temp.upper.lambda, inherits = TRUE, pos=1)
        }
        if (minimisation.function=="nlmP"){
          assign(".ind.prof.phi", 1, pos=1)
          lik.results <- nlmP(proflik.phi, c(ini.m, lambda), lower=c(lower.phi, -2), upper=c(upper.phi, 2),...)
          lambda <- as.vector(lik.results$estimate[2])
          lik.results$estimate <- as.vector(lik.results$estimate[1])
        }
        if (minimisation.function=="optim"){
          lik.results <- optim(c(ini.m, lambda), proflik.phi, method="L-BFGS-B", lower=c(lower.phi, -2.5), upper=c(upper.phi, 2.5),...)
          lik.results$estimate <- lik.results$par        
          lambda <- as.vector(lik.results$estimate[2])
          lik.results$estimate <- as.vector(lik.results$estimate[1])
        }
        if(lambda == 0)
          z <- log(as.vector(data))
        else z <- (((as.vector(data))^lambda) - 1)/lambda
      }    
      if(messages.screen == TRUE) {
        if(minimisation.function == "nlm" | minimisation.function == "nlmP")
          cat(paste("likfit: nlm optimisation code: ",lik.results$code, "\n"))
        if(minimisation.function == "optim") cat(paste("likfit: optim convergence code: ",lik.results$convergence, "\n"))
      }      
      if(automatic.refit == TRUE & (lik.results$estimate <= lower.phi)) {
        if (messages.screen == TRUE) {
          cat("likfit: WARNING: phi estimate < minimum value allowed\n")
          cat("likfit: model without spatial correlation was fitted (phi=0 and sigma^2=0)\n")
        }
        reduce.pars <- 1
        lik.results <- likfit.nospatial(.temp.list, ...)
        lambda <- lik.results$lambda
        if(fix.nugget == TRUE) {
          lik.results$parameters <- temp.pars <-
            as.vector(c(ftau, (lik.results$tausqhat - ftau), 0))
        }
        else {
          lik.results$parameters <- temp.pars <-
            as.vector(c(lik.results$tausqhat, 0, 0))
        }
      }
      else {
        if(lik.results$estimate < 1e-08)
          icovhat <- diag(n)
        else
          icovhat <- varcov.spatial(coords = coords, cov.model = 
                                    cov.model, kappa = kappa,
                                    nugget = 0, cov.pars
                                    = c(1, lik.results$estimate),
                                    inv = TRUE, det = FALSE)$inverse
        txiv <- crossprod(xmat, icovhat)
        sigmasqhat <- (z %*% (icovhat - crossprod(txiv, solve(txiv %*% xmat
                                                              ) %*% txiv)) %*% z)/n
        if(method == "RML") sigmasqhat <- sigmasqhat * n/(n-beta.size)
        temp.pars <- as.vector(c(0, sigmasqhat, lik.results$estimate))
        lik.results$estimate <- as.vector(c(0,sigmasqhat, lik.results$estimate))
      }
    }
  }
  if(messages.screen == TRUE) {
    cat("likfit: end of likelihood maximisation\n")
  }
  if(any(temp.pars < 0)){
    temp.pars <- round(temp.pars, dig=14)
    lik.results$estimate <- round(lik.results$estimate, dig=14)
  }
  if(minimisation.function == "optim") lik.results$minimum <- lik.results$value
  loglik <- -lik.results$minimum
  npars <- length(trend.ols$coefficients) + length(ini) - reduce.pars
  if (fix.lambda == FALSE) npars <- npars + 1
  AIC <- loglik - npars
  BIC <- loglik - 0.5 * log(n) * npars
  if (messages.screen == TRUE) 
    cat("likfit: computing the beta estimate\n")
  if(any(temp.pars[2:3]) != 0)
    invcov <- varcov.spatial(coords = coords, cov.model = cov.model, 
                             kappa = kappa, nugget = temp.pars[1],
                             cov.pars = temp.pars[2:3], 
                             inv = TRUE, det = FALSE)$inverse
  else invcov <- diag((1/temp.pars[1]), n)
  txmatinvcov <- crossprod(xmat, invcov)
  beta <- solve(txmatinvcov %*% xmat) %*% txmatinvcov %*% z
  beta.var <- solve(txmatinvcov %*% xmat)
  if (residuals == TRUE | predicted == TRUE) {
    cat("likfit: computing predicted values and residuals\n")
    trend.est <- as.vector(xmat %*% beta)
    residuals.trend <- as.vector(z - trend.est)
    covmat.signal <- varcov.spatial(coords = coords, cov.model = cov.model, 
                                    kappa = kappa, nugget = 0,
                                    cov.pars = temp.pars[2:3])$varcov
    signal.est <- as.vector(covmat.signal %*% invcov %*% 
                            residuals.trend)
    predict.est <- trend.est + signal.est
    residuals.est <- as.vector(z - predict.est)
    residuals.std <- as.vector(invcov %*% residuals.est)
    residuals.trend.std <- as.vector(invcov %*% residuals.trend)
    s2.trend <- (crossprod(residuals.trend,invcov) %*% residuals.trend)/(n - 
                                                                         length(beta))
    s2 <- (crossprod(residuals.est,invcov) %*% residuals.est)/(n - 
                                                               length(beta))
  }
  if (messages.screen == TRUE) 
    cat("likfit: preparing output\n")
  results <- list()
  results$cov.model <- cov.model
  results$nugget <- temp.pars[1]
  results$cov.pars <- as.vector(c(sigmasq = temp.pars[2], phi = temp.pars[3]))
  if (is.null(kappa))
    results$kappa <- "not used"
  else
    results$kappa <- kappa
  results$beta <- as.vector(beta)
  results$beta.var <- beta.var
  if (length(results$beta.var) == 1)
    results$beta.var <- as.vector(results$beta.var)
  if (length(results$beta) > 1){
    if(inherits(trend, "formula"))
      beta.names <- c("intercept", paste("covar", 1:(ncol(xmat)-1), sep = ""))
    else
      if (trend == "1st")
        beta.names <- c("1", "x", "y")
      else
        if (trend == "2nd")
          beta.names <- c("1", "x", "y", "x2", "xy", "y2")
    names(results$beta) <- beta.names
  }
  results$lambda <- lambda
  results$loglik <- loglik
  results$npars <- npars
  results$AIC <- AIC
  results$BIC <- BIC
  results$trend.ols <- as.vector(trend.ols$coefficients)
  names(results$trend.ols) <- names(results$beta)
  if (residuals == TRUE) {
    results$s2 <- s2
    results$s2.trend <- s2.trend
  }
  if (predicted == TRUE) 
    results$predicted <- cbind(predicted = predict.est, trend.est = trend.est, 
                               signal.est = signal.est)
  if (residuals == TRUE) 
    results$residuals <- round(cbind(residuals = residuals.est, 
                                     resid.trend = residuals.trend, resid.std = residuals.std, 
                                     resid.trend.std = residuals.trend.std), dig = 12)
  if(fix.lambda == FALSE) {
    if(lambda == 1) {
      log.jacobian <- 0
    }
    else {
      if(any(data^(lambda - 1) <= 0))
        log.jacobian <- log(prod(data^(lambda - 1)))
      else log.jacobian <- sum(log(data^(lambda - 1)))
    }
  }
  results$info.lambda <- list(fix.lambda = fix.lambda, log.jacobian = 
                              log.jacobian)
  lik.results$estimate <- NULL
  lik.results$aux <- NULL
  lik.results$minimum <- NULL
  results$method <- method
  results$info <- lik.results
  results$max.dist <- max.dist
  results$trend.matrix <- xmat
  results$call <- call.fc
  class(results) <- "variomodel"
  if(messages.screen == TRUE){
    cat("likfit: estimated model parameters are:\n")
    cat(paste("covariance model:", cov.model))
    if(cov.model == "matern" | cov.model == "powered.exponential" | 
       cov.model == "cauchy" | cov.model == "gneiting.matern")
      cat(paste(" with kappa =", kappa))
    if(!is.null(kappa))
      if(cov.model == "matern" & kappa == 0.5)
        cat(" (exponential)")
    cat("\n")
    print(c(nugget=results$nugget, sill=results$cov.pars[1], range=results$cov.pars[2]))
    if (fix.lambda == FALSE)
      cat(paste("Box-Cox transformation parameter:", round(results$lambda, dig=4),"\n"))
    if((results$cov.pars[1] < (0.01 * (results$nugget + results$cov.pars[1])))& results$cov.pars[2] > 0)
      cat("\nWARNING: estimated sill is less than 1 hundredth of the total variance. Consider re-examine the model excluding spatial dependence\n" )      
    if((results$cov.pars[2] > (10 * max.dist)) & results$cov.pars[1] > 0 )
      cat("\nWARNING: estimated range is more than 10 times bigger than the biggest distance between two points. Consider re-examine the model:\n 1) excluding spatial dependence if estimated sill is too low and/or \n 2) taking trends (covariates) into account\n" ) 
    if(((results$cov.pars[2] < (0.1 * min.dist)) & (results$cov.pars[1] > 0)) & results$cov.pars[2] > 0)
      cat("\nWARNING: estimated range is less than 1 tenth of the minimum distance between two points. Consider re-examine the model excluding spatial dependence\n" ) 
  }
  remove(".temp.list", pos=1)
  return(results)
}

"proflik.ftau" <-
  function (theta) 
{
  ## Negative (profile) log-likelihood for the spatial model with the
  ## nugget held fixed at .temp.list$ftau.
  ##
  ## theta is either c(sigmasq, phi) or, when the Box-Cox parameter is
  ## also estimated, c(sigmasq, phi, lambda).  Data and settings are
  ## read from the global list `.temp.list` set up by likfit().  With
  ## the "nlm" minimiser the bounds stored in `.temp.lower` (and
  ## `.temp.lower.lambda` / `.temp.upper.lambda`) are enforced through a
  ## penalty added to the returned value, and active-bound solutions are
  ## reported back via the globals `.temp.sill`, `.temp.phi`,
  ## `.temp.lambda`.
  ##
  ## Fixes relative to the previous version:
  ##  - `penalty` is initialised, so an invalid `theta` no longer fails
  ##    with "object 'penalty' not found" on the penalised (nlm) return;
  ##  - `!is.finite(theta)` also rejects -Inf (the old `theta == Inf`
  ##    comparison only caught +Inf);
  ##  - the upper-bound penalty for lambda had an inverted sign (it
  ##    *rewarded* violations); it now mirrors the lower-bound term;
  ##  - the data positivity check uses `z <= 0`, matching both the error
  ##    message and the identical check used elsewhere in likfit.
  penalty <- 0
  if (any(!is.finite(theta)))
    neglik <- 1e+32
  else{
    if(length(theta) == 3) include.lambda <- TRUE else include.lambda <- FALSE 
    if(.temp.list$minimisation.function == "nlm"){
      ## Drop boundary markers from a previous evaluation; they are
      ## re-created below only if a bound is active at this `theta`.
      if (exists(".temp.phi", where=1)) remove(".temp.phi", pos=1, inherits = TRUE)
      if (exists(".temp.lambda", where=1)) remove(".temp.lambda", pos=1, inherits = TRUE)
      if (exists(".temp.sill", where=1)) remove(".temp.sill", pos=1, inherits = TRUE)
      theta.minimiser <- theta
      ## Project (sigmasq, phi) onto their lower bounds and penalise the
      ## amount of violation, pushing nlm back into the feasible region.
      penalty <- 10000 * sum(.temp.lower - pmin(theta[1:2], .temp.lower))
      theta[1:2] <- pmax(theta[1:2], .temp.lower)
      if (theta.minimiser[1] <  .temp.lower[1])
        assign(".temp.sill", theta[1], pos=1)
      if (theta.minimiser[2] < 1.001 * .temp.lower[2])
        assign(".temp.phi", theta[2], pos=1)
      if (include.lambda){
        lambda <- theta[3]
        penalty <- penalty + 1000 * (.temp.lower.lambda - min(lambda, .temp.lower.lambda))
        lambda <- max(lambda, .temp.lower.lambda)
        ## Sign fixed: positive penalty when lambda exceeds the upper
        ## bound (the old code subtracted the violation instead).
        penalty <- penalty + 1000 * (max(lambda, .temp.upper.lambda) - .temp.upper.lambda)
        lambda <- min(lambda, .temp.upper.lambda)
        if (round(1000 * theta.minimiser[3]) <= round(1000 * .temp.lower.lambda))
          assign(".temp.lambda", lambda, pos=1)
        if (round(1000 * theta.minimiser[3]) >= round(1000 * .temp.upper.lambda))
          assign(".temp.lambda", lambda, pos=1)
      }
    }
    else{
      if (include.lambda) lambda <- theta[3]
    }
    z <- .temp.list$z
    n <- length(z)
    if (include.lambda){
      ## Box-Cox transform the data for the current lambda, storing the
      ## log-Jacobian of the transformation in the global `.temp.list`.
      if(lambda == 1) {
        .temp.list$log.jacobian <<- 0
      }
      else {
        ## `<= 0`: the transform is undefined for zeros as well as
        ## negatives, as the message (and the same check in likfit) says.
        if(any(z <= 0))
          stop("Transformation option not allowed when there are zeros or negative data"
               )
        ## log(prod(.)) guards the degenerate non-positive-term case;
        ## otherwise sum(log(.)) is the numerically safer form.
        if(any(z^(lambda - 1) <= 0))
          .temp.list$log.jacobian <<- log(prod(z^(lambda - 1)))
        else .temp.list$log.jacobian <<- sum(log(z^(lambda - 1)))
        if(lambda == 0)
          z <- log(z)
        else z <- ((z^lambda) - 1)/lambda
      }
    }
    beta.size <- .temp.list$beta.size
    kappa <- .temp.list$kappa
    ftau <- .temp.list$ftau
    sigmasq <- theta[1]
    sill.total <- ftau + sigmasq
    phi <- theta[2]
    ## Inverse covariance factors (lower triangle + diagonal separately)
    ## and the log-determinant term for the current (sigmasq, phi).
    covinf <- varcov.spatial(dists.lowertri = .temp.list$dists.lowertri,
                             cov.model = .temp.list$cov.model, kappa = kappa, 
                             nugget = ftau, cov.pars = c(sigmasq, phi), 
                             det = TRUE, func.inv = "eigen",
                             only.inv.lower.diag = TRUE)
    ## xix = t(X) %*% V^{-1} %*% X  (beta.size x beta.size), via C code.
    xix <- as.double(rep(0, beta.size*beta.size))
    xix <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(.temp.list$xmat)),
              as.double(as.vector(.temp.list$xmat)),
              as.integer(beta.size),
              as.integer(beta.size),
              as.integer(n),
              res = xix, PACKAGE = "geoR")$res
    attr(xix, "dim") <- c(beta.size, beta.size)
    if(length(as.vector(xix)) == 1) {
      ixix <- 1/xix
      choldet <- 0.5 * log(xix)
    }
    else {
      chol.xix <- chol(xix)
      ixix <- chol2inv(chol.xix)
      choldet <- sum(log(diag(chol.xix)))
    }
    ## xiy = t(X) %*% V^{-1} %*% y, giving the GLS estimate beta.hat.
    xiy <- as.double(rep(0, beta.size))
    xiy <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(.temp.list$xmat)),
              as.double(as.vector(z)),
              as.integer(beta.size),
              as.integer(1),
              as.integer(n),
              res = xiy, PACKAGE = "geoR")$res
    beta.hat <- as.vector(ixix %*% xiy)
    ## yiy = t(y) %*% V^{-1} %*% y.
    yiy <- as.double(0.0)
    yiy <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(z)),
              as.double(as.vector(z)),
              as.integer(1),
              as.integer(1),
              as.integer(n),
              res = yiy, PACKAGE = "geoR")$res
    ## Residual sum of squares in the V^{-1} metric.
    ssresmat <- as.vector(yiy - crossprod(beta.hat,xiy))
    if(.temp.list$method == "ML") {
      neglik <- ((n/2) * log(2 * pi) +
                 covinf$log.det.to.half +
                 0.5 * ssresmat -
                 .temp.list$log.jacobian
                 )
    }
    if(.temp.list$method == "RML") {
      ## Restricted likelihood adds determinant terms involving the
      ## trend matrix X.
      xx.eigen <- eigen(crossprod(.temp.list$xmat), symmetric = TRUE, only.values = TRUE)
      neglik <- (((n - beta.size)/2) * log(2 * pi) +
                 covinf$log.det.to.half +
                 0.5 * ssresmat +
                 choldet -
                 0.5 * sum(log(xx.eigen$values)) -
                 .temp.list$log.jacobian
                 )
    }
  }
  ## Under "nlm" bound violations are folded into the objective; the
  ## box-constrained minimisers handle bounds themselves.
  if(.temp.list$minimisation.function == "nlm")
    return(as.vector(neglik + penalty))
  else
    return(as.vector(neglik))
}
"proflik.lambda" <-
function(lambda)
{
  if (any(is.na(lambda)) | any(lambda==Inf) | any(is.nan(lambda)))
    neglik <- 1e+32
  else{
    if(.temp.list$minimisation.function == "nlm"){
      if (exists(".temp.lambda", w=1)) remove(".temp.lambda", pos=1, inherits = TRUE)
      lambda.minimiser <- lambda
      penalty <-  1000 * (.temp.lower.lambda - min(lambda, .temp.lower.lambda))
      lambda <- max(lambda, .temp.lower.lambda)
      penalty <- penalty + 1000 * (.temp.upper.lambda - max(lambda, .temp.upper.lambda))
      lambda <- min(lambda, .temp.upper.lambda)
      if (round(1000 * lambda.minimiser) <= round(1000 * .temp.lower.lambda))
        assign(".temp.lambda", lambda, pos=1)
      if (round(1000 * lambda.minimiser) >= round(1000 * .temp.upper.lambda))
        assign(".temp.lambda", lambda, pos=1)
    }
    z <- .temp.list$z
    n <- .temp.list$n
    if(lambda == 1) {
      .temp.list$log.jacobian <- 0
    }
    else {
      if(any(z < 0))
        stop("Transformation option not allowed when there are zeros or negative data"
             )
      if(any(z^(lambda - 1) <= 0))
        .temp.list$log.jacobian <- log(prod(z^(lambda - 1)))
      else .temp.list$log.jacobian <- sum(log(z^(lambda - 1)))
      if(lambda == 0)
        z <- log(z)
      else z <- ((z^lambda) - 1)/lambda
    }
    beta.size <- .temp.list$beta.size
    kappa <- .temp.list$kappa
    xmat <- .temp.list$xmat
    txmat <- .temp.list$txmat
    ixx <- solve(crossprod(xmat))
    tausqhat <- (z %*% (diag(n) - xmat %*% ixx %*% txmat) %*% z)/n
    if(.temp.list$method == "ML")
      neglik <- ((n/2) * log(2 * pi) +
                 (n/2) * log(tausqhat) +
                 (n/2) -
                 .temp.list$log.jacobian
                 )
    if(.temp.list$method == "RML") {
      eigentrem <- eigen(ixx, symmetric = TRUE, only.values = TRUE)
      neglik <- (((n - beta.size)/2) * log(2 * pi) +
                 ((n - beta.size)/2) * log(tausqhat) +
                 (n/2) -
                 0.5 * sum(log(eigentrem$values)) -
                 .temp.list$log.jacobian
                 )
    }
  }
  if(.temp.list$minimisation.function == "nlm")
    return(as.vector(neglik + penalty))
  else
    return(as.vector(neglik))
}

"proflik.nug" <-
  function (theta) 
{
  if (any(is.na(theta)) | any(theta==Inf) | any(is.nan(theta)))
    neglik <- 1e+32
  else{
    if(length(theta) == 3) include.lambda <- TRUE else include.lambda <- FALSE 
    if(.temp.list$minimisation.function == "nlm"){
      if (exists(".temp.phi", w=1)) remove(".temp.phi", pos=1, inherits = TRUE)
      if (exists(".temp.lambda", w=1)) remove(".temp.lambda", pos=1, inherits = TRUE)
      if (exists(".temp.nugget", w=1)) remove(".temp.nugget", pos=1, inherits = TRUE)
      theta.minimiser <- theta
      penalty <- 10000 * sum(.temp.lower - pmin(theta[1:2], .temp.lower))
      theta[1:2] <- pmax(theta[1:2], .temp.lower)
      if (theta.minimiser[1] <  .temp.lower[1])
        assign(".temp.nugget", theta[1], pos=1)
      if (theta.minimiser[2] < 1.001 * .temp.lower[2])
        assign(".temp.phi", theta[2], pos=1)
      if (include.lambda){
        lambda <- theta[3]
        penalty <- penalty + 1000 * (.temp.lower.lambda - min(lambda, .temp.lower.lambda))
        lambda <- max(lambda, .temp.lower.lambda)
        penalty <- penalty + 1000 * (.temp.upper.lambda - max(lambda, .temp.upper.lambda))
        lambda <- min(lambda, .temp.upper.lambda)
        if (round(1000 * theta.minimiser[3]) <= round(1000 * .temp.lower.lambda))
          assign(".temp.lambda", lambda, pos=1)
        if (round(1000 * theta.minimiser[3]) >= round(1000 * .temp.upper.lambda))
          assign(".temp.lambda", lambda, pos=1)
      }
    }
    else{
      if(include.lambda) lambda <- theta[3]
    }
    z <- .temp.list$z
    n <- .temp.list$n
    if(include.lambda){
      if(lambda == 1) {
        .temp.list$log.jacobian <<- 0
      }
      else {
        if(any(z < 0))
          stop("Transformation option not allowed when there are zeros or negative data"
               )
        if(any(z^(lambda - 1) <= 0))
          .temp.list$log.jacobian <<- log(prod(z^(lambda - 1)))
        else .temp.list$log.jacobian <<- sum(log(z^(lambda - 1)))
        if(lambda == 0)
          z <- log(z)
        else z <- ((z^lambda) - 1)/lambda
      }
    }
    beta.size <- .temp.list$beta.size
    kappa <- .temp.list$kappa
    tausq.rel <- theta[1]
    phi <- theta[2]
    covinf <- varcov.spatial(dists.lowertri = .temp.list$dists.lowertri,
                             cov.model = .temp.list$cov.model, kappa = kappa,
                             nugget = tausq.rel, cov.pars = c(1, phi),
                             det = TRUE, func.inv = "eigen",
                             only.inv.lower.diag = TRUE)
    xix <- as.double(rep(0, beta.size*beta.size))
    xix <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(.temp.list$xmat)),
              as.double(as.vector(.temp.list$xmat)),
              as.integer(beta.size),
              as.integer(beta.size),
              as.integer(n),
              res = xix, PACKAGE = "geoR")$res
    attr(xix, "dim") <- c(beta.size, beta.size)
    if(length(as.vector(xix)) == 1) {
      ixix <- 1/xix
      choldet <- 0.5 * log(xix)
    }
    else {
      chol.xix <- chol(xix)
      ixix <- chol2inv(chol.xix)
      choldet <- sum(log(diag(chol.xix)))
    }
    xiy <- as.double(rep(0, beta.size))
    xiy <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(.temp.list$xmat)),
              as.double(as.vector(z)),
              as.integer(beta.size),
              as.integer(1),
              as.integer(n),
              res = xiy, PACKAGE = "geoR")$res
    beta.hat <- as.vector(ixix %*% xiy)
    yiy <- as.double(0.0)
    yiy <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(z)),
              as.double(as.vector(z)),
              as.integer(1),
              as.integer(1),
              as.integer(n),
              res = yiy, PACKAGE = "geoR")$res
    ssresmat <- as.vector(yiy - crossprod(beta.hat,xiy))
    if(.temp.list$method == "ML") {
      neglik <- ((n/2) * log(2 * pi) +
                 covinf$log.det.to.half +
                 (n/2) * log(ssresmat/n) +
                 (n/2) -
                 .temp.list$log.jacobian
                 )
    }
    if(.temp.list$method == "RML") {
      xx.eigen <- eigen(crossprod(.temp.list$xmat), symmetric = TRUE, only.values = TRUE)
      neglik <- (((n - beta.size)/2) * log(2 * pi) +
                 covinf$log.det.to.half +
                 ((n - beta.size)/2) * log(ssresmat/(n-beta.size)) +
                 (n/2) +
                 choldet -
                 0.5 * sum(log(xx.eigen$values)) -
                 .temp.list$log.jacobian
                 )
    }
  }
  if(.temp.list$minimisation.function == "nlm")
    return(as.vector(neglik + penalty))
  else
    return(as.vector(neglik))
}

"proflik.phi" <-
  function (theta) 
{
  if (any(is.na(theta)) | any(theta==Inf) | any(is.nan(theta)))
    neglik <- 1e+32
  else{
    if(length(theta) == 2) include.lambda <- TRUE else include.lambda <- FALSE 
    if(.temp.list$minimisation.function == "nlm"){
      if (exists(".temp.phi", w=1)) remove(".temp.phi", pos=1, inherits = TRUE)
      if (exists(".temp.lambda", w=1)) remove(".temp.lambda", pos=1, inherits = TRUE)
      phi <- phi.minimiser <- theta[1]
      penalty <-  100000 * (.temp.lower.phi - min(phi, .temp.lower.phi))
      phi <- max(phi, .temp.lower.phi)
      if (phi.minimiser < 1.001 * .temp.lower.phi)
        assign(".temp.phi", phi, pos=1)
      if(include.lambda){
        lambda <- lambda.minimiser <- phi.lambda[2]
        penalty <-  penalty + 1000 * (.temp.lower.lambda - min(lambda, .temp.lower.lambda))
        lambda <- max(lambda, .temp.lower.lambda)
        penalty <- penalty + 1000 * (.temp.upper.lambda - max(lambda, .temp.upper.lambda))
        lambda <- min(lambda, .temp.upper.lambda)
        if (round(1000 * lambda.minimiser) <= round(1000 * .temp.lower.lambda))
          assign(".temp.lambda", lambda, pos=1)
        if (round(1000 * lambda.minimiser) >= round(1000 * .temp.upper.lambda))
          assign(".temp.lambda", lambda, pos=1)
      }
    }
    else{
      phi <- theta[1]
      if(include.lambda) lambda <- theta[2]
    }
    z <- .temp.list$z
    n <- .temp.list$n
    if(include.lambda){
      if(lambda == 1) {
        .temp.list$log.jacobian <<- 0
      }
      else {
        if(any(z <= 0))
          stop("Transformation option not allowed when there are zeros or negative data"
               )
        if(any(z^(lambda - 1) <= 0))
          .temp.list$log.jacobian <<- log(prod(z^(lambda - 1)))
        else .temp.list$log.jacobian <<- sum(log(z^(lambda - 1)))
        if(lambda == 0)
          z <- log(z)
        else z <- ((z^lambda) - 1)/lambda
      }
    }
    beta.size <- .temp.list$beta.size
    kappa <- .temp.list$kappa
    covinf <- varcov.spatial(dists.lowertri = .temp.list$
                             dists.lowertri,
                             cov.model = .temp.list$cov.model,
                             kappa = kappa, nugget = 0,
                             cov.pars = c(1, phi),
                             det = TRUE, func.inv = "eigen",
                             only.inv.lower.diag = TRUE)
    xix <- as.double(rep(0, beta.size*beta.size))
    xix <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(.temp.list$xmat)),
              as.double(as.vector(.temp.list$xmat)),
              as.integer(beta.size),
              as.integer(beta.size),
              as.integer(n),
              res = xix, PACKAGE = "geoR")$res
    attr(xix, "dim") <- c(beta.size, beta.size)
    if(length(as.vector(xix)) == 1) {
      ixix <- 1/xix
      choldet <- 0.5 * log(xix)
    }
    else {
      chol.xix <- chol(xix)
      ixix <- chol2inv(chol.xix)
      choldet <- sum(log(diag(chol.xix)))
    }
    xiy <- as.double(rep(0, beta.size))
    xiy <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(.temp.list$xmat)),
              as.double(as.vector(z)),
              as.integer(beta.size),
              as.integer(1),
              as.integer(n),
              res = xiy, PACKAGE = "geoR")$res
    beta.hat <- as.vector(ixix %*% xiy)
    yiy <- as.double(0.0)
    yiy <- .C("bilinearform_XAY",
              as.double(covinf$lower.inverse),
              as.double(covinf$diag.inverse),
              as.double(as.vector(z)),
              as.double(as.vector(z)),
              as.integer(1),
              as.integer(1),
              as.integer(n),
              res = yiy, PACKAGE = "geoR")$res
    ssresmat <- as.vector(yiy - crossprod(beta.hat,xiy))
    if(.temp.list$method == "ML") {
      neglik <- ((n/2) * log(2 * pi) + covinf$log.det.to.half +
                 (n/2) * log(ssresmat/n) + (n/2)) - .temp.list$
      log.jacobian
    }
    if(.temp.list$method == "RML") {
      xx.eigen <- eigen(crossprod(.temp.list$xmat), symmetric = TRUE, only.values = TRUE)
      neglik <- (((n - beta.size)/2) * log(2 * pi) +
                 covinf$log.det.to.half +
                 ((n - beta.size)/2) * log(ssresmat/(n-beta.size)) +
                 (n/2) +
                 choldet -
                 0.5 * sum(log(xx.eigen$values)) -
                 .temp.list$log.jacobian
                 )
    }
  }
  if(.temp.list$minimisation.function == "nlm")
    return(as.vector(neglik + penalty))
  else
    return(as.vector(neglik))
}








"proflik" <- 
  function(obj.likfit, geodata, coords = geodata$coords,
           data = geodata$data,
           sill.values,
           range.values, 
           nugget.values,
           nugget.rel.values,
           lambda.values,
           sillrange.values = TRUE,
           sillnugget.values = TRUE,
           rangenugget.values = TRUE, 
           sillnugget.rel.values = FALSE,
           rangenugget.rel.values = FALSE, 
           silllambda.values = FALSE,
           rangelambda.values = TRUE, 
           nuggetlambda.values = FALSE,
           nugget.rellambda.values = FALSE,
           uni.only = TRUE,
           bi.only = FALSE,
           messages,
           ...)
{
  ##
  ## 1. setting arguments
  ##
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  if(missing(geodata))
    geodata <- list(coords = coords, data = data)
  if(! "package:stats" %in% search()) require(mva)
  call.fc <- match.call()
  n.cov.pars <- obj.likfit$npars - length(obj.likfit$beta)
  if(obj.likfit$transform.info$fix.lambda == FALSE)
    n.cov.pars <- n.cov.pars - 1
  if(missing(sill.values))
    sill.values <- sillrange.values <- sillnugget.values <- FALSE
  if(missing(range.values))
    range.values <- sillrange.values <- rangenugget.values <- rangelambda.values <- FALSE 
  if(!is.null(obj.likfit$call$fix.nugget))
    if(obj.likfit$call$fix.nugget == TRUE)
      nugget.values <-  nugget.rel.values <- FALSE
  if(missing(nugget.values))
    nugget.values <- sillnugget.values <- rangenugget.values <- nuggetlambda.values <- FALSE
  if(missing(nugget.rel.values))
    nugget.rel.values <- sillnugget.rel.values <- rangenugget.rel.values <- nugget.rellambda.values <- FALSE
  if(missing(lambda.values) | obj.likfit$transform.info$fix.lambda) lambda.values <- FALSE  
  if(uni.only){
    sillrange.values <- sillnugget.values <- rangenugget.values <-
      sillnugget.rel.values <- rangenugget.rel.values <- 
        silllambda.values <- rangelambda.values <- 
          nugget.rellambda.values <- nuggetlambda.values <- FALSE
  }
  else{
    if(all(sillrange.values == TRUE)){
      if(all(sill.values == FALSE) | all(range.values == FALSE)){
        sillrange.values <- FALSE
        stop("if argument sillrange.values = TRUE sill.values and range.values must be provided. Alternatively a matrix can be provided in sillrange.values  or set this to FALSE")
      }
      else
        sillrange.values <- as.matrix(expand.grid(sill.values, range.values))
    }
    if(n.cov.pars == 2){
        sillnugget.values <- rangenugget.values <-
          sillnugget.rel.values <- rangenugget.rel.values <- 
            nugget.rellambda.values <- nuggetlambda.values <- FALSE
    }
    else{
      if(all(sillnugget.values == TRUE)){
        if(all(sill.values == FALSE) | all(nugget.values == FALSE)){
          sillnugget.values <- FALSE
          stop("if argument sillnugget.values = TRUE sill.values and nugget.values must be provided. Alternatively a matrix can be provided in sillnugget.values or set this to FALSE")
        }
        else
          sillnugget.values <- as.matrix(expand.grid(sill.values, nugget.values))
      }
      if(all(rangenugget.values == TRUE)){
        if(all(range.values == FALSE) | all(nugget.values == FALSE)){
          rangenugget.values <- FALSE
          stop("if argument rangenugget.values = TRUE range.values and nugget.values must be provided. Alternatively a matrix can be provided in rangenugget.values or set this to FALSE")
        }
        else
          rangenugget.values <- as.matrix(expand.grid(range.values, nugget.values))
      }
      if(all(sillnugget.rel.values == TRUE)){
        if(all(sill.values == FALSE) | all(nugget.rel.values == FALSE)){
          sillnugget.rel.values <- FALSE
          stop("if argument sillnugget.rel.values = TRUE sill.values and nugget.rel.values must be provided. Alternatively a matrix can be provided in sillnugget.rel.values or set this to FALSE")
        }
        else
          sillnugget.rel.values <- as.matrix(expand.grid(sill.values, nugget.rel.values))
      }
      if(all(rangenugget.rel.values == TRUE)){
        if(all(range.values == FALSE) | all(nugget.rel.values == FALSE)){
          rangenugget.rel.values <- FALSE
          stop("if argument rangenugget.rel.values = TRUE range.values and nugget.rel.values must be provided. Alternatively a matrix can be provided in rangenugget.rel.values or set this to FALSE")
        }
        else
          rangenugget.rel.values <- as.matrix(expand.grid(range.values, nugget.rel.values))
      }
      if(obj.likfit$transform.info$fix.lambda == TRUE){
        if(all(nuggetlambda.values == TRUE)){
          if(all(lambda.values == FALSE) | all(nugget.values == FALSE)){
            nuggetlambda.values <- FALSE
            stop("if argument nuggetlambda.values = TRUE lambda.values and nugget.values must be provided. Alternatively a matrix can be provided in nuggetlambda.values or set this to FALSE")
          }
          else
            nuggetlambda.values <- as.matrix(expand.grid(lambda.values, nugget.values))
        }
        if(all(nugget.rellambda.values == TRUE)){
          if(all(lambda.values == FALSE) | all(nugget.rel.values == FALSE)){
            nugget.rellambda.values <- FALSE
            stop("if argument nugget.rellambda.values = TRUE lambda.values and nugget.rel.values must be provided. Alternatively a matrix can be provided in nugget.rellambda.values or set this to FALSE")
          }
          else
            nugget.rellambda.values <- as.matrix(expand.grid(lambda.values, nugget.rel.values))
        }
      }
    }
    if(obj.likfit$transform.info$fix.lambda == TRUE)
      silllambda.values <- rangelambda.values <- FALSE
    else{
      if(all(silllambda.values == TRUE)){
        if(all(sill.values == FALSE) | all(lambda.values == FALSE)){
          silllambda.values <- FALSE
          stop("if argument silllambda.values = TRUE sill.values and lambda.values must be provided. Alternatively a matrix can be provided in silllambda.values or set this to FALSE")
        }
        else
          silllambda.values <- as.matrix(expand.grid(sill.values, lambda.values))
      }
      if(all(rangelambda.values == TRUE)){
        if(all(range.values == FALSE) | all(lambda.values == FALSE)){
          rangelambda.values <- FALSE
          stop("if argument rangelambda.values = TRUE range.values and lambda.values must be provided. Alternatively a matrix can be provided in rangelambda.values or set this to FALSE")
        }
        else
          rangelambda.values <- as.matrix(expand.grid(range.values, lambda.values))
      }      
    }
  }
  ##
  ## 2. data preparation
  ##
  trend <- unclass(trend.spatial(trend=obj.likfit$trend, geodata = geodata))
  if (nrow(trend) != nrow(coords)) 
    stop("coords and trend have incompatible sizes")
  data <- as.vector(data)
  dimnames(trend) <- list(NULL, NULL)
  if(obj.likfit$transform.info$fix.lambda) {
    if(obj.likfit$lambda != 1) {
      if(any(data <= 0))
        stop("Data transformation not allowed when there are zeros or negative data"
             )
      if(obj.likfit$lambda == 0)
        data <- log(data)
      else data <- ((data^obj.likfit$lambda) - 1)/obj.likfit$lambda
    }
  }
  n <- length(data)
  dists.vec <- as.vector(dist(coords))
  d <- range(dists.vec)
  min.dist <- d[1]
  max.dist <- d[2]
  tausq <- obj.likfit$nugget
  sigmasq <- obj.likfit$cov.pars[1]
  tausq.rel <- tausq/sigmasq
  phi <- obj.likfit$cov.pars[2]
  lambda <- obj.likfit$lambda
  loglik <- obj.likfit$loglik
  sill.total <- sigmasq + tausq
  n.uni <- 0
  n.bi <- 0
  lower.phi <- 0.01 * (min.dist/max.dist) 
  upper.phi <- 1000 * max.dist
  lower.sigmasq <- 0.01 * sill.total
  result <- list()
  assign(".temp.list", list(n = n,
                            z = data,
                            beta.size = dim(trend)[2],
                            kappa = obj.likfit$kappa,
                            xmat = trend,
                            ## txmat = t(trend),
                            method.lik = obj.likfit$method.lik,
                            dists.lowertri = dists.vec,
                            cov.model = obj.likfit$cov.model,
                            fix.lambda = obj.likfit$transform.info$fix.lambda,
                            lambda = obj.likfit$lambda,
                            lower.phi = lower.phi,
                            upper.phi = upper.phi,
                            lower.sigmasq = lower.sigmasq, 
                            phi.est = phi,
                            tausq.rel.est = tausq.rel,
                            tausq.est = tausq,
                            sigmasq.est = sigmasq), pos=1)
  if(obj.likfit$transform.info$fix.lambda == TRUE)
    .temp.list$log.jacobian <<- obj.likfit$transform.info$log.jacobian
  ##
  ## 3. One-dimensional profile likelihoods
  ##
  ##  
  ## 3.1 Profile for sigmasq
  ##  
  if(bi.only == FALSE) {
    if(any(sill.values != FALSE)) {
      n.uni <- n.uni + 1
      if(messages.screen) cat("proflik: computing profile likelihood for the sill\n")
      if(n.cov.pars == 2) {
        if(tausq == 0) {
          if(obj.likfit$transform.info$fix.lambda == FALSE) {
            ini.grid <- as.matrix(expand.grid(seq(min(range.values),
                                                  max(range.values),
                                                            l = 5),
                                                  seq(-1,1,l = 5)))
          }
          else {
            ini.grid <- as.matrix(seq(min(range.values),
                                                max(range.values),
                                                l = 10))
          }
          dimnames(ini.grid) <- list(NULL, NULL)
          .temp.list$ini.grid <<- ini.grid
          pl.sigmasq <- apply(matrix(sill.values,
                                     ncol = 1), 1, proflik.aux2, ...)
          .temp.list$ini.grid <<- NULL
        }
        else {
          stop("not yet implemented for fixed nugget != 0")
        }
      }
      if(n.cov.pars == 3) {
        if(any(lambda.values != FALSE)) {
          ini.grid <- as.matrix(expand.grid(seq(min(range.values),
                                                max(range.values), l = 6),
                                            seq(0, 2 * tausq.rel, l = 4),
                                            seq(-1, 1, l = 5)))
        }
        else {
          ini.grid <- as.matrix(expand.grid(seq(min(range.values),
                                                max(range.values), l = 10),
                                            seq(0, 2 * tausq.rel, l = 4)))
        }
        dimnames(ini.grid) <- list(NULL, NULL)
        .temp.list$ini.grid <<- ini.grid
        pl.sigmasq <- apply(matrix(sill.values, ncol = 
                                   1), 1, proflik.aux9, ...)
        .temp.list$ini.grid <<- NULL
      }
      v.ord <- order(c(sigmasq, sill.values))
      if(obj.likfit$transform.info$fix.lambda == TRUE)
        pl.sigmasq <- pl.sigmasq + obj.likfit$transform.info$log.jacobian
      result$sill <- list(sill = c(sigmasq, sill.values)[
                            v.ord], proflik.sill = c(loglik, pl.sigmasq)[
                                      v.ord], est.sill = c(sigmasq, loglik))
    }
    ##  
    ## 3.2 Profile for phi
    ##
    if(any(range.values != FALSE)) {
      n.uni <- n.uni + 1
      if(messages.screen) cat("proflik: computing profile likelihood for the range\n")
      if(n.cov.pars == 2) {
        if(tausq == 0) {
          .temp.list$nugget <<- 0
          pl.phi <- apply(matrix(range.values,
                                 ncol = 1), 1, proflik.aux0, ...)
          .temp.list$nugget <<- NULL
        }
        else {
          stop("not yet implemented for fixed nugget != 0"
               )
        }
      }
      if(n.cov.pars == 3) {
        pl.phi <- apply(matrix(range.values, ncol = 1),
                        1, proflik.aux7, ...)
      }
      v.ord <- order(c(phi, range.values))
      if(obj.likfit$transform.info$fix.lambda == TRUE)
        pl.phi <- pl.phi + obj.likfit$transform.info$log.jacobian
      result$range <- list(range = c(phi, range.values)[
                             v.ord], proflik.range = c(loglik, pl.phi)[
                                       v.ord], est.range = c(phi, loglik))
    }
    ##  
    ## 3.3 Profile for \tau^2
    ##  
    if(n.cov.pars == 3) {
      if(any(nugget.values != FALSE)) {
        n.uni <- n.uni + 1
        if(messages.screen) cat("proflik: computing profile likelihood for the nugget\n"
              )
        pl.tausq <- apply(matrix(nugget.values, ncol = 
                                 1), 1, proflik.aux11, ...)
        v.ord <- order(c(tausq, nugget.values))
        if(obj.likfit$transform.info$fix.lambda == TRUE)
          pl.tausq <- pl.tausq + obj.likfit$transform.info$log.jacobian
        result$nugget <- list(nugget = c(tausq, 
                                nugget.values)[v.ord], proflik.nugget
                              = c(loglik, pl.tausq)[v.ord], 
                              est.nugget = c(tausq, loglik))
      }
      ##  
      ## 3.4 Profile for relative \tau^2
      ##
      if(any(nugget.rel.values != FALSE)) {
        if(messages.screen) cat("proflik: computing profile likelihood for the relative nugget\n"
              )
        n.uni <- n.uni + 1
        pl.tausq.rel <- apply(matrix(nugget.rel.values,
                                     ncol = 1), 1, proflik.aux5, ...)
        v.ord <- order(c(tausq.rel, nugget.rel.values))
        if(obj.likfit$transform.info$fix.lambda == TRUE)
          pl.tausq.rel <- pl.tausq.rel + obj.likfit$transform.info$log.jacobian
        result$nugget.rel <- list(nugget.rel = c(
                                    tausq.rel, nugget.rel.values)[v.ord],
                                  proflik.nugget = c(loglik, pl.tausq.rel
                                    )[v.ord],
                                  est.nugget.rel = c(tausq.rel,loglik))
      }
    }
    ##  
    ## 3.5 Profile for \lambda
    ##
    if(any(lambda.values != FALSE)) {
      .temp.temp.list <<- .temp.list
      .temp.temp.list$coords <<- coords
      n.uni <- n.uni + 1
      if(messages.screen) cat("proflik: computing profile likelihood for lambda\n"
            )
      if(n.cov.pars == 2) {
        if(tausq == 0) {
          .temp.temp.list$fixtau <<- TRUE
          .temp.temp.list$ini <<- c(sigmasq,phi)
          pl.lambda <- apply(as.matrix(lambda.values), 1,
                             proflik.aux23, ...)
        }
        else {
          stop("not yet implemented for fixed nugget != 0"
               )
        }
      }
      if(n.cov.pars == 3) {
        .temp.temp.list$fixtau <<- FALSE
        .temp.temp.list$ini <<- phi
        pl.lambda <- apply(matrix(lambda.values,
                                  ncol = 1), 1, proflik.aux23, ...)
      }
      v.ord <- order(c(lambda, lambda.values))
      result$lambda <- list(lambda = c(lambda, 
                              lambda.values)[v.ord], proflik.lambda
                            = c(loglik, pl.lambda)[v.ord], 
                            est.lambda = c(lambda, loglik))
      remove(.temp.temp.list, inherits=TRUE, pos=1)
    }
  }
  ##
  ## 4. Two-dimensional profile likelihoods
  ##
  ##  
  ## 4.1 Profile for \sigma^2 and \phi
  ##
  if(uni.only == FALSE){
    if(any(sillrange.values != FALSE)) {
      n.bi <- n.bi + 1
      if(messages.screen) cat("proflik: computing 2-D profile likelihood for the sill and range parameters\n")
      if(n.cov.pars == 2) {
        if(tausq == 0) {
          .temp.list$nugget <<- 0
          if(.temp.list$fix.lambda == TRUE) {
            pl.sigmasqphi <- apply(cbind(0, sillrange.values, 1), 1, loglik.spatial, ...)
          }
          else {
            pl.sigmasqphi <- apply(sillrange.values,
                                   1, proflik.aux28, ...)
          }
          .temp.list$nugget <<- NULL
        }
        else {
          stop("not yet implemented for fixed nugget != 0"
               )
        }
      }
      if(n.cov.pars == 3) {
        pl.sigmasqphi <- apply(sillrange.values, 1, proflik.aux13, ...)
      }
      names(pl.sigmasqphi) <- NULL
      if(obj.likfit$transform.info$fix.lambda == TRUE)
        pl.sigmasqphi <- pl.sigmasqphi + obj.likfit$transform.info$log.jacobian
      result$sillrange <- list(sill = as.numeric(levels(as.factor(sillrange.values[,1]))), range = 
                               as.numeric(levels(as.factor(sillrange.values[,2]))), proflik.sillrange = pl.sigmasqphi, 
                               est.sillrange = c(sigmasq, phi, loglik))
    }
    ##  
    ## 4.2 Profile for \sigma^2 and \tau^2
    ##  
    if(any(sillnugget.values != FALSE)) {
      n.bi <- n.bi + 1
      if(messages.screen) cat("proflik: computing 2-D profile likelihood for the sill and nugget\n")
      if(obj.likfit$transform.info$fix.lambda == FALSE)
        ini.grid <- as.matrix(expand.grid(seq(min(range.values),
                                              max(range.values), l = 
                                              10), seq(-1, 1, l = 5)))
      else
        ini.grid <- as.matrix(seq(min(range.values),
                                  max(range.values), l = 10))
      dimnames(ini.grid) <- list(NULL, NULL)
      .temp.list$ini.grid <<- ini.grid
      pl.sigmasqtausq <- apply(sillnugget.values, 1, proflik.aux15, ...)
      .temp.list$ini.grid <<- NULL
      names(pl.sigmasqtausq) <- NULL
      if(obj.likfit$transform.info$fix.lambda == TRUE)
        pl.sigmasqtausq <- pl.sigmasqtausq + obj.likfit$transform.info$log.jacobian
      result$sillnugget <- list(sill = as.numeric(levels(as.factor(sillnugget.values[,1]))), nugget = as.numeric(levels(as.factor(sillnugget.values[,2]))), proflik.sillnugget = 
                                pl.sigmasqtausq, est.sillrange = c(sigmasq,
                                                   tausq, loglik))
    }
    ##  
    ## 4.3 Profile for \phi and \tau^2
    ##
    if(any(rangenugget.values != FALSE)) {
      n.bi <- n.bi + 1
      if(messages.screen) cat("proflik: computing 2-D profile likelihood for the range and nugget\n"
            )
      .temp.list$ini.grid <<- as.matrix(seq(sigmasq/4, 5 * 
                                          sigmasq, l = 15))
      pl.phitausq <- apply(rangenugget.values, 1, proflik.aux17, ...)
      .temp.list$ini.grid <<- NULL
      names(pl.phitausq) <- NULL
      if(obj.likfit$transform.info$fix.lambda == TRUE)
        pl.phitausq <- pl.phitausq + obj.likfit$transform.info$log.jacobian
      result$rangenugget <- list(range = as.numeric(levels(as.factor(rangenugget.values[,1]))), nugget
                               = as.numeric(levels(as.factor(rangenugget.values[,1]))), proflik.rangenugget = 
                               pl.phitausq, est.rangenugget = c(phi, tausq,
                                              loglik))
    }
    ##  
    ## 4.4 Profile for \sigma^2 and \tau^2_{rel}
    ##
    if(any(sillnugget.rel.values != FALSE)) {
      n.bi <- n.bi + 1
      if(messages.screen) cat("proflik: computing 2-D profile likelihood for the sill and relative nugget parameters\n"
            )
      if(.temp.list$fix.lambda == FALSE)
        ini.grid <- as.matrix(expand.grid(seq(min(range.values), max(range.values), l = 
                                              10), seq(-1, 1, l = 5)))
      else
        ini.grid <- as.matrix(seq(min(range.values),
                                  max(range.values), l = 10))
      dimnames(ini.grid) <- list(NULL, NULL)
      .temp.list$ini.grid <<- ini.grid
      pl.sigmasqtausq.rel <- apply(sillnugget.rel.values, 1, 
                                   proflik.aux19, ...)
      .temp.list$ini.grid <<- NULL
      names(pl.sigmasqtausq.rel) <- NULL
      if(obj.likfit$transform.info$fix.lambda == TRUE)
        pl.sigmasqtausq.rel <- pl.sigmasqtausq.rel + obj.likfit$transform.info$log.jacobian
      result$sillnugget.rel <- list(sill = as.numeric(levels(as.factor(sillnugget.rel.values[,1]))), 
                                    nugget.rel = as.numeric(levels(as.factor(sillnugget.rel.values[,2]))), 
                                    proflik.sillnugget.rel = pl.sigmasqtausq.rel,
                                    est.sillrange.rel = c(sigmasq, tausq.rel, 
                                      loglik))
    }
    ##  
    ## 4.5 Profile for \phi and \tau^2_{rel}
    ##
    if(any(rangenugget.rel.values != FALSE)) {
      n.bi <- n.bi + 1
      if(messages.screen) cat("proflik: computing 2-D profile likelihood for the range and relative nugget parameters\n"
            )
      pl.phitausq.rel <- apply(rangenugget.rel.values, 1, proflik.aux30, ...)
      names(pl.phitausq.rel) <- NULL
      if(obj.likfit$transform.info$fix.lambda == TRUE)
        pl.phitausq.rel <- pl.phitausq.rel + obj.likfit$transform.info$log.jacobian
      result$rangenugget.rel <- list(range = as.numeric(levels(as.factor(rangenugget.rel.values[,1]))),
                                   nugget.rel = as.numeric(levels(as.factor(rangenugget.rel.values[,2]))), 
                                   proflik.rangenugget.rel = pl.phitausq.rel,
                                   "est.rangenugget.rel" = c(phi, tausq.rel,
                                     loglik))
    }
  }
  ##  
  ## 4.6 Profile for \sigma^2 and \lambda
  ##
  if(any(silllambda.values != FALSE)) {
    n.bi <- n.bi + 1
    if(messages.screen) cat("proflik: computing 2-D profile likelihood for the sill and transformation parameters\n"
          )
    if(n.cov.pars == 2) {
      ini.grid <- as.matrix(seq(min(range.values), max(
                                                       range.values), l = 10))
      dimnames(ini.grid) <- list(NULL, NULL)
      .temp.list$ini.grid <<- ini.grid
      if(tausq == 0) {
        .temp.list$nugget <<- 0
        pl.sigmasqlambda <- apply(silllambda.values, 1,
                                  proflik.aux24, ...)
        .temp.list$ini.grid <<- .temp.list$nugget <<- NULL
      }
      else {
        stop("not yet implemented for fixed nugget != 0"
             )
      }
    }
    if(n.cov.pars == 3) {
      ini.grid <- as.matrix(expand.grid(seq(min(range.values),
                                            max(range.values), l = 10), seq(0, 1, l = 5)))
      dimnames(ini.grid) <- list(NULL, NULL)
      .temp.list$ini.grid <<- ini.grid
      pl.sigmasqlambda <- apply(sigmasqlambda.values, 1, 
                                proflik.aux27, ...)
      .temp.list$ini.grid <<- NULL
    }
    names(pl.sigmasqlambda) <- NULL
    result$silllambda <- list(sill = as.numeric(levels(as.factor(silllambda.values[,1]))), lambda = as.numeric(levels(as.factor(silllambda.values[,2]))), proflik.silllambda = pl.sigmasqlambda,
                              est.silllambda = c(sigmasq, lambda, loglik))
  }
  ##  
  ## 4.7 Profile for \phi and \lambda
  ##
  if(any(rangelambda.values != FALSE)) {
    .temp.list$data <<- .temp.list$z
    n.bi <- n.bi + 1
    cat("proflik: computing 2-D profile likelihood for the range and transformation parameters\n"
              )
    if(n.cov.pars == 2) {
      if(tausq == 0) {
        .temp.list$nugget <<- 0
        pl.philambda <- apply(rangelambda.values, 1, 
                              proflik.aux1, ...)
        .temp.list$nugget <<- NULL
      }
      else {
        stop("not yet implemented for fixed nugget != 0"
             )
      }
    }
    if(n.cov.pars == 3) {
      pl.philambda <- apply(rangelambda.values, 1, proflik.aux31, ...)
    }
    names(pl.philambda) <- NULL
    result$rangelambda <- list(range = as.numeric(levels(as.factor(rangelambda.values[,1]))), lambda = as.numeric(levels(as.factor(rangelambda.values[,2]))), proflik.rangelambda = pl.philambda,
                               est.rangelambda = c(phi, lambda, loglik))
  }
  ##  
  ## 4.8 Profile for \tau^2 and \lambda
  ##                                        
  if(any(nuggetlambda.values != FALSE)) {
    n.bi <- n.bi + 1
    cat("proflik: computing 2-D profile likelihood for the nugget and transformation parameters\n"
          )
    pl.nuggetlambda <- apply(nuggetlambda.values, 1, proflik.aux32, ...)
      names(pl.nuggetlambda) <- NULL
    result$nuggetlambda <- list(nugget = as.numeric(levels(as.factor(nuggetlambda.values[,1]))), lambda = as.numeric(levels(as.factor(nuggetlambda.values[,2])))
                                , proflik.nuggetlambda = pl.nuggetlambda,
                                est.nuggetlambda = c(tausq, lambda, loglik))
  }
  ##  
  ## 4.9 2-D Profile for \tau^2_{rel} and \lambda
  ##
  if(any(nugget.rellambda.values != FALSE)) {
    n.bi <- n.bi + 1
    pl.nugget.rellambda <- apply(nugget.rellambda.values, 1, proflik.aux33, ...)
    names(pl.nugget.rellambda) <- NULL
    result$nugget.rellambda <- list(nugget.rel = as.numeric(levels(as.factor(nugget.rellambda.values[,1]))),
                                    lambda = as.numeric(levels(as.factor(nugget.rellambda.values[,2]))), proflik.nugget.rellambda = 
                                    pl.nugget.rellambda, est.nugget.rellambda = c(tausq.rel,
                                                           lambda, loglik))
  }
  result$n.uni <- n.uni
  result$n.bi <- n.bi
  result$method.lik <- obj.likfit$method.lik
  result$call <- call.fc
  class(result) <- "proflik"
  return(result)
}

"proflik.aux0" <-
  function(phi, ...)
{
  ## This function computes the value of the profile likelihood for the correlation parameter \phi when nugget effect is not included in the model.
  ## It requires the minimisation of the function wrt \phi for each value of \lambda, if this transformation parameter is included in the model
  ## This is an auxiliary function called by likfit.proflik
  ##
  if(.temp.list$fix.lambda == TRUE)
    proflik <- proflik.aux1(phi = phi)
  else {
    .temp.list$phi <<- phi
#    proflik <-  - (optim(.temp.list$lambda, proflik.aux1.1, method="L-BFGS-B", lower
#                         = -2, upper = 2, ...)$value)
    proflik <-  - (optimise(proflik.aux1.1, lower = -5, upper = 5, ...)$objective)
    return(proflik)
  }
}
"proflik.aux1" <-
  function(philambda, ...)
{
  ## This function computes the value of the profile likelihood for the correlation function scale parameter \phi when nugget effect = 0
  if(length(philambda) == 2) lambda <- philambda[2]
  else lambda <- 1
  n <- .temp.list$n
  main <- proflik.main(tausq=.temp.list$nugget, sigmasq=1, phi=philambda[1], lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    proflik <-  - (n/2) * log(2 * pi) - main$log.det.to.half -
      (n/2) * log(main$ssresmat/n) - (n/2) + main$
    log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    proflik <-  - ((n - .temp.list$beta.size)/2) * log(2 * pi) - main$
    log.det.to.half - ((n - .temp.list$beta.size)/2) * log(main$ssresmat/
                                                n) - (n/2) + 0.5 * sum(log(eigentrem$values)) + 
                                                  main$log.jacobian
  }
  return(proflik)
}

"proflik.aux10" <-
  function(phitausq.rel.lambda, ...)
{
  if(length(phitausq.rel.lambda) == 3)
    lambda <- phitausq.rel.lambda[3]
  else lambda <- 1
  phitausq.rel.lambda <- as.vector(phitausq.rel.lambda)
  n <- .temp.list$n
  phi <- phitausq.rel.lambda[1]
  tausq <- phitausq.rel.lambda[2]
  sigmasq <- .temp.list$sigmasq
  main <- proflik.main(tausq=tausq, sigmasq=1, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) + main$log.det.to.half +
      (n/2) * log(sigmasq) + (0.5/sigmasq) * main$ssresmat -
        main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) + main$
    log.det.to.half + ((n - .temp.list$beta.size)/2) * log(sigmasq) +
      (0.5/sigmasq) * main$ssresmat - 0.5 * sum(log(eigentrem$
                                               values)) - main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux11" <-
  function(tausq, ...)
{
  ## This function computes the value of the profile likelihood for the parameter \tau^2.
  ## It requires the minimisation of the function wrt \sigma^2, \phi and \lambda (if the case)  for each value of \tau^2.
  ## This is an auxiliary function called by proflik.
  .temp.list$nugget <<- as.vector(tausq)
  if(.temp.list$fix.lambda == TRUE) {
    sigmasqphi.res <- optim(c(.temp.list$sigmasq.est, .temp.list$phi.est),
                            proflik.aux12,method="L-BFGS-B",
                            lower = c(.temp.list$lower.sigmasq,
                              .temp.list$lower.phi),
                            upper=c(+Inf, .temp.list$upper.phi), ...)$value
  }
  else {
    sigmasqphi.res <- optim(c(.temp.list$sigmasq.est, .temp.list$
                              phi.est, .temp.list$lambda), proflik.aux12,method="L-BFGS-B",  lower = c(.temp.list$lower.sigmasq, .temp.list$lower.phi, -2),
                            upper = c( + Inf, .temp.list$upper.phi, 2), ...)$value
  }
  .temp.list$nugget <<- NULL
  return( - sigmasqphi.res)    
}

"proflik.aux1.1" <-
  function(lambda, ...)
{
  ## This function computes the value of the profile likelihood for the correlation function scale parameter \phi when nugget effect = 0
  phi <- .temp.list$phi
  n <- .temp.list$n
  main <- proflik.main(tausq=.temp.list$nugget, sigmasq=1, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) + main$log.det.to.half +
      (n/2) * log(main$ssresmat/n) + (n/2) - main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) + main$
    log.det.to.half + ((n - .temp.list$beta.size)/2) * log(main$ssresmat/n) +
      (n/2) - 0.5 * sum(log(eigentrem$values)) - 
        main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}

"proflik.aux12" <-
  function(sigmasqphi.lambda, ...)
{
  ## This function computes the value of the profile likelihood for the nugget parameter \tau^2, minimizing the likelihood wrt correlation function scale parameter \phi (range), the random field scale parameter \sigma^2 (sill) and the transformation parameter \lambda. 
  if(length(sigmasqphi.lambda) == 3) lambda <-  sigmasqphi.lambda[3]
  else lambda <- 1
  sigmasqphi.lambda <- as.vector(sigmasqphi.lambda)
  n <- .temp.list$n
  sigmasq <- sigmasqphi.lambda[1]
  phi <- sigmasqphi.lambda[2]
  main <- proflik.main(tausq=.temp.list$nugget, sigmasq=sigmasq, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) +
      main$log.det.to.half +
        0.5 * (main$ssresmat) -
          main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) +
      main$log.det.to.half +
        0.5 * (main$ssresmat) -
          0.5 * sum(log(eigentrem$values)) -
            main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux13" <-
  function(sigmasqphi, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters \sigma^2 and \phi when the nugget is included.
  ## It requires the minimisation of the function wrt \tau^2 and \lambda (if the case) for each value of (\sigma^2, \phi)
  ## This is an auxiliary function called by likfit.proflik
  .temp.list$sigmasqphi <<- as.vector(sigmasqphi)
  if(.temp.list$fix.lambda == TRUE) {
##      tausq.res <- optim(.temp.list$tausq.est, proflik.aux14, method="L-BFGS-B", lower
##			 = 0, ...)$value
      tausq.res <- optimise(proflik.aux14, lower = 0, upper = .Machine$double.xmax^0.25, ...)$objective
  }
  else {
    tausq.res <- optim(
                       c(.temp.list$tausq.est, .temp.list$lambda), proflik.aux14, method="L-BFGS-B",lower = c(0, -2
                                                                                                      ), upper = c( +Inf, 2), ...)$value
  }
  .temp.list$sigmasqphi <<- NULL
  return( - tausq.res)
}

"proflik.aux14" <-
  function(tausq.lambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters (\sigma^2, \phi), minimizing the likelihood wrt the nugget parameter \tau^2.
  ## This functions is called by the auxiliary function proflik.aux13
  if(length(tausq.lambda) == 2) lambda <- tausq.lambda[2]
  else lambda <- 1
  n <- .temp.list$n
  tausq <- tausq.lambda[1]
  main <- proflik.main(tausq=tausq, .temp.list$sigmasqphi[1], phi=.temp.list$sigmasqphi[2], lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) + main$log.det.to.half + 0.5 *
      main$ssresmat - main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) + main$
    log.det.to.half + 0.5 * main$ssresmat - 0.5 * sum(log(
                                                     eigentrem$values)) - main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux15" <-
  function(sigmasqtausq, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters \sigma^2 and \tau^2
  ## It requires the minimisation of the function wrt \phi and also \lambda (if the case) for each value of (\sigma^2, \tau^2) 
  ## This is an auxiliary function called by likfit.proflik
  .temp.list$sigmasqtausq <<- as.vector(sigmasqtausq)
  ini.lik <- round(100000000. * apply(.temp.list$ini.grid, 1,
                                      proflik.aux16))
  ini <- as.vector(.temp.list$ini.grid[which(ini.lik == min(ini.lik, na.rm = TRUE)),,drop=FALSE][1,])
  if(.temp.list$fix.lambda == TRUE) {
#    phi.res <- optim(ini, proflik.aux16, method="L-BFGS-B", lower = 
#                     .temp.list$lower.phi, upper=.temp.list$upper.phi, ...)$value
    phi.res <- optimise(proflik.aux16, lower = .temp.list$lower.phi, upper=.temp.list$upper.phi, ...)$objective
  }
  else {
    phi.res <- optim(ini, proflik.aux16, method="L-BFGS-B", 
                     lower = c(.temp.list$lower.phi, -2),
                     upper = c(.temp.list$upper.phi, 2), ...)$value
  }
  .temp.list$sigmasqtausq <<- NULL
  return( - phi.res)
}

"proflik.aux16" <-
  function(phi.lambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the sill and nugget parameters (\sigma^2,\tau^2), minimising the profile likelihood wrt correlation function scale parameter \phi (and the transformation parameter \lambda
  ## This is an auxiliary function called by likfit.aux15  
  if(length(phi.lambda) == 2) lambda <- phi.lambda[2]
  else lambda <- 1
  n <- .temp.list$n
  phi <- phi.lambda[1]
  main <- proflik.main(tausq=.temp.list$sigmasqtausq[2], sigmasq=.temp.list$sigmasqtausq[1], phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) + main$log.det.to.half + 0.5 *
      main$ssresmat - main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) + main$
    log.det.to.half + 0.5 * main$ssresmat - 0.5 * sum(log(
                                                     eigentrem$values)) -
                                                       main$log.jacobian
  }
    return(as.vector(round(neglik, dig=8)))
}
"proflik.aux17" <-
  function(phitausq, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters (\phi, \tau^2)
  ## It requires the minimisation of the function wrt \sigma^2 and \lambda (if the case) for each value of (\phi, \tau^2) 
  ## This is an auxiliary function called by likfit.proflik
  .temp.list$phitausq <<- as.vector(phitausq)
  if(.temp.list$fix.lambda == TRUE) {
##    ini.lik <- round(100000000. * apply(.temp.list$ini.grid, 1,
##                                        proflik.aux18))
##    ini <- as.vector(.temp.list$ini.grid[which(ini.lik == min(ini.lik, na.rm = TRUE)),,drop=FALSE][1,])
##    sigmasq.res <- optim(ini, proflik.aux18, method="L-BFGS-B", 
##                         lower = .temp.list$lower.sigmasq, ...)$value
    sigmasq.res <- optimise(proflik.aux18, lower = .temp.list$lower.sigmasq, upper = .Machine$double.xmax^0.25, ...)$objective
  }
  else {
    sigmasq.res <- optim(c(.temp.list$sigmasq.est, .temp.list$lambda
                           ), proflik.aux18, method="L-BFGS-B", lower = c(.temp.list$lower.sigmasq,
                                                                  -2), upper = c( + Inf, 2), ...)$value
  }
  .temp.list$phitausq <<- NULL
  return( - sigmasq.res)
}

"proflik.aux18" <-
  function(sigmasq.lambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the range and nugget parameters (\phi, \tau^2), minimising the likelihood wrt the random field scale parameter \sigma^2 (sill) ant the transformation parameter \lambda. 
  ## This is an auxiliary function called by likfit.aux17.
  if(length(sigmasq.lambda) == 2) lambda <- sigmasq.lambda[2]
  else lambda <- 1
  n <- .temp.list$n
  sigmasq <- sigmasq.lambda[1]
  main <- proflik.main(tausq=.temp.list$phitausq[2], sigmasq=sigmasq, phi=.temp.list$phitausq[1], lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) + main$log.det.to.half + 0.5 *
      main$ssresmat - main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) + main$
    log.det.to.half + 0.5 * main$ssresmat - 0.5 * sum(log(
                                                     eigentrem$values)) -
                                                       main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux19" <-
  function(sigmasqtausq.rel, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters (\sigma^2, \tau^2_{rel})
  ## It requires the minimisation of the function wrt \phi and \lambda (if the case) for each value of (\sigma^2, \tau^2_{rel})
  ## This is an auxiliary function called by likfit.proflik
  .temp.list$sigmasqtausq.rel <<- as.vector(sigmasqtausq.rel)
  if(.temp.list$fix.lambda == TRUE) {
##    phi.res <- optim(.temp.list$phi.est, proflik.aux20, method="L-BFGS-B", lower = 
##                     .temp.list$lower.phi, upper=.temp.list$upper.phi, ...)$value
    phi.res <- optimise(proflik.aux20, lower = 
                     .temp.list$lower.phi, upper=.temp.list$upper.phi, ...)$objective
  }
  else {
    phi.res <- optim(c(.temp.list$phi.est, .temp.list$lambda, ...), proflik.aux20, method="L-BFGS-B", 
                     lower = c(.temp.list$lower.phi, -2),
                     upper = c(.temp.list$upper.phi, 2), ...)$value
  }
  .temp.list$sigmasqtausq.rel <<- NULL
  return( - phi.res)
}


"proflik.aux2" <-
  function(sigmasq, ...)
{
  ## This function computes the value of the profile likelihood for the random field scale (variance) parameter \sigma^2 when nugget effect is not included in the model.
  ## It requires the minimisation of the function wrt \phi and maybe \lambda for each value of \sigma^2
  ## This is an auxiliary function called by likfit.proflik
  ##
  .temp.list$sigmasq <<- as.vector(sigmasq)
  ini.lik <- round(100000000. * apply(.temp.list$ini.grid, 1,
                                      proflik.aux3))
  ini <- as.vector(.temp.list$ini.grid[which(ini.lik == min(ini.lik, na.rm = TRUE)),,drop=FALSE][1,])
  if(.temp.list$fix.lambda == TRUE) {
##    phi.res <- optim(ini , proflik.aux3, method="L-BFGS-B",
 ##                    lower = .temp.list$lower.phi,
  ##                   upper=.temp.list$upper.phi, ...)$value
    phi.res <- optimise(proflik.aux3,
                     lower = .temp.list$lower.phi,
                     upper=.temp.list$upper.phi, ...)$objective
  }
  else {
    phi.res <- optim(ini, proflik.aux3, method="L-BFGS-B",
                     lower = c(.temp.list$lower.phi, -2),
                     upper = c(.temp.list$upper.phi, 2), ...)$value
  }
  .temp.list$sigmasq <<- NULL
  return( - phi.res)
}

"proflik.aux20" <-
  function(phi.lambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the sill and relative nugget parameters (\sigma^2, \tau^2_{rel}), minimising the likelihood wrt the correlation function scale parameter \phi and the transformation parameter \lambda.
  ## This is an auxiliary function called by likfit.aux19.
  phi.lambda <- as.vector(phi.lambda)
  if(length(phi.lambda) == 2) lambda <- phi.lambda[2]
  else lambda <- 1
  sigmasqtausq.rel <- as.vector(.temp.list$sigmasqtausq.rel)
  sigmasq <- sigmasqtausq.rel[1]
  tausq.rel <- sigmasqtausq.rel[2]
  phi <- phi.lambda[1]
  n <- .temp.list$n
  main <- proflik.main(tausq=tausq.rel, sigmasq=1, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) +
      main$log.det.to.half +
        (n/2) * log(sigmasq) +
          (0.5/sigmasq) * main$ssresmat -
            main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) +
      main$log.det.to.half +
        (n/2) * log(sigmasq) +
          (0.5/sigmasq) * main$ssresmat -
            0.5 * sum(log(eigentrem$values)) -
              main$log.jacobian
  } 
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux21" <-
function(phitausq.rel, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters (\phi, \tau^2_{rel})
  ## This is an auxiliary function called by likfit.proflik
  phitausq.rel <- as.vector(phitausq.rel)
  phi <- phitausq.rel[1]
  tausq.rel <- phitausq.rel[2]
  n <- .temp.list$n
  main <- proflik.main(tausq=tausq.rel, sigmasq=1, phi=phi, lambda = 1)
  sigmasq.hat <- main$ssresmat/n
  if(.temp.list$method.lik == "ML") {
    proflik <-  - (n/2) * log(2 * pi) -
      main$log.det.to.half -
      (n/2) * log(sigmasq.hat) -
        (n/2) -
          main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    proflik <-  - ((n - .temp.list$beta.size)/2) * log(2 * pi) -
      main$log.det.to.half -
        (n/2) * log(sigmasq.hat) -
          (n/2) +
            0.5 * sum(log(eigentrem$values)) -
              main$log.jacobian
  }
  return(proflik)
}

"proflik.aux21.1" <-
  function(lambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters (\phi, \tau^2_{rel})
  ## This requires minimasation wrt to the transformation parameter \lambda
  ## This is an auxiliary function called by likfit.proflik
  n <- .temp.list$n
  main <- proflik.main(tausq = .temp.list$phitausq.rel[2], sigmasq = 1,
                       phi = .temp.list$phitausq.rel[1], lambda = lambda)
  sigmasq.hat <- main$ssresmat/n
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) +
      main$log.det.to.half + (n/2) * log(sigmasq.hat) +
        (n/2) -
          main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) +
      main$log.det.to.half +
        (n/2) * log(sigmasq.hat) +
          (n/2) -
            0.5 * sum(log(eigentrem$values)) -
              main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}

"proflik.aux22" <-
  function(sigmasq, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the range and nugget parameters (\phi, \tau^2), minimising the likelihood wrt the random field scale parameter \sigma^2 (sill) 
  ## This is an auxiliary function called by likfit.aux17
  n <- .temp.list$n
  main <- proflik.main(tausq=.temp.list$phitausq[2], sigmasq=sigmasq, phi= .temp.list$phitausq[1], lambda = 1)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) +
      main$log.det.to.half +
        0.5 * main$ssresmat -
          main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) +
      main$log.det.to.half +
        0.5 * main$ssresmat -
          0.5 * sum(log(eigentrem$values)) -
            main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}

"proflik.aux23" <-
  function(lambda, ...)
{
  ## This function computes the value of the profile likelihood for the transformation parameter \lambda
  ## It requires the minimisation of the function wrt \phi and \tau^2 and sigma^2 for each value of \lambda
  ## This is an auxiliary function called by proflik
  lambda <- as.vector(lambda)
  if(.temp.temp.list$fixtau == FALSE) {
    if(lambda == 0)
      data.l <- log(.temp.list$z)
    else data.l <- ((.temp.list$z^lambda) - 1)/lambda
    var.l <- var(data.l)
    ini.cov <- c(var.l, .temp.temp.list$ini)
  }
  else
    ini.cov <- .temp.temp.list$ini
  if(dim(.temp.list$xmat)[2] == 1 & all(.temp.list$xmat == 1))
    trend.mat <- "cte"
  else
    trend.mat <- ~ (.temp.list$xmat[,-1])
  lambda.res <- likfit(coords = .temp.temp.list$coords,
                       data = .temp.list$z,
                       ini = ini.cov, trend = trend.mat,
                       fix.nugget = .temp.temp.list$fixtau,
                       method.lik = .temp.list$method.lik,
                       cov.model = .temp.list$cov.model,
                       kappa = .temp.list$kappa, fix.lambda = TRUE,
                       lambda = lambda,
                       messages = FALSE)$loglik
  .temp.list <<- .temp.temp.list
  return(lambda.res)
}

"proflik.aux24" <-
  function(sigmasqlambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the parameters (\sigma^2, \lambda) when there is no nugget effect (\tau^2 = 0, fixed)
  ## It requires the minimisation of the function wrt \phi for each value of (\sigma^2, \lambda)
  ## This is an auxiliary function called by proflik
  sigmasqlambda <- as.vector(sigmasqlambda)
  .temp.list$sigmasq <<- sigmasqlambda[1]
  lambda <- sigmasqlambda[2]
  if(lambda == 1) {
    .temp.list$log.jacobian <<- 0
  }
  else {
    if(any(.temp.list$z <= 0))
      stop("Transformation option not allowed when there are zeros or negative data"
           )
    .temp.list$log.jacobian <<- sum(log(.temp.list$z^(lambda - 1)))
    if(lambda == 0)
      .temp.list$z <<- log(.temp.list$z)
    else .temp.list$z <<- ((.temp.list$z^lambda) - 1)/lambda
  }
  ini.lik <- round(100000000. * apply(.temp.list$ini.grid, 1,
                                      proflik.aux3))
  ini <- as.vector(.temp.list$ini.grid[which(ini.lik == min(ini.lik, na.rm = TRUE)),,drop=FALSE][1,])
##  phi.res <- optim(ini, proflik.aux3, method="L-BFGS-B", lower = .temp.list$
##                   lower.phi, upper = .temp.list$upper.phi, ...)$value
  phi.res <- optimise(proflik.aux3, lower = .temp.list$lower.phi, upper = .temp.list$upper.phi, ...)$objective
  .temp.list$log.jacobian <<- NULL
  .temp.list$sigmasq <<- NULL
  .temp.list$z <<- .temp.list$data
  return( - phi.res)
}

"proflik.aux27" <-
  function(sigmasqlambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for sill \sigma^2 and the transformation parameter \lambda
  ## It requires the minimisation of the function wrt \phi and \tau^2 and for each value of (\sigma^2,\lambda)
  ## This is an auxiliary function called by proflik.
  sigmasqlambda <- as.vector(sigmasqlambda)
  .temp.list$sigmasq <<- sigmasqlambda[1]
  lambda <- sigmasqlambda[2]
  if(lambda == 1) {
    .temp.list$log.jacobian <<- 0
  }
  else {
    .temp.list$fix.lambda <<- TRUE
    if(any(.temp.list$z^(lambda - 1) <= 0))
      .temp.list$log.jacobian <<- log(prod(.temp.list$z^(lambda -
                                                         1)))
    else .temp.list$log.jacobian <<- sum(log(.temp.list$z^(lambda -
                                                           1)))
    if(lambda == 0)
      .temp.list$z <<- log(.temp.list$z)
    else .temp.list$z <<- ((.temp.list$z^lambda) - 1)/lambda
  }
  ini.lik <- round(100000000. * apply(.temp.list$ini.grid, 1,
                                      proflik.aux10))
  ini <- as.vector(.temp.list$ini.grid[which(ini.lik == min(ini.lik, na.rm = TRUE)),,drop=FALSE][1,])
  phitausq.rel.res <- optim(ini, proflik.aux10, method="L-BFGS-B",
                            lower = c(.temp.list$lower.phi,
                              0), upper=c(.temp.list$upper.phi, 100), ...)$value
  .temp.list$log.jacobian <<- NULL
  .temp.list$sigmasq <<- NULL
  .temp.list$z <<- .temp.list$data
  return( - phitausq.rel.res)
}

"proflik.aux28" <-
  function(sigmasqphi, ...)
{
  ## This function computes the value of the 2-D profile likelihood for the random field scale (variance) parameter \sigma^2  and the correlation function parameter \phi when nugget effect is not included in the model.
  ## It requires the minimisation of the function wrt \lambda for each value of (\sigma^2, \phi)
  ## This is an auxiliary function called by likfit.proflik
  ##
##  ini.seq <- seq(-1.5, 1.5, l=7)
##  .temp.list$sigmasqphi <<- as.vector(sigmasqphi)
##  lambda.lik <- apply(as.matrix(ini.seq), 1, proflik.aux4)
##  ini <- ini.seq[lambda.lik == max(lambda.lik)]
##  lambda.res <- optim(ini, proflik.aux4, method="L-BFGS-B", lower = -2.5, upper = 2.5, ...)$value
  lambda.res <- optimise(proflik.aux4, lower = -5, upper = 5, ...)$objective
  .temp.list$sigmasqphi <<- NULL
  return( - lambda.res)
}

"proflik.aux30" <-
  function(phitausq.rel, ...)
{
  ## This function computes the value of the profile likelihood for the correlation parameter \phi when nugget effect is not included in the model.
  ## It requires the minimisation of the function wrt \phi for each value of \lambda, if this transformation parameter is included in the model
  ## This is an auxiliary function called by likfit.proflik
  ##
  if(.temp.list$fix.lambda == TRUE)
    proflik <- proflik.aux21(phitausq.rel = phitausq.rel)
  else {
    .temp.list$phitausq.rel <<- phitausq.rel
##    proflik <-  - (optim(.temp.list$lambda, proflik.aux21.1, method="L-BFGS-B", lower =
##                         -2, upper = 2, ...)$value)
    proflik <-  - (optimise(proflik.aux21.1, lower = -5, upper = 5, ...)$objective)
    .temp.list$phitausq.rel <<- NULL
  }
  return(proflik)
}


"proflik.aux3" <-
  function(phi.lambda, ...)
{
  ## This function computer the negative of the likelihood function for the correlation function scale parameter \phi (and maybe the transformation parameter \lambda) only for models with fixed nugget effect (i.e., when it is not a parameter to be estimated) 
  ## This function is used when computing the profile likelihood for \sigma^2
  ## This is an auxiliary function called by proflik.aux2
  ##  phi <- pmax(phi, .temp.list$lower.phi)
  if(length(phi.lambda) == 2)
    lambda <- phi.lambda[2]
  else lambda <- 1
  sigmasq <- .temp.list$sigmasq
  phi <- phi.lambda[1]
  n <- .temp.list$n
  main <- proflik.main(tausq=0, sigmasq=1, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) +
      main$log.det.to.half +
      (n/2) * log(sigmasq) + (0.5/sigmasq) * main$ssresmat - 
        main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) +
      main$log.det.to.half +
        ((n - .temp.list$beta.size)/2) * log(sigmasq) +
          (0.5/sigmasq) * main$ssresmat -
            0.5 * sum(log(eigentrem$values)) -
              main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}

"proflik.aux31" <-
  function(philambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for range \phi and the transformation parameter \lambda.
  ## It requires the minimisation of the function wrt \tau^2_{rel} and for each value of (\phi,\lambda).
  ## This is an auxiliary function called by proflik.
  philambda <- as.vector(philambda)
  .temp.list$phi <<- philambda[1]
  .temp.list$lambda <- philambda[2]
##  tausq.rel.res <- optim(.temp.list$tausq.rel.est, proflik.aux8, method="L-BFGS-B", lower = 
##                         0, upper=100, ...)$value
  tausq.rel.res <- optimise(proflik.aux8, lower =  0, upper=1000, ...)$objective
  .temp.list$phi <<- NULL
  return( - tausq.rel.res)
}

"proflik.aux32" <-
  function(tausqlambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for nugget \tau^2 and the transformation parameter \lambda.
                                        # It requires the minimisation of the function wrt \phi and \sigma^2 and for each value of (\tau^2,\lambda).
                                        # This is an auxiliary function called by proflik.
  tausqlambda <- as.vector(tausqlambda)
  .temp.list$nugget <<- tausqlambda[1]
  lambda <- tausqlambda[2]
  if(lambda == 1) {
    .temp.list$log.jacobian <<- 0
  }
  else {
    if(any(.temp.list$z^(lambda - 1) <= 0))
      .temp.list$log.jacobian <<- log(prod(.temp.list$z^(lambda -
                                                         1)))
    else .temp.list$log.jacobian <<- sum(log(.temp.list$z^(lambda -
                                                           1)))
    if(lambda == 0)
      .temp.list$z <<- log(.temp.list$z)
    else .temp.list$z <<- ((.temp.list$z^lambda) - 1)/lambda
  }
  sigmasqphi.res <- optim(c(.temp.list$sigmasq.est, .temp.list$phi.est), proflik.aux12, method="L-BFGS-B",
                          lower = c(.temp.list$lower.sigmasq, .temp.list$
                            lower.phi), upper=c(+Inf, .temp.list$upper.phi), ...)$value
  .temp.list$log.jacobian <<- NULL
  .temp.list$z <<- .temp.list$data
  .temp.list$nugget <<- NULL
  return( - sigmasqphi.res)
}

"proflik.aux33" <-
  function(tausq.rellambda, ...)
{
  ## This function computes the value of the 2-D profile likelihood for nugget \tau^2 and the transformation parameter \lambda.
  ## It requires the minimisation of the function wrt \phi for each value of (\tau^2,\lambda).
  ## This is an auxiliary function called by proflik.
  tausq.rellambda <- as.vector(tausq.rellambda)
  .temp.list$nugget.rel <<- tausq.rellambda[1]
  lambda <- tausq.rellambda[2]
  if(lambda == 1) {
    .temp.list$log.jacobian <<- 0
  }
  else {
    if(any(.temp.list$z^(lambda - 1) <= 0))
      .temp.list$log.jacobian <<- log(prod(.temp.list$z^(lambda -
                                                         1)))
    else .temp.list$log.jacobian <<- sum(log(.temp.list$z^(lambda -
                                                           1)))
    if(lambda == 0)
      .temp.list$z <<- log(.temp.list$z)
    else .temp.list$z <<- ((.temp.list$z^lambda) - 1)/lambda
  }
##  phi.res <- optim(.temp.list$phi.est, proflik.aux6, method="L-BFGS-B", lower = .temp.list$
##                    lower.phi, upper=.temp.list$upper.phi, ...)$value
  phi.res <- optimise(proflik.aux6, lower = .temp.list$lower.phi, upper=.temp.list$upper.phi, ...)$objective
  .temp.list$log.jacobian <<- NULL
  .temp.list$nugget.rel <<- NULL
  .temp.list$z <<- .temp.list$data
  return( - phi.res)
}

"proflik.aux4" <-
  function(lambda, ...)
{
  ## This function computer the values of the profile likelihood function for the parameters \phi  and \sigma^2 for models with nugget effect = 0, including the tranformation parameter \lambda
  ## This is an auxiliary function called by proflik.aux28
  ##
  sigmasqphi <- as.vector(.temp.list$sigmasqphi)
  sigmasq <- sigmasqphi[1]
  phi <- sigmasqphi[2]
  n <- .temp.list$n
  if(lambda > 0.999 & lambda < 1.001)
    lambda <- 1
  main <- proflik.main(tausq=.temp.list$nugget, sigmasq = sigmasq, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- ((n/2) * log(2 * pi) +
               main$log.det.to.half +
               0.5 * main$ssresmat - 
               main$log.jacobian)
  }
  if(.temp.list$method.lik == "RML") {
    xx.eigen <- eigen(crossprod(.temp.list$xmat), symmetric = TRUE,
                      only.values = TRUE)
    neglik <- (((n - .temp.list$beta.size)/2) * log(2 * pi) -
               0.5 * sum(log(xx.eigen$values)) +
               main$log.det.to.half +
               (0.5) * main$ssresmat +
               choldet +
               main$log.jacobian)
  }
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux5" <-
  function(tausq.rel, ...)
{
  ## This function computes the value of the profile likelihood for the parameter \tau^2_{rel}.
  ## It requires the minimisation of the function wrt \phi and \lambda (if the case) for each value of \tau^2_{rel}.
  ## This is an auxiliary function called by proflik.
  .temp.list$nugget.rel <<- as.vector(tausq.rel)
  if(.temp.list$fix.lambda == TRUE) {
##    phi.res <- optim(.temp.list$phi.est, proflik.aux6, method="L-BFGS-B", lower = 
##                     .temp.list$lower.phi, upper=.temp.list$upper.phi, ...)$value
    phi.res <- optimise(proflik.aux6, lower = 
                     .temp.list$lower.phi, upper=.temp.list$upper.phi, ...)$objective
  }
  else {
    phi.res <- optim(c(.temp.list$phi.est, .temp.list$lambda), proflik.aux6, method="L-BFGS-B", 
                       lower = c(.temp.list$lower.phi, -2),
                       upper = c(.temp.list$upper.phi, 2), ...)$value
  }
  .temp.list$nugget.rel <<- NULL
  return( - phi.res)
}

"proflik.aux6" <-
function(phi.lambda, ...)
{
  ## This function computes the value of the profile likelihood for the relative nugget parameter \tau^2_{rel}, minimizing the likelihood wrt correlation function scale parameter \phi (range) and the transformation parameter \lambda.
  if(length(phi.lambda) == 2) lambda <- phi.lambda[2] else lambda <- 1
  phi.lambda <- as.vector(phi.lambda)
  phi <- phi.lambda[1]
  n <- .temp.list$n
  main <- proflik.main(tausq=.temp.list$nugget.rel, sigmasq=1, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) +
      main$log.det.to.half +
        (n/2) * log(main$ssresmat/n) +
          (n/2) -
            main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- ((n - .temp.list$beta.size)/2) * log(2 * pi) +
      main$log.det.to.half +
        ((n - .temp.list$beta.size)/2) * log(main$ssresmat/n) +
          (n/2) -
            0.5 * sum(log(eigentrem$values)) -
              main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux7" <-
  function(phi, ...)
{
  ## This function computes the value of the profile likelihood for the parameter \phi when the nugget \tau^2 is included in the model
  ## It requires the minimisation of the function wrt relative \tau^2_{rel} for each value of \phi
  ## This is an auxiliary function called by proflik.
  .temp.list$phi <<- as.vector(phi)
  if(.temp.list$fix.lambda == TRUE) {
    .temp.list$lambda <<- 1
##    tausq.rel.res <- optim(.temp.list$tausq.rel.est, proflik.aux8, method="L-BFGS-B", 
##                           lower = 0, upper=100, ...)$value
    tausq.rel.res <- optimise(proflik.aux8, lower = 0, upper=1000, ...)$objective
    .temp.list$lambda <<- NULL
  }
  else {
    tausq.rel.res <- optim(c(.temp.list$tausq.rel.est, .temp.list$lambda), proflik.aux8, method="L-BFGS-B", lower = c(0, -2), upper = c(100, 2), ...)$value
  }
  .temp.list$phi <<- NULL
  return( - tausq.rel.res)
}

"proflik.aux8" <-
  function(tausq.rel.lambda, ...)
{
  ## This function computes the value of the profile likelihood for the correlation function scale parameter \phi (and lambda), minimizing the likelihood wrt relative nugget parameter \tau^2_{rel}
  if(length(tausq.rel.lambda) == 2)
    lambda <- tausq.rel.lambda[2]
  else lambda <- .temp.list$lambda
  n <- .temp.list$n
  phi <- .temp.list$phi
  tausq.rel <- tausq.rel.lambda[1]
  main <- proflik.main(tausq=tausq.rel, sigmasq=1, phi=phi, lambda = lambda)
  if(.temp.list$method.lik == "ML") {
    neglik <- (n/2) * log(2 * pi) + main$log.det.to.half + (
                                                              n/2) * log(main$ssresmat/n) + (n/2) - main$log.jacobian
  }
  if(.temp.list$method.lik == "RML") {
    eigentrem <- eigen(main$ixix, symmetric = TRUE, only.values = TRUE)
    neglik <- (((n - .temp.list$beta.size)/2) * log(2 * pi) + main$
               log.det.to.half + ((n - .temp.list$beta.size)/2) * log(main$ssresmat/
                                                           n) + (n/2) - 0.5 * sum(log(eigentrem$values))) - 
                                                             main$log.jacobian
  }
  return(as.vector(round(neglik, dig=8)))
}
"proflik.aux9" <-
  function(sigmasq, ...)
{
  ## This function computes the value of the profile likelihood for the parameter \sigma^2 when \tau^2 is included in the model
  ## It requires the minimisation of the function wrt \phi and \tau^2 for each value of \sigma^2
  ## This is an auxiliary function called by likfit.proflik
  .temp.list$sigmasq <<- as.vector(sigmasq)
  ini.lik <- round(100000000. * apply(.temp.list$ini.grid, 1,
                                      proflik.aux10))
  ini <- as.vector(.temp.list$ini.grid[which(ini.lik == min(ini.lik, na.rm = TRUE)),,drop=FALSE][1,])
  if(.temp.list$fix.lambda == TRUE) {
    phitausq.rel.res <- optim(ini, proflik.aux10, method="L-BFGS-B",
                              lower = c(.temp.list$
                                lower.phi, 0),
                              upper=c(.temp.list$upper.phi, 100), ...)$value
  }
  else {
    if(ini[2] == 0) ini[2] <- 0.01
    phitausq.rel.res <- optim(ini, proflik.aux10, method="L-BFGS-B", 
                                lower = c(.temp.list$lower.phi,
                                  0,-2),
                                upper = c(.temp.list$upper.phi,
                                  100, 2), ...)$value
  }
  .temp.list$sigmasq <<- NULL
  return( - phitausq.rel.res)
}

"plot.proflik" <-
  function(x, pages = c("user", "one", "two"),
           uni.only, bi.only, type.bi = c("contour", "persp"),
           conf.int = c(0.90,0.95),
           yaxis.lims = c("conf.int", "as.computed"),
           by.col = TRUE, log.scale = FALSE, use.splines = TRUE,
           par.mar.persp = c(0, 0, 0, 0), ask = FALSE, 
           ...)
{
  ##
  ## Saving original par() parameters
  ##
#  if (is.R()) 
#    par.ori <- par(no.readonly = TRUE)
#  else par.ori <- par()
#  on.exit(par(par.ori))
  ##
  parask <- par()$ask
  par(ask = ask)
  on.exit(par(ask = parask))  
  ##
  ## Checking whether to plot 1D and/or 2D profiles
  ##
  if(missing(uni.only)){
    if(x$n.bi == 0) uni.only <- TRUE
    else uni.only <- FALSE
  }
  if(missing(bi.only)){
    if(x$n.uni == 0) bi.only <- TRUE
    else bi.only <- FALSE
  }
  if(!is.logical(uni.only))
    stop("argument uni.only must be logical (TRUE or FALSE)")
  if(!is.logical(bi.only))
    stop("argument bi.only must be logical (TRUE or FALSE)")
  n.uni <- x$n.uni
  n.bi <- x$n.bi
  if((uni.only == FALSE) & (bi.only == FALSE))
    np <- n.uni + n.bi
  if((uni.only == TRUE) & (bi.only == FALSE))
    np <- n.uni
  if((uni.only == FALSE) & (bi.only == TRUE))
    np <- n.bi
  if(n.uni == 0 & np > 0) bi.only <- TRUE
  if(n.bi == 0 & np > 0) uni.only <- TRUE
  ##
  ##
  ##
  if(all(is.character(yaxis.lims)))
    yaxis.lims <- match.arg(yaxis.lims)
  type.bi <- match.arg(type.bi)
  ##
  ## Definig number of pages to place the plots
  ##
  pages <- match.arg(pages)
  if(pages == "one") {
    if(np >= 1 & np < 4)
      par(mfrow = c(np, 1))
    if(np >= 4) {
      if(by.col == TRUE)
        par(mfcol = c(ceiling(np/2), 2))
      else par(mfrow = c(ceiling(np/2), 2))
    }
  }
  if(pages == "two") {
    if(n.uni > 1 & n.uni < 4)
      par(mfrow = c(n.uni, 1))
    if(n.uni >= 4)
      par(mfrow = c(ceiling(n.uni/2), 2))
  }
  ##
  ##
  ##
  if(bi.only == FALSE) {
    for(i in 1:n.uni) {
      if(x$method.lik == "ML")
        ylabm <- "profile log-likelihood"
      else ylabm <- "profile log-(restricted) likelihood"
      if(all(conf.int) != FALSE) {
        if(!is.numeric(conf.int) | any(conf.int > 1))
          stop("argument conf.int must be numerical (scalar or vector) with values between 0 and 1")
        conf.int.drop <- x[[i]][[3]][2] - 0.5 * qchisq(conf.int,1)
      }
      if(all(is.character(yaxis.lims))){
        if(yaxis.lims == "conf.int")
          lik.lims <- c(min(conf.int.drop), 
                        x[[i]][[3]][2])
        else lik.lims <- c(min(x[[i]][[2]]),
                           x[[i]][[3]][2])
      }
      else
        lik.lims <- yaxis.lims
      if(log.scale == TRUE) {
        if(use.splines){
          nxpoints <- 5*length(x[[i]][[1]])
          nodups <- which(duplicated(x[[i]][[1]]) == FALSE)
          plot(spline(x = log(x[[i]][[1]][nodups]), 
                      y = x[[i]][[2]][nodups],
                      n = nxpoints,
                      method="natural"), type = "l",
               xlab = paste("log-",
                 proflik.plot.aux1(names(x[[i]])[1])),
               ylab = ylabm, ylim = lik.lims)
        }
        else{
          plot(log(x[[i]][[1]]), 
               x[[i]][[2]],
               type = "l",
               xlab = paste("log-",
                 proflik.plot.aux1(names(x[[i]])[1])),
               ylab = ylabm, ylim = lik.lims)
        }
        lines(log(c(x[[i]][[3]][1], x[[i]][[3]][1])),
              c(min(lik.lims), x[[i]][[3]][2]), lty = 2)
      }
      else {
        if(use.splines){
          nxpoints <- 5*length(x[[i]][[1]])
          nodups <- which(duplicated(x[[i]][[1]]) == FALSE)
          plot(spline(x = x[[i]][[1]][nodups],
                      y = x[[i]][[2]][nodups],
                      n = nxpoints,
                      method="natural"),
               type = "l",
               xlab = proflik.plot.aux1(names(x[[i]])[1]),
               ylab = ylabm, ylim = lik.lims)
        }
        else{
          plot(x[[i]][[1]],
               x[[i]][[2]],
               type = "l", xlab = 
               proflik.plot.aux1(names(x[[i]])[1]),
               ylab = ylabm, ylim = lik.lims)
        }
        lines(c(x[[i]][[3]][1], 
                x[[i]][[3]][1]),
              c(min(lik.lims), x[[
                                            i]][[3]][2]), lty = 2)
      }
      abline(h = conf.int.drop, lty = 3)
    }
  }
  if(uni.only == FALSE) {
    if(pages == "two") {
      if(n.bi >= 1 & n.bi < 4)
        par(mfrow = c(n.bi, 1))
      if(n.bi >= 4)
        par(mfrow = c(ceiling(n.bi/2), 2))
    }
    for(i in 1:n.bi) {
      if(type.bi == "contour") {
        if(log.scale == TRUE) {
          contour(log(x[[(n.uni + i)]][[1]]),
                  log(x[[(n.uni + i)]][[2]]),
                  matrix(x[[(n.uni + i)]][[3]],
                         ncol = length(x[[(n.uni +i)]][[2]])),
                  xlab = paste("log-", proflik.plot.aux1(names(x[[(n.uni + i)]][1]))),
                  ylab = paste("log-", proflik.plot.aux1(names(x[[(n.uni + i)]][2]))),
                    ...)
          points(log(t(x[[(n.uni + i)]][[4]][1:2])))
        }
        else {
          contour(x[[(n.uni + i)]][[1]],
                  x[[(n.uni + i)]][[2]],
                  matrix(x[[(n.uni + i)]][[3]],
                         ncol = length(x[[(n.uni + i)]][[2]])),
                  xlab = proflik.plot.aux1(names(x[[(n.uni + i)]][1])),
                  ylab = proflik.plot.aux1(names(x[[(n.uni + i)]][2])),
                  ...)
          points(t(x[[(n.uni + i)]][[4]][1:2]))
        }
      }
      if(type.bi == "persp") {
        cat("For better visualisation arguments for the funtion `persp` can be passed.\nSome relevant argments are: theta, phi, r, d, among others.\n Type help(persp) for a description of the options\n")
        if(x$method.lik == "ML")
          zlabm <- 
            "profile log-likelihood"
        else zlabm <- "profile log-(restricted) likelihood"
        zlimm <- range(x[[(n.uni +
                                     i)]][[3]])
        zlimm[1] <- 1.01 * zlimm[1]
        minlik <- min(x[[(n.uni + i)]][[3]])
        if(log.scale == TRUE) {
          persp(log(x[[(n.uni + i)]][[1]]),
                log(x[[(n.uni + i)]][[2]]),
                matrix(x[[(n.uni + i)]][[3]],
                       ncol = length(x[[(n.uni + i)]][[2]])),
                xlab = proflik.plot.aux1(paste("log-", names(x[[(n.uni + i)]][1]))),
                ylab = paste("log-", proflik.plot.aux1(names(x[[(n.uni + i)]][2]))),
                zlab = zlabm, box = TRUE, ...)
                                        #          pp1 <- perspp(x = log(c(x[[(n.uni + i)]][[4]][1],
                                        #                          min( x[[(n.uni + i)]][[1]]))[c(1, 1, 1, 2)]),
                                        #                        y = log(c(x[[(n.uni + i)]][[4]][2],
                                        #                          min(x[[(n.uni + i)]][[2]]))[c(1, 1, 2, 1)]),
                                        #                        z = c(minlik, x[[(n.uni + i)]][[4]][3])[c(1, 2, 1, 1)], pp)
                                        #          segments(log(pp1$x[1]), log(pp1$y[1]), log(pp1$x[2]), log(pp1$y[2]),
                                        #                   lwd = 2)
        }
        else {
          persp(x = x[[(n.uni + i)]][[1]],
                y = x[[(n.uni + i)]][[2]],
                z = matrix(x[[(n.uni + i)]][[3]],
                  ncol = length(x[[(n.uni + i)]][[2]])),
                xlab = proflik.plot.aux1(names(x[[(n.uni + i)]][1])),
                ylab = proflik.plot.aux1(names(x[[(n.uni + i)]][2])),
                zlab = zlabm, box = TRUE, ...)
                                        #          pp1 <- perspp(x = c(x[[(n.uni + i)]][[4]][1],
                                        #                          min(x[[(n.uni + i)]][[1]]))[c(1, 1, 1, 2)],
                                        #                        y = c(x[[(n.uni + i)]][[4]][2],
                                        #                          min(x[[(n.uni + i)]][[2]]))[c(1, 1,2, 1)],
                                        #                        z = c(minlik, x[[(n.uni + i)]][[4]][3])[c(1, 2, 1, 1)], pp)
                                        #          segments(pp1$x[1], pp1$y[1], pp1$x[2], pp1$y[2], lwd = 2)
        }
      }
    }
  }
  return(invisible())
}

"proflik.plot.aux1" <-
  function(parameter.name)
{
  switch(parameter.name,
         range = expression(phi),
         sill = expression(sigma^2),
         lambda = expression(lambda),
         nugget = expression(tau^2),
         nugget.rel = expression(tau[rel]^2))
}

"proflik.main" <-
  function(tausq, sigmasq, phi, lambda)
{
  z <- .temp.list$z
  n <- .temp.list$n
  if(lambda == 1){
    ##    log.jacobian <- .temp.list$log.jacobian
    log.jacobian <- 0
  }
  else {
    if(any(z <= 0))
      stop("Transformation option not allowed when there are zeros or negative data")
    if(any(z^(lambda - 1) <= 0))
      log.jacobian <- log(prod(z^(lambda - 1)))
    else log.jacobian <- sum(log(z^(lambda - 1)))
    if(lambda == 0)
      z <- log(z)
    else z <- ((z^lambda) - 1)/lambda
  }
  beta.size <- .temp.list$beta.size
  kappa <- .temp.list$kappa
  covinf <- varcov.spatial(dists.lowertri = .temp.list$dists.lowertri,
                           cov.model = .temp.list$cov.model, kappa = kappa,
                           nugget = tausq, cov.pars = c(sigmasq, phi),
                           inv = TRUE, det = TRUE, func.inv = "eigen",
                           only.inv.lower.diag = TRUE)  
  xix <- as.double(rep(0, beta.size*beta.size))
  xix <- .C("bilinearform_XAY",
            as.double(covinf$lower.inverse),
            as.double(covinf$diag.inverse),
            as.double(as.vector(.temp.list$xmat)),
            as.double(as.vector(.temp.list$xmat)),
            as.integer(beta.size),
            as.integer(beta.size),
            as.integer(n),
            res = xix,PACKAGE = "geoR")$res
  attr(xix, "dim") <- c(beta.size, beta.size)
  if(length(as.vector(xix)) == 1) {
    ixix <- 1/xix
    choldet <- 0.5 * log(xix)
  }
  else {
    chol.xix <- chol(xix)
    ixix <- chol2inv(chol.xix)
    choldet <- sum(log(diag(chol.xix)))
  }
  xiy <- as.double(rep(0, beta.size))
  xiy <- .C("bilinearform_XAY",
            as.double(covinf$lower.inverse),
            as.double(covinf$diag.inverse),
            as.double(as.vector(.temp.list$xmat)),
            as.double(as.vector(z)),
            as.integer(beta.size),
            as.integer(1),
            as.integer(n),
            res = xiy, PACKAGE = "geoR")$res
  beta.hat <- as.vector(ixix %*% xiy)
  yiy <- as.double(0.0)
  yiy <- .C("bilinearform_XAY",
            as.double(covinf$lower.inverse),
            as.double(covinf$diag.inverse),
            as.double(as.vector(z)),
            as.double(as.vector(z)),
            as.integer(1),
            as.integer(1),
            as.integer(n),
            res = yiy, PACKAGE = "geoR")$res
  ssresmat <- as.vector(yiy - crossprod(beta.hat,xiy))
  return(list(log.det.to.half = covinf$log.det.to.half,
              ssresmat = ssresmat,
              ixix = ixix, log.jacobian = log.jacobian))
}
"grf" <-
  function(n, grid = "irreg", 
           nx, ny, xlims = c(0, 1), ylims = c(0, 1), nsim = 1, 
           cov.model = "matern",
           cov.pars = stop("covariance parameters (sigmasq and phi) needed"),
           kappa = 0.5,  nugget=0, lambda=1, aniso.pars = NULL,
           method = c("cholesky", "svd", "eigen", "circular.embedding"),
           messages)
{
  ##
  ## reading and checking input
  ##
  call.fc <- match.call()
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  method <- match.arg(method)
  if((method == "circular.embedding") & messages.screen)
    cat("grf: for simulation of fields with large number of points the consider the package RandomFields.\n")
  ##
  ## defining the model to simulate from
  ##
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave", "power",
                           "powered.exponential", "cauchy", "gneiting",
                           "gneiting.matern", "pure.nugget"))
  if (cov.model == "matern" && kappa == 0.5) cov.model <- "exponential"
  tausq <- nugget
  if (is.vector(cov.pars)) {
    sigmasq <- cov.pars[1]
    phi <- cov.pars[2]
    nst <- 1
  }
  else {
    sigmasq <- cov.pars[, 1]
    phi <- cov.pars[, 2]
    nst <- nrow(cov.pars)
  }
  sill.total <- tausq + sum(sigmasq)
  messa <- grf.aux1(nst, nugget, sigmasq, phi, kappa, cov.model)
  if (messages.screen) {
    cat(messa$nst)
    cat(messa$nugget)
    cat(messa$cov.structures)
    cat(paste("grf: decomposition algorithm used is: ", method, "\n"))
  }
  ##
  ##
  ##
  rseed <- get(".Random.seed", envir=.GlobalEnv, inherits = FALSE)
  results <- list()
  ##
  ## defining the locations for the simulated data
  ##
  if(is.character(grid)) grid <- match.arg(grid, choices=c("irreg", "reg"))
  if (is.matrix(grid) | is.data.frame(grid)) {
    results$coords <- as.matrix(grid)
    if (messages.screen) 
      cat("grf: simulation on locations provided by the user\n")
  }
  else {
    ##
    ## checking whether it is a 1D simulation
    ##
    if((!missing(nx) && nx == 1) | (!missing(ny) && ny == 1) |
       diff(xlims) == 0 | diff(ylims) == 0){
      sim1d <- TRUE
      if (messages.screen) 
        cat("simulations in 1D\n")
    }
    else sim1d <- FALSE
    ##
    ## defining number of points in each direction
    ##
    if(missing(nx)){
      if(sim1d)
        if(diff(xlims) == 0) nx <- 1
        else nx <- n
      else
        if(is.character(grid) && grid == "reg") nx <- round(sqrt(n))
        else nx <- n
    }
    if(missing(ny)){
      if(sim1d)
        if(diff(ylims) == 0) ny <- 1
        else ny <- n
      else
        if(is.character(grid) && grid == "reg") ny <- round(sqrt(n))
        else ny <- n
    }
    ##
    ## defining the grid
    ##
    if (is.character(grid) && grid == "irreg") {
      results$coords <- cbind(x = runif(nx, xlims[1], xlims[2]),
                              y = runif(ny, ylims[1], ylims[2]))
      if (messages.screen) 
        cat(paste("grf: simulation(s) on randomly chosen locations with ", n, " points\n"))
    }
    else {
      xpts <- seq(xlims[1], xlims[2], l = nx)
      ypts <- seq(ylims[1], ylims[2], l = ny)
      results$coords <- as.matrix(expand.grid(x = xpts, y = ypts))
      if(length(xpts) == 1) xspacing <- 0
      else xspacing <- xpts[2] - xpts[1] 
      if(length(ypts) == 1) yspacing <- 0
      else yspacing <- ypts[2] - ypts[1] 
      if(abs(xspacing - yspacing) < 1e-12) equal.spacing <- TRUE
      else equal.spacing <- FALSE
      if (messages.screen) 
        cat(paste("grf: generating grid ", nx, " * ", ny, 
                  " with ", (nx*ny), " points\n"))
    }
  }
  n <- nrow(results$coords)
  if(length(unique(results$coords[,1])) == 1 |
     length(unique(results$coords[,2])) == 1)
    sim1d <- TRUE
  else sim1d <- FALSE
  ##
  ## transforming to the isotropic space 
  ##
  if(!is.null(aniso.pars)) {
    if(method == "circular.embedding")
      stop("anisotropic models not implemented for the circular embedding method. \nConsider using the package \"RandomFields")
    if(length(aniso.pars) != 2 | !is.numeric(aniso.pars))
      stop("anisotropy parameters must be provided as a numeric vector with two elements: the rotation angle (in radians) and the anisotropy ratio (a number greater than 1)")
    if(messages.screen)
      cat("grf: transforming to the isotropic space \n")
    results$coords <- coords.aniso(coords = results$coords,
                                   aniso.pars = aniso.pars)
  }
  ##
  ## simulating data at locations defined by the matrix results$coords
  ##
  if (all(phi) == 0) {
    results$data <- matrix(rnorm((n * nsim), mean = 0, sd = sqrt(sill.total)), 
                           nrow = n, ncol = nsim)
  }
  else {
    if (method == "circular.embedding") {
      if (is.character(grid) && grid == "irreg") 
        stop("Option for \"circular.embedding\" algorithm only allowed for regular grids. You might have to include the argument grid=\"reg\"")
      if(cov.model == "power")
        stop("power covariance model not implemented for the circular embedding method") 
      stepx <- (xlims[2] - xlims[1])/(nx - 1)
      stepy <- (ylims[2] - ylims[1])/(ny - 1)
      if (round(1e+08 * stepx) != round(1e+08 *stepy)) 
        stop("grf: distance between grid points must be the same in X and Y directions")
      temp <- list(n = n, nst = nst, sigmasq = sigmasq, 
                   xlims = xlims, ylims = ylims, stepx = stepx, 
                   cov.model = cov.model, phi = phi, kappa = kappa)
      if(messages.screen)
        cat("\ngrf: WARNING:\nmessages of the type mtot=XXXXX will appear on your screen. \nIf there are many (3 or more, say) or they run indefinitely, you should stop the simulation and try again with a different grid (e.g. try to add 1 point in each direction)\n")
      grf.aux3 <- function (nsim, temp) {
        realiz <- rep(0, temp$n)
        for (i in 1:temp$nst) {
          realiz <- realiz + sqrt(temp$sigmasq[i]) *
            grf.aux2(xlim = temp$xlims, 
                     ylim = temp$ylims, step = temp$stepx,
                     cov.model = temp$cov.model,
                     phi = temp$phi[i], kappa = temp$kappa)
          NULL
        }
        return(realiz)
      }      
      results$data <- apply(as.matrix(1:nsim), 1, grf.aux3, temp = temp)
      if (nugget != 0) {
        results$data <- results$data +
          matrix(rnorm((n * nsim), sd = sqrt(nugget)), ncol = nsim)
      }
    }
    else{
      results$data <- matrix(rnorm((n * nsim)), nrow = n, ncol = nsim)
      cov.decomp <- t(varcov.spatial(coords = results$coords, 
                                   cov.model = cov.model, kappa = kappa,
                                   nugget = nugget, cov.pars = cov.pars, 
                                   only.decomposition = TRUE,
                                   func.inv = method)$sqrt.varcov)
      results$data <- cov.decomp %*% results$data
    }
    if (nsim == 1) 
      results$data <- as.vector(results$data)
  }
  ##
  ## transforming data (Box - Cox)
  ##
  if (lambda != 1){
    if (lambda != 0)
      results$data <- (results$data * lambda + 1)^(1/lambda)
    else
      results$data <- exp(results$data)
    messa$transformation <- paste("grf: Data transformed (Box-Cox), for lambda =", lambda)
    if (messages.screen) 
      cat(messa$transformation); cat("\n")
  }
  ##
  ## back-transformation to the anisotropic space 
  ##
  if(!is.null(aniso.pars)) {
    if(messages.screen)
      cat("grf: back-transforming to the anisotropic space \n")
    results$coords <- coords.aniso(coords = results$coords,
                                   aniso.pars = aniso.pars, reverse=TRUE)
  }
  else{aniso.pars <- "no anisotropy parameters provided/used"}
  ##
  ## preparing output
  ##
  if (messages.screen) 
    cat(paste("grf: End of simulation procedure. Number of realizations:",
              nsim, "\n"))
  results  <- c(results, list(cov.model = cov.model, 
                              nugget = nugget, cov.pars = cov.pars,
                              kappa = kappa, lambda = lambda,
                              aniso.pars = aniso.pars, method = method,
#                              sim.dim = ifelse(sim1d, "1d", "2d"),
                              .Random.seed = rseed, messages = messa,
                              call = call.fc))
  if(is.character(grid) && grid == "reg"){
    if(equal.spacing) attr(results, "spacing") <- xspacing
    else{
      attr(results, "xspacing") <- xspacing
      attr(results, "yspacing") <- yspacing
    }
  }
  attr(results, 'sp.dim') <- ifelse(sim1d, "1d", "2d")
  class(results) <- c("grf", "geodata", "variomodel")
  return(results)
}


"grf.aux1" <-
  function (nst, nugget, sigmasq, phi, kappa, cov.model) 
{
  cov.nst <- paste("grf: process with ", nst, " covariance structure(s)\n")
  cov.nugget <- paste("grf: nugget effect is: tausq=", nugget,"\n")
  cov.message <- NULL
  for (i in 1:nst) {
    if (phi[i] == 0) 
      cov.message[i] <- paste("grf: covariance model", i, "is a pure nugget effect\n")
    else {
      if (cov.model == "matern" | cov.model == "powered.exponential" | 
          cov.model == "cauchy" | cov.model == "gneiting-matern") 
        cov.message[i] <- paste("grf: covariance model ", 
                                i, " is: ", cov.model, "(sigmasq=", sigmasq[i], 
                                ", phi=", phi[i], ", kappa = ", kappa, ")\n", sep = "")
      else cov.message[i] <- paste("grf: covariance model ", 
                                   i, " is: ", cov.model, "(sigmasq=", sigmasq[i], 
                                   ", phi=", phi[i], ")\n", sep = "")
    }
  }
  return(list(nst = cov.nst, nugget = cov.nugget, cov.structures = cov.message))
}

"grf.aux2" <-
  function (xlim, ylim, step, cov.model, phi, kappa = 0.5) 
{
  ## Simulate on a regular grid by calling the compiled "woodandchan"
  ## routine (circulant embedding) in the geoR C code.
  ##
  ## xlim, ylim : coordinate ranges of the grid.
  ## step       : grid spacing, identical in both directions.
  ## cov.model  : correlation model name, converted to the numeric id
  ##              expected by the C code via cor.number().
  ## phi, kappa : correlation parameters; a NULL kappa is mapped to 1.
  ##
  ## Returns the vector of simulated values (one per grid node).
  if (is.null(kappa)) kappa <- 1
  grid.dim <- c(diff(xlim)/step + 1, diff(ylim)/step + 1)
  n.nodes <- grid.dim[1] * grid.dim[2]
  sim <- .C("woodandchan",
            as.integer(cor.number(cov.model)),
            as.integer(grid.dim),
            as.integer(2),          # number of dimensions
            as.double(step),
            as.double(phi),
            as.double(kappa), 
            res = double(n.nodes),
            m = integer(2),
            PACKAGE = "geoR")$res
  cat("\n")
  return(sim)
}

"lines.variomodel.grf" <-
  function (x, max.dist = max(dist(x$coords)), length = 100, 
            lwd = 2, ...) 
{
  ## Add the theoretical variogram implied by the model stored in a
  ## 'grf' object to the current plot.
  ##
  ## x        : object of class "grf" with the simulation parameters.
  ## max.dist : largest distance at which the curve is drawn (defaults
  ##            to the maximum inter-point distance of the locations).
  ## length   : number of evaluation points along the curve.
  ## lwd, ... : passed on to lines().
  ##
  ## 'mva' is only needed for dist() on pre-"stats" versions of R.
  if (!"package:stats" %in% search()) require(mva)
  ## kappa is meaningful only for these correlation families
  needs.kappa <- x$cov.model %in% c("matern", "powered.exponential",
                                    "cauchy", "gneiting-matern")
  kappa <- if (needs.kappa) x$kappa else NULL
  dists <- seq(0, max.dist, length = length)
  ## total sill = nugget + sum of the partial sills
  sill.total <- if (is.vector(x$cov.pars)) x$nugget + x$cov.pars[1]
                else x$nugget + sum(x$cov.pars[, 1])
  gamma <- sill.total - cov.spatial(dists, cov.model = x$cov.model, 
                                    kappa = kappa, cov.pars = x$cov.pars)
  lines(dists, gamma, lwd = lwd, ...)
  return(invisible())
}

"plot.1d" <-
  function(x, xlim, ylim, x1vals, ...)
{
  ## Plot data from a 1-d (transect) simulation of a 'grf' object.
  ##
  ## x      : object with $coords (2-column matrix) and $data.
  ## xlim,
  ## ylim   : axis limits forwarded to plot() (may be NULL).
  ## x1vals : unique values of the first coordinate; if there is only
  ##          one, the data vary along the second coordinate instead.
  ## ...    : further graphical arguments passed on to plot().
  cat("data in 1-D\n")
  ## pick the coordinate column that actually varies
  col.ind <- if (length(x1vals) == 1) 2 else 1
  ord <- order(x$coords[, col.ind])
  ldots <- list(...)
  ## Supply default axis labels only when the caller gave none.
  ## (Bug fix: defaults were previously always passed explicitly, so a
  ## user-supplied 'xlab' or 'ylab' in '...' raised a "matched by
  ## multiple actual arguments" error.)
  if (is.null(ldots$xlab)) ldots$xlab <- "locations"
  if (is.null(ldots$ylab)) ldots$ylab <- "data"
  pty.prev <- par()$pty
  on.exit(par(pty = pty.prev))  # restore plot region type even on error
  par(pty = "m")
  do.call("plot",
          c(list(x = x$coords[ord, col.ind], y = x$data[ord],
                 xlim = xlim, ylim = ylim),
            ldots))
  return(invisible())
}

"image.grf" <-
  function (x, sim.number = 1, xlim, ylim,
            x.leg, y.leg, ...) 
{
  ## Image plot of one simulation held in a 'grf' object.
  ##
  ## x          : object of class "grf".
  ## sim.number : column of x$data to display.
  ## xlim, ylim : optional plotting limits.
  ## x.leg,
  ## y.leg      : position of the colour legend; drawn only when both
  ##              are supplied.
  ## ...        : further arguments for image() (2-d) or plot.1d() (1-d),
  ##              and for legend.krige().
  pty.prev <- par()$pty
  on.exit(par(pty = pty.prev))  # restore even if plotting errors out
  ## round coordinates so numerical noise does not defeat grid detection
  x1vals <- unique(round(x$coords[, 1], digits = 12))
  x2vals <- unique(round(x$coords[, 2], digits = 12))
  if (missing(xlim)) xlim <- NULL
  if (missing(ylim)) ylim <- NULL
  ##
  ## Plotting simulations in 1-D
  ##
  if (attr(x, "sp.dim") == "1d" | length(x1vals) == 1 | length(x2vals) == 1)
    plot.1d(x, xlim = xlim, ylim = ylim, x1vals = x1vals, ...)
  else {
    ##
    ## Plotting simulations in 2-D
    ##
    ldots <- match.call(expand.dots = FALSE)$...
    ## 'offset.leg' is meant for legend.krige(), not image().
    ## Bug fix: the old code used match(names(ldots), "offset.leg"),
    ## whose NA entries made the [[<- assignment fail whenever any other
    ## extra argument was present; removing by name is a no-op when the
    ## element is absent.
    ldots[["offset.leg"]] <- NULL
    if (!"xlab" %in% names(ldots)) ldots$xlab <- "X Coord"
    if (!"ylab" %in% names(ldots)) ldots$ylab <- "Y Coord"
    ##
    ## Checking for rectangular grid
    ##
    nx <- length(as.numeric(levels(as.factor(round(x$coords[, 1], digits = 12)))))
    ny <- length(as.numeric(levels(as.factor(round(x$coords[, 2], digits = 12)))))
    x$data <- as.matrix(x$data)
    n <- nrow(x$data)
    if (nx * ny != n) 
      stop("cannot produce image plot probably due to irregular grid of locations")
    ##
    ## Preparing image plot elements
    ##
    locations <- prepare.graph.kriging(locations = x$coords,
                                       values = x$data[, sim.number],
                                       borders = NULL,
                                       borders.obj = eval(attr(x, "borders")),
                                       xlim = xlim, ylim = ylim) 
    par(pty = "s")
    do.call("image", c(list(x = locations$x, y = locations$y,
                            z = locations$values,
                            xlim = locations$coords.lims[, 1],
                            ylim = locations$coords.lims[, 2]),
                       ldots))
    ##
    ## Adding the legend (if requested)
    ##
    if (!missing(x.leg) && !missing(y.leg)) {
      ## Bug fix: the old call referenced undefined objects 'vertical'
      ## and 'cex.leg' and passed 'col' both explicitly and via '...',
      ## so adding a legend always failed.  Build the argument list once,
      ## letting user options flow in through '...'.
      leg.args <- list(x.leg = x.leg, y.leg = y.leg,
                       values = locations$values[!is.na(locations$values)],
                       ...)
      if (is.null(leg.args$col)) leg.args$col <- heat.colors(12)
      do.call("legend.krige", leg.args)
    }
  }
  return(invisible())
}

#"image.grf" <-
#  function (x, sim.number = 1, ...) 
#{
#  x1vals <- unique(x$coords[,1])
#  x2vals <- unique(x$coords[,2])
#  if(x$sim.dim == "1d" | length(x1vals) == 1 | length(x2vals) == 1)
#    plot.1d(x, ...)
#  else{
#    xl <- as.numeric(levels(as.factor(round(x$coords[, 1], dig=12))))
#    nx <- length(xl)
 #   yl <- as.numeric(levels(as.factor(round(x$coords[, 2], dig=12))))
#    ny <- length(yl)
 #   x$data <- as.matrix(x$data)
 #   n <- nrow(x$data)
 #   if (nx * ny != n) 
 #     stop("cannot produce image plot probably due to data on irregular grid")
 ##   m <- matrix(x$data[, sim.number], ncol = ny)
 #   coords.lims <- set.coords.lims(coords=x$coords)
 #   x.ex <- diff(range(coords.lims[,1]))/(2*(nx-1))
 #   y.ex <- diff(range(coords.lims[,2]))/(2*(ny-1))
 #   xlim.ex <- coords.lims[,1] + c(-x.ex, x.ex)
 #   ylim.ex <- coords.lims[,2] + c(-y.ex, y.ex)
 #   pty.prev <- par()$pty
 #   par(pty = "s")
 #   image(xl, yl, m, xlim= xlim.ex, ylim=ylim.ex,...)
 #   par(pty=pty.prev)
 # }
 # return(invisible())
#}

"persp.grf" <- 
  function(x, sim.number = 1, ...)
{
  ## Perspective (3-d surface) plot of one simulation from a 'grf' object.
  ##
  ## x          : object of class "grf" (list with $coords and $data).
  ## sim.number : column of x$data to display.
  ## ...        : further arguments for persp() (2-d case) or
  ##              plot.1d() (1-d case).
  ##
  ## Coordinates are rounded to 12 digits so that tiny numerical noise
  ## does not break the regular-grid detection.
  x1vals <- unique(round(x$coords[, 1], digits = 12))
  x2vals <- unique(round(x$coords[, 2], digits = 12))
  ldots <- list(...)
  if (attr(x, "sp.dim") == "1d" | length(x1vals) == 1 | length(x2vals) == 1) {
    ## 1-d simulation: delegate to plot.1d().
    ## Bug fix: 'xlim'/'ylim' were previously defined only when *absent*
    ## from '...', so supplying either one caused an "object not found"
    ## error (and a duplicated-argument error through '...').  Extract
    ## them here and forward the remaining arguments explicitly.
    do.call("plot.1d",
            c(list(x = x, xlim = ldots$xlim, ylim = ldots$ylim,
                   x1vals = x1vals),
              ldots[!names(ldots) %in% c("xlim", "ylim")]))
  }
  else {
    ## 2-d simulation: rebuild the regular grid and draw the surface.
    xl <- as.numeric(levels(as.factor(round(x$coords[, 1], digits = 12))))
    nx <- length(xl)
    yl <- as.numeric(levels(as.factor(round(x$coords[, 2], digits = 12))))
    ny <- length(yl)
    x$data <- as.matrix(x$data)
    n <- nrow(x$data)
    if (nx * ny != n)
      stop("cannot produce perspective plot, probably irregular grid")
    m <- matrix(x$data[, sim.number], ncol = ny)
    persp(xl, yl, m, ...)
  }
  return(invisible())
}

"plot.grf" <-
  function (x, model.line = TRUE, plot.locations = FALSE, ...) 
{
  ## Plot the empirical variogram of data simulated by grf(), optionally
  ## preceded by a map of the simulation locations and overlaid with the
  ## generating variogram model.
  ##
  ## x              : object of class "grf".
  ## model.line     : add the curve of the true model via lines.variomodel()?
  ## plot.locations : draw the simulation locations first?
  ## ...            : passed on to the variogram plot method.
  if (plot.locations) {
    points.geodata(x, pt.divide = "equal", xlab = "Coord X", ylab = "Coord Y")
    ## pause between the two plots unless the caller controls 'ask'
    if (is.null(list(...)$ask)) {
      ask.prev <- par()$ask
      par(ask = TRUE)
      on.exit(par(ask = ask.prev)) 
    }
  }
  ## the variogram is computed on the Box-Cox scale used by grf()
  data <- if (x$lambda == 1) x$data
          else if (x$lambda == 0) log(x$data)
          else ((x$data^x$lambda) - 1)/x$lambda
  sim.bin <- variog(x, data = data)
  plot(sim.bin, ...)
  if (model.line) {
    true.model <- list(nugget = x$nugget, cov.pars = x$cov.pars, 
                       kappa = x$kappa, max.dist = max(sim.bin$u),
                       cov.model = x$cov.model)
    lines.variomodel(true.model, lwd = 3)
  }
  return(invisible())
}

"print.grf" <-
  function(x, ...)
{
  ## Print method for "grf" objects: no special formatting, the object
  ## is shown as a plain list via the default print method.
  invisible(print.default(x, ...))
}
## Fit a parametric variogram model to an empirical variogram by
## ordinary (weights = "equal") or weighted ("npairs", "cressie") least
## squares.  Returns an object of classes "variomodel"/"variofit" with
## the estimated nugget (tausq), partial sill (sigmasq), range (phi)
## and, when estimated, the extra smoothness parameter kappa.
##
## Arguments:
##   vario        : empirical variogram, preferably of class "variogram".
##   ini.cov.pars : initial value(s) for c(sigmasq, phi); a matrix or
##                  data frame triggers a grid search for the best start.
##   fix.nugget / nugget, fix.kappa / kappa : fix or estimate these.
##   simul.number : column to use when vario$v holds several variograms.
##   max.dist     : only variogram bins with u <= max.dist are used.
##   weights      : loss-function weights (see loss.vario).
##   minimisation.function : "nls", "optim" or "nlm".
##   limits       : box constraints used by optim ("L-BFGS-B").
##   messages     : print progress messages on screen?
"variofit" <-
  function (vario, ini.cov.pars, cov.model = "matern",
            fix.nugget = FALSE, nugget = 0, 
            fix.kappa = TRUE, kappa = 0.5,
            simul.number = NULL,  max.dist = vario$max.dist,
            weights = c("npairs", "equal", "cressie"),
            minimisation.function,
            limits = pars.limits(), 
            messages, ...) 
{
  call.fc <- match.call()
  ## screen messages default to the "geoR.messages" option
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  if(length(class(vario)) == 0 || all(class(vario) != "variogram"))
    warning("object vario should preferably  be of the class \"variogram\"")
  weights <- match.arg(weights)
  ## default minimiser: nls for OLS, optim otherwise
  if(missing(minimisation.function)){
    if(weights == "equal") minimisation.function <- "nls"
    else minimisation.function <- "optim"
  }
  ## nls (with alg="plinear") cannot handle the linear/power models,
  ## nor weights other than "equal"
  if(any(cov.model == c("linear", "power")) & minimisation.function == "nls"){
    cat("warning: minimisation function nls can not be used with given cov.model.\n          changing for \"optim\".\n")
    minimisation.function <- "optim"
  }
  if(minimisation.function == "nls" & weights != "equal"){
    warning("variofit: minimisation function nls can only be used with weights=\"equal\".\n          changing for \"optim\".\n")
    minimisation.function <- "optim"
  }
  if(messages.screen)
    cat(paste("variofit: weights used:", weights, "\n"))
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave",
                           "linear", "power", "powered.exponential", "cauchy",
                           "gneiting", "gneiting.matern", "pure.nugget"))
  ## kappa cannot exceed 2 for the powered.exponential family
  if(cov.model == "powered.exponential")
    if(limits$kappa["upper"] > 2) limits$kappa["upper"] <- 2
  ##  if(cov.model == "matern" | cov.model == "    powered.exponential" | 
  ##     cov.model == "cauchy" | cov.model == "gneiting.matern")
  ##    fix.kappa <- TRUE
  if (is.matrix(vario$v) & is.null(simul.number)) 
    stop("object in vario$v is a matrix. This function works for only 1 empirical variogram at once\n")
  if (!is.null(simul.number)) 
    vario$v <- vario$v[, simul.number]
  ##
  ## Setting maximum distance
  ##
  if(!is.numeric(max.dist) || length(max.dist) > 1)
    stop("a single numerical value must be provided in the argument max.dist") 
  if (max.dist == vario$max.dist) 
    XY <- list(u = vario$u, v = vario$v, n=vario$n)
  else
    XY <- list(u = vario$u[vario$u <= max.dist],
               v = vario$v[vario$u <= max.dist],
               n = vario$n[vario$u <= max.dist])
  if(cov.model == "pure.nugget"){
    ##
    ## parameter estimation for model which does not require numerical minimisation
    ## (the sill is just a weighted mean of the empirical variogram)
    ##
    minimisation.function <- "not used"
    message <- "correlation function chosen does not require numerical minimisation"
    if(weights == "equal") lm.wei <- rep(1, length(XY$u))
    else lm.wei <- XY$n
    if(cov.model == "pure.nugget"){
      if(fix.nugget){
        temp <- lm((XY$v-nugget) ~ 1, weights = lm.wei)
        cov.pars <- c(temp$coef, 0)
      }
      else{
        temp <- lm(XY$v ~ 1, weights = lm.wei)
        nugget <- temp$coef
        cov.pars <- c(0,0)
      }
    }
    value <- sum((temp$residuals)^2)
  }
  else{
    if(messages.screen)
      cat(paste("variofit: minimisation function used:", minimisation.function, "\n"))
    ##
    ## setting things for numerical minimisation
    ##
    ##  Checking initial values
    ##
    if(is.matrix(ini.cov.pars) | is.data.frame(ini.cov.pars)){
      ini.cov.pars <- as.matrix(ini.cov.pars)
      if(nrow(ini.cov.pars) == 1)
        ini.cov.pars <- as.vector(ini.cov.pars)
      else{
        if(ncol(ini.cov.pars) != 2)
          stop("\nini.cov.pars must be a matrix or data.frame with 2 components: \ninitial values for sigmasq (partial sill) and phi (range parameter)\n")
      }
    }
    else
      if(length(ini.cov.pars) > 2)
        stop("\nini.cov.pars must provide initial values for sigmasq and phi\n")
    ##
    ## Preparing grid of initial values and choosing the best
    ## (the combination with smallest loss becomes the starting point)
    ##
    if(is.matrix(ini.cov.pars) | (length(nugget) > 1) | (length(kappa) > 1)) {
      if(messages.screen)
        cat("variofit: searching for best initial value ...")
      ini.temp <- matrix(ini.cov.pars, ncol=2)
      grid.ini <- as.matrix(expand.grid(sigmasq=unique(ini.temp[,1]),
                                        phi=unique(ini.temp[,2]),
                                        tausq=unique(nugget), kappa=unique(kappa)))
      ##  loss function:
      v.loss <- function(parms, u, v, n, cov.model, weights){
        sigmasq <- parms[1]
        phi <- parms[2]
        if(cov.model == "power") phi <- 2 * exp(phi)/(1+exp(phi))
        tausq <- parms[3]
        kappa <- parms[4]
        if(cov.model == "power")
          v.mod <- tausq +
            cov.spatial(u, cov.pars=c(sigmasq, phi), cov.model="power", kappa=kappa)
        else
          v.mod <- (sigmasq + tausq) -
            cov.spatial(u, cov.pars=c(sigmasq, phi), cov.model = cov.model,
                        kappa = kappa)
        if(weights == "equal")
          loss <- sum((v - v.mod)^2)
        if (weights == "npairs") 
          loss <- sum(n * (v - v.mod)^2)
        if (weights == "cressie") 
          loss <- sum((n/(v.mod^2)) * (v - v.mod)^2)
        return(loss)
      }
      grid.loss <- apply(grid.ini, 1, v.loss, u=XY$u, v=XY$v, n=XY$n, cov.model = cov.model, weights = weights)
      ini.temp <- grid.ini[which(grid.loss == min(grid.loss)),, drop=FALSE]
      if(is.R()) rownames(ini.temp) <- "initial.value"
      if(messages.screen){
        cat(" selected values:\n")
        print(rbind(round(ini.temp, dig=2), status=ifelse(c(FALSE, FALSE, fix.nugget, fix.kappa), "fix", "est")))
        cat(paste("loss value:", max(grid.loss), "\n"))
      }
      names(ini.temp) <- NULL
      ini.cov.pars <- ini.temp[1:2]
      nugget <- ini.temp[3]
      kappa <- ini.temp[4]
      grid.ini <- NULL
    }
    ##
    ## transforming kappa for constraint minimisation
    ## (log / logit scale keeps kappa positive, and below 2 for
    ## the powered.exponential family)
    ##
    if(fix.kappa == FALSE){
      if(cov.model == "powered.exponential")
        Tkappa.ini <- log(kappa/(2-kappa))
      else
        Tkappa.ini <- log(kappa)
    }
    ##
    ## minimisation using "nls"
    ## ("plinear" estimates the linear coefficients -- sill and,
    ## when free, nugget -- while Tphi (log/logit of phi) is nonlinear)
    ##
    if (minimisation.function == "nls") {
      if(! "package:stats" %in% search()) require(nls)
      if(ini.cov.pars[2] == 0) ini.cov.pars <- max(XY$u)/10
      if(kappa == 0) kappa <- 0.5
      if(cov.model == "power") Tphi.ini <- log(ini.cov.pars[2]/(2-ini.cov.pars[2])) 
      else Tphi.ini <- log(ini.cov.pars[2])
      XY$cov.model <- cov.model
      ##
      if (fix.nugget) {
        XY$nugget <- as.vector(nugget)
        if(fix.kappa){
          XY$kappa <- as.vector(kappa)
          res <- nls((v-nugget) ~ matrix((1-cov.spatial(u,cov.pars=c(1,exp(Tphi)),
                                                        cov.model=cov.model, kappa=kappa)),
                                         ncol=1),
                     start=list(Tphi=Tphi.ini), data=XY, alg="plinear", ...)
        }
        else{
          if(cov.model == "powered.exponential")
            res <- nls((v-nugget) ~ matrix((1-cov.spatial(u,cov.pars=c(1,exp(Tphi)),
                                                          cov.model=cov.model,
                                                          kappa=(2*exp(Tkappa)/(1+exp(Tkappa))))),
                                           ncol=1),
                       start=list(Tphi=Tphi.ini, Tkappa = Tkappa.ini),
                       data=XY, alg="plinear", ...)
          else
            res <- nls((v-nugget) ~ matrix((1-cov.spatial(u,cov.pars=c(1,exp(Tphi)),
                                                          cov.model=cov.model,
                                                          kappa=exp(Tkappa))), ncol=1),
                       start=list(Tphi=Tphi.ini, Tkappa = Tkappa.ini),
                       data=XY, alg="plinear", ...)       
          kappa <- exp(coef(res)["Tkappa"])
          names(kappa) <- NULL
        }
        cov.pars <- coef(res)[c(".lin", "Tphi")]
        names(cov.pars) <- NULL
      }
      else{
        if(fix.kappa){
          XY$kappa <- kappa
          res <- nls(v ~ cbind(1,(1- cov.spatial(u, cov.pars=c(1,exp(Tphi)),
                                                 cov.model = cov.model, kappa=kappa))),
                     start=list(Tphi=Tphi.ini), alg="plinear", data=XY, ...)
        }
        else{
          if(cov.model == "powered.exponential")
            res <- nls(v ~ cbind(1, (1-cov.spatial(u, cov.pars=c(1, exp(Tphi)),
                                                   cov.model = cov.model, kappa=exp(Tkappa)))),
                       start=list(Tphi=Tphi.ini, Tkappa = Tkappa.ini),
                       alg="plinear", data=XY, ...)
          else
            res <- nls(v ~ cbind(1, (1-cov.spatial(u, cov.pars=c(1, exp(Tphi)),
                                                   cov.model = cov.model,
                                                   kappa=(2*exp(Tkappa)/(1+exp(Tkappa)))))),
                       start=list(Tphi=Tphi.ini, Tkappa = Tkappa.ini),
                       alg="plinear", data=XY, ...)
          kappa <- exp(coef(res)["Tkappa"]);names(kappa) <- NULL
        }
        nugget <- coef(res)[".lin1"];names(nugget) <- NULL
        cov.pars <- coef(res)[c(".lin2", "Tphi")]
        names(cov.pars) <- NULL
      }
      ## back-transform phi from the log (or logit, for "power") scale
      if(cov.model == "power") cov.pars[2] <- 2 * exp(cov.pars[2])/(1+exp(cov.pars[2]))  
      else cov.pars[2] <- exp(cov.pars[2])
      if(nugget < 0 | cov.pars[1] < 0){
        warning("\nvariofit: negative variance parameter found using the default option \"nls\".\n        Try another minimisation function and/or fix some of the parameters.\n")
        temp <- c(sigmasq=cov.pars[1], phi=cov.pars[2], tausq=nugget, kappa=kappa)
        print(rbind(round(temp, dig=4),
                    status=ifelse(c(FALSE, FALSE, fix.nugget, fix.kappa), "fix", "est")))
        return(invisible())
      }
      value <- sum(resid(res)^2)
      message <- "nls does not provides convergence message"
    }
    ##
    ## minimisation using "optim" or "nlm"
    ##
    if (minimisation.function == "nlm" | minimisation.function == "optim") {
      ##
      ## Preparing lists for the minimiser
      ##
      .global.list <- list(u = XY$u, v = XY$v, n=XY$n, fix.nugget = fix.nugget,
                           nugget = nugget, fix.kappa = fix.kappa, kappa = kappa,
                           cov.model = cov.model, m.f = minimisation.function,
                           weights = weights)
      ##
      ## Preparing initial value
      ##
      ini <- ini.cov.pars
      if(cov.model == "power") ini[2] <- log(ini[2]/(2-ini[2])) 
      if(cov.model == "linear") ini <- ini[1] 
      if(fix.nugget == FALSE) ini <- c(ini, nugget)
      if(fix.kappa == FALSE) ini <- c(ini, Tkappa.ini)
      names(ini) <- NULL
      if(minimisation.function == "nlm"){
        result <- nlm(loss.vario, ini, g.l = .global.list, ...)
        result$par <- result$estimate
        result$value <- result$minimum
        result$convergence <- result$code
        ## loss.vario writes .temp.theta to the workspace when nlm
        ## proposes negative parameter values; recover it here
        if(is.R()){
          if(!is.null(get(".temp.theta", pos =1)))
            result$par <- get(".temp.theta", pos=1)
        }
        else{
          if(!is.null(get(".temp.theta", where = 1)))
            result$par <- get(".temp.theta", where = 1)
        }
      }
      else{
        ## box constraints for optim("L-BFGS-B"), assembled to match
        ## the layout of the parameter vector built above
        lower.l <- sapply(limits, function(x) x[1])
        upper.l <- sapply(limits, function(x) x[2])
        if(fix.kappa == FALSE){
            if(fix.nugget){
              lower <- lower.l[c("sigmasq.lower", "phi.lower","kappa.lower")]
              upper <- upper.l[c("sigmasq.upper", "phi.upper","kappa.upper")]
            }
            else{
              lower <- lower.l[c("sigmasq.lower", "phi.lower",
                               "tausq.rel.lower", "kappa.lower")]
              upper <- upper.l[c("sigmasq.upper", "phi.upper",
                               "tausq.rel.upper", "kappa.upper")]
            }
          }
        else{
          ## NOTE(review): for fixed kappa the "power" model uses the
          ## full sigmasq/phi (and possibly tausq) limits, while all
          ## other models constrain phi only -- confirm this asymmetry
          ## is intentional.
          if(cov.model == "power"){
            if(fix.nugget){
              lower <- lower.l[c("sigmasq.lower", "phi.lower")]
              upper <- upper.l[c("sigmasq.upper", "phi.upper")]
            }
            else{
              lower <- lower.l[c("sigmasq.lower", "phi.lower", "tausq.rel.lower")]
              upper <- upper.l[c("sigmasq.upper", "phi.upper", "tausq.rel.upper")]
            }
          }
          else{
            lower <- lower.l["phi.lower"]
            upper <- upper.l["phi.upper"]
          }
        }
        result <- optim(ini, loss.vario, method = "L-BFGS-B",
                        hessian = TRUE, lower = lower,
                        upper = upper, g.l = .global.list, ...)
#        require(methods)
#        if(exists("trySilent"))
#          hess <- trySilent(solve(as.matrix(result$hessian)))
#        else{
#          op.sem <- options()$show.error.messages
#          options(show.error.messages = FALSE)
#          hess <- try(solve(as.matrix(result$hessian)))
#          options(show.error.messages = op.sem)
#        }
#        if(!inherits(hess, "try-error")) hess <- sqrt(diag(hess))
#        else print("WARNING: unable to compute the hessian")
      }
      value <- result$value
      message <- paste(minimisation.function, "convergence code:", result$convergence)
      ## the "linear" model has phi fixed at 1 (see loss.vario)
      if(cov.model == "linear")
        result$par <- c(result$par[1],1,result$par[-1])
      cov.pars <- as.vector(result$par[1:2])
      if(cov.model == "power") cov.pars[2] <- 2 * exp(cov.pars[2])/(1+exp(cov.pars[2]))  
      ## back-transform kappa from its unconstrained scale
      if(fix.kappa == FALSE){
        if (fix.nugget)
          Tkappa <- result$par[3]
        else{
          nugget <- result$par[3]
          Tkappa <- result$par[4]
        }
        if(.global.list$cov.model == "powered.exponential")
          kappa <- 2*(exp(Tkappa))/(1+exp(Tkappa))
        else kappa <- exp(Tkappa)
      }
      else
        if(fix.nugget == FALSE)
          nugget <- result$par[3]        
    }
  }
  ##
  ## Estimating implicity beta
  ##
  
  ##
  ## Preparing output
  ##
  estimation <- list(nugget = nugget, cov.pars = cov.pars, 
                     cov.model = cov.model, kappa = kappa, value = value, 
                     trend = vario$trend, beta.ols = vario$beta.ols,
                     max.dist = max.dist, 
                     minimisation.function = minimisation.function)
#  if(exists("hess")) estimation$hessian <- hess
  estimation$weights <- weights
  if(weights == "equal") estimation$method <- "OLS"
  else estimation$method <- "WLS"
  estimation$fix.nugget <- fix.nugget
  estimation$fix.kappa <- fix.kappa
  estimation$lambda <- vario$lambda
  estimation$message <- message
  estimation$call <- call.fc
  class(estimation) <- c("variomodel", "variofit")
  return(estimation)
}

"loss.vario" <-
  function (theta, g.l) 
{
  ## Loss function minimised by variofit() when using optim() or nlm().
  ##
  ## theta : current values of the free parameters, in the order built
  ##         by variofit(): sigmasq, phi, then (if estimated) tausq and
  ##         the transformed kappa.  For the "linear" model only sigmasq
  ##         (and possibly tausq) are free and phi is fixed at 1.
  ## g.l   : list assembled by variofit() with the empirical variogram
  ##         (u, v, n), the fixed parameter values, the model name, the
  ##         minimiser in use (m.f) and the weighting scheme (weights).
  ##
  ## Returns the (weighted) sum of squares between empirical and
  ## theoretical variogram, plus a penalty when nlm steps outside the
  ## valid (non-negative) parameter region.
  if(g.l$cov.model == "linear") theta <- c(theta[1], 1, theta[-1])
  ##
  ## Imposing constraints for nlm (optim handles bounds itself)
  ##
  if(g.l$m.f == "nlm"){
    ## side channel used by variofit() to recover the last offending
    ## parameter vector after nlm finishes
    .temp.theta <<- NULL
    if(g.l$fix.kappa == FALSE){
      if(g.l$fix.nugget){
        if(g.l$cov.model == "power")
          theta.minimiser <- theta[1]
        else          
          theta.minimiser <- theta[1:2]
        Tkappa <- theta[3]
      }
      else{
        if(g.l$cov.model == "power")
          theta.minimiser <- theta[c(1:3)]
        else          
          theta.minimiser <- theta[1:3]
        Tkappa <- theta[4]
      }
    }
    else theta.minimiser <- theta
    ## penalise (rather than forbid) negative variance parameters
    penalty <- 10000 * sum(0 - pmin(theta.minimiser, 0))
    theta <- pmax(theta.minimiser, 0)
    ## NOTE(review): when kappa is estimated the *unclamped* values are
    ## reassembled here (theta.minimiser, not the pmax-clamped theta);
    ## this only makes sense together with the penalty term -- confirm.
    if(g.l$fix.kappa == FALSE)
      theta <- c(theta.minimiser, Tkappa)
    if (any(theta.minimiser < 0)) .temp.theta <<- theta
    else penalty <- 0
  }
  else penalty <- 0
  ##
  ## reading parameters
  ##
  if(g.l$fix.kappa == FALSE){
    if (g.l$fix.nugget){
      tausq <- g.l$nugget
      Tkappa <- theta[3]
    }
    else{
      tausq <- theta[3]
      Tkappa <- theta[4]
    }
    ## kappa is optimised on a log scale (logit-like scale bounded at 2
    ## for the powered.exponential family); back-transform it here
    if(g.l$cov.model == "powered.exponential")
      kappa <- 2*(exp(Tkappa))/(1+exp(Tkappa))
    else kappa <- exp(Tkappa)
  }
  else{
    kappa <- g.l$kappa
    if (g.l$fix.nugget) tausq <- g.l$nugget
    else tausq <- theta[3]
  }
  ##
  sigmasq <- theta[1]
  phi <- theta[2]
  if(g.l$cov.model == "power") phi <- 2 * exp(phi)/(1+exp(phi))
  sill.total <- sigmasq + tausq
  ##
  ## Computing values for the theoretical variogram 
  ##
  if(any(g.l$cov.model == c("linear", "power")))
    gamma <- tausq + sigmasq * (g.l$u^phi)
  else
    gamma <- sill.total - cov.spatial(g.l$u, cov.model = g.l$cov.model, 
                                      kappa = kappa, cov.pars = c(sigmasq, phi))
  ##
  ## Computing loss function
  ## (bug fix: this read g.l$weight, which only worked through partial
  ## matching of the list element "weights")
  ##
  if(g.l$weights == "equal")
    loss <- sum((g.l$v - gamma)^2)
  if (g.l$weights == "npairs") 
    loss <- sum(g.l$n * (g.l$v - gamma)^2)
  if (g.l$weights == "cressie") 
    loss <- sum((g.l$n/(gamma^2)) * (g.l$v - gamma)^2)
  return(loss + penalty)
}

"print.variomodel" <-
  function(x, digits = "default", ...)
{
  ## Print a brief description of a variogram model fitted by variofit()
  ## (object of class "variomodel").
  ##
  ## x      : the fitted model object.
  ## digits : rounding digits, or "default" for
  ##          max(3, getOption("digits") - 3).
  ## ...    : ignored; kept for compatibility with the print generic.
  ##
  ## Bug fix: a user-supplied numeric 'digits' was previously discarded
  ## and overwritten by options()$digits; it is now honoured.
  if (identical(digits, "default")) {
    digits <- if (is.R()) max(3, getOption("digits") - 3)
              else options()$digits
  }
  ## collect only the parameters that were actually estimated
  if(x$fix.nugget){
    est.pars <- c(sigmasq = x$cov.pars[1], phi=x$cov.pars[2])
    if(x$fix.kappa == FALSE)
      est.pars <- c(est.pars, kappa = x$kappa)
  }
  else{
    est.pars <- c(tausq = x$nugget, sigmasq = x$cov.pars[1], phi=x$cov.pars[2])    
    if(x$fix.kappa == FALSE)
      est.pars <- c(est.pars, kappa = x$kappa)
  }
  if(x$weights == "equal")
    cat("variofit: model parameters estimated by OLS (ordinary least squares):\n")
  else
    cat("variofit: model parameters estimated by WLS (weighted least squares):\n")
  cat(paste("covariance model is:", x$cov.model))
  ## kappa is reported only for families that use it
  if(x$cov.model %in% c("matern", "powered.exponential", "cauchy", "gneiting.matern"))
    if(x$fix.kappa) cat(paste(" with fixed kappa =", x$kappa)) 
  if(x$cov.model == "matern" & x$fix.kappa & x$kappa == 0.5)
    cat(" (exponential)")
  cat("\n")
  if(x$fix.nugget)
    cat(paste("fixed value for tausq = ", x$nugget,"\n"))
  cat("parameter estimates:\n")
  print(round(est.pars, digits=digits))
  if(x$weights == "equal")
    cat("\nvariofit: minimised sum of squares = ")
  else
    cat("\nvariofit: minimised weighted sum of squares = ")
  cat(round(x$value, digits=digits))
  cat("\n")
  return(invisible())
}

"summary.variomodel" <-
  function(object, ...)
{
  ## Summary method for "variomodel" objects fitted by variofit():
  ## gathers the estimation method, model description and parameter
  ## values into a list of class "summary.variomodel".
  res <- list()
  res$pmethod <- if (object$weights == "equal") "OLS (ordinary least squares)"
                 else "WLS (weighted least squares)"
  res$cov.model <- object$cov.model
  res$spatial.component <- c(sigmasq = object$cov.pars[1],
                             phi = object$cov.pars[2])
  res$spatial.component.extra <- c(kappa = object$kappa)
  res$nugget.component <- c(tausq = object$nugget)
  res$fix.nugget <- object$fix.nugget
  res$fix.kappa <- object$fix.kappa
  res$sum.of.squares <- c(value = object$value)
  ## only the parameters that were actually estimated
  est <- if (object$fix.nugget)
    c(sigmasq = object$cov.pars[1], phi = object$cov.pars[2])
  else
    c(tausq = object$nugget,
      sigmasq = object$cov.pars[1], phi = object$cov.pars[2])
  if (object$fix.kappa == FALSE) est <- c(est, kappa = object$kappa)
  res$estimated.pars <- est
  res$weights <- object$weights
  res$call <- object$call
  class(res) <- "summary.variomodel"
  return(res)
}

"print.summary.variomodel" <-
  function(x, digits = "default", ...)
{
  ## Print method for "summary.variomodel" objects (see
  ## summary.variomodel above).
  ##
  ## x      : object of class "summary.variomodel".
  ## digits : rounding digits, or "default" for
  ##          max(3, getOption("digits") - 3).
  ## ...    : ignored; kept for compatibility with the print generic.
  if(length(class(x)) == 0 || all(class(x) != "summary.variomodel"))
    stop("object is not of the class \"summary.variomodel\"")
  ## Bug fix: a user-supplied numeric 'digits' was previously discarded
  ## and overwritten by options()$digits; it is now honoured.
  if (identical(digits, "default")) {
    digits <- if (is.R()) max(3, getOption("digits") - 3)
              else options()$digits
  }
  cat("Summary of the parameter estimation\n")
  cat("-----------------------------------\n")
  cat(paste("Estimation method:", x$pmethod, "\n"))
  cat("\n")
  ##
  ## Estimates of the model components
  ## Model: Y(x) = X\beta + S(x) + e 
  ##
#  cat("Parameters of the mean component (trend):")
#  cat("\n")
#  print(round(x$mean.component, digits=digits))
#  cat("\n")
  ##
  cat("Parameters of the spatial component:")
  cat("\n")
  cat(paste("   correlation function:", x$cov.model))
  if(x$cov.model == "matern" & x$fix.kappa & x$spatial.component.extra == 0.5)
    cat(" (exponential)")
  ## kappa is reported only for families that use it
  if(x$cov.model %in% c("matern", "powered.exponential", "cauchy", "gneiting.matern")){
    if(x$fix.kappa)
      cat(paste("\n      (fixed) extra parameter kappa = ", round(x$spatial.component.extra, digits=digits)))
    else
      cat(paste("\n      (estimated) extra parameter kappa = ", round(x$spatial.component.extra, digits=digits)))
  }
  cat(paste("\n      (estimated) variance parameter sigmasq (partial sill) = ", round(x$spatial.component[1], digits=digits)))
  cat(paste("\n      (estimated) cor. fct. parameter phi (range parameter)  = ", round(x$spatial.component[2], digits=digits)))
  cat("\n")
  ##
  cat("\n")  
  cat("Parameter of the error component:")
  if(x$fix.nugget)
    cat(paste("\n      (fixed) nugget =", round(x$nugget.component, digits = digits)))
  else
    cat(paste("\n      (estimated) nugget = ", round(x$nugget.component, digits=digits)))
  cat("\n")
  cat("\n")
  names(x$sum.of.squares) <- NULL
  if(x$weights == "equal") cat("Minimised sum of squares: ")
  else cat("Minimised weighted sum of squares: ")
  cat(round(x$sum.of.squares, digits=digits))
  cat("\n")
  cat("\n")
  cat("Call:")
  cat("\n")
  print(x$call)
  cat("\n")
  invisible(x)
}

##"beta.variofit" <-
##  function(geodata, coords = geodata$coords, data=geodata$data,
##           obj.variofit)
##  {
##
##  }
"variog" <-
  function(geodata, coords = geodata$coords, data = geodata$data, 
       uvec = "default", breaks = "default",
       trend = "cte", lambda = 1,
       option = c("bin", "cloud", "smooth"),
       estimator.type = c("classical", "modulus"), 
       nugget.tolerance, max.dist, pairs.min = 2,
       bin.cloud = FALSE, direction = "omnidirectional", tolerance = pi/8,
       unit.angle = c("radians","degrees"), 
       messages, ...) 
{
  if(! "package:stats" %in% search()) require(mva)
  if(! "package:stats" %in% search()) require(modreg)
  call.fc <- match.call()
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  keep <- list(...)
  if(is.null(keep$keep.NA)) keep.NA <- FALSE
  else keep.NA <- keep$keep.NA
  ##
  ## Directional variogram - setting angles
  ##
  unit.angle <- match.arg(unit.angle)
  if(is.numeric(direction)){
    if(length(direction) > 1) stop("only one direction is allowed")
    if(length(tolerance) > 1) stop("only one tolerance value is allowed")
    if(unit.angle == "degrees"){
      ang.deg <- direction
      ang.rad <- (ang.deg * pi)/180
      tol.deg <- tolerance
      tol.rad <- (tol.deg * pi)/180
    }
    else{
      ang.rad <- direction
      ang.deg <- (ang.rad * 180)/pi
      tol.rad <- tolerance
      tol.deg <- (tol.rad * 180)/pi
    }
    if(ang.rad > pi | ang.rad < 0)
      stop("direction must be an angle in the interval [0,pi[ radians")
    if(tol.rad > pi/2 | tol.rad < 0)
      stop("tolerance must be an angle in the interval [0,pi/2] radians")
    if(tol.deg >= 90){
      direction <- "omnidirectional"
      cat("variog: computing omnidirectional variogram\n")
    }
    else{
      if(messages.screen){
        cat(paste("variog: computing variogram for direction = ", round(ang.deg, dig=3), " degrees (", round(ang.rad, dig=3), " radians)\n", sep=""))
        cat(paste("        tolerance angle = ", round(tol.deg, dig=3), " degrees (", round(tol.rad, dig=3), " radians)\n", sep=""))
      }
    }
  }
  else if(messages.screen) cat("variog: computing omnidirectional variogram\n")
  ##   
  ##
  coords <- as.matrix(coords)
  data <- as.matrix(data)
  if(nrow(coords) != nrow(data)) stop("coords and data have incompatible dimensions") 
  data.var <- apply(data, 2, var)
  n.data <- nrow(coords)
  n.datasets <- ncol(data)
  data <- drop(data)
  ##
  ## variogram estimator
  ##
  option <- match.arg(option)
  estimator.type <- match.arg(estimator.type)
  ##
  ## transformation
  ##
  if (abs(lambda - 1) > 0.0001) {
    if (abs(lambda) < 0.0001) data <- log(data)
    else data <- ((data^lambda) - 1)/lambda
  }
  ##
  ## trend removal
  ##
  xmat <- unclass(trend.spatial(trend = trend, geodata = list(coords=coords, data=data)))
  if (nrow(xmat) != n.data) 
    stop("coords and trend have incompatible sizes")
  if (trend != "cte") {
    if (is.vector(data)) {
      temp.fit <- lm(data ~ xmat + 0)
      beta.ols <- temp.fit$coeff
      data <- temp.fit$residuals
      temp.fit <- NULL
      names(data) <- NULL
    }
    else {
      only.res <- function(y, x)
        lm(y ~ xmat + 0)$residuals
      data <- apply(data, 2, only.res, x = xmat)
      only.beta <- function(y, x)
        lm(y ~ xmat + 0)$coef
      beta.ols <- apply(data, 2, only.beta, x = xmat)
    }
  }
  else beta.ols <- apply(as.matrix(data), 2, mean)
  ##
  ## Defining bins
  ##
  u <- as.vector(dist(as.matrix(coords)))
  if(missing(nugget.tolerance) || nugget.tolerance < 1e-11){
    nugget.tolerance <- 1e-12
    nt.ind <- FALSE
  }
  else{
    if(!is.numeric(nugget.tolerance)) stop("nugget.tolerance must be numeric")
    nt.ind <- TRUE
  }
  min.dist <- min(u)
  if(min.dist < nugget.tolerance) nt.ind <- TRUE
  ##
  ## directional
  ##
  if(direction != "omnidirectional"){
    u.ang <- .C("tgangle",
                as.double(as.vector(coords[,1])),
                as.double(as.vector(coords[,2])),
                as.integer(dim(coords)[1]),
                res = as.double(rep(0, length(u))),
                PACKAGE = "geoR")$res
    u.ang <- atan(u.ang)
    u.ang[u.ang < 0] <- u.ang[u.ang < 0] + pi
  }
  ##
  if (option == "bin" && bin.cloud == FALSE && direction == "omnidirectional") {
    if (missing(max.dist)) umax <- max(u)  
    else umax <- max(u[u < max.dist])
    dbins <- define.bins(max.dist = umax, uvec = uvec, breaks = breaks, nugget.tolerance = nugget.tolerance)
    uvec <- dbins$uvec ; bins.lim <- dbins$bins.lim
    nbins <- length(bins.lim) - 1
    if (missing(max.dist)) max.dist <- max(bins.lim)
    if(bins.lim[1] < 1e-16) bins.lim[1] <- -1
    bin.f <- function(data) {
      cbin <- vbin <- sdbin <- rep(0, nbins)
      result <- .C("binit", as.integer(n.data),
                   as.double(as.vector(coords[, 1])),
                   as.double(as.vector(coords[, 2])), as.double(as.vector(data)), 
                   as.integer(nbins), as.double(as.vector(bins.lim)), 
                   as.integer(estimator.type == "modulus"), as.double(max.dist), 
                   cbin = as.integer(cbin), vbin = as.double(vbin), 
                   as.integer(TRUE), sdbin = as.double(sdbin),
                   PACKAGE = "geoR")[c("vbin", "cbin", "sdbin")]
    }
    result <- array(unlist(lapply(as.data.frame(data), bin.f)), 
                    dim = c(nbins, 3, n.datasets))
    indp <- (result[, 2, 1] >= pairs.min)
    result[!indp,1,] <- NA
    if(bins.lim[1] < 0) bins.lim[1] <- 0
    if(!nt.ind){
      uvec <- uvec[-1]
      indp <- indp[-1]
      bins.lim <- bins.lim[-1]
      result <- result[-1,,,drop=FALSE]
    }
    if(keep.NA)
      result <- list(u = uvec, v = result[, 1, ], 
                     n = result[, 2, 1], sd = result[, 3, ], 
                     bins.lim = bins.lim, 
                     ind.bin = indp)
    else
      result <- list(u = uvec[indp], v = result[indp, 1, ], 
                     n = result[indp, 2, 1], sd = result[indp, 3, ],
                     bins.lim = bins.lim, ind.bin = indp)
  }
  else {
    data <- as.matrix(data)
    v <- matrix(0, nrow = length(u), ncol = n.datasets)
    for (i in 1:n.datasets) {
      v[, i] <- as.vector(dist(data[, i]))
      if (estimator.type == "modulus") 
        v[, i] <- v[, i,drop=FALSE]^(0.5)
      else v[, i] <- (v[, i,drop=FALSE]^2)/2
    }
    if (!missing(max.dist)) {
      v <- v[u <= max.dist,,drop=FALSE]
      if(direction != "omnidirectional")
        u.ang <- u.ang[u <= max.dist]
      u <- u[u <= max.dist]
    }
    if(direction != "omnidirectional"){
      ang.lower <- ang.rad - tol.rad
      ang.upper <- ang.rad + tol.rad
     if(ang.lower >= 0 & ang.upper < pi)
        ang.ind <- (!is.na(u.ang) & ((u.ang >= ang.lower) & (u.ang <= ang.upper)))
      if(ang.lower < 0)
        ang.ind <- (!is.na(u.ang) & ((u.ang < ang.upper) | (u.ang > (pi + ang.lower))))
      if(ang.upper >= pi)
        ang.ind <- (!is.na(u.ang) & ((u.ang > ang.lower) | (u.ang < (ang.upper - pi))))
      ##ang.ind <- ((u.ang >= ang.rad - tol.rad)&(u.ang <= ang.rad + tol.rad))
      v <- v[ang.ind,,drop=FALSE]
      u <- u[ang.ind]
    }
    data <- drop(data)
    v <- drop(v)
    if (option == "cloud") result <- list(u = u, v = v)
    if (option == "bin") {
      if (missing(max.dist)) umax <- max(u)
      else umax <- max(u[u < max.dist])
      if(bin.cloud == 'diff') dd <- diffpairs(coords,data)$diff
      else dd <- 0
      result <- rfm.bin(cloud = list(u = u, v = v, d = dd),
                        estimator.type = estimator.type, 
                        uvec = uvec, breaks = breaks, nugget.tolerance = nugget.tolerance, 
                        bin.cloud = bin.cloud, max.dist = umax, keep.NA = keep.NA)
      if(keep.NA){
        if (pairs.min > 0) {
          indp <- (result$n < pairs.min)
          if(!nt.ind){
            for(i in 1:5) result[[i]] <- result[[i]][-1]
            indp <- indp[-1]
          }
          if (is.matrix(result$v)) {
            result$v[indp, ] <- result$sd[indp, ] <- NA
          }
          else {
            result$v[indp] <- result$sd[indp] <- NA
          }
        }
        result$ind.bin <- indp
      }
      else{
        if (pairs.min > 0) {
#          indp <- (result$n >= pairs.min)
          if(!nt.ind){
            for(i in 1:5) result[[i]] <- result[[i]][-1]
#            indp <- indp[-1]
          }
          indp <- (result$n >= pairs.min)
          if (is.matrix(result$v)) {
            result$v <- result$v[indp, ]
            result$sd <- result$sd[indp, ]
          }
          else {
            result$v <- result$v[indp]
            result$sd <- result$sd[indp]
          }
          result$u <- result$u[indp]
          result$n <- result$n[indp]
        }
        result$ind.bin <- indp
      }
    }
    if (option == "smooth") {
      if(! "package:stats" %in% search()) require(modreg)
      if (is.matrix(v)) stop("smooth not yet available for more than one data-set")
      temp <- ksmooth(u, v, ...)
      result <- list(u = temp[[1]], v = temp[[2]])
    }
    if(missing(max.dist)) max.dist <- max(u)
  }
  if(nt.ind){
    if(!exists(".variog4.nomessage",w=1)) cat("variog: co-locatted data found, adding one bin at the origin\n")
    if(all(result$u[1:2] < 1e-11)) result$u[2] <- sum(result$bins.lim[2:3])/2
  }
  result <- c(result, list(var.mark = data.var, beta.ols = beta.ols,
                           output.type = option, max.dist = max.dist, 
                           estimator.type = estimator.type, n.data = n.data,
                           lambda = lambda, trend = trend, pairs.min = pairs.min))
  result$nugget.tolerance <- nugget.tolerance
  if(direction != "omnidirectional") result$direction <- ang.rad
  else result$direction <- "omnidirectional"
  if(direction != "omnidirectional") result$tolerance <- tol.rad
  else result$tolerance <- "none" 
  result$uvec <- uvec
  result$call <- call.fc
  class(result) <- "variogram"
  return(result)
}

"variog4" <-
  function (geodata, coords = geodata$coords, data = geodata$data, 
            uvec = "default", breaks = "default", trend = "cte", lambda = 1,
            option = c("bin", "cloud", "smooth"),
            estimator.type = c("classical", "modulus"), 
            nugget.tolerance, max.dist, pairs.min = 2,
            bin.cloud = FALSE, direction = c(0, pi/4, pi/2, 3*pi/4), tolerance = pi/8,
            unit.angle = c("radians", "degrees"),
            messages, ...) 
{
  ## Computes empirical variograms for four directions plus the
  ## omnidirectional variogram by repeated calls to variog().
  ## Returns a list of class "variog4" with one component per direction
  ## (named by the angle in degrees) plus "omnidirectional".
  if (!"package:stats" %in% search()) require(mva)
  if (missing(geodata)) geodata <- list(coords = coords, data = data)
  if (missing(messages)) 
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  if (missing(nugget.tolerance)) nugget.tolerance <- 1e-12
  u <- as.vector(dist(as.matrix(coords)))
  if (length(direction) != 4) 
    stop("argument direction must be a vector with 4 values. For different specification use the function variog()")
  if (length(tolerance) != 1) 
    stop("only one value can be provided to the argument tolerance. For different specification use the function variog()")
  unit.angle <- match.arg(unit.angle)
  ## labels for the list components are the angles expressed in degrees
  if (unit.angle == "radians") dg <- direction * 180/pi
  else dg <- direction
  if (missing(max.dist)) umax <- max(u)
  else umax <- max(u[u < max.dist])
  ## flag read by variog() to suppress a repeated co-located-data message
  .variog4.nomessage <<- TRUE
  res <- list()
  for (k in seq_along(direction)) {
    res[[as.character(round(dg[k], digits = 1))]] <-
      variog(geodata = geodata, uvec = uvec, breaks = breaks,
             trend = trend, lambda = lambda, option = option,
             estimator.type = estimator.type,
             nugget.tolerance = nugget.tolerance,
             max.dist = max.dist, pairs.min = pairs.min,
             bin.cloud = bin.cloud, direction = direction[k],
             tolerance = tolerance, unit.angle = unit.angle,
             messages = messages.screen, keep.NA = TRUE)
  }
  if (exists(".variog4.nomessage", w = 1)) 
    remove(".variog4.nomessage", pos = 1, inherits = TRUE)
  res$omnidirectional <- variog(geodata = geodata, uvec = uvec,
                                breaks = breaks, trend = trend,
                                lambda = lambda, option = option,
                                estimator.type = estimator.type,
                                nugget.tolerance = nugget.tolerance,
                                max.dist = max.dist, pairs.min = pairs.min,
                                bin.cloud = bin.cloud,
                                direction = "omnidirectional",
                                tolerance = tolerance,
                                unit.angle = unit.angle,
                                messages = messages.screen,
                                keep.NA = TRUE)
  class(res) <- "variog4"
  return(res)
}

"plot.variog4" <-
  function (x, omnidirectional = FALSE, same.plot = TRUE, legend = TRUE,...)
{
  ## Plots the four directional variograms in a "variog4" object,
  ## either overlaid in one panel (same.plot = TRUE) or in a 2 x 2
  ## layout, optionally adding the omnidirectional variogram.
  ## '...' takes graphical parameters (lty, lwd, col, pch, type, ...).
  ## Returns invisibly.
  ##
  ## common y-axis upper limit over the four directional variograms
  ymax <- max(c(x[[1]]$v, x[[2]]$v, x[[3]]$v, x[[4]]$v), na.rm=TRUE)
  ## direction labels (degrees), taken from the component names
  n.o <- names(x)[1:4]
  ## user graphical parameters, with defaults filled in below
  GP <- list(...)
  if(is.null(GP$xlab)) GP$xlab <- "distance"
  if(is.null(GP$ylab)) GP$ylab<- "semi-variance"
  if (same.plot) {
    ## single panel: all curves drawn together with matplot()
    xx <- x[[5]]$u
    yy <- cbind(x[[1]]$v, x[[2]]$v, x[[3]]$v, x[[4]]$v)
    if (omnidirectional) yy <- cbind(x[[5]]$v, yy)
    ## OFC, changed : if (is.null(GP$lty)) GP$lty <- 1:5
    if (is.null(GP$lty)){
      if(omnidirectional) GP$lty <- 1:5
      else GP$lty <- 1:4
    }
    if (is.null(GP$lwd)) GP$lwd <- 1
    ## OFC, changed : if (is.null(GP$col)) GP$col <- 1:5
    if (is.null(GP$col)){
      if(omnidirectional) GP$col <- 1:5
      else GP$col <- 1:4
    }
    if (is.null(GP$pch)) GP$pch <- NULL
    if (is.null(GP$type)) GP$type <- "l"
    matplot(x = xx, y = yy, type = GP$type, lty=GP$lty, lwd=GP$lwd, col=GP$col, pch=GP$pch, xlab=GP$xlab, ylab=GP$ylab, xlim = c(0, max(xx)), ylim=c(0,max(yy, na.rm=TRUE)))
    if (legend) {
      ## legend entries use plotmath 'degree' to typeset the angle symbol
      if (omnidirectional) {
        legend(0, ymax,
               legend = c("omnid.",
                 substitute(a * degree, list(a = n.o[1])),
                 substitute(a * degree, list(a = n.o[2])),
                 substitute(a * degree, list(a = n.o[3])),
                 substitute(a * degree, list(a = n.o[4])),
                 expression()),
               lty = GP$lty, lwd = GP$lwd, col = GP$col)
      }
      else {
        legend(0, ymax,
               legend = c(substitute(a * degree,
                 list(a = n.o[1])), substitute(a * degree, list(a = n.o[2])),
                 substitute(a * degree, list(a = n.o[3])), substitute(a *
                                               degree, list(a = n.o[4])), expression()),
               lty = GP$lty, lwd = GP$lwd, col = GP$col)
      }
    }
  }
  else {
    ## four separate panels: save the mfrow setting and restore it at the end
    temp.mf <- par()$mfrow
    par(mfrow = c(2, 2))
    ## expand each graphical parameter to one value per curve
    ## (4 directions, plus a 5th when omnidirectional is drawn)
    if (is.null(GP$lty)) {
      GP$lty <- rep(1, 4)
      if (omnidirectional)
        GP$lty <- c(GP$lty, 2)
    }
    else {
      if (length(GP$lty) == 1)
        if (omnidirectional)
          GP$lty <- rep(GP$lty, 5)
        else GP$lty <- rep(GP$lty, 4)
      if (length(GP$lty) == 2)
        if (omnidirectional)
          GP$lty <- c(rep(GP$lty[1], 4), GP$lty[2])
        else GP$lty <- c(rep(GP$lty, 4))
      if (length(GP$lty) == 4 & omnidirectional)
        GP$lty <- c(rep(GP$lty, 2))
    }
    if (is.null(GP$lwd)) {
      GP$lwd <- rep(1, 4)
      if (omnidirectional)
        GP$lwd <- c(GP$lwd, 1)
    }
    else {
      if (length(GP$lwd) == 1)
        if (omnidirectional)
          GP$lwd <- rep(GP$lwd, 5)
        else GP$lwd <- rep(GP$lwd, 4)
      if (length(GP$lwd) == 2)
        if (omnidirectional)
          GP$lwd <- c(rep(GP$lwd[1], 4), GP$lwd[2])
        else GP$lwd <- rep(GP$lwd, 4)
      ## NOTE(review): rep(GP$lwd, 1) is a no-op, leaving only 4 values
      ## even though omnidirectional needs a 5th (GP$lwd[5] below); the
      ## analogous lty/pch branches use rep(..., 2) -- confirm intent
      if (length(GP$lwd) == 4 & omnidirectional)
        GP$lwd <- c(rep(GP$lwd, 1))
    }
    if (is.null(GP$col)) {
      GP$col <- rep(1, 4)
      if (omnidirectional) GP$col <- c(GP$col, 1)
    }
    else {
      if (length(GP$col) == 1)
        if (omnidirectional) GP$col <- rep(GP$col, 5)
        else GP$col <- rep(GP$col, 4)
      if (length(GP$col) == 2)
        if (omnidirectional)
          GP$col <- c(rep(GP$col[1], 4), GP$col[2])
        else GP$col <- rep(GP$col, 2)
      ## NOTE(review): rep(GP$col, 1) is likewise a no-op -- see note above
      if (length(GP$col) == 4 & omnidirectional)
        GP$col <- c(rep(GP$col, 1))
    }
    if (is.null(GP$pch)) {
      GP$pch <- rep(1, 4)
      if (omnidirectional)
        GP$pch <- c(GP$pch, 1)
    }
    else {
      if (length(GP$pch) == 1)
        if (omnidirectional)
          GP$pch <- rep(GP$pch, 5)
        else GP$pch <- rep(GP$pch, 4)
      if (length(GP$pch) == 2)
        if (omnidirectional)
          GP$pch <- c(rep(GP$pch[1], 4), GP$pch[2])
        else GP$pch <- rep(GP$pch, 2)
      if (length(GP$pch) == 4 & omnidirectional)
        GP$pch <- c(rep(GP$pch, 2))
    }
    if (is.null(GP$type)) {
      GP$type <- rep("l", 4)
      if (omnidirectional)
        GP$type <- c(GP$type, "l")
    }
    else {
      if (length(GP$type) == 1)
        if (omnidirectional)
          GP$type <- rep(GP$type, 5)
        else GP$type <- rep(GP$type, 4)
      if (length(GP$type) == 2 & omnidirectional)
        GP$type <- c(rep(GP$type[1], 4), GP$type[2])
      if (length(GP$type) == 4 & omnidirectional)
        GP$type <- c(rep(GP$type, 2))
    }
    ## one panel per direction; the omnidirectional curve (index 5) is
    ## overlaid on each panel when requested
    for (i in 1:4) {
      plot.default(x[[i]]$u, x[[i]]$v,
                   xlim = c(0, max(x[[i]]$u)), ylim = c(0, ymax),
                   type = GP$type[i],
           col = GP$col[i], lwd = GP$lwd[i], lty = GP$lty[i],
           pch = GP$pch[i], xlab=GP$xlab, ylab=GP$ylab)
      if (omnidirectional) {
        lines(x$omnidirectional, type = GP$type[5],
              col = GP$col[5], lwd = GP$lwd[5], lty = GP$lty[5])
        legend(0, ymax, legend = c(substitute(a * degree,
                          list(a = n.o[i])), "omn.", expression()),
               lty = c(GP$lty[i], GP$lty[5]),
               col=c(GP$col[i],  GP$col[5]),
               lwd=c(GP$lwd[i],  GP$lwd[5]))
      }
      else title(main = substitute(a * degree, list(a = n.o[i])),
                 cex = 1.3)
    }
    par(mfrow = temp.mf)
  }
  return(invisible())
}

"rfm.bin" <-
  function (cloud, l = 13, uvec = "default", breaks = "default", nugget.tolerance, 
            estimator.type = c("classical", "modulus"), bin.cloud = FALSE,
            max.dist, keep.NA = FALSE)
{
  ## Bins a variogram cloud into distance classes.
  ##
  ## cloud           : list with u (pairwise distances), v (cloud values;
  ##                   vector, or matrix with one column per data set)
  ##                   and, when bin.cloud == 'diff', d (pairwise differences).
  ## l               : unused; kept for backward compatibility.
  ## uvec, breaks    : bin specification, passed on to define.bins().
  ## nugget.tolerance: distances below this are treated as co-located.
  ## estimator.type  : "classical" or "modulus" (Cressie-Hawkins correction).
  ## bin.cloud       : TRUE/'diff' to also return the cloud within each
  ##                   bin (single data set only).
  ## max.dist        : maximum distance; if NULL, taken from the bin limits.
  ## keep.NA         : if FALSE, empty bins are dropped (matrix case).
  ## Returns a list with u, v, n, sd, bins.lim and output.type = "bin".
  estimator.type <- match.arg(estimator.type)
  dbins <- define.bins(max.dist = max(cloud$u), uvec = uvec, breaks = breaks, nugget.tolerance = nugget.tolerance)
  uvec <- dbins$uvec ; bins.lim <- dbins$bins.lim
  nbins <- nc <- length(bins.lim) - 1
  if(is.null(max.dist)) max.dist <- max(bins.lim)
  min.dist <- min(cloud$u)
  if (!is.matrix(cloud$v)) {
    ## single data set
    vbin <- rep(0, nc)
    nbin <- rep(0, nc)
    sdbin <- rep(0, nc)
    if (bin.cloud == TRUE | bin.cloud == 'diff') bins.clouds <- list()
    for (i in 1:nc) {
      ## NOTE(review): this branch uses (lim[i], lim[i+1]] while the
      ## matrix branch below uses [lim[i], lim[i+1]) -- confirm whether
      ## the asymmetry is intended
      ind <- (cloud$u > bins.lim[i]) & (cloud$u <= bins.lim[i+1])
      vbin[i] <- mean(cloud$v[ind])
      nbin[i] <- sum(ind)
      if (estimator.type == "modulus") 
        vbin[i] <- ((vbin[i])^4)/(0.914 + (0.988/nbin[i]))
      if (nbin[i] > 0) sdbin[i] <- sqrt(var(cloud$v[ind]))
      else sdbin[i] <- NA
      if (bin.cloud == TRUE) bins.clouds[[i]] <- cloud$v[ind]
      if (bin.cloud == 'diff') bins.clouds[[i]] <- cloud$d[ind]
      NULL
    }
    ## a zero first distance is replaced by the first bin's mid-point
    if (uvec[1] == 0) uvec[1] <- (bins.lim[1] + bins.lim[2])/2
    if (min.dist == 0) {
      ## co-located pairs: prepend an extra bin at distance zero
      ind <- (cloud$u == 0)
      n.zero <- sum(ind)
      v.zero <- mean(cloud$v[ind])
      if (bin.cloud == TRUE | bin.cloud == 'diff') {
        bins.clouds[2:(length(bins.clouds) + 1)] <- bins.clouds[1:nc]
        if(bin.cloud == 'diff') bins.clouds[[1]] <- cloud$d[ind]
        else bins.clouds[[1]] <- cloud$v[ind]
      }
      if (estimator.type == "modulus") 
        v.zero <- ((v.zero)^4)/(0.914 + (0.988/n.zero))
      if (n.zero > 0) 
        sd.zero <- sqrt(var(cloud$v[ind]))
      else sd.zero <- NA
      uvec <- c(0, uvec)
      vbin <- c(v.zero, vbin)
      nbin <- c(n.zero, nbin)
      sdbin <- c(sd.zero, sdbin)
    }
    u <- uvec
    v <- vbin
    n <- nbin
    sd <- sdbin
    if (bin.cloud == TRUE | bin.cloud == 'diff') 
      bins.clouds <- bins.clouds[!is.na(vbin)]
  }
  else {
    ## multiple data sets: one column of cloud$v per data set
    if (bin.cloud == TRUE | bin.cloud == 'diff') 
      stop("option bins.cloud=TRUE allowed only for 1 variable")
    nvcols <- ncol(cloud$v)
    vbin <- matrix(0, nrow = nc, ncol = nvcols)
    nbin <- rep(0, nc)
    sdbin <- matrix(0, nrow = nc, ncol = nvcols)
    for (i in 1:nc) {
      ind <- (cloud$u >= bins.lim[i]) & (cloud$u < bins.lim[i+1])
      nbin[i] <- sum(ind)
      for (j in 1:nvcols) {
        vbin[i, j] <- mean(cloud$v[ind, j])
        if (estimator.type == "modulus") 
          vbin[i, j] <- ((vbin[i, j])^4)/(0.914 + (0.988/nbin[i]))
        if (nbin[i] > 0) 
          sdbin[i, j] <- sqrt(var(cloud$v[ind, j]))
        else sdbin[i, j] <- NA
      }
      NULL
    }
    if (uvec[1] == 0) 
      uvec[1] <- (bins.lim[1] + bins.lim[2])/2
    if (min.dist == 0) {
      ## co-located pairs: prepend ONE extra bin at distance zero.
      ## BUG FIX: the original did the prepending inside the loop over
      ## columns, inserting nvcols spurious zero-distance bins and
      ## prepending a length-nvcols count vector to nbin. The selector
      ## ind does not depend on j, so the count is a single scalar and
      ## the prepend happens once, after the per-column statistics.
      ind <- (cloud$u == 0)
      n.zero <- sum(ind)
      v.zero <- rep(0, nvcols)
      sd.zero <- rep(0, nvcols)
      for (j in 1:nvcols) {
        v.zero[j] <- mean(cloud$v[ind, j])
        if (estimator.type == "modulus") 
          v.zero[j] <- ((v.zero[j])^4)/(0.914 + (0.988/n.zero))
        if (n.zero > 0) 
          sd.zero[j] <- sqrt(var(cloud$v[ind, j]))
        else sd.zero[j] <- NA
      }
      uvec <- c(0, uvec)
      vbin <- rbind(v.zero, vbin)
      nbin <- c(n.zero, nbin)
      sdbin <- rbind(sd.zero, sdbin)
    }
    if(keep.NA == FALSE){
      ## drop empty bins; assumes the NA pattern of column 1 holds for
      ## all columns (bins are shared, so an empty bin is empty for all)
      u <- uvec[!is.na(vbin[, 1])]
      n <- nbin[!is.na(vbin[, 1])]
      v <- matrix(0, nrow = length(u), ncol = nvcols)
      sd <- matrix(0, nrow = length(u), ncol = nvcols)
    }
    else{
      u <- uvec
      n <- nbin
      v <- matrix(0, nrow = length(u), ncol = nvcols)
      sd <- matrix(0, nrow = length(u), ncol = nvcols)
    }
    for (j in 1:nvcols) {
      if(keep.NA == FALSE){
        v[, j] <- vbin[!is.na(vbin[, j]), j]
        sd[, j] <- sdbin[!is.na(vbin[, j]), j]
      }
      else{
        v[, j] <- vbin[, j]
        sd[, j] <- sdbin[, j]
      }
    }
  }
  ## with a non-trivial nugget tolerance the first bin holds (near)
  ## co-located pairs and is reported at distance zero
  if (nugget.tolerance > 1e-12) {
    u[1] <- 0
  }
  result <- list(u = u, v = v, n = n, sd = sd, bins.lim = bins.lim, output.type = "bin")
  if (!is.matrix(cloud$v) && (bin.cloud == TRUE | bin.cloud == 'diff'))
    result$bin.cloud <- bins.clouds
  ## propagate any class set on the input cloud
  if (length(class(cloud)) > 0) 
  class(result) <- class(cloud)
  return(result)
}

"plot.variogram" <-
  function (x, max.dist, vario.col = "all", scaled = FALSE,  
            var.lines = FALSE,  envelope.obj = NULL,
            pts.range.cex, bin.cloud = FALSE,  ...) 
{
  ## Plots an empirical variogram (class "variogram").
  ##
  ## x            : variogram object with components u (distances),
  ##                v (estimates), n (pair counts), var.mark, output.type.
  ## max.dist     : maximum distance plotted; defaults to max(x$u).
  ## vario.col    : "all" or a numeric vector of columns of x$v to plot.
  ## scaled       : if TRUE, each variogram is divided by its var.mark.
  ## var.lines    : if TRUE, horizontal line(s) at the data variance.
  ## envelope.obj : optional envelope (v.lower/v.upper) added as lines.
  ## pts.range.cex: range of point sizes, proportional to sqrt(pair counts).
  ## bin.cloud    : if TRUE, draw box-plots of the stored bin clouds.
  ## Returns invisibly.
  if(missing(max.dist)) max.dist <- max(x$u)
  Ldots <- list(...)
  if(is.null(Ldots$xlab)) Ldots$xlab <- "distance"
  if(is.null(Ldots$ylab)) Ldots$ylab <- "semi-variance"
  ## default plotting type by output type.
  ## BUG FIX: test Ldots$type explicitly -- the original tested
  ## Ldots$ty and only worked through $'s partial matching of names.
  if(is.null(Ldots$type)){
    if (x$output.type == "bin") Ldots$type <- "p"
    if (x$output.type == "smooth") Ldots$type <- "l"
    if (x$output.type == "cloud") Ldots$type <- "p"
  }
  if(is.null(Ldots$col)) Ldots$col <- 1:6
  if(is.null(Ldots$lty)) Ldots$lty <- 1:5
  if(is.null(Ldots$lwd)) Ldots$lwd <- 1
  if(is.null(Ldots$pch)) Ldots$pch <- NULL
  if(is.null(Ldots$cex)) Ldots$cex <- NULL
  if(is.null(Ldots$add)) Ldots$add <- FALSE
  if (bin.cloud == TRUE && all(is.na(x$bin.cloud))) 
    stop("plot.variogram: object must be a binned variogram with option bin.cloud=TRUE")
  if (bin.cloud == TRUE && any(!is.na(x$bin.cloud))) 
    boxplot(x$bin.cloud, varwidth = TRUE, 
            xlab = "distance",
            ylab = paste(x$estimator.type, "variogram"))
  else {
    ## point sizes proportional to sqrt(number of pairs), if requested
    if(!missing(pts.range.cex)){
      cex.min <- min(pts.range.cex)
      cex.max <- max(pts.range.cex)
      if(cex.min != cex.max){
        pts.prop <- TRUE
        sqn <- sqrt(x$n[x$u <= max.dist])
        pts.cex <- cex.min + ((sqn - min(sqn)) * (cex.max - cex.min) / (max(sqn) - min(sqn)))
      }
      else pts.prop <- FALSE
    }
    else pts.prop <- FALSE 
    u <- x$u[x$u <= max.dist]
    v <- x$v
    if(is.vector(v) | length(v) == length(x$u))
      v <- matrix(v, ncol=1)
    v <- v[x$u <= max.dist,, drop=FALSE]
    ## BUG FIX: identical() avoids a length > 1 condition error when
    ## vario.col is a numeric vector (the original used vario.col == "all")
    if(identical(vario.col, "all"))
      vario.col <- 1:dim(v)[2]
    else
      if(!is.numeric(vario.col) | any(vario.col > ncol(v)))
        stop("argument vario.col must be equals to \"all\" or a vector indicating the column numbers to be plotted")
    v <- v[, vario.col, drop=FALSE]
    if (scaled)
      v <- t(t(v)/x$var.mark[vario.col])
    if (is.null(Ldots$ylim)){
      ## na.rm = TRUE so variograms computed with keep.NA do not yield
      ## an NA y-range (consistent with plot.variog4)
      ymax <- max(v, na.rm = TRUE)
      if (!is.null(envelope.obj)) 
        ymax <- max(c(envelope.obj$v.upper, ymax))
      Ldots$ylim <- c(0, ymax)
    }
    if(ncol(v) == 1){
      ## single variogram: plot a data frame so axis labels default to
      ## "distance"/"semivariance"
      v <- as.vector(v)
      uv <- data.frame(distance=u, semivariance = v)
      if(is.null(list(...)$ylim)){
        if(pts.prop)
          plot(uv, xlim = c(0, max.dist), ylim = Ldots$ylim, cex = pts.cex, ...)
        else
          plot(uv, xlim = c(0, max.dist), ylim = Ldots$ylim, ...)
      }
      else{
        ## ylim supplied by the user: do not forward '...' again, to
        ## avoid passing ylim twice
        if(pts.prop)
          plot(uv, xlim = c(0, max.dist), ylim = Ldots$ylim, cex = pts.cex)
        else
          plot(uv, xlim = c(0, max.dist), ylim = Ldots$ylim)
      }
    }
    else
      matplot(x=u, y= v, xlim = c(0, max.dist), ylim = Ldots$ylim, 
              xlab = Ldots$xlab, ylab = Ldots$ylab, type = Ldots$type,
              add = Ldots$add, pch = Ldots$pch,
              lty = Ldots$lty, lwd = Ldots$lwd, col = Ldots$col)
    if (var.lines) {
      if (scaled) abline(h = 1, lty = 3)
      else abline(h = x$var.mark, lty = 3)
    }
    if (!is.null(envelope.obj)) {
      lines(u, envelope.obj$v.lower, lty = 4)
      lines(u, envelope.obj$v.upper, lty = 4)
    }
  }
  return(invisible())
}

"lines.variomodel" <-
  function (x, ...)
{
  ## Generic: adds a theoretical/fitted variogram model curve to the
  ## current plot; dispatches on class(x) to lines.variomodel.* methods.
  UseMethod("lines.variomodel")
}

"lines.variomodel.default" <-
  function (x, cov.model, cov.pars, nugget, kappa,
            max.dist, scaled = FALSE, ...)
{
  ## Adds a theoretical variogram curve to the current plot.
  ## Model parameters are taken from the arguments or, when missing,
  ## from the corresponding components of 'x'.
  ##
  ## x        : optional object holding cov.model, cov.pars, nugget,
  ##            kappa, max.dist components (e.g. a fitted model).
  ## cov.model: correlation model name (see choices below).
  ## cov.pars : c(partial sill, range), or a matrix (one row per
  ##            component) for nested models.
  ## nugget   : nugget variance.
  ## kappa    : extra parameter for matern-type models.
  ## max.dist : curve drawn on [0, max.dist].
  ## scaled   : if TRUE, rescale so the total sill equals 1.
  ## ...      : further arguments passed to curve().
  ## Returns invisibly.
  ##
  ## reading model/other components
  if(missing(cov.model)){
    if(missing(x) || is.null(x$cov.model)) 
      stop("argument cov.model must be provided")
    else cov.model <- x$cov.model
  }
  cov.model <- match.arg(cov.model,
                         choices = c("matern", "exponential", "gaussian",
                           "spherical", "circular", "cubic", "wave",
                           "linear", "power", "powered.exponential", "cauchy",
                           "gneiting", "gneiting.matern", "pure.nugget"))
  if(missing(cov.pars)){
    if(missing(x) || is.null(x$cov.pars)) 
      stop("argument cov.pars must be provided")
    else cov.pars <- x$cov.pars
  }
  if(missing(nugget)){
    if(missing(x) || is.null(x$nugget)) 
      stop("argument nugget must be provided")
    else nugget <- x$nugget
  }
  ## kappa is required only for the models below.
  ## BUG FIX: "gneiting.matern" (dot) matches the match.arg choices;
  ## the original compared against "gneiting-matern", which could
  ## never match after match.arg.
  if (cov.model == "matern" | cov.model == "powered.exponential" | 
      cov.model == "cauchy" | cov.model == "gneiting.matern"){
    if(missing(kappa)){
      if(missing(x) || is.null(x$kappa)) 
        stop("argument kappa must be provided")
      else kappa <- x$kappa
    }
  }
  else kappa <- 0.5
  if(missing(max.dist)){
    if(missing(x) || is.null(x$max.dist)) 
      stop("argument max.dist must be provided")
    else max.dist <- x$max.dist
  }
  ## computing the total sill for single or nested model 
  if (is.vector(cov.pars)) 
    sill.total <- nugget + cov.pars[1]
  else sill.total <- nugget + sum(cov.pars[, 1])
  ## checking whether to scale the variogram.
  ## BUG FIX: branch on cov.pars (vector vs nested-model matrix), not
  ## cov.model -- cov.model is always a character vector, so the
  ## original scaled only element [1,1] of a parameter matrix.
  if (scaled){
    if(is.vector(cov.pars))
      cov.pars[1] <-  cov.pars[1]/sill.total
    else cov.pars[,1] <-  cov.pars[,1]/sill.total
    sill.total <- 1
  }
  ## defining a function to plot the variogram curve
  gamma.f <- function(x){
    if(any(cov.model == c("linear", "power"))){
      if(is.vector(cov.pars)){
        ## linear model: gamma = nugget + sill * x
        if(cov.model == "linear") cov.pars[2] <- 1
        return(nugget + cov.pars[1] * (x^cov.pars[2]))
      }
      else{
        ## nested model: sum the contribution of each component
        if(length(cov.model) == 1) cov.model <- rep(cov.model, nrow(cov.pars))
        if(length(cov.model) != nrow(cov.pars)) stop("cov.model and cov.pars have incompatible dimentions")
        vals <- rep(nugget, length(x))
        for (i in 1:nrow(cov.pars)){
          if(any(cov.model[i] == c("linear", "power"))){
            if(cov.model[i] == "linear") cov.pars[i,2] <- 1
            vals <- vals + (cov.pars[i,1] * (x^cov.pars[i,2]))
          }
          else vals <- vals + cov.pars[i,1] - cov.spatial(x, cov.model = cov.model[i],
                                                          kappa = kappa, cov.pars = cov.pars[i,])
        }
        return(vals)
      }
    }
    else
      ## gamma(x) = total sill - covariance at distance x
      return(sill.total -
             cov.spatial(x, cov.model = cov.model,
                         kappa = kappa, cov.pars = cov.pars))
  }
  ## plotting the curve on top of the current plot
  curve(gamma.f(x), from=0, to=max.dist, add=TRUE, ...)
  return(invisible())
}

"lines.variomodel.variofit" <-
  "lines.variomodel.likGRF" <-
  function (x, max.dist, scaled = FALSE, ...)
{
  ## Adds the variogram curve of a fitted model (variofit / likfit
  ## output) to the current plot.
  ##
  ## x        : fitted model with cov.model, cov.pars, nugget, kappa
  ##            and (possibly) max.dist components.
  ## max.dist : maximum distance for the curve; defaults to x$max.dist.
  ## scaled   : if TRUE, rescale so that the total sill equals 1.
  ## ...      : further arguments passed to curve().
  ## Returns invisibly.
  my.l <- list()
  if(missing(max.dist)){
    my.l$max.dist <- x$max.dist
    if (is.null(my.l$max.dist)) 
      stop("argument max.dist needed for this object")
  }
  else
    my.l$max.dist <- max.dist
  ## kappa is used only by the models below.
  ## BUG FIX: "gneiting.matern" (dot) is the spelling used by the
  ## package; the original compared against "gneiting-matern", which
  ## never matches.  The no-kappa case now clears my.l$kappa instead of
  ## assigning a dead local variable.
  if (x$cov.model == "matern" | x$cov.model == "powered.exponential" | 
      x$cov.model == "cauchy" | x$cov.model == "gneiting.matern") 
    my.l$kappa <- x$kappa
  else my.l$kappa <- NULL
  ## total sill: nugget plus (sum of) partial sill(s)
  if (is.vector(x$cov.pars)) 
    my.l$sill.total <- x$nugget + x$cov.pars[1]
  else my.l$sill.total <- x$nugget + sum(x$cov.pars[, 1])
  my.l$nugget <- x$nugget
  my.l$cov.pars <- x$cov.pars
  my.l$cov.model <- x$cov.model
  if (scaled){
    ## BUG FIX: branch on cov.pars (vector vs nested-model matrix), not
    ## cov.model, and scale my.l$cov.pars -- the original referenced the
    ## non-existent my.l$cov.cov.pars, which errored for nested models.
    if(is.vector(x$cov.pars))
      my.l$cov.pars[1] <- my.l$cov.pars[1]/my.l$sill.total
    else my.l$cov.pars[,1] <- my.l$cov.pars[,1]/my.l$sill.total
    my.l$sill.total <- 1
  }
  ## theoretical variogram evaluated at distance x
  gamma.f <- function(x, my.l){
    if(any(my.l$cov.model == c("linear", "power")))
      return(my.l$nugget + my.l$cov.pars[1] * (x^my.l$cov.pars[2]))
    else
      return(my.l$sill.total -
        cov.spatial(x, cov.model = my.l$cov.model,
                    kappa = my.l$kappa,
                    cov.pars = my.l$cov.pars))
  }
  curve(gamma.f(x,my.l=my.l), from=0, to=my.l$max.dist, add=TRUE, ...)
  return(invisible())
}

"lines.variogram" <-
  function (x, max.dist, type = "o", scaled = FALSE, pts.range.cex, ...) 
{
  ## Adds an empirical variogram (object with u, v, n components) to
  ## the current plot, optionally scaling point sizes by the square
  ## root of the number of pairs in each bin.  Returns invisibly.
  if (missing(max.dist)) max.dist <- max(x$u)
  pts.prop <- FALSE
  if (!missing(pts.range.cex)) {
    cex.min <- min(pts.range.cex)
    cex.max <- max(pts.range.cex)
    if (cex.min != cex.max) {
      ## map sqrt(pair counts) linearly onto [cex.min, cex.max]
      pts.prop <- TRUE
      sqn <- sqrt(x$n[x$u <= max.dist])
      pts.cex <- cex.min + ((sqn - min(sqn)) * (cex.max - cex.min)/(max(sqn) - min(sqn)))
    }
  }
  ## optionally rescale by the sample variance(s)
  if (scaled) x$v <- x$v/x$var.mark
  keep <- (x$u <= max.dist)
  if (is.matrix(x$v)) {
    ## one set of lines per column (multiple data sets)
    for (j in seq_len(ncol(x$v))) {
      if (pts.prop)
        lines(x$u[keep], x$v[keep, j], type = type, cex = pts.cex, ...)
      else
        lines(x$u[keep], x$v[keep, j], type = type, ...)
    }
  }
  else {
    if (pts.prop)
      lines(x$u[keep], x$v[keep], type = type, cex = pts.cex, ...)
    else
      lines(x$u[keep], x$v[keep], type = type, ...)
  }
  return(invisible())
}

"variog.model.env" <-
  function(geodata, coords = geodata$coords, obj.variog,
           model.pars, nsim = 99, save.sim = FALSE, messages) 
{
  ## Computes Monte Carlo envelopes for a binned empirical variogram by
  ## simulating nsim realizations from a given model (via grf) and
  ## re-binning each simulation with the same bins as obj.variog.
  ##
  ## geodata   : geoR-style list with coords (used for the trend matrix).
  ## obj.variog: binned empirical variogram (output of variog()).
  ## model.pars: list with components beta, cov.model, kappa, nugget,
  ##             cov.pars; defaults below are used for missing ones.
  ## nsim      : number of simulations.
  ## save.sim  : if TRUE, the simulated data are kept in the output.
  ## Returns a "variogram.envelope" object with u, v.lower, v.upper.
  call.fc <- match.call()
  obj.variog$v <- NULL
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  ##
  ## reading input (defaults used when model.pars omits a component)
  ##
  if(!is.null(model.pars$beta)) beta <- model.pars$beta
  else beta <- 0
  if(!is.null(model.pars$cov.model))
    cov.model <- model.pars$cov.model
  else cov.model <- "exponential"
  if(!is.null(model.pars$kappa)) kappa <- model.pars$kappa
  else kappa <- 0.5
  if(!is.null(model.pars$nugget)) nugget <- model.pars$nugget
  else nugget <- 0
  cov.pars <- model.pars$cov.pars
  if(!is.null(obj.variog$estimator.type))
    estimator.type <- obj.variog$estimator.type
  else estimator.type <- "classical"
  if (obj.variog$output.type != "bin") 
    stop("envelops can be computed only for binned variogram")
  ##
  ## generating simulations from the model with parameters provided
  ##
  if (messages.screen) 
    cat(paste("variog.env: generating", nsim, "simulations (with ",
              obj.variog$n.data, 
              "points each) using the function grf\n"))
  simula <- grf(obj.variog$n.data, grid = as.matrix(coords),
                cov.model = cov.model, cov.pars = cov.pars,
                nugget = nugget, kappa = kappa, nsim = nsim,
                messages = FALSE, lambda = obj.variog$lambda)
  if(messages.screen)
    cat("variog.env: adding the mean or trend\n")
  ## add the (possibly trend-dependent) mean surface X %*% beta
  x.mat <- unclass(trend.spatial(trend=obj.variog$trend, geodata = geodata))
  simula$data <- as.vector(x.mat %*% beta) + simula$data
  ##
  ## computing empirical variograms for the simulations
  ##
  if (messages.screen) 
    cat(paste("variog.env: computing the empirical variogram for the", 
              nsim, "simulations\n"))
  nbins <- length(obj.variog$bins.lim) - 1
  ## legacy S-PLUS compatibility: the two branches call the same C
  ## routine; the is.R() branch reads obj.variog from the enclosing
  ## scope while the other passes everything explicitly
  if(is.R()){
    ## bins one simulated data column using the C routine "binit";
    ## returns the vector of per-bin variogram estimates
    bin.f <- function(sim){
      cbin <- vbin <- sdbin <- rep(0, nbins)  
      temp <- .C("binit",
                 as.integer(obj.variog$n.data),
                 as.double(as.vector(coords[,1])),
                 as.double(as.vector(coords[,2])),
                 as.double(as.vector(sim)),
                 as.integer(nbins),
                 as.double(as.vector(obj.variog$bins.lim)),
                 as.integer(estimator.type == "modulus"),
                 as.double(max(obj.variog$u)),
                 as.double(cbin),
                 vbin = as.double(vbin),
                 as.integer(FALSE),
                 as.double(sdbin),
                 PACKAGE = "geoR")$vbin
      return(temp)
    }
    simula.bins <- apply(simula$data, 2, bin.f)
    ## keep only the bins retained in the empirical variogram
    simula.bins <- simula.bins[obj.variog$ind.bin,]
  }
  else{
    bin.f <- function(sim, nbins, n.data, coords, bins.lim, estimator.type, max.u){
      cbin <- vbin <- sdbin <- rep(0, nbins)  
      temp <- .C("binit",
                 as.integer(n.data),
                 as.double(as.vector(coords[,1])),
                 as.double(as.vector(coords[,2])),
                 as.double(as.vector(sim)),
                 as.integer(nbins),
                 as.double(as.vector(bins.lim)),
                 as.integer(estimator.type == "modulus"),
                 as.double(max.u),
                 as.double(cbin),
                 vbin = as.double(vbin),
                 as.integer(FALSE),
                 as.double(sdbin),
                 PACKAGE = "geoR")$vbin
      return(temp)
    }
    simula.bins <- apply(simula$data, 2, bin.f, nbins=nbins, n.data=obj.variog$n.data, coords=coords, bins.lim=obj.variog$bins.lim, estimator.type=estimator.type, max.u=max(obj.variog$u))
    simula.bins <- simula.bins[obj.variog$ind.bin,]
  }
  ## drop the (possibly large) simulated data unless the user asked for it
  if(save.sim == FALSE) simula$data <- NULL
  ##
  ## computing envelops
  ##
  if (messages.screen) 
    cat("variog.env: computing the envelops\n")
  ## envelope = per-bin min/max over the nsim simulated variograms
  limits <- apply(simula.bins, 1, range)
  res.env <- list(u = obj.variog$u, v.lower = limits[1, ],
                  v.upper = limits[2,])
  if(save.sim) res.env$simulated <- simula$data
  res.env$call <- call.fc
  class(res.env) <- "variogram.envelope"
  return(res.env)
}

"variog.mc.env" <-
  function (geodata, coords = geodata$coords, data = geodata$data,
            obj.variog, nsim = 99, save.sim = FALSE, messages) 
{
  ## Monte Carlo envelope for an empirical variogram: permutes the data
  ## values over the (fixed) data locations `nsim` times, recomputes the
  ## empirical variogram for each permutation, and returns the pointwise
  ## min/max over simulations.
  ##
  ## Arguments:
  ##   geodata    : optional list with elements $coords and $data
  ##   coords     : matrix of data coordinates (default geodata$coords)
  ##   data       : data values; a single data set only
  ##   obj.variog : the empirical variogram object to build envelopes for
  ##   nsim       : number of data permutations
  ##   save.sim   : if TRUE the permuted data are returned ($simulations)
  ##   messages   : print progress messages?
  ##
  ## Returns an object of class "variogram.envelope" with elements
  ## u (distances), v.lower and v.upper (envelope bounds), and call.
  call.fc <- match.call()
  if(missing(geodata)) geodata <- list(coords=coords, data=data)
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  ##
  ## Checking input
  ##
  ## drop the observed variogram values; only the binning info is reused
  obj.variog$v <- NULL
  if((is.matrix(data) | is.data.frame(data)))
    if(ncol(data) > 1)
    	stop("envelops can be computed for only one data set at once")
  if(!is.null(obj.variog$estimator.type))
    estimator.type <- obj.variog$estimator.type
  else estimator.type <- "classical"
  ##
  ## transformation
  ## (Box-Cox with the lambda stored in the variogram object:
  ##  lambda == 1 means no transformation, lambda == 0 means log)
  ##
  if (abs(obj.variog$lambda - 1) > 0.0001) {
    if (abs(obj.variog$lambda) < 0.0001) 
      data <- log(data)
    else data <- ((data^obj.variog$lambda) - 1)/obj.variog$lambda
  }
  ##
  ## trend removal
  ## (envelopes are computed on residuals from the fitted trend)
  ##
  xmat <- unclass(trend.spatial(trend = obj.variog$trend, geodata = geodata))
  if (obj.variog$trend != "cte") {
    if (is.vector(data)) {
      data <- lm(data ~ xmat + 0)$residuals
      names(data) <- NULL
    }
    else {
      ## NOTE(review): only.res uses `xmat` from the enclosing frame;
      ## its `x` argument is accepted but ignored
      only.res <- function(y, x) {
        lm(y ~ xmat + 0)$residuals
      }
      data <- apply(data, 2, only.res, x = xmat)
    }
  }
  ##
  ## generating several "data-sets" by permutating data values
  ##
  if (messages.screen) 
    cat(paste("variog.env: generating", nsim,
              "simulations by permutating data values\n"))
  simula <- list(coords = coords)
  n.data <- length(data)
  ## each column of simula$data is one random permutation of the data;
  ## perm.f ignores its index argument `i`
  perm.f <- function(i, data, n.data){return(data[sample(1:n.data)])}
  simula$data <- apply(as.matrix(1:nsim),1, perm.f, data=data, n.data=n.data)
  ##
  ## computing empirical variograms for the simulations
  ##
  if (messages.screen) 
    cat(paste("variog.env: computing the empirical variogram for the", 
              nsim, "simulations\n"))
  nbins <- length(obj.variog$bins.lim) - 1
  ##
  ##
  if(obj.variog$direction == "omnidirectional"){
    ## fast path: bin each simulated data set directly via the C
    ## routine "binit" (positional arguments; only $vbin is used)
    bin.f <- function(sim){
      cbin <- vbin <- sdbin <- rep(0, nbins)  
      temp <- .C("binit",
                 as.integer(obj.variog$n.data),
                 as.double(as.vector(coords[,1])),
                 as.double(as.vector(coords[,2])),
                 as.double(as.vector(sim)),
                 as.integer(nbins),
                 as.double(as.vector(obj.variog$bins.lim)),
                 as.integer(estimator.type == "modulus"),
                 as.double(max(obj.variog$u)),
                 as.double(cbin),
                 vbin = as.double(vbin),
                 as.integer(FALSE),
                 as.double(sdbin),
                 PACKAGE = "geoR")$vbin
      return(temp)
    }
    simula.bins <- apply(simula$data, 2, bin.f)
  }
  else{
    ## directional variogram: fall back to variog() for each simulation,
    ## reusing the binning/direction settings stored in obj.variog
    variog.vbin <- function(x, ...)
      variog(geodata = geodata, data = x, 
             uvec = obj.variog$uvec,
             estimator.type = obj.variog$estimator.type, 
             nugget.tolerance = obj.variog$nugget.tolerance, max.dist = obj.variog$max.dist,
             pairs.min = obj.variog$pairs.min,
             direction = obj.variog$direction, tolerance=obj.variog$tolerance,
             messages.screen = FALSE, ...)$v 
    simula.bins <- apply(simula$data, 2, variog.vbin)
  }
  ## keep only the bins retained by the original variogram
  simula.bins <- simula.bins[obj.variog$ind.bin,]
  if(save.sim == FALSE) simula$data <- NULL
  ##
  ## computing envelops
  ##
  if (messages.screen) 
    cat("variog.env: computing the envelops\n")
  ## row 1 = pointwise minimum, row 2 = pointwise maximum
  limits <- apply(simula.bins, 1, range)
  res.env <- list(u = obj.variog$u, v.lower = limits[1, ],
                  v.upper = limits[2,])
  ## NOTE(review): this function stores the simulations as $simulations,
  ## while the sibling variog.env-style function uses $simulated — confirm
  ## which name downstream code expects
  if(save.sim) res.env$simulations <- simula$data
  res.env$call <- call.fc
  class(res.env) <- "variogram.envelope"
  return(res.env)
}

"lines.variogram.envelope" <-
  function(x, lty=3, ...)
{
  ## Add the lower and upper envelope curves of a "variogram.envelope"
  ## object to the current plot.  Note: `lty` is accepted for call
  ## compatibility but (as before) is not forwarded to lines().
  for (env.curve in list(x$v.lower, x$v.upper)) {
    lines(x$u, env.curve, ...)
  }
  invisible()
}

define.bins <-
  function(max.dist, uvec = "default", breaks = "default", nugget.tolerance)
{
  ## Build distance-bin limits and midpoints for empirical variograms.
  ## The first bin always spans [0, nugget.tolerance]; any candidate
  ## limits at or below the tolerance are collapsed into it.
  ##
  ## Arguments:
  ##   max.dist         : maximum distance covered by the bins
  ##   uvec             : "default" (13 bins), a single bin count, or a
  ##                      vector of bin centres
  ##   breaks           : "default", or explicit numeric break points
  ##                      (takes precedence over uvec)
  ##   nugget.tolerance : distance below which pairs count as "zero" lag
  ##
  ## Returns list(uvec = bin midpoints/centres, bins.lim = bin limits).
  clip.to.tolerance <- function(lims)
    c(0, nugget.tolerance, lims[lims > nugget.tolerance])
  midpoints <- function(lims)
    0.5 * (lims[-1] + lims[-length(lims)])
  if(!all(breaks == "default")){
    ## user-supplied break points override uvec
    if(any(!is.numeric(breaks)))
      stop("argument breaks can only take a numeric vector")
    bins.lim <- clip.to.tolerance(breaks)
    uvec <- midpoints(bins.lim)
  }
  else{
    if(all(uvec == "default")) uvec <- 13
    if(!all(is.numeric(uvec)))
      stop("argument uvec can only take a numeric vector")
    if(length(uvec) == 1){
      ## a single number: number of equally spaced bins up to max.dist
      bins.lim <- clip.to.tolerance(seq(0, max.dist, l = uvec + 1))
      uvec <- midpoints(bins.lim)
    }
    else{
      ## a vector of bin centres: limits sit half-way between them
      uvec <- c(0, uvec)
      nvec <- length(uvec)
      half.gaps <- 0.5 * diff(uvec[2:nvec])
      bins.lim <- c(0, uvec[2:(nvec - 1)] + half.gaps,
                    half.gaps[nvec - 2] + uvec[nvec])
      bins.lim <- clip.to.tolerance(bins.lim)
      ## as in the original: uvec stays c(0, centres) here and is not
      ## recomputed from the (possibly clipped) limits
    }
  }
  return(list(uvec = uvec, bins.lim = bins.lim))
}
"xvalid" <-
  function (geodata, coords = geodata$coords, data = geodata$data, 
            model, reestimate = FALSE, variog.obj = NULL,
            output.reestimate = FALSE, locations.xvalid = "all",
            data.xvalid = NULL, messages, ...) 
{
  ## Model validation for a fitted geostatistical model. Two modes:
  ##   * cross-validation ("leave-one-out"): each selected data location is
  ##     removed in turn, the model optionally re-estimated, and the removed
  ##     value predicted by kriging on the remaining data;
  ##   * external validation: kriging predictions at user-supplied locations
  ##     (`locations.xvalid` a 2-column matrix) compared with `data.xvalid`.
  ##
  ## Arguments:
  ##   model             : fitted model object (e.g. from likfit or variofit)
  ##   reestimate        : re-fit the model for each left-out point?
  ##   variog.obj        : empirical variogram; required when reestimate=TRUE
  ##                       and the model was fitted by OLS/WLS
  ##   output.reestimate : also return re-estimated parameters
  ##                       (requires reestimate = TRUE)
  ##   locations.xvalid  : "all", a vector of data indices, or a matrix of
  ##                       coordinates for external validation
  ##   data.xvalid       : observed values for external validation
  ##
  ## Returns a data frame of class "xvalid" with columns data, predicted,
  ## krige.var, error, std.error, prob (plus re-estimated parameters when
  ## output.reestimate = TRUE).
  ##
  ## Checking and organising input
  ##
  call.fc <- match.call()
  if(missing(messages))
    messages.screen <- ifelse(is.null(getOption("geoR.messages")), TRUE, getOption("geoR.messages"))
  else messages.screen <- messages
  if(missing(geodata)) geodata <- list(coords = coords, data = data)
  n <- nrow(coords)
  data <- as.vector(data)
  if (length(data) != n) stop("coords and data have incompatible sizes")
  xmat <- trend.spatial(trend = model$trend, geodata = geodata)
  if (nrow(xmat) != n) stop("coords and trend have incompatible sizes")
  ## re-estimation needs to know which method fitted the model
  if(is.null(model$method)) reestimate <- FALSE
  ## BUG FIX: re-estimated parameters only exist when reestimate = TRUE;
  ## previously this combination failed later with "object 'CVpars' not found"
  if(output.reestimate && !reestimate)
    stop("output.reestimate = TRUE requires reestimate = TRUE")
  if(is.null(model$aniso.pars)) model$aniso.pars <- c(0,1)
  if(is.null(model$kappa)) model$kappa <- 0.5
  ##
  ## Locations to be used in the cross-validation
  ##
  if(all(locations.xvalid == "all") | is.vector(locations.xvalid)){
    autocross <- TRUE
    if(all(locations.xvalid == "all"))
      locations.xvalid <- 1:n
    else
      if(any(locations.xvalid > n) | !is.numeric(locations.xvalid))
        stop("\nxvalid: vector indicating locations to be validated is not a numeric vector and/or has element(s) with value greater than the number of data loccations")
    crossvalid <- TRUE
  }
  else{
    autocross <- FALSE
    if(is.matrix(locations.xvalid) | is.data.frame(locations.xvalid))
      if(dim(locations.xvalid)[2] <= 2){
        if(dim(locations.xvalid)[2] == 1){
          ## a one-column object holds indices of points to cross-validate.
          ## BUG FIX: original assigned is.vector(...) (a logical),
          ## discarding the indices; coerce to a plain vector instead
          locations.xvalid <- as.vector(as.matrix(locations.xvalid))
          crossvalid <- TRUE
          ## BUG FIX: original tested any(locations.xvalid) > n
          if(any(locations.xvalid > n) | length(locations.xvalid) > n)
            stop("incorrect value to the argument locations.xvalid.\nThis must be a numeric vector with numbers indicating the locations to be cross-validated")
        }
        else{
          if(messages.screen)
            cat("xvalid: cross-validation to be performed on locations provided by the user\n")
          if(is.null(data.xvalid))
            stop("the argument \"data.xvalid\" must be provided in order to perform validation on a set of locations different from the original data")
          crossvalid <- FALSE
        }
      }
      else
        if(!is.vector(locations.xvalid) | !is.numeric(locations.xvalid))
          stop("\nargument locations.xvalid must be either:\n a numeric vector with numbers indicating the locations to be cross-validated\n a matrix with coordinates for the locations to be cross-validated.")
        else
          ## BUG FIX: original tested any(locations.xvalid) > n
          if(any(locations.xvalid > n) | length(locations.xvalid) > n)
            stop("incorrect value to the argument locations.xvalid.\nThis must be a numeric vector with numbers indicating the locations to be cross-validated")
  }
  if(crossvalid == FALSE) n.pt.xv <- dim(locations.xvalid)[[1]]
  else n.pt.xv <- length(locations.xvalid)
  if(messages.screen){
    cat(paste("xvalid: number of data locations       =", n))
    cat("\n")
    cat(paste("xvalid: number of validation locations =", n.pt.xv))
    cat("\n")
    if(crossvalid) cat("xvalid: performing cross-validation at location ... ")
    else  cat("xvalid: performing validation at the locations provided")
    }
  ##
  ## Defining a function to predict at one point
  ##
  if(crossvalid){
    ## cv.f: leave out data point `ndata`, optionally re-fit the model on
    ## the remaining data, and krige back at the left-out location.
    cv.f <- function(ndata, ...) {
      if(messages.screen) cat(paste(ndata, ", ", sep=""))
      ## excluding data point
      coords.out <- coords[ndata, , drop = FALSE]
      data.out <- data[ndata]
      xmat.out <- xmat[ndata, , drop = FALSE]
      cv.coords <- coords[-ndata, ]
      cv.data <- as.vector(data)[-ndata]
      cv.xmat <- xmat[-ndata, , drop = FALSE]
      ## re-estimating the model (reestimate = TRUE implies model$method
      ## is non-NULL, see input checks above)
      if (reestimate) {
        if(model$method == "ML" | model$method == "REML" | model$method == "RML"){
          ## keep the fixed/estimated status and values of the original fit
          fix.pars <- (model$parameters.summary[c("tausq", "kappa", "psiA",
                                                  "psiR", "lambda"), 1] == "fixed")
          val.pars <- model$parameters.summary[c("tausq", "kappa", 
                                                 "psiA", "psiR", "lambda"), 2]
          names(fix.pars) <- c("tausq", "kappa", "psiA", "psiR", 
                               "lambda")
          names(val.pars) <- c("tausq", "kappa", "psiA", "psiR", 
                               "lambda")
          CVmod <- likfit(coords = cv.coords, data = cv.data, 
                          ini = model$cov.pars, fix.nugget = fix.pars["tausq"], 
                          nugget = val.pars["tausq"], fix.kappa = fix.pars["kappa"], 
                          kappa = val.pars["kappa"], fix.psiR = fix.pars["psiR"], 
                          psiR = val.pars["psiR"], fix.psiA = fix.pars["psiA"], 
                          psiA = val.pars["psiA"], fix.lambda = fix.pars["lambda"], 
                          lambda = val.pars["lambda"], cov.model = model$cov.model, 
                          trend = ~cv.xmat + 0, method = model$method, 
                          messages = FALSE, ...)
          if(output.reestimate){
            CVpars <- (CVmod$parameters.summary[c("tausq", "kappa", "psiA", "psiR", "lambda"), 2])
            CVpars <- c(CVmod$cov.pars, CVpars[fix.pars == FALSE])
          } 
        }
        if(model$method == "OLS" | model$method == "WLS"){
          if(is.null(variog.obj))
            stop("xvalid: when argument reestimate = TRUE an object with the fitted variogram model must be provided in the argument variog.obj ")
          ## recompute the empirical variogram without the left-out point,
          ## then re-fit the variogram model to it
          CVvar <- variog(coords = cv.coords, data = cv.data, uvec = variog.obj$uvec,
                          trend = variog.obj$trend, lambda = variog.obj$lambda,
                          option = variog.obj$output.type,
                          estimator.type = variog.obj$estimator.type,
                          nugget.tolerance = variog.obj$nugget.tolerance,
                          max.dist = max(variog.obj$u), pairs.min = 2,
                          bin.cloud = FALSE, direction = variog.obj$direction,
                          tolerance = variog.obj$tolerance,
                          unit.angle = "radians",
                          messages = FALSE, ...)
          CVmod <- variofit(vario = CVvar, ini=model$cov.pars, cov.model = model$cov.model,
                            fix.nugget = model$fix.nugget, nugget = model$nugget,
                            fix.kappa = model$fix.kappa, kappa = model$kappa, max.dist = model$max.dist,
                            minimisation.function = model$minimisation.function,
                            weights = model$weights, messages = FALSE, ...)
          if(output.reestimate){
            CVpars <- CVmod$cov.pars
            if(CVmod$fix.nugget == FALSE) CVpars <- c(CVpars, CVmod$nugget)
            if(CVmod$fix.kappa == FALSE) CVpars <- c(CVpars, CVmod$kappa)
          }
        }
      }
      else CVmod <- model
      ## collect the parameter values used for kriging
      if(is.null(model$method)){
        fix.pars <- rep(TRUE, 5)
        val.pars <- c(CVmod$nugget, CVmod$kappa, CVmod$aniso.pars, CVmod$lambda)
      }
      else{
        if(model$method == "ML" | model$method == "REML" | model$method == "RML"){
          fix.pars <- (CVmod$parameters.summary[c("tausq", "kappa", 
                                                  "psiA", "psiR", "lambda"), 1] == "fixed")
          val.pars <- CVmod$parameters.summary[c("tausq", "kappa", 
                                                 "psiA", "psiR", "lambda"), 2]
        }
        if(model$method == "OLS" | model$method == "WLS"){
          fix.pars <- c(CVmod$fix.nugget, CVmod$fix.kappa,TRUE,TRUE,TRUE)
          if(is.null(CVmod$kappa)) CVmod$kappa <- 0.5
          val.pars <- c(CVmod$nugget, CVmod$kappa, 0, 1, CVmod$lambda)
        }
      }
      names(fix.pars) <- c("tausq", "kappa", "psiA", "psiR", "lambda")
      names(val.pars) <- c("tausq", "kappa", "psiA", "psiR", "lambda")
      kr <- krige.conv(coords = cv.coords, data = cv.data, loc = coords.out,
                       krige = krige.control(trend.d = ~cv.xmat + 0,
                         trend.l = ~xmat.out + 0, cov.model = CVmod$cov.model, 
                         cov.pars = CVmod$cov.pars, nugget = CVmod$nugget, 
                         kappa = val.pars["kappa"], lambda = val.pars["lambda"], 
                         aniso.pars = val.pars[c("psiA", "psiR")]),
                       output = output.control(mess = FALSE))
      ## observed value, prediction and kriging variance (plus re-estimated
      ## parameters when requested)
      res <- c(data.out, kr$pred, kr$krige.var)
      if(output.reestimate) res <- c(res, CVpars)
      return(res)
    }
    res <- as.data.frame(t(apply(matrix(locations.xvalid), 1, cv.f)))
  }
  else{
    ## external validation: single kriging run at the supplied locations
    xmat.val.loc <- trend.spatial(trend = model$trend, geodata = list(coords=locations.xvalid))
    if(is.null(model$method)){
      fix.pars <- rep(TRUE, 5)
      ## BUG FIX: original referenced the undefined object `mod` here
      val.pars <- c(model$nugget, model$kappa, model$aniso.pars, model$lambda)
    }
    ## BUG FIX: the method comparisons below are now chained with `else`;
    ## previously `model$method == "ML"` was evaluated even when method was
    ## NULL, giving an "argument is of length zero" error
    else if(model$method == "ML" | model$method == "REML" | model$method == "RML"){
      fix.pars <- (model$parameters.summary[c("tausq", "kappa", 
                                              "psiA", "psiR", "lambda"), 1] == "fixed")
      val.pars <- model$parameters.summary[c("tausq", "kappa", 
                                             "psiA", "psiR", "lambda"), 2]
    }
    else if(model$method == "OLS" | model$method == "WLS"){
      fix.pars <- c(model$fix.nugget, model$fix.kappa,TRUE,TRUE,TRUE)
      val.pars <- c(model$nugget, model$kappa, 0, 1, model$lambda)
    }
    names(fix.pars) <- c("tausq", "kappa", "psiA", "psiR", "lambda")
    names(val.pars) <- c("tausq", "kappa", "psiA", "psiR", "lambda")
    res <- krige.conv(coords = coords, data = data, loc = locations.xvalid,
                     krige = krige.control(trend.d = ~xmat + 0,
                       trend.l = ~xmat.val.loc + 0, cov.model = model$cov.model, 
                       cov.pars = model$cov.pars, nugget = model$nugget, 
                       kappa = val.pars["kappa"], lambda = val.pars["lambda"], 
                       aniso.pars = val.pars[c("psiA", "psiR")]),
                      output = output.control(mess = FALSE))[1:2]
    res <- data.frame(data.xvalid, res$pred, res$krige.var)
  } 
  if(messages.screen) cat("\nxvalid: end of cross-validation\n")
  if(output.reestimate){
    ## names for the re-estimated parameter columns appended by cv.f
    pars.names <- c("sigmasq", "phi")
    if(model$method == "ML" | model$method == "REML" | model$method == "RML"){
      fix.pars <- (model$parameters.summary[c("tausq", "kappa", 
                                              "psiA", "psiR", "lambda"), 1] == "fixed")
      pars.names <- c(pars.names,(c("tausq", "kappa", "psiA", "psiR", "lambda"))[fix.pars == FALSE])
    }
    if(model$method == "OLS" | model$method == "WLS"){
      if(model$fix.nugget == FALSE) pars.names <- c(pars.names, "tausq")
      if(model$fix.kappa == FALSE) pars.names <- c(pars.names, "kappa")
    }
      names(res) <- c(c("data", "predicted", "krige.var"), pars.names)
  }
  else names(res) <- c("data", "predicted", "krige.var")
  ## derived diagnostics (previously relied on `$` partial matching via
  ## res$pred / res$err; now spelled out)
  res$error <- res$data - res$predicted
  res$std.error <- res$error/sqrt(res$krige.var)
  res$prob <- pnorm(res$data, mean = res$predicted, sd = sqrt(res$krige.var))
  if(output.reestimate){
    ## reorder so the diagnostics come before the re-estimated parameters
    np <- length(pars.names)
    res <- res[,c((1:3), ((3+np+1):(6+np)),(4:(3+np)))] 
  }
  attr(res,"row.names") <- NULL
  ## record where the validation coordinates can be recovered from,
  ## for use by plot.xvalid()
  if(autocross){
    if(!is.null(call.fc$geodata))
      attr(res,"geodata.xvalid") <- call.fc$geodata
    else
      attr(res,"locations.xvalid") <- call.fc$locations.xvalid
  }
  else
    if(!is.null(locations.xvalid))
      attr(res,"locations.xvalid") <- call.fc$locations.xvalid
  attr(res, "class") <- "xvalid"
  return(res)
}

"plot.xvalid" <-
  function (x, coords, borders = NULL, ask = TRUE,
            error = TRUE, std.error = TRUE,
            data.predicted = TRUE,
            pp = TRUE, map = TRUE, histogram = TRUE,
            error.predicted = TRUE, error.data = TRUE, ...)
{
  ## Diagnostic plots for an "xvalid" object (output of xvalid()).
  ## Depending on the logical switches, draws: data vs predicted, a P-P
  ## plot, an error map, an error histogram, and errors against predicted
  ## and against data values -- once for raw errors (error = TRUE) and
  ## once for standardised errors (std.error = TRUE).
  ##
  ## Arguments:
  ##   coords  : data coordinates; if missing, recovered from the
  ##             attributes stored on `x` by xvalid()
  ##   borders : optional two-column matrix with region borders
  ##   ask     : passed to par(); prompt before each new plot
  ##
  ## Saving original par() parameters
  ##
  if (is.R()) 
    par.ori <- par(no.readonly = TRUE)
  else par.ori <- par()
  on.exit(par(par.ori))
  ##
  ## checking input
  ##
  if(!is.null(borders)){
    if(!is.matrix(borders) & !is.data.frame(borders))
      stop("argument borders must be a two column matrix or a data frame with the coordinates of the borders")
    else
      if(ncol(borders) > 2)
        stop("argument borders must be a two column matrix or a data frame with the coordinates of the borders")
      else borders <- as.matrix(borders)
  }
  if(error | std.error){
    ## coordinates are only needed for the error maps
    if(missing(coords)){
      if(!is.null(attr(x,"geodata.xvalid")))
        coords <- eval(attr(x,"geodata.xvalid"))$coords
      if(!is.null(attr(x,"locations.xvalid")))
        coords <- eval(attr(x,"locations.xvalid"))       
    }
    else{
      if(class(coords) == "geodata")
        coords <- coords$coords
    }
    if(!is.matrix(coords) & !is.data.frame(coords))
      stop("argument coords must be a two column matrix or a data frame with the data coordinates")
    else
      if(ncol(coords) > 2)
        stop("argument coords must be a two column matrix or a data frame with the data coordinates")
      else coords <- as.matrix(coords)
  }
  ##
  ## auxiliary computations for plots
  ##
  n <- length(x$data)
  ## common axis limits so related plots are comparable
  xylim <- range(c(x$data, x$pred))
  prelim <- range(x$pred)
  datlim <- range(x$data)
  errlim <- max(abs(range(x$error)))
  errlim <- c(-errlim, errlim)
  err.std <- sqrt(var(x$error))
  ## histogram break points: wider/fewer bins for small samples
  if(n > 90){
    seqerr <- seq(-3.5*err.std, 3.5*err.std, l=15)
    seqstd <- seq(-3.5, 3.5, l=15)
  }
  else{
    seqerr <- seq(-4*err.std, 4*err.std, l=9)
    seqstd <- seq(-4, 4, l=9)
  }
  stdlim <- max(c(3, abs(range(x$std.error))))
  stdlim <- c(-stdlim, stdlim)
  ## indicator for negative and positive errors
  error.cut <- cut(x$error, breaks=c(errlim[1], 0, errlim[2]), include.l=TRUE, labels=FALSE)
  ##
  ## Data vs predicted
  ##
  if(data.predicted){
    par(pty = "s")
    plot(x$pred, x$data, type = "n", xlim = xylim, ylim = xylim,
         ylab = "data", xlab = "predicted")
    points(x$pred, x$data, ...)
    ##    points(x$pred, x$data, pch = (c("x", "+"))[error.cut], col=(c("red", "blue"))[error.cut])
    abline(0,1)
  }
  ##
  par(ask = ask)
  ##
  ## P-P plot is drawn here unless BOTH error and std.error plots were
  ## requested (in that case it is drawn after the error plots, below)
  if(!error | !std.error){
    ##
    ## P-P plot
    ##
    if(pp){
      par(pty = "s")  
      plot(ppoints(n), x$prob[order(x$prob)], xlim=c(0,1), ylim=c(0,1), xlab="theoretical prob", ylab="observed prob")
      abline(0,1)
    }
  }
  if(error){
    ##
    ## Plotting errors
    ##
    ## sizes proportional to errors values
    err.abs <- abs(x$error)
    coords.order <- coords[order(err.abs), ]
    err.order <- err.abs[order(err.abs)]
    cut.order <- error.cut[order(err.abs)]
    r.y <- range(err.order)
    ## map symbol sizes to [0.7, 2]
    err.size <- 0.7 + ((err.order - r.y[1]) * (2 - 0.7))/(r.y[2] - r.y[1])
    ## equal scale for plot
    coords.lims <- apply(coords, 2, range)
    coords.diff <- diff(coords.lims)
    if (coords.diff[1] != coords.diff[2]) {
      ## pad the shorter axis so both span the same distance
      coords.diff.diff <- abs(diff(as.vector(coords.diff)))
      ind.min <- which(coords.diff == min(coords.diff))
      coords.lims[, ind.min] <- coords.lims[, ind.min] + c(-coords.diff.diff, 
                                                           coords.diff.diff)/2
    }
    ##
    if(map){
      par(pty = "s")
      ##
      ## error map: "x" = negative error, "+" = positive; size ~ |error|
      plot(coords, xlab = "Coord X", ylab = "Coord Y",
           type = "n", 
           xlim = coords.lims[, 1], ylim = coords.lims[, 2])
      if (is.R()) {
        points(coords.order, pch = (c("x", "+"))[cut.order], col=(c("red", "blue"))[cut.order], cex = err.size)
      }
      else
        points(coords.order, pch = (c("x", "+"))[cut.order], col=(c(3, 4))[cut.order], cex = err.size)
      if(!is.null(borders))
        lines(borders)
    }
    ##
    ## errors histogram
    ##
    if(histogram){
      par(pty = "m")
      ## extend break points so all observed errors are covered
      if(min(x$error) < min(seqerr)) seqerr <- c(min(x$error), seqerr)
      if(max(x$error) > max(seqerr)) seqerr <- c(seqerr, max(x$error))
      hist(x$error, prob=TRUE, main="", breaks=seqerr, xlab="data - predicted")
    }
    ##
    ## errors vs predicted
    ##
    if(error.predicted){
      par(pty = "m")
      plot(x$pred, x$error, type = "n", xlim = prelim, ylim = errlim,
           xlab = "predicted", ylab = "data - predicted")
    ##  points(x$pred, x$error, pch = (c("x", "+"))[error.cut], col=(c("red", "blue"))[error.cut])
      points(x$pred, x$error, ...)
      abline(h=0)
    }
    ##
    ## errors vs data
    ##
    if(error.data){
      par(pty = "m")
      plot(x$data, x$error, type = "n", xlim = datlim, ylim = errlim,
           xlab = "data", ylab = "data - predicted")
      ##      points(x$data, x$error, pch = (c("x", "+"))[error.cut], col=(c("red", "blue"))[error.cut])
      points(x$data, x$error, ...)
      abline(h=0)
      ##
    }
  }
  if(error & std.error){
    ##
    ## P-P plot
    ##
    if(pp){
      par(pty = "s")  
      plot(ppoints(n), x$prob[order(x$prob)], xlim=c(0,1), ylim=c(0,1), xlab="theoretical prob", ylab="observed prob")
      abline(0,1)
    }
  }
  if(std.error){
    ##
    ## Plotting std residuals
    ## (same sequence of plots as above, using standardised errors)
    ##
    ## sizes proportional to errors values
    err.abs <- abs(x$std.error)
    coords.order <- coords[order(err.abs), ]
    err.order <- err.abs[order(err.abs)]
    cut.order <- error.cut[order(err.abs)]
    r.y <- range(err.order)
    err.size <- 0.7 + ((err.order - r.y[1]) * (2 - 0.7))/(r.y[2] - r.y[1])
    ## equal scale for plot
    coords.lims <- apply(coords, 2, range)
    coords.diff <- diff(coords.lims)
    if (coords.diff[1] != coords.diff[2]) {
      coords.diff.diff <- abs(diff(as.vector(coords.diff)))
      ind.min <- which(coords.diff == min(coords.diff))
      coords.lims[, ind.min] <- coords.lims[, ind.min] + c(-coords.diff.diff, 
                                                           coords.diff.diff)/2
    }
    ##
    if(map){
      par(pty = "s")
      ##
      plot(coords, xlab = "Coord X", ylab = "Coord Y", type = "n", 
           xlim = coords.lims[, 1], ylim = coords.lims[, 2])
      if (is.R()) {
        points(coords.order, pch = (c("x", "+"))[cut.order], col=(c("red", "blue"))[cut.order], cex = err.size)
      }
      else
        points(coords.order, pch = (c("x", "+"))[cut.order], col=(c(3, 4))[cut.order], cex = err.size)
      if(!is.null(borders))
        lines(borders)
    }
    ##
    ## std. errors histogram
    ##
    if(histogram){
      par(pty = "m")
      if(min(x$std.error) < min(seqstd)) seqstd <- c(min(x$std.error), seqstd)
      if(max(x$std.error) > max(seqstd)) seqstd <- c(seqstd, max(x$std.error))
      hist(x$std.error, prob=TRUE, main="", breaks = seqstd, xlab="std residuals")
    }
    ##
    ## std. errors vs predicted
    ##
    if(error.predicted){
      par(pty = "m")
      plot(x$pred, x$std.error, type = "n", xlim = prelim, ylim = stdlim,
           xlab = "predicted", ylab = "std residuals")
      ##      points(x$pred, x$std.error, pch = (c("x", "+"))[error.cut], col=(c("red", "blue"))[error.cut])
      points(x$pred, x$std.error, ...)
      abline(h=0)
    }
    ##
    ## std. errors vs data
    ##
    if(error.data){
      par(pty = "m")
      plot(x$data, x$std.error, type = "n", xlim = datlim, ylim = stdlim,
           xlab = "data", ylab = "std residuals")
      ##      points(x$data, x$std.error, pch = (c("x", "+"))[error.cut], col=(c("red", "blue"))[error.cut])
      points(x$data, x$std.error,  ...)
      abline(h=0)
      ##
    }
  }
  ##
  return(invisible())
}
".First.lib" <- function(lib, pkg)
{
  ## Package load hook: loads the compiled code and prints a banner with
  ## the package title, version and build date.
  library.dynam("geoR", package = pkg, lib.loc = lib)
  cat("\n")
  cat("----------------------------------------------------------\n")
  ## packageDescription() replaced the deprecated package.description()
  ## in R 1.9-0; fall back to the old function on earlier versions.
  if(exists("packageDescription", mode="function")){
    pkg.info <- packageDescription("geoR", lib.loc = lib,
                                   fields=c("Title","Version","Date"))
  }
  else{
    info.fields <- package.description("geoR", lib.loc = lib,
                                       fields=c("Title","Version","Date"))
    pkg.info <- list(Title=info.fields[1], Version=info.fields[2],
                     Date=info.fields[3])
  }
  cat(pkg.info$Title)
  cat("\n")
  cat(paste("geoR version ", pkg.info$Version, " (built on ", pkg.info$Date, ") is now loaded\n", sep=""))
  cat("----------------------------------------------------------\n")
  cat("\n")
  return(invisible(0))
}




