## Package name marker (used by R's package mechanism for non-namespace
## packages; historically written by the package installer).
.packageName <- "lspls"
### MSEP.R: MSEP and RMSEP functions.
### $Id: MSEP.R 16 2006-01-06 14:50:56Z bhm $


## MSEP takes a CV-object, and calculates the MSEP
## MSEP method for "lsplsCv" objects: mean squared error of prediction
## for each combination of numbers of components.
##
## Arguments:
##   object -- an "lsplsCv" object with a `pred' array of cross-validated
##             predictions and (required) a stored `model' frame.
##   ...    -- ignored.
## Returns the column means of the squared prediction errors, one value
## per combination of components.
MSEP.lsplsCv <- function(object, ...) {
    ## BUG FIX: the original tested `object$mode', which only worked via
    ## `$' partial matching; test the `model' component explicitly, as the
    ## error message already says.
    if (is.null(object$model))
        stop("`object' has no `model' component.  Recalculate with `model = TRUE'")
    ## The prediction error: pred minus the observed response, recycled
    ## over the component dimensions of the `pred' array.
    colMeans((object$pred - c(model.response(model.frame(object))))^2)
}


## RMSEP is a wrapper around MSEP that returns its square root.
RMSEP.lsplsCv <- function(object, ...) {
    ## Root mean squared error of prediction: the square root of MSEP.
    msep <- MSEP(object, ...)
    sqrt(msep)
}
### lspls.R: The user interface function for fitting models
### $Id: lspls.R 14 2006-01-05 10:26:41Z bhm $

## lspls: fit an LS-PLS (least squares -- partial least squares) model.
##
## Arguments:
##   formula   -- model formula.  The first term on the right-hand side is
##                used as the LS matrix (X); the remaining terms are the
##                PLS matrices (Z), kept in the order they were given.
##   ncomp     -- number(s) of PLS components for each Z matrix; recycled
##                to length(Z) and stored as a list.
##   data, subset, na.action -- standard model-frame arguments.
##   model     -- if TRUE (default), the model frame is stored in the fit.
##   ...       -- forwarded to orthlspls.fit.
## Returns an object of class "lspls".
lspls <- function(formula, ncomp, data, subset, na.action,
                  model = TRUE, ...)
{
    ## Get the terms; keep.order = TRUE preserves the order of the
    ## predictor matrices as written in the formula.
    mt <- terms(formula, keep.order = TRUE)
    ## Get the model frame by rebuilding this call as a model.frame() call
    ## with only the relevant arguments, then evaluating it in the caller:
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "subset", "na.action"), names(mf), 0)
    mf <- mf[c(1, m)]                   # Retain only the named arguments
    mf[[2]] <- mt                       # Use the terms instead of the
                                        # formula, to keep the ordering.
    mf[[1]] <- as.name("model.frame")
    mf <- eval(mf, parent.frame())
    ## Get the data matrices
    Y <- model.response(mf, "numeric")
    if (is.matrix(Y)) {
        ## Multi-response: make sure the response columns are named.
        if (is.null(colnames(Y)))
            colnames(Y) <- paste("Y", 1:dim(Y)[2], sep = "")
    } else {
        ## Single response: coerce to a one-column matrix named after the
        ## response expression of the formula.
        Y <- as.matrix(Y)
        colnames(Y) <- deparse(formula[[2]])
    }
    ## All the predictor matrices, in correct order (one per formula term):
    matrices <- apply(attr(mt, "factors"), 2, function(x) mf[,which(x > 0)])
    X <- matrices[[1]]
    Z <- matrices[-1]
    ## Make sure ncomp is a list, and repeat it as needed:
    ncomp <- rep(as.list(ncomp), length = length(Z))

    ## Fit the model:
    ## NOTE(review): orthlspls.fit() as defined in this file accepts no
    ## '...' argument, so any extra arguments here will raise an error.
    z <- orthlspls.fit(Y, X, Z, ncomp, ...)
    ## Build and return the object:
    class(z) <- "lspls"
    z$na.action <- attr(mf, "na.action")
    z$ncomp <- ncomp
    z$call <- match.call()
    z$terms <- mt
    if (model) z$model <- mf
    z
}
### lsplsCv.R: The user interface function for cross-validation
### $Id: lsplsCv.R 14 2006-01-05 10:26:41Z bhm $

## lsplsCv: cross-validate an LS-PLS model.
##
## Arguments:
##   formula, data, subset, na.action -- model specification, as in lspls().
##   ncomp        -- maximal number(s) of components to try for each Z
##                   matrix; recycled to length(Z) and stored as a list.
##   segments     -- either the number of CV segments, or a list of index
##                   vectors (user-supplied segments).
##   segment.type -- how to generate segments when `segments' is a number.
##   length.seg   -- alternatively, the length of each segment.
##   model        -- if TRUE (default), store the model frame in the result.
##   ...          -- forwarded to orthlsplsCv.
## Returns an object of class "lsplsCv"; the `pred' component holds the
## cross-validated predictions for every combination of components.
lsplsCv <- function(formula, ncomp, data, subset, na.action,
                    segments = 10, segment.type = c("random",
                                   "consecutive", "interleaved"),
                    length.seg, model = TRUE, ...)
{
    ## Get the terms; keep.order = TRUE preserves the order of the
    ## predictor matrices as written in the formula.
    mt <- terms(formula, keep.order = TRUE)
    ## Get the model frame by rebuilding this call as a model.frame() call
    ## with only the relevant arguments, then evaluating it in the caller:
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "subset", "na.action"), names(mf), 0)
    mf <- mf[c(1, m)]                   # Retain only the named arguments
    mf[[2]] <- mt                       # Use the terms instead of the
                                        # formula, to keep the ordering.
    mf[[1]] <- as.name("model.frame")
    mf <- eval(mf, parent.frame())
    ## Get the data matrices
    Y <- model.response(mf, "numeric")
    if (is.matrix(Y)) {
        ## Multi-response: make sure the response columns are named.
        if (is.null(colnames(Y)))
            colnames(Y) <- paste("Y", 1:dim(Y)[2], sep = "")
    } else {
        ## Single response: coerce to a one-column matrix named after the
        ## response expression of the formula.
        Y <- as.matrix(Y)
        colnames(Y) <- deparse(formula[[2]])
    }
    ## All the predictor matrices, in correct order (one per formula term):
    matrices <- apply(attr(mt, "factors"), 2, function(x) mf[,which(x > 0)])
    X <- matrices[[1]]
    Z <- matrices[-1]
    ## Make sure ncomp is a list, and repeat it as needed:
    ncomp <- rep(as.list(ncomp), length = length(Z))

    ## Set up segments:
    if (is.list(segments)) {
        ## User-supplied list of index vectors:
        if (is.null(attr(segments, "type")))
            attr(segments, "type") <- "user supplied"
    } else {
        ## Generate segments (cvsegments is presumably from the pls
        ## package -- confirm):
        nObs <- nrow(X)
        if (missing(length.seg)) {
            segments <- cvsegments(nObs, k = segments, type = segment.type)
        } else {
            segments <- cvsegments(nObs, length.seg = length.seg,
                                   type = segment.type)
        }
    }

    ## Build and return the object:
    z <- list()
    class(z) <- "lsplsCv"
    ## Do the cross-validation:
    z$pred <- orthlsplsCv(Y, X, Z, ncomp, segments, ...)
    ## Name the dimensions: observations, responses, then one dimension
    ## per predictor matrix (the response term is dropped from the terms):
    names(dimnames(z$pred)) <-
        c("obs", "resp", rownames(attr(delete.response(mt), "factors"))[-1])
    z$segments <- segments
    z$na.action <- attr(mf, "na.action")
    z$ncomp <- ncomp
    z$call <- match.call()
    z$terms <- mt
    if (model) z$model <- mf
    z
}
###
### Orthogonal case, fitting of model
###

## orthlspls.fit: fit an LS-PLS model with the orthogonalisation algorithm.
##
## Arguments:
##   Y     -- response matrix (nObs x nResp).
##   X     -- the LS predictor matrix.
##   Z     -- list of PLS predictor matrices.  Each element is either a
##            single matrix, or a list of "parallell" matrices fitted side
##            by side in the same step.
##   ncomp -- list of the number of PLS components for each element of Z
##            (a vector of numbers for a parallell-matrix element).
## Returns a list with the regression coefficients (B), the combined
## predictor matrix (V), the orthogonalisation coefficient matrices, the
## fitted pls sub-models, ncomp, scores, loadings and final residuals.
##
## NOTE(review): lspls() forwards '...' to this function, but no '...'
## argument is accepted here, so extra arguments will raise an error.
orthlspls.fit <- function(Y, X, Z, ncomp) {
    ## Parametres:
    nObs <- nrow(X)
    totNumComps <- sum(unlist(ncomp))
    totNumCoefs <- ncol(X) + totNumComps
    ## Guard against an underdetermined regression problem:
    if(totNumCoefs > nObs) stop("Too many variables/components selected.")

    ## Containers:
    B <- matrix(nrow = totNumCoefs, ncol = ncol(Y)) # Regr. coefs.
    V <- matrix(nrow = nObs, ncol = totNumCoefs) # Regr. variables
    models <- list()                    # plsr models
    orthCoefs <- list()                 # Orthogonalisation matrices
    ## These two are not strictly neccessary:
    S <- list()                         # Scores
    L <- list()                         # Loadings


    ## Choose PLS fit algorithm.  FIXME: Support other algs?
    ## (oscorespls.fit is presumably the orthogonal scores algorithm from
    ## the pls package -- confirm.)
    pls.fit <- oscorespls.fit

    ## Start with X: ordinary least squares regression of Y on X.
    lsX <- lm.fit(X, Y)
    nVar <- ncol(X)
    ## Extract
    V[,1:nVar] <- X
    B[1:nVar,] <- lsX$coefficients
    res <- lsX$residuals

    ## For testing:
    ##Balt <- B

    ## Walk through Z: each step models the residuals from the previous
    ## step, using scores orthogonalised against all variables used so far.
    for (i in seq(along = Z)) {
        ##cat("i =", i, "\n")
        M <- Z[[i]]
        if (is.matrix(M)) {             # Single matrix
            Mo <- orth(M, V[,1:nVar])   # Orth. M against all used variables
            orthCoefs[[i]] <- Corth(M, V[,1:nVar]) # For pred
            models[[i]] <- pls.fit(Mo, res, ncomp[[i]])# Could use Y
            V[,nVar + (1:ncomp[[i]])] <- S[[i]] <- models[[i]]$scores
            L[[i]] <- models[[i]]$loadings
            ## FIXME: Testing:
            ##lmZ <- lm.fit(models[[i]]$scores, res)
            ##Balt[nVar + (1:ncomp[[i]]),] <- lmZ$coefficients
            ## FIXME: This depends on the pls algorithm (at least the scaling):
            B[nVar + (1:ncomp[[i]]),] <- t(models[[i]]$Yloadings)
            res <- models[[i]]$residuals[,,ncomp[[i]]]
            nVar <- nVar + ncomp[[i]]
        } else {                        # Parallell matrices
            Vadd <- matrix(nrow = nObs, ncol = sum(ncomp[[i]])) # The variables to be added in the present step
            added <- 0
            S[[i]] <- list()
            L[[i]] <- list()
            orthCoefs[[i]] <- list()
            models[[i]] <- list()
            for (j in seq(along = M)) {
                ##cat("j =", j, "\n")
                ## Walk through Z[[i]]
                Mo <- orth(M[[j]], V[,1:nVar])
                orthCoefs[[i]][[j]] <- Corth(M[[j]], V[,1:nVar]) # For pred
                models[[i]][[j]] <- pls.fit(Mo, res, ncomp[[i]][[j]])# Could use Y
                Vadd[,added + (1:ncomp[[i]][[j]])] <- S[[i]][[j]] <- models[[i]][[j]]$scores
                L[[i]][[j]] <- models[[i]][[j]]$loadings
                added <- added + ncomp[[i]][[j]]
            }
            V[,nVar + (1:sum(ncomp[[i]]))] <- Vadd
            ## Not strictly neccessary in orth. version:
            ## (the collected scores are regressed jointly on the
            ## residuals to get this step's coefficients)
            lmZ <- lm.fit(Vadd, res)
            B[nVar + (1:sum(ncomp[[i]])),] <- lmZ$coefficients
            ##Balt[nVar + (1:sum(ncomp[[i]])),] <- lmZ$coefficients
            res <- lmZ$residuals
            nVar <- nVar + sum(ncomp[[i]])
        } # if
    } # for
    list(coefficients = B, predictors = V, orthCoefs = orthCoefs,
         models = models, ncomp = ncomp, scores = S, loadings = L, residuals = res)
} # function

## Simplification idea: Convert all the single matrices in Z into lists
## with one element.  That would make the algorithms simpler (and should
## not make them noticeably slower).  Where needed, one can test on
## length(M) (e.g. when computing B and the new res).

## FIXME:
## - Change name of 'models' component to 'plsmodels'?
## - Add fitted values?
### orthlsplsCv.R: Cross-validation, orthogonalizing version
###
### $Id: orthlsplsCv.R 17 2006-01-10 14:42:16Z bhm $

## The algorithm is based on recursion, after X has been handled.

## orthlsplsCv: cross-validate an orthogonalised LS-PLS model.
##
## Arguments:
##   Y, X, Z  -- response matrix, LS matrix and list of PLS matrices, as
##               in orthlspls.fit.
##   ncomp    -- list of the maximal number of components to try for each
##               element of Z.
##   segments -- list of CV segments (index vectors of held-out rows).
##   ...      -- NOTE(review): accepted but never used in this function --
##               confirm whether it was meant to be forwarded.
## Returns an array of cross-validated predictions with dimensions
## c(nObs, nResp, unlist(ncomp) + 1); each component dimension is indexed
## from 0 components (i.e. that matrix unused) up to the maximum.
orthlsplsCv <- function(Y, X, Z, ncomp, segments, ...) {

    ## The recursive function:
    ## It uses the following variables from orthlsplsCv
    ## - Z: list of spectral matrices
    ## - ncomp: list of #comps to use in the CV
    ## - segment: indices of the segment to be predicted
    ## - cvPreds: array of predictions; dim: c(nObs, nResp, unlist(ncomp))
    ## - pls.fit: the pls fit function
    cvPredRest <- function(indices, prevCalib, prevPred, prevComps, prevRes) {
        ## indices is the indices of the remaining matrices
        ## prevCalib is a matrix with the X vars and scores used in the
        ##   previous calibrations
        ## prevPred is a matrix with the X vars and scores used in the
        ##   previous predictions
        ## prevComps is the numbers of components used in the previous
        ##   calibrations
        ## prevRes is the residuals from the previous calibrations

        ## The general idea is to handle the first matrix or list of
        ## matrices (Z[[indices[1]]]), and then recall itself on the rest of
        ## the matrices.

        ## The matrix/matrices to handle in this call:
        ind <- indices[1]
        M <- Z[[ind]]
        if (is.matrix(M)) {             # A single matrix
            ## Orthogonalise the calibration spectra wrt. prevVars
            Mcal <- M[-segment,]
            Mo <- orth(Mcal, prevCalib)

            ## Orthogonalise the prediction spectra
            Mpred <- M[segment,]
            Mpo <- Mpred - prevPred %*% Corth(Mcal, prevCalib)
            ## template: Zorig[i,] - Xorig[i,] %*% Co(Xorig[-i,]) %*% Zorig[-i,]

            ## Estimate a model prevRes ~ orth. spectra + res
            plsM <- pls.fit(Mo, prevRes, ncomp[[ind]])
            ## Save scores:
            calScores <- plsM$scores

            ## Predict new scores and response values
            predScores <- sweep(Mpo, 2, plsM$Xmeans) %*% plsM$projection
            ## FIXME: Only for orth.scores alg:
            ## Predicted responses for 1..ncomp components of this matrix:
            predVals <- array(dim = c(nrow(predScores), dim(plsM$Yloadings)))
            for (a in 1:ncomp[[ind]])
                predVals[,,a] <-
                    sweep(predScores[,1:a] %*% t(plsM$Yloadings[,1:a, drop=FALSE]),
                          2, plsM$Ymeans, "+")

            ## Add the predictions to the outer cvPreds variable
            ## Alt. 1:  Calculate the 1-index indices manuall, and use
            ## single indexing (probably quickest, but requires a loop).
            ## Alt. 2:  Use matrix indexing with an expanded grid.
            ##eg <- expand.grid(segment, 1:nResp, ncomp[[ind]])
            ##indMat <- do.call("cbind", c(eg[1:2], as.list(prevComps), eg[3]))
            ##cvPreds[indMat] <- cvPreds[indMat] + predVals
            ## Alt. 3: Build and eval an expression which does what we want:
            ## The call `dummy' is built to be, e.g.,
            ## cvPreds[segment, , c1+1, ..., -1, , ]: slots 1-3 of the call
            ## are `[`, cvPreds and segment; slot 4 is the empty response
            ## index; the previous component counts (+1 for the
            ## 0-component slot) fix earlier dimensions; -1 drops the
            ## 0-component slot of the current dimension; any remaining
            ## dimensions get empty indices (copies of slot 4).
            ncomps <- length(prevComps)
            nrest <- length(dim(cvPreds)) - ncomps - 3
            dummy <- Quote(cvPreds[segment,])
            dummy[4 + seq(along = prevComps)] <- prevComps + 1
            dummy[5 + ncomps] <- -1
            if (nrest > 0) dummy[5 + ncomps + 1:nrest] <- dummy[rep(4, nrest)]
            ## `<<-` updates cvPreds in the enclosing orthlsplsCv frame:
            eval(substitute(dummy <<- dummy + c(predVals), list(dummy = dummy)))

            ## Return if this is the last matrix/set of matrices
            if (length(indices) == 1) return()

            ## Calculate new residuals
            newResid <- - plsM$fitted.values + c(prevRes)

            ## To save space: drop the model object(s)
            rm(plsM)

            ## Recursively call ourself for each number of components in the
            ## present model
            for (i in 0:ncomp[[ind]])
                Recall(indices[-1], # Remove the index of the current matrix
                       cbind(prevCalib, calScores[,seq(length = i)]), # Add the scores we've used
                       cbind(prevPred, predScores[,seq(length = i), drop=FALSE]), # Add the scores we've predicted
                       c(prevComps, i), # and the number of comps
                       if (i > 0) newResid[,,i] else prevRes) # update the residual

        } else {                        # List of parallell matrices
            Scal <- list()              # The current calibration scores
            Spred <- list()             # The current prediction scores
            for (j in seq(along = M)) {
                ## Orthogonalise the calibration spectra wrt. prevVars
                Mcal <- M[[j]][-segment,]
                Mo <- orth(Mcal, prevCalib)

                ## Orthogonalise the prediction spectra
                Mpred <- M[[j]][segment,]
                Mpo <- Mpred - prevPred %*% Corth(Mcal, prevCalib)

                ## Estimate a model prevRes ~ orth. spectra + res
                plsM <- pls.fit(Mo, prevRes, ncomp[[ind]][j])
                ## Save scores:
                Scal[[j]] <- plsM$scores

                ## Predict new scores
                Spred[[j]] <- sweep(Mpo, 2, plsM$Xmeans) %*% plsM$projection
            }
            ## To save space: drop the model object
            rm(plsM)

            ## Loop over the different combinations of #comps:
            nComps <- expand.grid(lapply(ncomp[[ind]], seq, from = 0))
            for (cind in 1:nrow(nComps)) {
                newComps <- nComps[cind,]
                comps <- c(prevComps, unlist(newComps))
                ## Predict new response values: collect the first b columns
                ## of each score matrix, per the current combination:
                calScores <-
                    do.call("cbind", mapply(function(B, b) B[,seq(length=b), drop=FALSE],
                                            Scal, newComps, SIMPLIFY = FALSE))
                predScores <-
                    do.call("cbind", mapply(function(B, b) B[,seq(length=b), drop=FALSE],
                                            Spred, newComps, SIMPLIFY = FALSE))
                if (all(newComps == 0)) {
                    newResid <- prevRes
                } else {
                    lsS <- lm.fit(calScores, prevRes) # FIXME: How about intercept/numerical accurracy?
                    newResid <- lsS$residuals
                    predVals <- predScores %*% lsS$coefficients
                    rm(lsS)
                    ## Add the predictions to the outer cvPreds variable.  Build
                    ## and eval an expression which does what we want:
                    ## (same index-expression construction as in the
                    ## single-matrix branch above, but here the current
                    ## combination's counts are part of `comps')
                    nc <- length(comps)
                    nrest <- length(dim(cvPreds)) - nc - 2
                    dummy <- Quote(cvPreds[segment,])
                    dummy[4 + seq(along = comps)] <- comps + 1
                    if (nrest > 0) dummy[4 + nc + 1:nrest] <- dummy[rep(4, nrest)]
                    eval(substitute(dummy <<- dummy + c(predVals),
                                    list(dummy = dummy)))
                }

                if (length(indices) > 1) { # There are more matrices to fit
                    ## Recursively call ourself
                    Recall(indices[-1], # Remove the index of the current matrices
                           cbind(prevCalib, calScores), # Add the scores we've used
                           cbind(prevPred, predScores), # Add the scores we've predicted
                           c(comps), # and the number of comps
                           newResid) # use the new residual

                }
            } ## for
        } ## if
    } ## recursive function

    ## Setup:
    nObs <- nrow(X)
    nResp <- ncol(Y)
    ## cvPreds: the cross-validated predictions:
    cvPreds <- array(0, dim = c(nObs, nResp, unlist(ncomp) + 1))
    ## Build an unevaluated expression that will insert the predictions into
    ## cvPreds[segment,,...,] when evaluated:
    ndim <- length(dim(cvPreds))
    ## This creates an expression with the empty index argument repeated as
    ## many times as neccessary:
    dummy <- Quote(cvPreds[segment,])[c(1:3, rep(4, ndim - 1))]
    ## Substitute this in an assignment statement:
    addPredictions <- substitute(dummy <- predVals, list(dummy = dummy))

    ## Choose PLS fit algorithm.  FIXME: Support other algs?
    pls.fit <- oscorespls.fit

    ## The main cross-validation loop
    temp <- 0
    for (segment in segments) {
        ## Progress indicator (prints the current segment number):
        cat(temp <- temp + 1, "")
        ## Handle X: plain least squares on the kept rows, predict held-out:
        lsX <- lm.fit(X[-segment,, drop = FALSE], Y[-segment,, drop = FALSE])
        resid <- lsX$residuals
        predVals <- X[segment,, drop = FALSE] %*% lsX$coefficients

        ## Insert the predictions into the cvPred array:
        eval(addPredictions)

        ## Handle the rest of the matrices:
        cvPredRest(indices = 1:length(ncomp),
                   prevCalib = X[-segment,, drop = FALSE],
                   prevPred = X[segment,, drop = FALSE],
                   prevComps = c(),
                   prevRes = resid)
    }
    ## Label the component dimensions 0..ncomp:
    dimnames(cvPreds) <- c(list(1:nObs, 1:nResp),
                           lapply(unlist(ncomp), function(x) 0:x))
    return(cvPreds)
} ## function
### plots.R:  Plot functions
### $Id: plots.R 16 2006-01-06 14:50:56Z bhm $

###
### Plot method for lspls objects
###

plot.lspls <- function(x, plottype = c("scores", "loadings"), ...) {
    ## Plot method for fitted lspls models: dispatch to either the score
    ## plot or the loading plot, depending on `plottype'.
    plottype <- match.arg(plottype)
    if (plottype == "scores") {
        scoreplot.lspls(x, ...)
    } else {
        loadingplot.lspls(x, ...)
    }
}


###
### Scoreplot
###

scoreplot.lspls <- function(object, ...) {
    ## Show one score plot per pls sub-model of an lspls fit, prompting
    ## the user between plots.  Plot settings are restored on exit.
    opar <- par(no.readonly = TRUE)
    on.exit(par(opar))
    par(ask = TRUE)
    for (blk in seq_along(object$scores)) {
        cur <- object$scores[[blk]]
        if (is.matrix(cur)) {
            ## A single matrix was used in this step:
            scoreplot(cur, comps = 1:object$ncomp[[blk]], main = blk, ...)
        } else {
            ## A list of parallell matrices was used in this step:
            for (sub in seq_along(cur))
                scoreplot(cur[[sub]], comps = 1:object$ncomp[[blk]][sub],
                          main = paste(blk, sub, sep = "."), ...)
        }
    }
}


###
### Loadingplot
###

loadingplot.lspls <- function(object, ...) {
    ## Show the loading plots of all pls sub-models of an lspls fit in a
    ## single grid of panels.  Plot settings are restored on exit.
    opar <- par(no.readonly = TRUE)
    on.exit(par(opar))
    par(mfrow = n2mfrow(length(unlist(object$ncomp))))
    for (blk in seq_along(object$loadings)) {
        cur <- object$loadings[[blk]]
        if (is.matrix(cur)) {
            ## A single matrix was used in this step:
            loadingplot(cur, comps = 1:object$ncomp[[blk]], main = blk, ...)
        } else {
            ## A list of parallell matrices was used in this step:
            for (sub in seq_along(cur))
                loadingplot(cur[[sub]], comps = 1:object$ncomp[[blk]][sub],
                            main = paste(blk, sub, sep = "."), ...)
        }
    }
}


###
### Plot method for lsplsCv objects:
###
## FIXME: Should maybe be a plot method for (R)MSEP objects...

## Plot method for lsplsCv objects: plots the chosen validation statistic
## against the total number of components.  Each point is labelled with
## the per-matrix component combination, and a dashed line is drawn
## through the minimum at each total.
##
## Arguments:
##   x     -- an object of class "lsplsCv".
##   which -- validation statistic: "RMSEP" (default) or "MSEP".
##   ...   -- further arguments to plot().
## FIXME: Should maybe be a plot method for (R)MSEP objects...
plot.lsplsCv <- function(x, which = c("RMSEP", "MSEP"), ...) {
    which <- match.arg(which)
    val <- do.call(which, list(x))
    ## All combinations of component numbers (from the dimnames of val):
    comps <- expand.grid(lapply(dimnames(val)[-1], as.numeric))
    ncomps <- rowSums(comps)            # total #components per combination
    ncombs <- nrow(comps)
    complabels <- apply(comps, 1, paste, collapse = "")
    mXlab <- "total number of components"
    mYlab <- which
    nResp <- dim(val)[1]
    if (nResp > 1) {
        ## One panel per response, with shared outer-margin axis labels:
        opar <- par(no.readonly = TRUE)
        on.exit(par(opar))
        par(mfrow = n2mfrow(nResp), oma = c(1, 1, 0, 0) + 0.1,
            mar = c(3, 3, 3, 1) + 0.1)
        xlab <- ""
        ylab <- ""
    } else {
        xlab <- mXlab
        ylab <- mYlab
    }
    val <- aperm(val, c(2:length(dim(val)), 1)) # Make "resp" the last dimension
    for (i in seq_len(nResp)) {
        ## Values for response i (val is now laid out with resp last):
        cval <- c(val)[ncombs * (i - 1) + 1:ncombs]
        plot(ncomps, cval, type = "n", xlab = xlab, ylab = ylab, main = i, ...)
        text(ncomps, cval, labels = complabels)
        ## Dashed line through the minimum value for each total #components.
        ## FIX: the original reused `i' as the inner loop variable,
        ## shadowing the outer response index; use `j' instead.
        oncomps <- min(ncomps):max(ncomps)
        minval <- numeric(length(oncomps))
        for (j in seq_along(oncomps))
            minval[j] <- min(cval[ncomps == oncomps[j]])
        lines(oncomps, minval, lty = 2, col = 2)
    } ## for
    if (nResp > 1) {
        ## Add outer margin text:
        mtext(mXlab, side = 1, outer = TRUE)
        mtext(mYlab, side = 2, outer = TRUE)
    }
} ## function
### predict.lspls: predict method
### $Id: predict.lspls.R 9 2005-12-22 13:01:36Z bhm $

## The plan:  Build up a new `predictors' by calculateing new
## scores, and use object$coefficients to get new predictions.

## predict.lspls: predict method for lspls objects.
##
## The plan: Build up a new `predictors' matrix by calculating new
## scores for each pls sub-model, and use object$coefficients to get
## new predictions.
##
## Arguments:
##   object    -- a fitted "lspls" model.
##   newdata   -- data frame holding the matrices to predict from; if
##                missing or NULL, the stored model frame is reused.
##   type      -- "response" (default) for predicted responses, or
##                "scores" for the matrix of prediction variables.
##   na.action -- how to handle missing values in newdata.
##   ...       -- ignored.
predict.lspls <- function(object, newdata, type = c("response", "scores"),
                          na.action = na.pass,...) {
    ## Check args:
    type <- match.arg(type)
    ## Build new data matrices:
    tt <- delete.response(terms(object))
    if (missing(newdata) || is.null(newdata))
        ## Reuse the stored model frame; drop the response column:
        mf <- model.frame(object)[-1]
    else {
        mf <- model.frame(tt, newdata, na.action = na.action)
        if (!is.null(cl <- attr(tt, "dataClasses")))
            .checkMFClasses(cl, mf)
    }
    ## One predictor matrix per formula term, in formula order:
    matrices <- apply(attr(tt, "factors"), 2, function(x) mf[,which(x > 0)])
    newX <- matrices[[1]]
    newZ <- matrices[-1]

    ## Parametres:
    nObs <- nrow(newX)

    ## Containers:
    V <- matrix(nrow = nObs, ncol = ncol(object$predictors))

    ## Start with X:
    nVar <- ncol(newX)
    V[,1:nVar] <- newX

    ## Walk through the plsr models, reconstructing the scores for each
    ## step from the stored orthogonalisation coefficients and models:
    for (i in seq(along = object$models)) {
        ##cat("i =", i, "\n")
        M <- newZ[[i]]
        if (is.matrix(M)) {             # Single matrix
            Mo <- M  - V[,1:nVar] %*% object$orthCoefs[[i]]  # Orth. M
            ## Centre with the training means and project onto the model:
            V[,nVar + (1:object$ncomp[[i]])] <-
                sweep(Mo, 2, object$models[[i]]$Xmeans) %*%
                    object$models[[i]]$projection
            nVar <- nVar + object$ncomp[[i]]
        } else {                        # Parallell matrices
            ## The variables to be added in the present step:
            Vadd <- matrix(nrow = nObs, ncol = sum(object$ncomp[[i]]))
            added <- 0
            for (j in seq(along = M)) {
                ##cat("j =", j, "\n")
                ## Walk through Z[[i]]
                Mo <- M[[j]]  - V[,1:nVar] %*% object$orthCoefs[[i]][[j]]
                Vadd[,added + (1:object$ncomp[[i]][[j]])] <-
                    sweep(Mo, 2, object$models[[i]][[j]]$Xmeans) %*%
                        object$models[[i]][[j]]$projection
                added <- added + object$ncomp[[i]][[j]]
            }
            V[,nVar + (1:sum(object$ncomp[[i]]))] <- Vadd
            nVar <- nVar + sum(object$ncomp[[i]])
        } # if
    } # for
    ## Now V contains the new values of the prediction variables
    if (type == "scores")
        return(V)
    else
        return(V %*% object$coefficients)
} # function
###
### Projection utilities
###

orth <- function(M, N) {                # O_N M
    ## Component of M orthogonal to the column space of N: M - P_N M.
    ## Optimised per the old FIXME: solve(A, b) solves the normal
    ## equations directly instead of forming the explicit inverse
    ## (faster and numerically more stable).
    M - N %*% solve(crossprod(N), crossprod(N, M))
}
## This function is currently not used in the code:
project <- function(M, N) {  #P_N, M)
    ## FIXME: Must be optimised:
    N %*% solve(crossprod(N)) %*% crossprod(N, M)
}
Corth <- function(M, N) {               #C_N M
    ## Projection coefficients of M on the column space of N, i.e. the
    ## matrix C such that N %*% C = P_N M.
    ## Optimised per the old FIXME: solve(A, b) instead of an explicit
    ## inverse, consistent with orth() and project().
    solve(crossprod(N), crossprod(N, M))
}
## .First.lib: package load hook (legacy mechanism).
## Turns several pls-package functions into S3 generics on the search
## path, so the methods defined in this package can be dispatched to.
## NOTE(review): .First.lib is only called for packages without a
## namespace; modern packages should use .onLoad/.onAttach -- confirm the
## package is built without a NAMESPACE file.  The `<<-` assignments
## below deliberately write outside this function's environment.
.First.lib <- function(libname, pkgname) {
    ## A small hack (MSEP should be made generic in pls):
    ## keep pls's MSEP as the default method, and install a generic.
    if (!exists("MSEP.default")) {
        MSEP.default <<- MSEP
        MSEP <<- function(object, ...) UseMethod("MSEP")
    }
    ## A small hack (RMSEP should be made generic in pls, or the mvrVal
    ## object should be changed to be a matrix):
    if (!exists("RMSEP.default")) {
        RMSEP.default <<- RMSEP
        RMSEP <<- function(object, ...) UseMethod("RMSEP")
    }
    ## Idea: Make `scoreplot' in pls generic, with methods for matrix,
    ## scores(?), lspls and default (anything that has a 'scores' method that
    ## gives a single matrix).
    ## Dirty hack:
    if (!exists("scoreplot.default")) {
        scoreplot.default <<- scoreplot
        scoreplot <<- function(object, ...) UseMethod("scoreplot")
    }
    ## Idea: Make `loadingplot' in pls generic, with methods for matrix,
    ## loadings(?), lspls and default (anything that has a 'loadings' method that
    ## gives a single matrix).
    ## Dirty hack:
    if (!exists("loadingplot.default")) {
        loadingplot.default <<- loadingplot
        loadingplot <<- function(object, ...) UseMethod("loadingplot")
    }
}
