.packageName <- "boost"
adaboost <- function(xlearn, ylearn, xtest, presel = 200, mfinal = 100)
  {
    ## Discrete AdaBoost with regression stumps as weak learners.
    ## Returns a matrix of test-set class-1 probability estimates with one
    ## column per boosting iteration.
    ##
    ## xlearn/xtest: numeric feature matrices (samples x genes)
    ## ylearn:       0/1 training labels
    ## presel:       number of genes kept by Wilcoxon preselection (0 = all)
    ## mfinal:       number of boosting iterations

    ## Optional feature preselection: two-sided Wilcoxon statistic, keep
    ## the 'presel' genes farthest from the no-information value.
    if (presel > 0)
      {
        wilcox <- apply(xlearn, 2, score, ylearn)
        twoside <- apply(rbind(wilcox,
                               sum(ylearn == 0) * sum(ylearn == 1) - wilcox),
                         2, max)
        keep   <- rev(order(twoside))[1:presel]
        xlearn <- xlearn[, keep]
        xtest  <- xtest[, keep, drop = FALSE]
      }

    ## Initialization: zero scores, uniform case weights
    n.learn <- nrow(xlearn)
    n.test  <- nrow(xtest)
    Flearn  <- numeric(n.learn)
    Ftest   <- numeric(n.test)
    ptest   <- matrix(0, n.test, mfinal)
    w       <- rep(1 / n.learn, n.learn)

    ## Recode class labels from {0, 1} to {-1, +1}
    ylearn[ylearn == 0] <- -1

    ## Boosting iterations
    for (m in 1:mfinal)
      {
        ## Weighted stump fit.  NOTE(review): 'method' and 'args' are not
        ## defined here; this works only because learner() never evaluates
        ## them (R lazy evaluation) -- kept exactly as in the original.
        fit    <- learner(ylearn, w, xlearn, xtest, method, args, bag = 1)
        flearn <- ifelse(fit$learn > 0.5, 1, -1)
        ftest  <- ifelse(fit$test > 0.5, 1, -1)

        ## Weighted training error of this stump
        err <- sum(w * (flearn != ylearn))
        if (err > 0)
          {
            ## Usual AdaBoost step: reweight misclassified cases,
            ## renormalize, floor the weights to keep them positive.
            alpha <- log((1 - err) / err)
            w     <- w * exp(alpha * (flearn != ylearn))
            w     <- pmax(w / sum(w), 1e-24)
          }
        else
          {
            ## Perfect fit: fixed step size 1, weights stay unchanged.
            alpha <- 1
          }
        Flearn <- Flearn + alpha * flearn
        Ftest  <- Ftest + alpha * ftest

        ## Probability estimate after m steps (logistic link on 2F)
        ptest[, m] <- 1 / (1 + exp(-2 * Ftest))
      }

    ## Output
    ptest
  }

bagboost <- function(xlearn, ylearn, xtest, presel = 200,
                     mfinal = 100, bag = 50)
  {
    ## LogitBoost with bagged regression stumps: each iteration fits 'bag'
    ## bootstrap stumps to the working response and averages them.
    ## Returns a matrix of test-set class-1 probabilities, one column per
    ## boosting iteration.
    ##
    ## xlearn/xtest: numeric feature matrices (samples x genes)
    ## ylearn:       0/1 training labels
    ## presel:       number of genes kept by Wilcoxon preselection (0 = all)
    ## mfinal:       number of boosting iterations
    ## bag:          number of bootstrap stumps averaged per iteration

    ## Optional feature preselection (two-sided Wilcoxon statistic)
    if (presel > 0)
      {
        wilcox <- apply(xlearn, 2, score, ylearn)
        twoside <- apply(rbind(wilcox,
                               sum(ylearn == 0) * sum(ylearn == 1) - wilcox),
                         2, max)
        keep   <- rev(order(twoside))[1:presel]
        xlearn <- xlearn[, keep]
        xtest  <- xtest[, keep, drop = FALSE]
      }

    ## Initialization: zero scores, probabilities start at 1/2
    n.learn <- nrow(xlearn)
    n.test  <- nrow(xtest)
    Flearn  <- numeric(n.learn)
    Ftest   <- numeric(n.test)
    plearn  <- rep(0.5, n.learn)
    ptest   <- matrix(0, n.test, mfinal)

    ## Boosting iterations
    for (m in 1:mfinal)
      {
        ## LogitBoost working response z and case weights w
        ## (weights floored so z stays finite)
        w <- pmax(plearn * (1 - plearn), 1e-24)
        z <- (ylearn - plearn) / w

        ## Bagged stump fit.  NOTE(review): 'method' and 'args' are not
        ## defined here; this works only because learner() never evaluates
        ## them (R lazy evaluation) -- kept exactly as in the original.
        fit <- learner(z, w, xlearn, xtest, method, args, bag)

        ## Half-step update and logistic probabilities
        Flearn <- Flearn + 0.5 * fit$learn
        Ftest  <- Ftest + 0.5 * fit$test
        plearn <- 1 / (1 + exp(-2 * Flearn))
        ptest[, m] <- 1 / (1 + exp(-2 * Ftest))
      }

    ## Output
    ptest
  }

l2boost <- function(xlearn, ylearn, xtest, presel = 200, mfinal = 100)
  {
    ## L2-loss boosting with stumps: each iteration fits a stump to the
    ## current residuals and adds it to the score, which is clipped to
    ## [-1, 1].  Returns a matrix of test-set class-1 probabilities, one
    ## column per boosting iteration.
    ##
    ## xlearn/xtest: numeric feature matrices (samples x genes)
    ## ylearn:       0/1 training labels
    ## presel:       number of genes kept by Wilcoxon preselection (0 = all)
    ## mfinal:       number of boosting iterations

    ## Optional feature preselection (two-sided Wilcoxon statistic)
    if (presel > 0)
      {
        wilcox <- apply(xlearn, 2, score, ylearn)
        twoside <- apply(rbind(wilcox,
                               sum(ylearn == 0) * sum(ylearn == 1) - wilcox),
                         2, max)
        keep   <- rev(order(twoside))[1:presel]
        xlearn <- xlearn[, keep]
        xtest  <- xtest[, keep, drop = FALSE]
      }

    ## Initialization
    n.learn <- nrow(xlearn)
    n.test  <- nrow(xtest)
    Flearn  <- rep(0, n.learn)
    Ftest   <- rep(0, n.test)
    ptest   <- matrix(0, n.test, mfinal)

    ## Recode class labels from {0, 1} to {-1, +1}
    ylearn[ylearn == 0] <- -1

    ## Boosting iterations
    for (m in 1:mfinal)
      {
        ## Stump fit to the residuals with unit weights.  NOTE(review):
        ## 'method' and 'args' are not defined here; this works only
        ## because learner() never evaluates them (R lazy evaluation) --
        ## kept exactly as in the original.
        resid <- ylearn - Flearn
        fit   <- learner(resid, rep(1, n.learn), xlearn, xtest,
                         method, args, bag = 1)

        ## Add the fitted stump and clip the scores to [-1, 1]
        Flearn <- pmax(pmin(Flearn + fit$learn, 1), -1)
        Ftest  <- pmax(pmin(Ftest + fit$test, 1), -1)

        ## Probability estimate after m steps (logistic link on 2F)
        ptest[, m] <- 1 / (1 + exp(-2 * Ftest))
      }

    ## Output
    ptest
  }

learner <- function(y, w, xlearn, xtest, method, args, bag)
  {
    ## Weak learner: fits 'bag' (possibly bootstrapped) regression stumps
    ## with rpart and returns the averaged predictions on the training and
    ## test sets as list(learn = ..., test = ...).
    ## NOTE(review): 'method' and 'args' are accepted but never used in
    ## this body; the callers in this file pass them as undefined symbols,
    ## which only works because R never forces these promises.

    ## Definitions
    learn  <- dim(xlearn)[1]
    test   <- dim(xtest)[1]
    blearn <- matrix(0, bag, learn)
    btest  <- matrix(0, bag, test)
    
    ## Currently only stumps as learners are supported, no choice of args!!!
    ## maxdepth = 1 yields a single split (a stump); surrogate splits,
    ## competitor bookkeeping, complexity pruning (cp = 0) and
    ## cross-validation (xval = 0) are all switched off for speed.
    cntrl <- rpart.control(maxdepth = 1, minsplit = learn-1, #minbucket = 1,
                           maxsurrogate = 0, usesurrogate=0, maxcompete = 1,
                           cp = 0, xval = 0)
    
    ## Bagging stumps/trees
    if (bag==1)
      {
        ## Single stump on the full, weighted training data.  The formula
        ## is written against the symbol 'bx'; 'bx' is then reassigned to
        ## xtest before predict() so the test predictions come out of the
        ## same fitted formula.  NOTE(review): this appears to rely on
        ## predict()'s model.frame falling back to this function's
        ## environment to find 'bx' -- confirm before any refactoring.
        bx         <- xlearn
        fit        <- rpart(y~bx, weights = w/mean(w), control = cntrl)
        bx         <- xtest
        blearn[1,] <- predict(fit)  # fitted values on the training data
        btest[1,]  <- predict(fit, newdata = data.frame(bx))
      }
    if (bag>1)
      {
        for (b in 1:bag)
          {
            ## One bootstrap replicate: resample cases with replacement,
            ## fit a stump on the resample, then predict on the full
            ## training set and on the test set (same 'bx' reassignment
            ## trick as in the bag == 1 branch above).
            indices    <- sample(1:learn, learn, replace = TRUE)
            by         <- y[indices]
            bw         <- w[indices]
            bx         <- xlearn[indices,]
            fit        <- rpart(by~bx, weights=bw/mean(bw), control=cntrl)
            bx         <- xlearn
            blearn[b,] <- predict(fit, newdata = data.frame(bx))
            bx         <- xtest
            btest[b,]  <- predict(fit, newdata = data.frame(bx))
          }
      }

    ## Output: predictions averaged over the 'bag' stumps
    list(learn = apply(blearn, 2, mean), test = apply(btest, 2, mean))
  }

logitboost <- function(xlearn, ylearn, xtest, presel = 200, mfinal = 100)
  {
    ## LogitBoost with a single (unbagged) stump per iteration.
    ##
    ## The previous body was a verbatim copy of bagboost() with bag = 1,
    ## so delegate to bagboost() instead of keeping a third copy of the
    ## boosting loop in sync by hand.  Interface and output are unchanged.
    ##
    ## xlearn/xtest: numeric feature matrices (samples x genes)
    ## ylearn:       0/1 training labels
    ## presel:       number of genes kept by Wilcoxon preselection (0 = all)
    ## mfinal:       number of boosting iterations
    ## Returns a matrix of test-set class-1 probabilities, one column per
    ## boosting iteration.
    bagboost(xlearn, ylearn, xtest, presel = presel, mfinal = mfinal,
             bag = 1)
  }

response1 <- function(zufall.x, zufall.y, gene = NULL, signs = NULL)
  {
    ## Additive logistic response model: simulates 0/1 labels whose logit
    ## is linear in the (sign-oriented) sum of the 10 most discriminative
    ## genes of 'zufall.x'.
    ##
    ## zufall.x: numeric matrix (samples x genes)
    ## zufall.y: 0/1 labels, used only to preselect genes
    ## gene:     optional gene indices to use (as returned by an earlier call)
    ## signs:    optional +1/-1 orientations, parallel to 'gene'
    ## Returns list(probab, y, bayes, gene, signs): class-1 probabilities,
    ## simulated labels, the Bayes error and the selection used.

    ## Searching discriminative genes (two-sided Wilcoxon around N0*N1/2)
    if (is.null(gene) && is.null(signs))
      {
        nog    <- 10
        scores <- apply(zufall.x, 2, score, zufall.y)
        center <- (table(zufall.y)[1] * table(zufall.y)[2]) / 2
        signs  <- ((scores < center) - 0.5) * 2    # logical -> +1/-1
        scores <- abs(scores - center)
        gene   <- rev(order(scores))[1:nog]
        signs  <- signs[gene]
      }

    ## Matrix of (flipped) genes for the response model.
    ## drop = FALSE keeps a matrix even when only one gene is selected.
    nos          <- dim(zufall.x)[1]
    zufall.klein <- zufall.x[, gene, drop = FALSE]
    ## BUG FIX: 'signs' is already parallel to 'gene', so the original
    ## 'which(signs[gene] < 0)' indexed out of range (producing NA) and
    ## flipped the wrong columns or none at all.
    change       <- which(signs < 0)
    zufall.klein[, change] <- -zufall.klein[, change]

    ## Determine probabilities, y-labels and Bayes error
    wert   <- rowSums(zufall.klein)
    wert   <- 5 * ((wert - mean(wert)) / sd(wert))  # standardize, scale by 5
    probab <- exp(wert) / (1 + exp(wert))
    simu.y <- as.numeric(rbinom(nos, 1, probab))    # vectorized Bernoulli draws
    bayes  <- sum((probab * (probab <= 0.5)) + ((1 - probab) * (probab > 0.5))) / nos

    ## Output
    attributes(probab) <- NULL
    list(probab = probab, y = simu.y, bayes = bayes, gene = gene, signs = signs)
  }

response2 <- function(zufall.x, zufall.y, gene = NULL, signs = NULL)
  {
    ## Linear logistic response model with random positive coefficients:
    ## the logit is a random linear combination of the 25 most
    ## discriminative (sign-oriented) genes of 'zufall.x'.
    ##
    ## zufall.x: numeric matrix (samples x genes)
    ## zufall.y: 0/1 labels, used only to preselect genes
    ## gene:     optional gene indices to use (as returned by an earlier call)
    ## signs:    optional +1/-1 orientations, parallel to 'gene'
    ## Returns list(probab, y, bayes, gene, signs) with the same contract
    ## as response1() and response3().

    ## Searching discriminative genes (two-sided Wilcoxon around N0*N1/2)
    if (is.null(gene) && is.null(signs))
      {
        nog    <- 25
        scores <- apply(zufall.x, 2, score, zufall.y)
        center <- (table(zufall.y)[1] * table(zufall.y)[2]) / 2
        signs  <- ((scores < center) - 0.5) * 2    # logical -> +1/-1
        scores <- abs(scores - center)
        gene   <- rev(order(scores))[1:nog]
        signs  <- signs[gene]
      }

    ## Matrix of (flipped) genes for the response model.
    ## drop = FALSE keeps a matrix even when only one gene is selected.
    nos          <- dim(zufall.x)[1]
    zufall.klein <- zufall.x[, gene, drop = FALSE]
    ## BUG FIX: 'signs' is already parallel to 'gene' (see response1);
    ## indexing it with 'gene' again picked wrong or NA entries.
    change       <- which(signs < 0)
    zufall.klein[, change] <- -zufall.klein[, change]

    ## Determine probabilities, y-labels and Bayes error.
    ## The coefficient count follows length(gene) instead of the former
    ## hard-coded 25, so user-supplied selections of any size work
    ## (default behaviour with 25 internally selected genes is unchanged).
    coef   <- runif(length(gene), 1, 3.5)
    wert   <- drop(zufall.klein %*% coef)           # vector, not n x 1 matrix
    wert   <- 5 * ((wert - mean(wert)) / sd(wert))
    probab <- exp(wert) / (1 + exp(wert))
    simu.y <- as.numeric(rbinom(nos, 1, probab))
    bayes  <- sum((probab * (probab <= 0.5)) + ((1 - probab) * (probab > 0.5))) / nos

    ## Output.  BUG FIX: the original 'attributes(probab)' discarded its
    ## result; assign NULL so probab is a plain vector as in response1/3.
    attributes(probab) <- NULL
    list(probab = probab, y = simu.y, bayes = bayes, gene = gene, signs = signs)
  }

response3 <- function(zufall.x, zufall.y, gene = NULL, signs = NULL)
  {
    ## Nonlinear logistic response model: the logit is a product of three
    ## centered random linear combinations of the 25 most discriminative
    ## (sign-oriented) genes, i.e. it contains multiplicative interactions.
    ##
    ## zufall.x: numeric matrix (samples x genes)
    ## zufall.y: 0/1 labels, used only to preselect genes
    ## gene:     optional gene indices to use (as returned by an earlier call)
    ## signs:    optional +1/-1 orientations, parallel to 'gene'
    ## Returns list(probab, y, bayes, gene, signs) with the same contract
    ## as response1() and response2().

    ## Searching discriminative genes (two-sided Wilcoxon around N0*N1/2)
    if (is.null(gene) && is.null(signs))
      {
        nog    <- 25
        scores <- apply(zufall.x, 2, score, zufall.y)
        center <- (table(zufall.y)[1] * table(zufall.y)[2]) / 2
        signs  <- ((scores < center) - 0.5) * 2    # logical -> +1/-1
        scores <- abs(scores - center)
        gene   <- rev(order(scores))[1:nog]
        signs  <- signs[gene]
      }

    ## Matrix of (flipped) genes for the response model.
    ## drop = FALSE keeps a matrix even when only one gene is selected.
    nos          <- dim(zufall.x)[1]
    zufall.klein <- zufall.x[, gene, drop = FALSE]
    ## BUG FIX: 'signs' is already parallel to 'gene' (see response1);
    ## indexing it with 'gene' again picked wrong or NA entries.
    change       <- which(signs < 0)
    zufall.klein[, change] <- -zufall.klein[, change]

    ## Determine probabilities, y-labels and Bayes error.
    ## Coefficient counts follow length(gene) instead of the former
    ## hard-coded 25 (default behaviour unchanged); the draw order
    ## betas -> gammas -> deltas is preserved so seeded runs reproduce.
    betas  <- runif(length(gene), 0, 2)
    gammas <- runif(length(gene), 0, 0.2)
    deltas <- runif(length(gene), 0, 0.1)
    term1  <- drop(zufall.klein %*% betas)
    term2  <- drop(zufall.klein %*% gammas)
    term3  <- drop(zufall.klein %*% deltas)
    term1  <- term1 - mean(term1)
    term2  <- term2 - mean(term2)
    term3  <- term3 - mean(term3)
    wert   <- term1 * (1 + term2) * (1 + term3)
    wert   <- 10 * ((wert - mean(wert)) / sd(wert))
    wert   <- pmin(200, wert)        # cap the logit so exp() cannot overflow
    probab <- exp(wert) / (1 + exp(wert))
    simu.y <- as.numeric(rbinom(nos, 1, probab))
    bayes  <- sum((probab * (probab <= 0.5)) + ((1 - probab) * (probab > 0.5))) / nos

    ## Output
    attributes(probab) <- NULL
    list(probab = probab, y = simu.y, bayes = bayes, gene = gene, signs = signs)
  }

score <- function(x, y)
  {
    ## Wilcoxon rank-sum statistic W comparing feature x between the
    ## class-0 and class-1 samples (larger/smaller W = better separation).
    class0 <- x[which(y == 0)]
    class1 <- x[which(y == 1)]
    wilcox.test(class0, class1)$statistic
  }
simulator <- function(x, y, respmod = c("none", "resp1", "resp2", "resp3"),
                      nos = 1200, gene = NULL, signs = NULL)
  {
    ## Simulate 'nos' gene-expression profiles that mimic the class-wise
    ## means and covariance structure of (x, y), then optionally attach
    ## labels from one of the response models.
    ##
    ## x, y:    template data (matrix of profiles, 0/1 labels)
    ## respmod: "none" keeps the simulated class labels; "resp1"-"resp3"
    ##          relabel via the corresponding response model
    ## nos:     total number of simulated profiles (split roughly in half)
    ## gene, signs: passed through to the response model
    ## Returns list(x = simulated matrix, ...response model output).

    ## Argument matching
    respmod <- match.arg(respmod)

    ## Per class: mean vector and a square root of the covariance matrix
    ## via its eigen decomposition (tiny/negative eigenvalues clipped to 0).
    moments <- function(class.x)
      {
        decomp <- eigen(var(class.x), symmetric = TRUE)
        ev     <- decomp$values
        ev[ev < 10^-10] <- 0
        list(root = decomp$vectors %*% diag(sqrt(ev)),
             center = colMeans(class.x))
      }
    mom.0 <- moments(x[which(y == 0), ])
    mom.1 <- moments(x[which(y == 1), ])

    ## Draw 'nn' multivariate normal profiles with the given moments.
    nvars <- ncol(x)
    draw <- function(nn, mom)
      {
        noise <- matrix(rnorm(nvars * nn), nvars)
        shift <- matrix(rep(mom$center, nn), nn, nvars, byrow = TRUE)
        t(mom$root %*% noise) + shift
      }

    ## Simulation of gene expression profiles; class 0 is drawn before
    ## class 1 so the RNG stream matches the original implementation.
    n1       <- round(nos / 2)
    n2       <- nos - n1
    zufall.0 <- draw(n1, mom.0)
    zufall.1 <- draw(n2, mom.1)
    zufall.x <- rbind(zufall.0, zufall.1)
    zufall.y <- c(rep(0, n1), rep(1, n2))

    ## Calling the response model
    response <- switch(respmod,
                       none  = list(y = zufall.y),
                       resp1 = response1(zufall.x, zufall.y, gene, signs),
                       resp2 = response2(zufall.x, zufall.y, gene, signs),
                       resp3 = response3(zufall.x, zufall.y, gene, signs))

    ## That's it!
    dimnames(zufall.x) <- NULL
    c(list(x = zufall.x), response)
  }

summarize <- function(boost.out, resp, mout = ncol(boost.out), grafik = TRUE,
                      main = "LogitBoost")
  {
    ## Report misclassification rates of a boosting run.
    ##
    ## boost.out: matrix of class-1 probabilities, one column per boosting
    ##            step (as returned by adaboost/bagboost/l2boost/logitboost)
    ## resp:      true 0/1 labels of the test set
    ## mout:      boosting step at which the "fixed" error rate is reported
    ## grafik:    if TRUE, plot the error-rate curve
    ## main:      plot title.  Previously hard-coded to "LogitBoost" even
    ##            though the file provides several boosters; now a
    ##            parameter with the old value as default (backward
    ##            compatible).
    ## Prints a small report and returns the reported figures invisibly.

    ## Error rate after each boosting step (probability > 0.5 => class 1)
    mcra <- apply(((boost.out > 0.5) * 1) != resp, 2, mean)
    mini <- which.min(mcra)
    mcrs <- round(min(mcra), 4)
    mcrf <- round(mean(((boost.out[, mout] > 0.5) * 1) != resp), 4)
    cat("\n")
    cat("Minimal mcr:  ", mcrs, "achieved after", mini, "boosting step(s)\n")
    cat("Fixed mcr:    ", mcrf, "achieved after", mout, "boosting step(s)\n")
    if (grafik)
      {
        plot(mcra, xlab = "Boosting steps", ylab = "Error rate",
             main = main, type = "l")
      }
    ## Make the printed figures available to callers as well.
    invisible(list(mcr.min = mcrs, step.min = mini,
                   mcr.final = mcrf, step.final = mout))
  }
    
## Dependency: rpart supplies the stump weak learner used by learner().
## library() stops immediately if rpart is missing, whereas the original
## require() only warned and returned FALSE, deferring the failure to the
## first boosting call.
library(rpart)
