.packageName <- "benchmark"
#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/algperf-beplot0.R"


#' @param x An object
#' @param ... Additional arguments
#' @rdname benchmark-generics
#' @export
beplot0 <- function(x, ...) {
  # S3 generic for the benchmark experiment ("podium") plot; dispatches
  # on the class of `x` (methods in this file: AlgorithmPerformance, matrix).
  UseMethod("beplot0")
}



#' Benchmark experiment plot.
#'
#' The benchmark experiment plot visualizes each benchmark
#' experiment run. The x-axis is a podium with as many places
#' as algorithms. For each benchmark run, the algorithms are
#' sorted according to their performance values and a dot is
#' drawn on the corresponding place. To visualize the count of
#' an algorithm on a specific position, a bar plot is shown for
#' each of podium places.
#'
#' @param x A \code{\link[=warehouse]{AlgorithmPerformance}} object
#' @param xlab A title for the x axis
#' @param ylab A title for the y axis
#' @param lines.show Connect dots of same benchmark runs
#' @param lines.col Line color
#' @param lines.alpha Alpha value of the line color
#' @param lines.lwd Line width
#' @param dots.pch Dot symbol
#' @param dots.cex Dot symbol expansion
#' @param places.lty Type of separator line between podium places
#' @param places.col Color of separator line between podium places
#' @param legendfn Function which draws a legend
#' @param ... Ignored
#' @return Return value of underlying \code{beplot0.matrix}
#' @method beplot0 AlgorithmPerformance
#' @references
#'   See \emph{Eugster and Leisch (2008)} and \emph{Eugster et al. (2008)}
#'   in \code{citation("benchmark")}.
#' @rdname beplot0
#' @S3method beplot0 AlgorithmPerformance
beplot0.AlgorithmPerformance <- function(x, xlab = NULL, ylab = NULL,
                                         lines.show = FALSE, lines.alpha = 0.2,
                                         lines.lwd = 1, lines.col = col,
                                         dots.pch = 19, dots.cex = 1,
                                         places.lty = 2, places.col = 1,
                                         legendfn = function(algs, cols){
                                             legend("topleft", algs, lwd = 1,
                                                    col = cols, bg = "white")},
                                         ...) {
  # NOTE: the default `lines.col = col` refers to the local `col`
  # assigned further down; R's lazy evaluation of default arguments
  # resolves it only when first used (in the beplot0() call below), by
  # which time `col` exists.

  # This method handles exactly one dataset and one performance measure:
  stopifnot(nlevels(x$datasets[, drop = TRUE]) == 1)
  stopifnot(nlevels(x$performances[, drop = TRUE]) == 1)

  # One column per algorithm, one row per benchmark run:
  m <- do.call(cbind, split(x$value, x$algorithms))

  if ( is.null(xlab) )
    xlab <- "Podium"

  # Default y label is the (single) performance measure's name:
  if ( is.null(ylab) )
    ylab <- levels(x$performances[, drop = TRUE])

  # Colors attached to the object by the warehouse infrastructure:
  col <- attr(x, "algorithm_colors")

  # Delegate the actual drawing to the matrix method:
  beplot0(m, col = col, xlab = xlab, ylab = ylab,
          lines.show = lines.show, lines.alpha = lines.alpha, lines.lwd = lines.lwd,
          lines.col = lines.col,
          dots.pch = dots.pch, dots.cex = dots.cex,
          places.lty = places.lty, places.col = places.col, legendfn = legendfn)
}



#' @param x A matrix (row/column = observations/algorithms)
#' @param col Dot colors
#' @param xlab A title for the x axis
#' @param ylab A title for the y axis
#' @param lines.show Connect dots of same benchmark runs
#' @param lines.col Line color
#' @param lines.alpha Alpha value of the line color
#' @param lines.lwd Line width
#' @param dots.pch Dot symbol
#' @param dots.cex Dot symbol expansion
#' @param places.lty Type of separator line between podium places
#' @param places.col Color of separator line between podium places
#' @param legendfn Function which draws a legend
#' @param ... Ignored
#' @return Undefined
#' @method beplot0 matrix
#' @rdname beplot0
#' @S3method beplot0 matrix
beplot0.matrix <- function(x, col = 1:ncol(x),
                           xlab = NULL, ylab = NULL,
                           lines.show = FALSE, lines.alpha = 0.2,
                           lines.lwd = 1, lines.col = col,
                           dots.pch = 19, dots.cex = 1,
                           places.lty = 2, places.col = 1,
                           legendfn = function(algs, cols){
                             legend("topleft", algs, lwd = 1, col = cols, bg = "white")},
                           ...) {
  # Benchmark experiment ("podium") plot for a plain performance matrix
  # (rows = benchmark runs, columns = algorithms).  Top panel: one dot
  # per run and algorithm, placed in the podium section given by the
  # algorithm's rank in that run.  Bottom panel: bar plot counting how
  # often each algorithm reached each podium place.
  #
  # Fixes vs. the original: spelled-out argument names instead of
  # partial matching (`ties.method`, `heights`) and `FALSE` instead of
  # the reassignable `F`.  No behavior change.

  nalgs <- ncol(x)
  algs <- colnames(x)


  # Medals table (see table.becp): rank the algorithms within each run
  # (ties broken at random) and count, per algorithm, how often each
  # place was reached.
  ranks <- t(apply(x, 1, rank, ties.method = 'random'))
  nranks <- apply(ranks, 2, function(y)table(factor(y, levels=1:nalgs)))

  # Simple rank based global algorithm order
  # (see as.ranking.medalstable):
  barranks <- rank(colSums(x * (nalgs:1)/nalgs), ties.method = 'random')
  barorder <- order(barranks)


  ### Plot:
  # The x-axis is divided into nalgs podium places, each nalgs wide:
  dotplotborders <- (0:nalgs) * nalgs

  # Horizontal offset of each algorithm's dot column within a place:
  dotplaces <- (1:nalgs) - 0.5
  names(dotplaces) <- names(barranks)[barorder]

  barcols <- col
  dotcols <- col
  # Semi-transparent RGB versions of the line colors:
  linecols <- sapply(lines.col,
                     function(c) {
                       r <- col2rgb(c)
                       rgb(r[1], r[2], r[3],
                           alpha=round(255*lines.alpha),
                           maxColorValue=255)
                     })


  ## Draw it:
  opar <- par(no.readonly = TRUE)
  # Two stacked panels: dot plot on top, place counts below.
  layout(matrix(c(1,2), nrow=2, byrow=TRUE), heights=c(1,0.4))
  mar <- par('mar')

  # Figure 1:
  par(mar=c(0, mar[2], mar[3], mar[4]))
  plot(dotplotborders, rep(max(x), nalgs+1),
       type='n', ylim=range(x, na.rm = TRUE), ylab=ylab, xlab='', axes=FALSE)
  axis(1, at=dotplotborders, labels=NA, lwd=par('lwd'))
  axis(2, lwd=par('lwd'))
  box()

  # Podium place borders:
  abline(v=dotplotborders,
         lty=places.lty, col=places.col)

  # Content:
  # Draw a polyline as individual segments so each can carry its own color:
  linesegments <- function(x, y, ...) {
    n <- length(x)
    segments(x[-n], y[-n], x[-1], y[-1], ...)
  }

  # Apply a drawing function run-by-run: for each benchmark run, put the
  # performances at their podium positions (in rank order) and draw them.
  drawthe <- function(fn, col, ...) {
    for ( i in 1:nrow(x) ) {
      r <- ranks[i,]
      o <- order(r)

      performances <- (x[i,])[o]
      places <- (dotplaces[names(r)] + ((r - 1) * nalgs))[o]

      fn(places, performances, col=col[o], ...)
    }
  }

  if ( lines.show )
    drawthe(linesegments, linecols, lwd=lines.lwd)

  drawthe(points, dotcols,
          pch=dots.pch, cex=dots.cex)

  legendfn(names(barranks)[barorder], dotcols[barorder])


  # Figure 2:
  par(mar=c(mar[1], mar[2], 0, mar[4]))
  barplot(t(nranks[,barorder]), beside=TRUE, width=1,
          axes=FALSE, space=c(0,0), border=NA, ylim=c(0, nrow(x)),
          names.arg=paste(1:nalgs, '.', sep=''),
          col=col[barorder], xlab=xlab)
  axis(1, at=c(0, dotplotborders), labels=NA, lwd=par('lwd'))
  box()

  # Restore the graphics state changed by layout()/par():
  par(opar)
}
#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/algperf-paircomp.R"
#' @include proto.R
#' @include testprocedure.R
{}



#' Pairwise comparison of algorithm performances
#'
#' @param x An \code{\link{AlgorithmPerformance}} object
#' @param family A \code{\link{Paircomp}} object
#' @param type Draw strict or indifference decision
#' @param ... Ignored
#' @return A \code{PaircompDecision} object; a list with the
#'   elements:
#'   \tabular{rl}{
#'     \code{decision} \tab The incidence matrix representing the
#'       pairwise comparisons\cr
#'     \code{type} \tab The decision type\cr
#'     \code{base} \tab A list with information on the decision base
#'   }
#'
#' @aliases PaircompDecision
#' @references
#'   See \emph{Eugster and Leisch (2008)} and \emph{Eugster et al. (2008)}
#'   in \code{citation("benchmark")}.
#' @rdname algperf-paircomp
#' @export
paircomp <- function(x, family, type = c("<", "="), ...) {
  # Validate the requested decision type ("<" strict, "=" indifference):
  type <- match.arg(type)

  # Instantiate the decision engine from the comparison family and
  # immediately ask it for its pairwise decision.
  engine.args <- c(list(x, type), list(...))
  engine <- do.call(family$new, engine.args)

  engine$decision()
}



### Decision family infrastructure: ##################################

PaircompDecision <- function(decision, type, base) {
  # Container for a pairwise comparison decision: the 0/1 incidence
  # matrix, the decision type ("<" or "="), and the information the
  # decision is based on.
  obj <- list(decision = decision, type = type, base = base)
  class(obj) <- c("PaircompDecision", "list")
  obj
}



#' @nord
#' @S3method print PaircompDecision
print.PaircompDecision <- function(x, ...) {
  # Print the decision type followed by its incidence matrix.
  cat(sQuote(x$type), "decision:\n")
  print(x$decision)
  # Return the object invisibly, as S3 print methods conventionally do
  # (and as print.indiffpref/print.strictpref in this package already do);
  # previously the value of print(x$decision) was returned instead.
  invisible(x)
}



#' Infrastructure for pairwise comparisons of algorithm performances
#'
#' Available \code{TestPaircomp} implementations:
#' \tabular{rl}{
#'   \code{FriedmanTestPaircomp} \tab Pairwise comparison based on the
#'     nonparametric Friedman test\cr
#'   \code{LmerTestPaircomp} \tab Pairwise comparison based on a mixed
#'     effects model (function \code{lmer} in package \code{lme4})\cr
#'   \code{PercintTestPaircomp} \tab Pairwise comparison based on the
#'     bootstrap percentile intervals
#' }
#'
#' Available \code{PointPaircomp} implementations:
#' \tabular{rl}{
#'   \code{GenericPointPaircomp} \tab Pairwise comparison based on
#'     point estimates.
#' }
#'
#' @aliases TestPaircomp PointPaircomp
#' @references
#'   See \emph{Eugster and Leisch (2008)} and \emph{Eugster et al. (2008)}
#'   in \code{citation("benchmark")}.
#' @seealso \code{\link{TestProcedure}}
#' @rdname Paircomp
Paircomp <- proto(expr = {
  name <- "Abstract pairwise comparison method"

  # Abstract interface: concrete comparison families override new()
  # (construct a configured decision engine for the data `x`) and
  # decision() (return a PaircompDecision object).
  new <- function(., x, ...) NULL
  decision <- function(., ...) NULL
})



TestPaircomp <- proto(Paircomp, expr = {
  name <- "Abstract test based pairwise comparison method"
  # Slot for the underlying statistical test procedure (set by subclasses).
  test <- NULL
})



PointPaircomp <- proto(Paircomp, expr = {
  # Abstract parent for decisions based on point estimates only
  # (no statistical test involved).
  name <- "Abstract point estimate based pairwise comparison method"
})



### Implementation -- Friedman test based decision: ##################

#' @rdname Paircomp
#' @export
FriedmanTestPaircomp <- proto(TestPaircomp, expr = {
  # Factory: configure a Friedman-test based comparison engine for the
  # requested decision type.
  new <- function(., x, type, significance) {
    stopifnot(FriedmanTest$requirements())

    test <- FriedmanTest$new(x)
    algorithms <- levels(x$algorithms[, drop = TRUE])

    # Dispatch on the decision type; "<" = strict preference,
    # "=" = indifference:
    switch(type,
           "<" = LeFriedmanTestPaircomp$proto(test = test,
                                              significance = significance,
                                              algorithms = algorithms),
           "=" = EqFriedmanTestPaircomp$proto(test = test,
                                              significance = significance,
                                              algorithms = algorithms))
  }
})

LeFriedmanTestPaircomp <- proto(FriedmanTestPaircomp, expr = {

  # Strict ("<") preference decision: pairwise preferences are only
  # derived when the global Friedman test is significant.
  decision <- function(.) {
    result <- emptyLeDecision(.$algorithms)

    gt <- .$test$globalTest()
    pt <- NULL

    if ( gt$getPvalue() < .$significance ) {
      pt <- .$test$pairwiseTest()

      pval <- pt$getPvalue()
      tstat <- pt$getStatistic()

      # Pairs with a significant pairwise difference:
      desc <- pval < .$significance

      # Orient each significant "a - b" pair by the sign of its test
      # statistic (positive sign flips the pair) before recording the
      # preference as result[p1, p2] <- 1.
      sigdirs <- sign(tstat[desc])
      sigpairs <- strsplit(rownames(desc)[desc], ' - ')
      sigpairs[sigdirs == 1] <- lapply(sigpairs[sigdirs == 1], rev)

      for ( p in sigpairs )
        result[p[1], p[2]] <- 1
    }

    PaircompDecision(result, "<",
                     list(globaltest = gt, pairwisetest = pt))
  }
})

EqFriedmanTestPaircomp <- proto(FriedmanTestPaircomp, expr = {

  # Indifference ("=") decision: two algorithms are declared equivalent
  # when the global Friedman test is significant but their pairwise
  # p-value is not.
  decision <- function(.) {
    result <- emptyEqDecision(.$algorithms)

    gt <- .$test$globalTest()
    pt <- NULL

    if ( gt$getPvalue() < .$significance ) {
      pt <- .$test$pairwiseTest()

      pval <- pt$getPvalue()

      # Non-significant pairwise differences count as indifference:
      desc <- pval > .$significance
      sigpairs <- strsplit(rownames(desc)[desc], ' - ')

      # Indifference is symmetric, so set both matrix entries:
      for ( p in sigpairs )
        result[p[1], p[2]] <- result[p[2], p[1]] <- 1
    }

    PaircompDecision(result, "=",
                     list(globaltest = gt, pairwisetest = pt))
  }
})



### Implementation -- LMER test based decision: ######################

#' @rdname Paircomp
#' @export
LmerTestPaircomp <- proto(TestPaircomp, expr = {

  # Factory: configure a mixed-effects-model (lmer) based comparison
  # engine for the requested decision type.  `relevance` is the
  # half-width of the interval around zero within which differences are
  # treated as irrelevant (see the decision() methods of the concrete
  # subclasses).
  new <- function(., x, type, significance, relevance = 0) {
    stopifnot(LmerTest$requirements())

    test <- LmerTest$new(x)
    algorithms <- levels(x$algorithms[, drop = TRUE])

    # "<" = strict preference, "=" = indifference:
    switch(type,
           "<" = LeLmerTestPaircomp$proto(test = test,
                                          significance = significance,
                                          relevance = relevance,
                                          algorithms = algorithms),
           "=" = EqLmerTestPaircomp$proto(test = test,
                                          significance = significance,
                                          relevance = relevance,
                                          algorithms = algorithms))
  }
})

LeLmerTestPaircomp <- proto(LmerTestPaircomp, expr = {

  # Strict ("<") preference decision based on simultaneous confidence
  # intervals from the mixed-effects model pairwise comparisons.
  decision <- function(.) {
    result <- emptyLeDecision(.$algorithms)

    gt <- .$test$globalTest()
    pt <- NULL
    ci <- NULL  # BUGFIX: `ci` was undefined when the global test is not
                # significant, making the PaircompDecision() call below fail.

    if ( gt$getPvalue() < .$significance ) {
      pt <- .$test$pairwiseTest()

      # Confidence intervals at family-wise level 1 - significance:
      ci <- pt$getConfint(1 - .$significance)

      # Significant: the interval does not cover zero ...
      desc <- !(ci[, 'lwr'] < 0 & ci[, 'upr'] > 0)
      # ... and relevant: it does not lie inside the zone of irrelevance.
      desc <- desc & !(ci[, 'lwr'] > -.$relevance & ci[, 'upr'] < .$relevance)

      # Orient each significant "a - b" pair by the sign of its estimate
      # (positive sign flips the pair) before recording the preference:
      sigdirs <- sign(ci[desc, 'Estimate'])
      sigpairs <- strsplit(rownames(ci)[desc], ' - ')
      sigpairs[sigdirs == 1] <- lapply(sigpairs[sigdirs == 1], rev)

      for ( p in sigpairs )
        result[p[1], p[2]] <- 1
    }


    PaircompDecision(result, "<",
                     list(model = .$test$model, globaltest = gt,
                          pairwisetest = pt, confint = ci))
  }
})

EqLmerTestPaircomp <- proto(LmerTestPaircomp, expr = {

  # Indifference ("=") decision based on simultaneous confidence
  # intervals from the mixed-effects model pairwise comparisons.
  decision <- function(.) {
    result <- emptyEqDecision(.$algorithms)

    gt <- .$test$globalTest()
    pt <- NULL
    ci <- NULL  # BUGFIX: `ci` was undefined when the global test is not
                # significant, making the PaircompDecision() call below fail.

    if ( gt$getPvalue() < .$significance ) {
      pt <- .$test$pairwiseTest()

      ci <- pt$getConfint(1 - .$significance)

      # Indifferent: the interval covers zero ...
      desc <- (ci[, 'lwr'] < 0 & ci[, 'upr'] > 0)
      # ... or lies entirely inside the zone of irrelevance.
      desc <- desc | (ci[, 'lwr'] > -.$relevance & ci[, 'upr'] < .$relevance)

      sigpairs <- strsplit(rownames(ci)[desc], ' - ')

      # Indifference is symmetric, so set both matrix entries:
      for ( p in sigpairs )
        result[p[1], p[2]] <- result[p[2], p[1]] <- 1
    }

    PaircompDecision(result, "=",
                     list(model = .$test$model, globaltest = gt,
                          pairwisetest = pt, confint = ci))
  }
})



### Implementation -- Percentile interval based decision: ############

#' @rdname Paircomp
#' @export
PercintTestPaircomp <- proto(TestPaircomp, expr = {

  # Factory for bootstrap percentile-interval based comparisons.
  new <- function(., x, type, significance) {
    stopifnot(PercintTest$requirements())

    test <- PercintTest$new(x)
    algorithms <- levels(x$algorithms[, drop = TRUE])

    # NOTE(review): only the "=" decision type is implemented; for
    # type "<" this switch returns NULL and paircomp() will then fail
    # when it calls engine$decision().
    switch(type,
           "=" = EqPercintTestPaircomp$proto(test = test,
                                             significance = significance,
                                             algorithms = algorithms))
  }

  # Helper: does interval x reach past the lower bound of interval y
  # (both with 'lwr'/'upr' elements)?
  # NOTE(review): not referenced by EqPercintTestPaircomp below;
  # possibly intended for callers outside this file.
  overlap <- function(., x, y) {
    unname(x['upr'] > y['lwr'])
  }
})

EqPercintTestPaircomp <- proto(PercintTestPaircomp, expr = {

  # Indifference ("=") decision based on bootstrap percentile intervals.
  decision <- function(.) {
    result <- emptyEqDecision(.$algorithms)

    ci <- .$test$pairwiseTest()$getConfint(1 - .$significance)

    # NOTE(review): this selects pairs whose interval does NOT cover
    # zero, which is the opposite convention of EqLmerTestPaircomp
    # (there, an interval covering zero means indifference).  Confirm
    # against the semantics of PercintTest's confidence intervals.
    desc <- !(ci[, 'lwr'] < 0 & ci[, 'upr'] > 0)

    sigpairs <- strsplit(rownames(ci)[desc], ' - ')

    # Indifference is symmetric, so set both matrix entries:
    for ( p in sigpairs )
      result[p[1], p[2]] <- result[p[2], p[1]] <- 1

    PaircompDecision(result, "=",
                     list(percint = ci))
  }
})



### Implementation -- Generic point estimate decision: ###############

#' @rdname Paircomp
#' @export
GenericPointPaircomp <- proto(PointPaircomp, expr = {

  # Pairwise comparison based on a point estimate (named by `estimator`,
  # e.g. "mean" or "median") of each algorithm's performance values.
  new <- function(., x, type, estimator, tolerance = .Machine$double.eps) {
    stopifnot(is.character(type))
    stopifnot(is.character(estimator))

    # Only a single dataset/performance-measure combination is supported:
    stopifnot(nlevels(x$datasets[, drop = TRUE]) == 1)
    stopifnot(nlevels(x$performances[, drop = TRUE]) == 1)

    algorithms <- levels(x$algorithms[, drop = TRUE])

    .$proto(data = x, type = type,
            algorithms = algorithms,
            estimator = estimator,
            tolerance = tolerance)
  }

  decision <- function(.) {
    estfn <- match.fun(.$estimator)

    # One point estimate per algorithm:
    val <- sapply(split(.$data$value, .$data$algorithms), estfn)
    # Matrix of all pairwise differences of the point estimates:
    pairs <- sapply(val, function(a) sapply(val, function(b) a - b))

    # Differences below the tolerance are treated as exact ties:
    pairs[abs(pairs) < .$tolerance] <- 0

    result <- switch(.$type,
                     "=" = apply(pairs, c(1, 2), function(x) 0 == x),
                     "<" = apply(pairs, c(1, 2), function(x) 0 < x))

    # "+ 0" coerces the logical matrix into the 0/1 incidence matrix:
    PaircompDecision(result + 0, .$type, list(statistic = val,
                                              differences = pairs))
  }
})



### Internal functions: ##############################################

emptyLeDecision <- function(algorithms) {
  # Square zero matrix (no strict preferences recorded yet), one
  # row/column per algorithm, labelled with the algorithm names.
  n <- length(algorithms)
  m <- matrix(0, nrow = n, ncol = n)
  dimnames(m) <- list(algorithms, algorithms)
  m
}



emptyEqDecision <- function(algorithms) {
  # Identity matrix (every algorithm is indifferent to itself),
  # labelled with the algorithm names.
  m <- diag(length(algorithms))
  dimnames(m) <- list(algorithms, algorithms)
  m
}
#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/algperf-preference.R"


#' @param x A \code{\link{PaircompDecision}} object
#' @param verbose Show information during execution
#' @param ... Ignored
#' @return A \code{relation} object
#' @method as.relation PaircompDecision
#' @rdname algperf-paircomp
#' @S3method as.relation PaircompDecision
#' @export
as.relation.PaircompDecision <- function(x, verbose = FALSE, ...) {
  # Convert the 0/1 decision matrix into a 'relations' incidence relation.
  r <- relation(incidence = x$decision, ...)


  # Check the relation properties matching the decision type and pick
  # the subclass used for pretty printing:
  if ( x$type == "=" ) {
    props <- check_indifference_preference(r)
    class <- "indiffpref"
  }
  else {
    props <- check_strict_preference(r)
    class <- "strictpref"
    # NOTE(review): forces the is_decreasing metadata flag off for
    # strict preferences -- confirm against the 'relations' package's
    # scoring/ranking semantics.
    r$.Meta$is_decreasing <- FALSE
  }

  # Store the property-check results as is_* flags in the relation's
  # metadata:
  r$.Meta <- c(r$.Meta,
               structure(props, names = sprintf("is_%s", names(props))))

  if ( verbose ) {
    cat(sQuote(x$type), "preference relation:\n")

    # Report each property and the tuples violating it:
    for ( p in names(props) ) {
      cat(sprintf("%s = %s:\n", p, props[[p]]))
      print(relation_violations(r, p, TRUE))
    }
  }

  structure(r, class = c(class, class(r)))
}



#' @nord
#' @S3method print indiffpref
print.indiffpref <- function(x, ...) {
  # Print method for indifference preference relations; additionally
  # shows the equivalence classes when the relation is an equivalence.
  cat("Indifference preference relation:\n")
  if ( relation_is_equivalence(x) )
    print(relation_classes(x))

  invisible(x)
}



#' @nord
#' @S3method print strictpref
print.strictpref <- function(x, ...) {
  # Print method for strict preference relations; additionally shows
  # the implied ranking when the relation is a strict order
  # (irreflexive, asymmetric and transitive).
  cat("Strict preference relation:\n")
  if ( relation_is_irreflexive(x) && relation_is_asymmetric(x) &&
       relation_is_transitive(x) )
    print(as.ranking(x))

  invisible(x)
}



check_indifference_preference <- function(x) {
  # Properties an indifference ("=") relation should satisfy to be an
  # equivalence relation; returned as a named list of logicals.
  list(reflexive = relation_is_reflexive(x),
       symmetric = relation_is_symmetric(x),
       transitive = relation_is_transitive(x))
}



check_strict_preference <- function(x) {
  # Properties a strict ("<") preference relation may satisfy;
  # returned as a named list of logicals.
  list(irreflexive = relation_is_irreflexive(x),
       asymmetric = relation_is_asymmetric(x),
       transitive = relation_is_transitive(x),
       negatively_transitive = relation_is_negatively_transitive(x),
       trichotomous = relation_is_trichotomous(x))
}



### Patch 'relations' package: #######################################

#' @rdname algperf-paircomp
#' @export
relation_is_strict_weak_order <- function(x) {
  # Predicate missing from the 'relations' package: a strict weak order
  # is an irreflexive, asymmetric, transitive and negatively transitive
  # endorelation.
  (relation_is_endorelation(x) &&
   relation_is_irreflexive(x) &&
   relation_is_asymmetric(x) &&
   relation_is_transitive(x) &&
   relation_is_negatively_transitive(x))
}


patch.relation_class_ids <- function (x) {
  # Patched version of relations::relation_class_ids that additionally
  # accepts strict weak orders (via relation_is_strict_weak_order
  # defined above); it is injected into the 'relations' namespace
  # further down in this file.
  if (!is.relation(x))
    stop("Argument 'x' must be a relation.")
  if (!identical(relation_is_crisp(x), TRUE))
    stop("Argument 'x' must be a crisp relation with no missings.")
  if (relation_is_weak_order(x) || relation_is_strict_weak_order(x)) {
    # Class ids are the dense ranks of the relation scores:
    s <- relation_scores(x, "ranks", decreasing = FALSE)
    ids <- match(s, sort(unique(s)))
    names(ids) <- names(s)
    ids
  }
  else if (relation_is_equivalence(x))
    # get_class_ids_from_incidence is internal to 'relations'; it only
    # resolves because this closure's environment is later set to that
    # namespace.
    get_class_ids_from_incidence(relation_incidence(x))
  else stop("Can only determine class ids for equivalences and weak orders.")
}


#' @import relations
library(relations)
# Inject the patched relation_class_ids() into the 'relations'
# namespace so it also handles strict weak orders.  The closure's
# environment is set to the package namespace first, so its calls to
# internal 'relations' helpers still resolve.
environment(patch.relation_class_ids) <- getNamespace("relations")
utils:::assignInNamespace("relation_class_ids", patch.relation_class_ids, "relations")
# Re-attach the package so the search path reflects the patched namespace:
detach("package:relations")
library(relations)
#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/algperf-visualizations.R"


#' @param x An \code{\link{AlgorithmPerformance}} object
#' @param order.by Function like \code{\link{mean}}, \code{\link{median}},
#'   or \code{\link{max}} to calculate a display order of the algorithms;
#'   or \code{NULL} for no specific order.
#' @param order.performance Name or index of the reference performance
#'   measure to calculate the order.
#' @param dependence.show Show dependence of observations for all, none or
#'   outlier observations.
#' @param dependence.col Color of the dependence line.
#' @param ... Ignored.
#' @return A \code{\link[ggplot2]{ggplot}} object.
#' @method boxplot AlgorithmPerformance
#' @rdname algperf-visualization
#' @importFrom graphics boxplot
#' @S3method boxplot AlgorithmPerformance
boxplot.AlgorithmPerformance <- function(x, order.by = median, order.performance = 1,
                                         dependence.show = c("outliers", "all", "none"),
                                         dependence.col = alpha("black", 0.1), ...) {

  ## Make codetools (R CMD check) happy:
  ## (these names are only used inside ggplot2 aes() calls)
  algorithms <- value <- performances <- datasets <- samples <- NULL


  dependence.show <- match.arg(dependence.show)

  # Reorder the algorithm factor by the summary statistic `order.by`
  # computed on the reference performance measure:
  x <- order.algorithms.by(x, order.by, order.performance)

  p <- ggplot(x, aes(algorithms, value))
  p <- p + facet_grid(performances ~ datasets, scales = "free")

  # Connect observations belonging to the same benchmark sample:
  if ( dependence.show == "all" )
    p <- p + geom_line(aes(group = samples), colour = dependence.col)

  if ( dependence.show == "outliers" ) {
    # HACK: currently I don't know how to do that ggplot2-like.
    # Only connect samples that are an outlier for at least one algorithm:
    o <- unique(unlist(lapply(split(x$value, x$algorithms), which.outlier)))
    ox <- x[x$samples %in% o, ]

    p <- p + geom_line(aes(group = samples), data = ox, colour = dependence.col)
  }

  # Boxplots are drawn last so they sit on top of the dependence lines:
  p <- p + geom_boxplot(aes(fill = algorithms)) +
    scale_fill_manual(values = attr(x, "algorithm_colors"))

  p
}



#' @param x An object
#' @param ... Additional arguments
#' @rdname benchmark-generics
#' @export
densityplot <- function(x, ...) {
  # S3 generic for density plots of performance values; dispatches on
  # the class of `x`.
  UseMethod("densityplot")
}



#' @param x An \code{\link{AlgorithmPerformance}} object
#' @param ... Ignored.
#' @return A \code{\link[ggplot2]{ggplot}} object.
#' @method densityplot AlgorithmPerformance
#' @rdname algperf-visualization
#' @S3method densityplot AlgorithmPerformance
densityplot.AlgorithmPerformance <- function(x, ...) {
  ## Make codetools (R CMD check) happy:
  ## (these names are only used inside ggplot2 aes() calls)
  algorithms <- value <- performances <- datasets <- samples <- NULL


  # One density curve per algorithm, faceted by performance measure and
  # dataset, colored with the warehouse-provided algorithm colors:
  p <- ggplot(x, aes(x = value, colour = algorithms, group = algorithms))
  p <- p + facet_grid(performances ~ datasets, scales = "free")
  p <- p + geom_density(fill = NA) +
    scale_colour_manual(values = attr(x, "algorithm_colors"))

  p
}



#' @param x An \code{\link{AlgorithmPerformance}} object
#' @param order.by Function like \code{\link{mean}}, \code{\link{median}},
#'   or \code{\link{max}} to calculate a display order of the algorithms;
#'   or \code{NULL} for no specific order.
#' @param order.performance Name or index of the reference performance
#'   measure to calculate the order.
#' @param dependence.show Show dependence of observations for all or none
#'   observations.
#' @param dependence.col Color of the dependence line.
#' @param ... Ignored.
#' @return A \code{\link[ggplot2]{ggplot}} object.
#' @method stripchart AlgorithmPerformance
#' @rdname algperf-visualization
#' @importFrom graphics stripchart
#' @S3method stripchart AlgorithmPerformance
stripchart.AlgorithmPerformance <- function(x, order.by = median, order.performance = 1,
                                            dependence.show = c("none", "all"),
                                            dependence.col = alpha("black", 0.1), ...) {

  ## Make codetools (R CMD check) happy:
  ## (these names are only used inside ggplot2 aes() calls)
  algorithms <- value <- performances <- datasets <- samples <- NULL


  dependence.show <- match.arg(dependence.show)

  # Reorder the algorithm factor by the summary statistic `order.by`
  # computed on the reference performance measure:
  x <- order.algorithms.by(x, order.by, order.performance)

  p <- ggplot(x, aes(x = algorithms, y = value, colour = algorithms))
  p <- p + facet_grid(performances ~ datasets, scales = "free")

  # Connect observations belonging to the same benchmark sample:
  if ( dependence.show == "all" )
    p <- p + geom_line(aes(group = samples), colour = dependence.col)

  p <- p + geom_point() +
    scale_colour_manual(values = attr(x, "algorithm_colors"))

  p
}



### Internal functions: ##############################################

order.algorithms.by <- function(x, order.by, order.performance) {
  # No ordering function given: keep the incoming algorithm order.
  if ( is.null(order.by) )
    return(x)

  # Resolve a numeric performance index to its level name:
  if ( !is.character(order.performance) )
    order.performance <- levels(x$performances)[order.performance]

  order.by <- match.fun(order.by)

  # Restrict to the reference performance measure and drop missings:
  x <- na.omit(subset(x, performances = order.performance))

  # Score each algorithm and turn the algorithm column into an ordered
  # factor whose level order follows the scores:
  scores <- sapply(split(x$value, x$algorithms), order.by)
  algorithm.levels <- levels(x$algorithms)

  x$algorithms <- factor(x$algorithms,
                         ordered = TRUE,
                         levels = algorithm.levels[order(scores)])

  x
}



which.outlier <- function(x) {
  # Indices of observations outside the boxplot whiskers (Tukey's rule,
  # cf. base boxplot.stats): more than 1.5 * IQR beyond the hinges.
  # Falls back to flagging non-finite values when the hinge spread is
  # not available.
  whisker.coef <- 1.5
  hinges <- stats::fivenum(x, na.rm = TRUE)
  hinge.spread <- diff(hinges[c(2, 4)])

  if ( is.na(hinge.spread) ) {
    extreme <- !is.finite(x)
  } else {
    lower <- hinges[2L] - whisker.coef * hinge.spread
    upper <- hinges[4L] + whisker.coef * hinge.spread
    extreme <- x < lower | x > upper
  }

  which(extreme)
}


#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/algperf.R"
#' @include warehouse.R
{}



#' Return subsets of \code{AlgorithmPerformance} objects
#' @param x An \code{\link{AlgorithmPerformance}} object
#' @param datasets Selected datasets
#' @param algorithms Selected algorithms
#' @param performances Selected performances
#' @param samples Selected samples
#' @param ... Ignored
#' @return An \code{\link{AlgorithmPerformance}} object with just the
#'   selected observations
#' @method subset AlgorithmPerformance
#' @S3method subset AlgorithmPerformance
subset.AlgorithmPerformance <- function(x, datasets = NULL,
                                        algorithms = NULL,
                                        performances = NULL,
                                        samples = NULL, ...) {

  # NULL means "keep every level" for the corresponding dimension:
  keep <- function(selected, column) {
    if ( is.null(selected) ) levels(column) else selected
  }

  datasets <- keep(datasets, x$datasets)
  algorithms <- keep(algorithms, x$algorithms)
  performances <- keep(performances, x$performances)
  samples <- keep(samples, x$samples)

  # Rows matching all four selections simultaneously:
  rows <- x$datasets %in% datasets &
          x$algorithms %in% algorithms &
          x$performances %in% performances &
          x$samples %in% samples

  x <- x[rows, ]

  # Drop now-unused factor levels so downstream nlevels() checks see
  # only the selected levels:
  x$datasets <- x$datasets[, drop = TRUE]
  x$algorithms <- x$algorithms[, drop = TRUE]
  x$performances <- x$performances[, drop = TRUE]
  x$samples <- x$samples[, drop = TRUE]

  x
}

#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/as.warehouse.R"
#' @include warehouse.R
{}



#' Methods to coerce objects to a benchmark experiment warehouse.
#'
#' Coerces a \code{bench.result} object from package \code{mlr} to a
#' \code{\link{warehouse}} object.
#'
#' @param x A \code{bench.result} object from package \code{mlr}
#' @param ... Ignored
#' @return A \code{\link{warehouse}} object
#' @export
#' @title as.warehouse
#' @aliases as.warehouse
#' @rdname as.warehouse
as.warehouse.mlr.bench.result <- function(x, ...) {
  # Extract the performance data from the mlr S4 object: one list
  # element per dataset, each a (replications x algorithms x measures)
  # array.
  perf <- x@perf

  datasets <- names(perf)
  algorithms <- dimnames(perf[[1]])[[2]]
  performances <- dimnames(perf[[1]])[[3]]
  # The last row of each array is dropped below; presumably an
  # aggregate row added by mlr -- TODO confirm against the targeted
  # mlr version.
  B <- sapply(perf, function(x) nrow(x) - 1)

  w <- warehouse(datasets, B,
                 algorithms = algorithms,
                 performances = performances)

  # Copy each dataset's replication rows (all but the last) into the
  # warehouse:
  for ( d in datasets )
    w$data[[d]]$AlgorithmPerformance[, , ] <-
      perf[[d]][-nrow(perf[[d]]), ,]

  w
}



#' Methods to coerce objects to a benchmark experiment warehouse.
#'
#' Coerces a four dimensional array (1st: sampling, 2nd: algorithms,
#' 3rd: performance measures, 4th: datasets) to a
#' \code{\link{warehouse}} object.
#'
#' @param x A four dimensional array
#' @param ... Ignored
#' @return A \code{\link{warehouse}} object
#' @export
#' @rdname as.warehouse
as.warehouse.array4dim <- function(x, ...) {
  # Coerce a 4-dimensional array (1st: sampling, 2nd: algorithms,
  # 3rd: performance measures, 4th: datasets) into a warehouse object.
  stopifnot(length(dim(x)) == 4)

  B <- dim(x)[1]
  algorithms <- dimnames(x)[[2]]
  performances <- dimnames(x)[[3]]
  datasets <- dimnames(x)[[4]]

  w <- warehouse(datasets, B,
                 algorithms = algorithms,
                 performances = performances)

  # Copy every dataset's slice into the warehouse.
  # BUGFIX: the loop previously iterated over `length(datasets)` (a
  # single number), so only the last dataset's performances were filled.
  for ( d in seq_along(datasets) )
    w$data[[d]]$AlgorithmPerformance[, , ] <- x[, , , d]

  w
}

#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/benchmark.R"
#' @include warehouse.R
{}



#' Function to execute benchmark experiments and collect all data the
#' package can analyze. For more sophisticated benchmark experiments
#' we suggest the usage of the \code{mlr} package.
#'
#' @param datasets List of data.frames
#' @param sampling Sampling function, see \code{\link{benchmark-sampling}}.
#' @param algorithms List of algorithms; i.e., functions which take
#'   a model formula and a data.frame to fit a model. Note that a
#'   \code{\link[stats]{predict}} function must be defined as well.
#' @param performances List of performance measure functions; i.e.,
#'   functions with arguments \code{yhat} and \code{y}. See, e.g.,
#'   \code{\link{benchmark-comptime}}.
#' @param characteristics \code{\link{DatasetCharacteristics}} object
#' @param test \code{\link{TestProcedure}} object
#' @param test.burnin Number of burn-in replications
#' @param verbose Show information during execution
#' @return A \code{\link{warehouse}} object
#' @seealso \code{\link{warehouse}}, \code{\link{as.warehouse}},
#'   \code{\link{benchmark-sampling}}, \code{\link{benchmark-comptime}}
#' @title Benchmark experiment execution
#' @export
benchmark <- function(datasets, sampling, algorithms = NULL,
                      performances = NULL, characteristics = NULL,
                      test = NULL, test.burnin = 3, verbose = TRUE) {
  ## Execute a benchmark experiment: for each dataset draw B learning /
  ## test samples, optionally characterize the (sub)datasets, fit each
  ## algorithm, evaluate each performance measure, and optionally run a
  ## sequential test procedure.  All results are collected in a
  ## warehouse object, which is returned.

  call <- match.call()


  ## Check what to do:
  if ( !is.list(datasets) )
    datasets <- list(datasets)

  ## Number of replications; attached as attribute "B" to the sampling
  ## closure (see bs.sampling, sub.sampling, cv.sampling):
  B <- attr(sampling, "B")

  doAlgorithmPerformances <- FALSE
  doCharacterization <- FALSE
  doTest <- FALSE

  ## Deparsed argument names label the warehouse dimensions:
  ndatasets <- deparseArgs(call$datasets)
  nalgorithms <- NULL
  nperformances <- NULL
  ncharacteristics <- NULL
  ntests <- NULL

  if ( !is.null(algorithms) && !is.null(performances) ) {
    if ( !is.list(algorithms) )
      algorithms <- list(algorithms)

    if ( !is.list(performances) )
      performances <- list(performances)

    nalgorithms <- deparseArgs(call$algorithms)
    nperformances <- deparseArgs(call$performances)

    doAlgorithmPerformances <- TRUE

    if ( !is.null(test) ) {
      stopifnot(test$requirements())

      ntests <- c("pvalue", "statistic")
      doTest <- TRUE
    }
  }
  else {
    ## Algorithms and performances only make sense together:
    stopifnot(is.null(algorithms) && is.null(performances))
  }

  if ( !is.null(characteristics) ) {
    ncharacteristics <- characteristics$characteristics()
    doCharacterization <- TRUE
  }


  ## Warehouse:
  warehouse <- warehouse(datasets = ndatasets, B = B,
                         algorithms = nalgorithms,
                         performances = nperformances,
                         characteristics = ncharacteristics,
                         tests = ntests)


  ## Loop:
  for ( m in seq_along(datasets) ) {
    printMsg(sprintf('m = %s\n', m), verbose = verbose)

    ## Characterization of the complete ("basis") dataset:
    if ( doCharacterization )
      warehouse$data[[m]]$DatasetBasisCharacterization[, ] <-
            characterize(datasets[[m]], characteristics)


    samples <- sampling(nrow(datasets[[m]]$data()))


    for ( b in seq_len(B) ) {
      printMsg(sprintf('  b = %s\n', b), verbose = verbose)


      if ( doCharacterization )
        warehouse$data[[m]]$DatasetCharacterization[b, ] <-
            characterize(datasets[[m]],
                         characteristics,
                         index = samples$L[[b]])


      if ( doAlgorithmPerformances ) {
        for ( k in seq_along(algorithms) ) {
          printMsg(sprintf('    k = %s\n', k), verbose = verbose)

          ## fittime()/predicttime() fetch `ftime`/`ptime` from this
          ## frame via get(..., envir = parent.frame()); do not rename.
          ftime <- system.time(
            fit <- algorithms[[k]](as.formula(datasets[[m]]$formula()),
                                   data = datasets[[m]]$data(index = samples$L[[b]])))

          ptime <- system.time(
            pred <- predict(fit,
                            newdata = datasets[[m]]$input(index = samples$T[[b]])))

          for ( p in seq_along(performances) ) {
            printMsg(sprintf('      p = %s\n', p), verbose = verbose)

            warehouse$data[[m]]$AlgorithmPerformance[b, k, p] <-
                performances[[p]](pred,
                                  datasets[[m]]$response(index = samples$T[[b]])[[1]])
          }

        }

        ## FIX: use the scalar `&&` instead of the elementwise `&` for
        ## this if() condition.
        if ( doTest && b > test.burnin ) {
          printMsg(sprintf('    test\n'), verbose = verbose)

          ## Global test on all complete replications seen so far:
          accdat <- warehouse$viewAlgorithmPerformance(dataset = m)
          accdat <- na.omit(accdat)
          accdat$samples <- accdat$samples[, drop = TRUE]

          acctest <- test$new(accdat)$globalTest()
          warehouse$data[[m]]$TestResult[b, ] <- c(acctest$getPvalue(),
                                                   acctest$getStatistic())
        }
      }
    }

    ## FIX: pass `verbose` through; the original printMsg('\n') printed
    ## a newline even when verbose = FALSE.
    printMsg('\n', verbose = verbose)
  }


  warehouse
}



### Sampling functions: ##############################################

#' Sampling functions.
#'
#' Functions to create a set of learning and test samples using a specific
#' resampling method.
#'
#' @param B Number of learning samples
#' @return List with bootstrap learning and test samples
#' @seealso \code{\link{benchmark}}
#' @rdname benchmark-sampling
#' @aliases benchmark-sampling
#' @export
bs.sampling <- function(B) {
  ## Bootstrap sampling: B learning samples of size n drawn with
  ## replacement; the out-of-bag observations form the test samples.
  ## The closure carries B as attribute "B" (read by benchmark()).
  structure(function(n) {
    learn <- lapply(1:B, function(b) sample(1:n, replace = TRUE))

    list(L = learn,
         T = lapply(learn, function(idx) setdiff(1:n, idx)))
  }, B = B)
}



#' @param B Number of learning samples
#' @param psize Size of subsample
#' @return List with subsampling learning and test samples
#' @rdname benchmark-sampling
#' @export
sub.sampling <- function(B, psize) {
  ## Subsampling: B learning samples of size ceiling(n * psize) drawn
  ## without replacement; test samples are the complements.  B and
  ## psize are attached as attributes on the returned closure.
  structure(function(n) {
    m <- ceiling(n * psize)
    learn <- lapply(1:B, function(b) sample(1:n, m, replace = FALSE))

    list(L = learn,
         T = lapply(learn, function(idx) setdiff(1:n, idx)))
  }, B = B, psize = psize)
}



#' @param k Number of cross-validation samples
#' @return List with cross-validation learning and test samples
#' @rdname benchmark-sampling
#' @export
cv.sampling <- function(k) {
  ## k-fold cross-validation: a random permutation of 1:n is split into
  ## k (nearly) equal folds; fold i is test sample i, its complement the
  ## learning sample.  The number of folds is exposed as attribute "B".
  structure(function(n) {
    folds <- split(sample(1:n), rep(1:k, length = n))

    list(L = lapply(folds, function(fold) setdiff(1:n, fold)),
         T = folds)
  }, B = k)
}



### Dummy time performance functions: ################################

#' Dummy functions to enable fitting and prediction time as performance
#' measures.
#'
#' @param yhat Ignored
#' @param y Ignored
#' @return Time (User and System) used for the model fitting
#' @seealso \code{\link{benchmark}}
#' @rdname benchmark-comptime
#' @aliases benchmark-comptime
#' @export
fittime <- function(yhat, y) {
  ## Pseudo performance measure: ignores its arguments and looks up the
  ## system.time() result `ftime` in the calling frame (set inside
  ## benchmark()); returns user + system time.
  times <- get("ftime", envir = parent.frame())
  times[1] + times[2]
}



#' @param yhat Ignored
#' @param y Ignored
#' @return Time (User and System) used for the prediction
#' @rdname benchmark-comptime
#' @export
predicttime <- function(yhat, y) {
  ## Pseudo performance measure: ignores its arguments and looks up the
  ## system.time() result `ptime` in the calling frame (set inside
  ## benchmark()); returns user + system time.
  times <- get("ptime", envir = parent.frame())
  times[1] + times[2]
}



### Internal functions: ##############################################

printMsg <- function(x = "", newline = FALSE, verbose = TRUE) {
  if ( verbose )
    cat(sprintf("%s%s", x, ifelse(newline, "\n", "")))
}



deparseArgs <- function(x) {
  ## Turn an unevaluated call argument (e.g. `list(a, b)` from
  ## match.call()) into the character names of its elements; a single
  ## symbol is returned as its own name.
  parts <- as.character(x)

  if ( length(parts) > 1 )
    parts <- parts[-1]  # drop the function name, e.g. "list"

  parts
}

#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/bsgraph.R"

#' Benchmark experiment graph.
#'
#' The benchmark summary plot takes the individual benchmark
#' experiment results into account. The y-axis represents the
#' data sets, the x-axis a podium with as many places as
#' candidate algorithms.
#'
#' @param x The object to plot
#' @param ... Unused
#' @export
bsgraph0 <- function(x, ...) {
  ## S3 generic for the benchmark experiment graph; see bsgraph0.dist
  ## and bsgraph0.graphNEL for the shipped methods.
  UseMethod("bsgraph0")
}


#' @param x A \code{\link{dist}} object
#' @param ndists.show The number of distance levels to show
#' @param edge.col The color of edges (one or one for each distance level)
#' @param edge.lwd The line width of edges (one or one for each distance level)
#' @param node.fill The colors of nodes
#' @return The return value of \code{\link{bsgraph0.graphNEL}}
#' @method bsgraph0 dist
#' @S3method bsgraph0 dist
#' @rdname bsgraph0
bsgraph0.dist <- function(x, ndists.show = length(sort(unique(x))),
                          edge.col = gray(0.7), edge.lwd = 1,
                          node.fill = NULL, ...) {
  ## Build a graphNEL from a dist object: one node per object, one edge
  ## per pair whose distance is among the `ndists.show` smallest
  ## distinct distance levels; edge length/weight encode the level.

  dmat <- as.matrix(x)

  node.names <- colnames(dmat)
  nnodes <- length(node.names)

  ## Distinct distance levels, smallest first; only the first
  ## ndists.show levels get visible edges:
  dists <- sort(unique(x))
  ndists <- length(dists)
  dshow <- dists[seq_len(ndists.show)]
  ndshow <- length(dshow)

  edge.col <- rep(edge.col, ndshow)
  edge.lwd <- rep(edge.lwd, ndshow)
  edge.len <- ceiling((1.2)^(seq_len(ndists)-1))
  edge.weight <- rev(seq_len(ndists))
  edge.lty <- c(rep('solid', ndshow),
                rep('blank', ndists - ndshow))

  graph <- new('graphNEL', nodes = node.names, edgemode = 'undirected')
  edgeAttrs <- list()
  nodeAttrs <- list()

  ## FIX: seq_len(nnodes - 1) instead of 1:(nnodes-1), which would run
  ## the loop (and index out of bounds) for a single-node dist object.
  for ( i in seq_len(nnodes - 1) ) {
    for ( j in (i+1):nnodes ) {
      d <- dmat[i, j]

      if ( d %in% dshow ) {
        lev <- which(d == dists)

        graph <- addEdge(node.names[i], node.names[j], graph,
                         edge.weight[lev])

        ename <- paste(node.names[i], node.names[j], sep = '~')
        edgeAttrs$len[ename] <- edge.len[lev]
        edgeAttrs$color[ename] <- edge.col[lev]
        edgeAttrs$lwd[ename] <- edge.lwd[lev]
        edgeAttrs$lty[ename] <- edge.lty[lev]
      }
    }
  }

  if ( !is.null(node.fill) )
    nodeAttrs$fillcolor[node.names] <- node.fill

  bsgraph0(graph, nodeAttrs = nodeAttrs, edgeAttrs = edgeAttrs)
}


#' @param x A \code{\link[graph]{graphNEL}} object
#' @param layoutType Defines the layout engine
#' @return Invisible return of the \code{\link[Rgraphviz]{Ragraph}} object
#' @method bsgraph0 graphNEL
#' @S3method bsgraph0 graphNEL
#' @rdname bsgraph0
bsgraph0.graphNEL <- function(x, layoutType = 'neato', ...) {
  ## Lay out and plot the benchmark graph via Rgraphviz; fixed-size
  ## nodes with an enlarged label font.
  attrs <- getDefaultAttrs(layoutType = layoutType)
  attrs$node$fixedsize <- TRUE
  attrs$node$fontsize <- 20

  laid.out <- agopen(x, '', layoutType = layoutType, attrs = attrs, ...)
  plot(laid.out)

  ## Overplot an edge-free copy so the nodes are drawn on top of the
  ## edge lines:
  par(new = TRUE)
  no.edges <- laid.out
  no.edges@AgEdge <- list()
  plot(no.edges)


  invisible(laid.out)
}


#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/bsplot.R"

#' Benchmark experiment summary plot.
#'
#' The benchmark summary plot takes the individual benchmark
#' experiment results into account. The y-axis represents the
#' data sets, the x-axis a podium with as many places as
#' candidate algorithms.
#'
#' @param x The object to plot.
#' @param ... Unused
#' @export
bsplot0 <- function(x, ...) {
  ## S3 generic for the benchmark summary plot; see
  ## bsplot0.relation_ensemble and bsplot0.matrix for methods.
  UseMethod("bsplot0")
}


#' @param x A \code{\link{becp}} object
#' @param stat A matrix with statistics to display (rows are
#'   the algorithms, columns the data sets)
#' @param ds.order Data set order
#' @param alg.order Algorithm order
#' @method bsplot0 relation_ensemble
#' @S3method bsplot0 relation_ensemble
#' @rdname bsplot0
bsplot0.relation_ensemble <- function(x, stat = NULL, ds.order = NULL, alg.order = NULL, ...) {
  ## Compute the algorithms x datasets rank matrix, optionally reorder
  ## rows/columns (stat is reordered in lockstep), and delegate to the
  ## matrix method.
  ranks <- bsranking(x)

  if ( !is.null(ds.order) ) {
    ranks <- ranks[, ds.order]
    stat <- stat[, ds.order]
  }

  if ( !is.null(alg.order) ) {
    ranks <- ranks[alg.order, ]
    stat <- stat[alg.order, ]
  }


  bsplot0(ranks, stat = stat, ...)
}


#' @param x A \code{\link{becp}} object
#' @param stat A matrix with statistics to display (rows are
#'   the algorithms, columns the data sets)
#' @param col Colors of the algorithms
#' @param xlab A title for the x axis
#' @param ylab A title for the y axis
#' @param sig.lwd Line width of the significance separator line
#' @param stat.col Colors of the statistics
#' @param ylab.las \code{las} of the labels of the y axis
#' @method bsplot0 matrix
#' @S3method bsplot0 matrix
#' @rdname bsplot0
bsplot0.matrix <- function(x, stat = NULL,
                           col = structure(seq_len(nrow(x)) + 1,
                           names = rownames(x)),
                           ylab = 'Datasets', xlab = 'Podium', sig.lwd = 4,
                           stat.col = NULL, ylab.las = NULL, ...) {

  ## x is a rank matrix: rows = algorithms, columns = data sets
  ## (as produced by bsranking()).
  griddim <- dim(x)
  nalgs <- griddim[1]
  nds <- griddim[2]

  ## Podium table: for each data set, algorithm names ordered by rank
  ## (column i gives the podium from place 1 to place nalgs).
  rtable <- apply(x, 2, function(y)names(sort(y)))


  ### Grid:
  ## One unit-wide rectangle per (podium place, data set); rmargin
  ## leaves a vertical gap between data set rows.
  ## NOTE(review): rheight is computed but never used.
  rmargin <- 0.1
  rwidth <- 1
  rheight <- 1 - 2 * rmargin

  xleft <- (seq_len(nalgs)-1) * rwidth
  xright <- seq_len(nalgs) * rwidth
  ybottom <- rep(rmargin, nalgs)
  ytop <- rep((1-rmargin), nalgs)

  ## Replicate the one-row template for all nds rows:
  gxleft <- rep(xleft, nds)
  gxright <- rep(xright, nds)
  gybottom <- rep(ybottom, nds) + rep(seq_len(nds)-1, each=nalgs)
  gytop <- rep(ytop, nds) + rep(seq_len(nds)-1, each=nalgs)


  ### Significant lines:
  ## A vertical separator between neighboring podium places is drawn
  ## only where the sorted ranks differ; equal neighboring ranks (ties)
  ## suppress the line.  The rightmost border is always suppressed.
  ## NOTE(review): `1:(nalgs-1)` assumes nalgs >= 2.
  sx <- apply(x, 2, sort)
  nosig <- matrix(FALSE, nrow=nalgs, ncol=nds)

  for ( i in 1:(nalgs-1) )
    nosig[i,] <- sx[i,] == sx[i+1,]

  nosig[nalgs,] <- TRUE
  nosig <- as.vector(nosig)

  lx <- gxright[!nosig]
  lytop <- gytop[!nosig]
  lybottom <- gybottom[!nosig]


  ### Statistic bars:
  ## Optional bars inside the rectangles; stat rows are reordered to
  ## match the podium order and scaled by the overall maximum.
  if ( !is.null(stat) ) {
    s <- matrix(NA, nrow=nalgs, ncol=nds)

    for ( i in seq_len(nds) )
      s[,i] <- stat[rtable[,i],i]

    sxleft <- gxleft
    sxright <- rep(seq_len(nalgs)-1,nds) + as.vector((s / max(s) * rwidth))
    sybottom <- gybottom + 0.1
    sytop <- gytop - 0.1
  }


  ### Plot:
  plot(1, type='n', xlim=c(0,nalgs), ylim=c(0,nds),
       axes=FALSE, xlab=xlab, ylab=ylab, ...)

  ## Podium places on the x axis ("1.", "2.", ...), data sets on y:
  axis(1, labels=FALSE)
  mtext(paste(seq_len(nalgs), '.', sep=''),1,
        at=0.5+(seq_len(nalgs)-1), line=1)
  axis(2, at=0.5+(seq_len(nds)-1), labels=colnames(x), las = ylab.las)
  box()

  ## Podium rectangles, colored per algorithm:
  rect(gxleft, gybottom, gxright, gytop,
       col=col[as.vector(rtable)], border=NA)

  if ( !is.null(stat) )
    rect(sxleft, sybottom, sxright, sytop,
         col=stat.col[as.vector(rtable)], border=NA)

  ## Separator lines between significantly different podium places:
  mapply(function(x, yb, yt) {
           lines(rep(x,2), c(yb,yt), lend='butt', lwd=sig.lwd)
         },
         lx, lybottom, lytop)


  invisible(NULL)
}


bsranking <- function(x) {
  ## Rank matrix of a relation ensemble: one column per relation
  ## (data set), rows are algorithms in domain order; ties share the
  ## minimum rank.
  algs <- unlist(relation_domain(x)[[1]])

  sapply(x,
         function(rel) {
           scores <- relation_scores(rel, decreasing = FALSE)
           sort(rank(scores, ties.method = "min"))[algs]
         })
}



#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/datachar-visualizations.R"



#' @param x A \code{\link{DatasetCharacterization}} object
#' @param y Ignored
#' @param lines Draw observation dependency lines
#' @param points Draw observation points
#' @param null.line Draw null line
#' @param null.line.col Null line color
#' @param basis Draw basis characterization of the dataset
#' @param basis.col Color of basis characterization
#' @param ... Ignored
#' @return A \code{\link[ggplot2]{ggplot}} object.
#' @method plot DatasetCharacterization
#' @rdname datachar-visualization
#' @importFrom graphics plot
#' @S3method plot DatasetCharacterization
plot.DatasetCharacterization <- function(x, y = NULL, lines = TRUE, points = TRUE,
                                         null.line = TRUE, null.line.col = gray(0.7),
                                         basis = TRUE, basis.col = NULL, ...) {

  ## Make codetools (R CMD check) happy:
  characteristics <- value <- datasets <- samples <- NULL


  ## Only a single dataset's characterization can be plotted:
  stopifnot(nlevels(x$datasets[, drop = TRUE]) == 1)

  ## Rescale each characteristic to [0, 1]; NAs become the -0.2
  ## sentinel (shown as an extra "NA" tick below, see dcscale0).
  x <- ddply(x, "characteristics", dcscale)
  data <- subset(x, subset = samples != "basis")
  data.basis <- subset(x, subset = samples == "basis")

  ## One line/point group per (re)sample:
  p <- ggplot(data, aes(characteristics, value, group = samples))

  if ( null.line )
    p <- p + geom_hline(aes(yintercept = 0), colour = null.line.col)

  if ( lines )
    p <- p + geom_line()

  if ( points )
    p <- p + geom_point()

  ## Highlight the characterization of the complete ("basis") dataset:
  if ( (nrow(data.basis) > 0) && basis ) {
    if ( is.null(basis.col) )
      basis.col <- default_colors(n = 1)

    p <- p + geom_line(data = data.basis,
                       aes(characteristics, value, group = samples),
                       colour = basis.col)

    p <- p + geom_point(data = data.basis,
                        aes(characteristics, value, group = samples),
                        colour = basis.col)
  }

  ## The -0.2 break is labeled "NA" to match the dcscale0 sentinel:
  p <- p + scale_y_continuous('', breaks = c(-0.2, seq(0, 1, by = 0.2)),
                              labels = c("NA", seq(0, 1, by = 0.2))) +
           scale_x_discrete("Characteristics") +
           theme_update(axis.text.x = theme_text(angle = 90, hjust = 1))

  p
}



plot.DatasetBasisCharacterization <- function(x, y) {
  ## Placeholder -- plotting of basis characterizations is not
  ## implemented; the function returns NULL.
}



### Internal functions: ##############################################

dcscale <- function(x) {
  ## Rescale the `value` column of one characteristic's rows to [0, 1]
  ## (NAs get the -0.2 sentinel, see dcscale0); returns the data frame
  ## with the column replaced.
  x[["value"]] <- dcscale0(x[["value"]])
  x
}



dcscale0 <- function(x) {
  ## Rescale x to [0, 1]; NAs are mapped to the sentinel -0.2, which
  ## plot.DatasetCharacterization displays as an extra "NA" tick.
  rx <- range(x, na.rm = TRUE)

  if ( rx[1] == rx[2] ) {
    ## Constant input: every non-missing value maps to 1.
    ## FIX: the original returned early here, so NAs in a constant
    ## vector were mapped to 1 instead of the -0.2 sentinel; fall
    ## through to the common NA handling below instead.
    sx <- rep(1, length(x))
  }
  else {
    sx <- (x - rx[1]) / (rx[2] - rx[1])
  }

  sx[is.na(x)] <- -0.2

  sx
}

#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/dataset-characteristics.R"
#' @include proto.R
{}



#' Dataset characteristics.
#'
#' "Abstract" proto object with the methods:
#' \tabular{rl}{
#'   \code{requirements()} \tab Ensures that all needed packages
#'     are available\cr
#'   \code{map()} \tab Returns the list of map functions\cr
#'   \code{reduce()} \tab Returns the list of reduce functions
#' }
#' An implementation has to override these methods.
#'
#' Available implementations:
#' \tabular{rlll}{
#'   \code{StatlogCharacteristics} \tab Implementation of the StatLog
#'     project dataset characteristics
#' }
#'
#' @seealso \code{\link{characterize}}, \code{\link{datachar-visualization}}
#' @references
#'   See \emph{Eugster et al. (2010)} in \code{citation("benchmark")}.
#'
#'   R. D. King, C. Feng and A. Sutherland. STATLOG: Comparison of
#'   classification algorithms on large real-world problems. Applied
#'   Artificial Intelligence, 9, 1995.
#' @rdname DatasetCharacteristics
DatasetCharacteristics <- proto(expr = {
  name <- "Generic"

  ## Abstract interface -- concrete implementations (e.g.
  ## StatlogCharacteristics) override these three methods:
  requirements <- function(., ...) NULL

  map <- function(., ...) list()
  reduce <- function(., ...) list()


  ## proto print method (dispatched via print.proto):
  pprint <- function(., ...) {
    cat(.$name, "characteristics\n")
  }

  ## proto summary method: prints the flat list of characteristic
  ## names defined by the reduce tree.
  psummary <- function(., ...) {
    chars <- .$characteristics(which = "reduce", flat = TRUE)

    .$print(...)
    cat(paste(" ", chars, collapse = "\n"), "\n")
  }

  ## Collect the characteristic names defined by the map or reduce
  ## tree; with flat = TRUE the nesting is collapsed into
  ## dot-separated names via unlist().
  characteristics <- function(., which = c("reduce", "map"), flat = TRUE, ...) {
    traverse.tree <- function(tree, level = NULL) {
      l <- lapply(names(tree),
                  function(nodename) {
                    ## NULL nodes mark dropped characteristics:
                    if ( is.null(tree[[nodename]]) )
                      return(NULL)

                    ## Descend into sub-trees; leaves become NA
                    ## placeholders so only their names survive:
                    if ( is(tree[[nodename]], "list") )
                      return(traverse.tree(tree[[nodename]],
                                           c(level, nodename)))

                    NA
                })

      structure(l, names = names(tree))
    }

    which <- match.arg(which)
    ## Call the map()/reduce() method in this proto's environment:
    chars <- traverse.tree(do.call(which, list(), envir = .))

    if ( flat )
      names(unlist(chars))
    else
      chars
  }
})



### Definition helper functions: #####################################

o <- function(...) {
  ## Left-to-right function composition: o(f, g)(x) computes g(f(x)).
  fns <- list(...)

  function(...) {
    apply_next <- function(acc, fn) fn(acc)
    Reduce(apply_next, fns, ...)
  }
}



p <- function(fn, args) {
  ## Descriptor for a "parameterized" reduce step: `fn` is applied to
  ## the values found at the paths listed in `args` (see
  ## reduce.mapped.dataset).
  step <- list(fn = fn, args = args)
  class(step) <- "p"
  step
}



### Implementation -- StatLog characteristics: #######################

#' @rdname DatasetCharacteristics
#' @export
## StatLog project dataset characteristics (King et al., 1995);
## concrete implementation of the DatasetCharacteristics interface.
StatlogCharacteristics <- proto(DatasetCharacteristics, expr = {
  name <- "Statlog"

  ## e1071 provides skewness/kurtosis, entropy provides
  ## entropy.empirical and mi.plugin:
  requirements <- function(., ...) {
    stopifnot(require(e1071))
    stopifnot(require(entropy))

    TRUE
  }

  ## Map step: per-variable (leaf ".") and per-group statistics,
  ## keyed by the dataparts() paths (input/response, variable class).
  map <- function(.) {
    map <- list()

    map$input <- list(n = nrow,
                      attr = ncol,
                      factor = list(attr = ncol,
                                    . = list(nlevels = nlevels,
                                             entropy = o(na.omit, as.integer,
                                                         entropy.empirical))),
                      numeric = list(attr = ncol,
                                     mac = mac,
                                     . = list(skewness = o(na.omit, skewness),
                                              kurtosis = o(na.omit, kurtosis))))

    map$response <- list(factor = list(. = list(cl = nlevels,
                                                entropy = o(na.omit, as.integer,
                                                            entropy.empirical))))

    ## Joint input/response characteristics (canonical correlation,
    ## mutual information):
    map$input2response <- list(numeric2factor = list(fcc = fcc,
                                                     frac1 = frac1),
                               factor2factor = list(. = list(mi = mi)))
    map
  }

  ## Reduce step: aggregate the mapped per-variable values; NULL drops
  ## a value, p(...) nodes combine values from several paths.
  reduce <- function(.) {
    reduce <- list()

    reduce$input <- list(n = identity,
                         attr = identity,
                         factor = list(attr = na0,
                                       . = list(bin = p(binary, list(c("input", "factor", ".", "nlevels"))),
                                                entropy = mean,
                                                nlevels = NULL)),
                         numeric = list(attr = na0,
                                        mac = mean,
                                        . = list(skewness = mean,
                                                 kurtosis = mean)))

    reduce$response <- list(factor = list(. = list(cl = identity,
                                                   entropy = identity)))

    reduce$input2response <- list(numeric2factor = list(fcc = identity,
                                                        frac1 = identity),
                                  factor2factor = list(. = list(mi = mean),
                                                       enattr = p(enattr, list(c("response", "factor", ".", "entropy"),
                                                                               c("input2response", "factor2factor", ".", "mi"))),
                                                       nsratio = p(nsratio, list(c("input", "factor", ".", "entropy"),
                                                                                 c("input2response", "factor2factor", ".", "mi")))))
    reduce
  }
})

## Tag the proto object so characterize() accepts it (see the
## is(y, "characteristics") check):
StatlogCharacteristics <- structure(StatlogCharacteristics,
                                    class = c("characteristics",
                                              class(StatlogCharacteristics)))



### Implementation of needed characteristics: ########################

na0 <- function(x) {
  ## Replace NA entries with 0 (used as an NA-safe reduce step).
  ## FIX: replacement indexing instead of ifelse(), which is meant for
  ## vectorized conditions and strips attributes of its input.
  x[is.na(x)] <- 0
  x
}

enc <- function(x) {
  ## Binary indicator ("one-hot") encoding of a factor: one row per
  ## observation, one column per level, with a single 1 per row.
  ind <- matrix(0, nrow = length(x), ncol = nlevels(x))
  ind[cbind(seq(length(x)), as.numeric(x))] <- 1
  ind
}

mac <- function(x) {
  ## Multiple correlation of each column with all other columns
  ## (sqrt of the R^2 of regressing column j on the rest); NA for a
  ## single-column input.
  x <- as.matrix(x)
  p <- ncol(x)

  if ( p == 1 )
    return(NA)

  drop(sapply(seq_len(p),
              function(j)
              sqrt(summary(lm(x[, j] ~ x[, -j]))$r.squared)))
}

fcc <- function(x, y) {
  ## First canonical correlation between the numeric inputs and the
  ## one-hot encoded factor response.
  xm <- as.matrix(x)
  yv <- unlist(y)
  max(cancor(xm, enc(yv))$cor)
}

frac1 <- function(x, y) {
  ## Fraction of the total canonical-discriminant variation explained
  ## by the first canonical correlation (eigenvalues lambda derived
  ## from the canonical correlations).
  xm <- as.matrix(x)
  yv <- unlist(y)

  cc <- cancor(xm, enc(yv))$cor
  lambda <- cc^2 / (1 - cc^2)

  max(lambda) / sum(lambda)
}

mi <- function(x, y) {
  ## Mutual information of two factors via the plug-in estimator
  ## (mi.plugin from the entropy package).
  joint <- cbind(as.integer(x),
                 as.integer(y))
  mi.plugin(joint)
}

binary <- function(x, ...) {
  ## Number of binary attributes: counts the entries of the nlevels
  ## vector equal to 2; na0() maps a missing count to 0.
  n.binary <- sum(x == 2)
  na0(n.binary)
}

enattr <- function(x, y, ...) {
  ## Equivalent number of attributes: ratio of class entropy to mean
  ## mutual information (combined via a p() reduce node).
  x / y
}

nsratio <- function(x, y, ...) {
  ## Noise-signal ratio: (attribute entropy - mutual information)
  ## relative to the mutual information.
  (x - y) / y
}
#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/dataset-characterization.R"
#' @include dataset.R
#' @include dataset-characteristics.R
{}



#' Implements a map/reduce approach to characterize a dataset with
#' given dataset characteristics.
#'
#' @param x A \code{\link[=as.dataset]{dataset}} object
#' @param y A \code{\link{DatasetCharacteristics}} object
#' @param verbose Show information during execution
#' @param index Characterize only a subset
#' @param ... Ignored
#' @return The characterization matrix (1 row and as many columns as
#'   characteristics)
#' @examples
#'   data("iris")
#'   ds <- as.dataset(Species ~ ., iris)
#'   characterize(ds, StatlogCharacteristics)
#' @title Dataset characterization framework
#' @seealso \code{\link{datachar-visualization}}
#' @references
#'   See \emph{Eugster et al. (2010)} in \code{citation("benchmark")}.
#' @export
characterize <- function(x, y, verbose = FALSE, index = NULL, ...) {
  ## Characterize a dataset (optionally only the observations in
  ## `index`) with the given characteristics via the map/reduce
  ## framework; returns a one-row characterization matrix.
  stopifnot(is(x, 'dataset'))
  stopifnot(is(y, 'characteristics'))

  ## Ensure the packages the characteristics need are available:
  stopifnot(y$requirements())

  mapped <- map(x, y, verbose = verbose, index = index)
  reduced <- reduce(mapped, y, verbose = verbose)

  as.matrix(as.data.frame(reduced))
}



### Characterization map/reduce framework: ###########################

map <- function(x, y, ...) {
  ## S3 generic: "map" step of the dataset characterization framework;
  ## see map.dataset.
  UseMethod("map")
}


map.dataset <- function(x, y, verbose = TRUE, index = NULL, ...) {
  stopifnot(is(y, 'characteristics'))

  ## Recursively walk the map-function tree; `level` is the path of
  ## node names from the root and doubles as the dataparts() selector
  ## (e.g. c("input", "numeric")).
  traverse.tree <- function(tree, level = NULL) {
    l <- lapply(names(tree),
                function(nodename) {
                  ## Inner nodes: descend with the extended path.
                  if ( is(tree[[nodename]], 'list') )
                    return(traverse.tree(tree[[nodename]],
                                         c(level, nodename)))

                  if ( verbose )
                    cat(sprintf('map: %s -> %s\n', paste(level, collapse = '.'),
                                                   nodename))

                  ## Data parts matching the current path, optionally
                  ## restricted to the observations in `index`:
                  d <- x$dataparts(level, index = index)

                  ## No variables of this kind in the dataset:
                  if ( length(d) == 0 )
                    return(NA)

                  ## Apply the leaf's map function to each data part:
                  sapply(d, function(x) do.call(tree[[nodename]], unname(x)))
              })

    structure(l, names = names(tree))
  }

  structure(traverse.tree(y$map()),
            class = c('mapped.dataset', 'list'),
            name = attr(y, 'name'))
}



reduce <- function(x, y, ...) {
  ## S3 generic: "reduce" step of the dataset characterization
  ## framework; see reduce.mapped.dataset.
  UseMethod("reduce")
}


reduce.mapped.dataset <- function(x, y, verbose = TRUE, ...) {
  stopifnot(is(y, 'characteristics'))

  ## Walk the reduce-function tree and rewrite the mapped values in
  ## `x` in place (via <<-).  Node order matters: p() nodes may read
  ## values produced by earlier reductions along other paths.
  traverse.tree <- function(tree, level = NULL) {
    lapply(names(tree),
           function(nodename) {
             if ( is(tree[[nodename]], 'list') )
               return(traverse.tree(tree[[nodename]],
                                    c(level, nodename)))

             if ( verbose )
               cat(sprintf('reduce: %s\n', paste(c(level, nodename), collapse = '.')))

             f <- tree[[nodename]]

             ## Plain function: reduce the mapped values at this path.
             if ( is.function(f) )
               x[[c(level, nodename)]] <<- f(x[[c(level, nodename)]])

             ## NULL: drop this (intermediate) characteristic.
             if ( is.null(f) )
               x[[c(level, nodename)]] <<- NULL

             ## p() descriptor: apply f$fn to the values at the paths
             ## listed in f$args (cross-path combination).
             if ( is(f, 'p') )
               x[[c(level, nodename)]] <<- do.call(f$fn,
                                                   lapply(f$args,
                                                          function(.) x[[.]]))
           })
  }

  traverse.tree(y$reduce())

  structure(x, class = c('reduced.dataset', class(x)),
            name = attr(y, 'name'))
}


#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/dataset.R"


#' A dataset abstraction to simplify the calculation of dataset
#' characteristics.
#' @param formula A symbolic description of the dataset
#' @param data The data frame
#' @param ordered.as.factor Interpret ordered factors as factors
#' @param integer.as.numeric Interpret integer variables as numerics
#' @return A proto object with an additional S3 class \code{dataset}
#' @examples
#'   data("iris")
#'   ds <- as.dataset(Species ~ ., iris)
#'   ds
#'
#'   str(ds$response())
#'   str(ds$dataparts(c("input", "numeric")))
#' @export
as.dataset <- function(formula, data, ordered.as.factor = TRUE,
                       integer.as.numeric = TRUE) {

  call <- match.call()


  ## Compute dataset structure:
  ## `details` builds, for the variables `x` tagged as `which`
  ## ("input"/"response"), a nested list: the variable names as a
  ## group, then per variable class, then each single variable under
  ## the "." node.  This structure mirrors the map/reduce trees of the
  ## characteristics.
  details <- function(x, which) {
    classes <- sapply(data[, x, drop = FALSE],
                      function(var) {
                        ## Optionally flatten ordered factors and
                        ## integers into the two canonical classes:
                        if ( is.ordered(var) & ordered.as.factor )
                          return("factor")

                        if ( is.integer(var) & integer.as.numeric )
                          return("numeric")

                        class(var)
                      })


    list(list(structure(list(x), names = which)),
         lapply(split(classes, classes),
                function(x)
                list(list(structure(list(names(x)), names = which)),
                     structure(list(list(lapply(names(x),
                                                function(.)
                                                structure(list(.), names = which)))),
                               names = "."))))
  }

  ## Joint input/response structure: all combinations of input and
  ## response variable classes (named e.g. "numeric2factor") with the
  ## corresponding variable pair grids.
  i2r.details <- function(x, y) {
    grid <- function(...)
      expand.grid(..., KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)

    grid.details <- function(gx, gy) {
      varx <- x[[2]][[gx]][[c(1, 1, 1)]]
      vary <- y[[2]][[gy]][[c(1, 1, 1)]]

      var.grid <- grid(input = varx,
                       response = vary)
      var.grid <- apply(var.grid, 1, list)
      var.grid <- lapply(var.grid, "[[", 1)
      var.grid <- lapply(var.grid, as.list)

      list(list(list(input = varx, response = vary)),
           structure(list(list(var.grid)), names = "."))
    }

    class.grid <- grid(names(x[[2]]), names(y[[2]]))

    list(list(list(input = x[[c(1, 1, 1)]],
                   response = y[[c(1, 1, 1)]])),
         structure(apply(class.grid, 1,
                         function(x)
                         grid.details(x[[1]], x[[2]])),
                   names = apply(class.grid, 1, paste, collapse = "2")))
  }

  ## Split the model formula into response and input variable names:
  formula <- terms(formula, data = data)
  variables <- as.character(as.list(attr(formula, "variables"))[-1])
  response <- variables[attr(formula, "response")]
  input  <- setdiff(variables, response)

  input.details <- details(input, "input")
  response.details <- details(response, "response")

  attributes(formula) <- NULL


  ## Dataset proto object:
  ds <- proto(expr = {

    ## Variables:
    .data = data
    .formula <- as.formula(formula)
    .dataname <- deparse(call$data)
    .call <- call

    ## Precomputed structure tree used by the variables()/dataparts()
    ## getters below:
    .structure <- list(list(list(variables)),
                       list(input = input.details,
                            response = response.details,
                            input2response = i2r.details(input.details,
                                                         response.details)))


    ## Getter:
    ## Select variable names from .structure along the path `x`,
    ## e.g. c("input", "numeric").
    ## NOTE(review): builds the accessor expression as a string and
    ## eval(parse())s it; works, but a [[ ]] walk would be safer.
    variables <- function(., x = NULL) {
      m <- paste(sprintf("[[2]]$%s", x), collapse = "")
      e <- "[[1]]"
      g <- sprintf(".$.structure%s%s", m, e)

      eval(parse(text = g))
    }

    ## Data columns for the structure path `x`, optionally restricted
    ## to the rows in `index`; single-variable "." paths are returned
    ## with drop = TRUE.
    dataparts <- function(., x = NULL, index = NULL) {
      vars <- .$variables(x)

      if ( is.null(index) )
        index <- seq(length = nrow(.$.data))

      d <- lapply(vars,
                  function(v)
                  structure(lapply(v,
                                   function(i)
                                   .$.data[index, i, drop = ('.' %in% x)]),
                            names = names(v)))
      d
    }

    ## Plain formula without the terms() attributes:
    formula <- function(.) {
      f <- .$.formula
      attributes(f) <- NULL
      f
    }

    dataname <- function(.) {
      .$.dataname
    }

    ## Convenience accessors for the input/response/complete data:
    input <- function(., index = NULL) {
      .$dataparts("input", index = index)[[c(1, 1)]]
    }

    response <- function(., index = NULL) {
      .$dataparts("response", index = index)[[c(1, 1)]]
    }

    data <- function(., index = NULL) {
      .$dataparts(index = index)[[c(1, 1)]]
    }


    ## Default proto methods:
    pprint <- function(., ...) {
      cat("Dataset object:\n")
      cat(.$dataname(), "-> ")
      print(.$formula())
    }
  })


  structure(ds, class = c("dataset", class(ds)))
}



#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/proto.R"



print.proto <- function(x, ...) {
  ## Delegate printing to the proto object's own pprint method.
  x$pprint(...)
}



pprint <- function (x, ...) {
  ## Default pprint: show the object's members as a plain list.
  members <- as.list(x)
  print(members, ...)
}



#' @S3method summary proto
#' @nord
summary.proto <- function(object, ...) {
  ## Delegate summarizing to the proto object's own psummary method.
  object$psummary(...)
}

#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/testprocedure.R"



### Test procedure infrastructure: ###################################

#' Infrastructure for test procedures.
#'
#' Available \code{TestProcedure} and corresponding \code{TestResult}
#' implementations:
#' \tabular{rl}{
#'   \code{FriedmanTest} \tab Test procedure based on the
#'     non parametric friedman test\cr
#'   \code{LmerTest} \tab Test procedure based on a mixed
#'     effects model (function \code{lmer} in package \code{lme4})\cr
#'   \code{PercintTest} \tab Test procedure based on the
#'     bootstrap percentile intervals
#' }
#'
#' @references
#'   See \emph{Eugster and Leisch (2008)} and \emph{Eugster et al. (2008)}
#'   in \code{citation("benchmark")}.
#' @rdname TestProcedure
TestProcedure <- proto(expr = {
  ## Abstract interface for test procedures; concrete implementations
  ## (FriedmanTest, LmerTest, PercintTest below) override these stubs.
  new <- function(., ...) NULL
  requirements <- function(., ...) NULL
  globalTest <- function(., ...) NULL
  pairwiseTest <- function(., ...) NULL
})



TestResult <- proto(expr = {
  ## Abstract interface for test results; accessors return NULL unless
  ## overridden by a concrete result object.
  new <- function(., ...) NULL
  getConfint <- function(., ...) NULL
  getPvalue <- function(., ...) NULL
  getStatistic <- function(., ...) NULL
})



### Implementation -- Friedman test: #################################

#' @rdname TestProcedure
#' @export
FriedmanTest <- proto(TestProcedure, expr = {

  requirements <- function(.) {
    ## Attach the packages whose functions are called unqualified below
    ## (friedman_test, symmetry_test, trafo from coin; contrMat from
    ## multcomp). 'require' (attach) rather than requireNamespace is
    ## deliberate here.
    stopifnot(require("coin"))
    stopifnot(require("multcomp"))

    TRUE
  }

  new <- function(., data) {
    ## This procedure handles exactly one dataset and one performance
    ## measure at a time:
    stopifnot(nlevels(data$datasets[, drop = TRUE]) == 1)
    stopifnot(nlevels(data$performances[, drop = TRUE]) == 1)

    .$proto(data = data)
  }

  globalTest <- function(.) {
    ## Global hypothesis "any difference between the algorithms", with
    ## the benchmark samples as blocks.
    t <- friedman_test(value ~ algorithms | samples, data = .$data)

    FriedmanGlobalTestResult$new(t)
  }

  pairwiseTest <- function(.) {
    ## All pairwise algorithm comparisons: rank-based symmetry test
    ## with Tukey all-pair contrasts (see Eugster and Leisch (2008) in
    ## citation("benchmark") for the methodology).
    t <- symmetry_test(value ~ algorithms | samples, data = .$data,
                       alternative = "two.sided",
                       teststat = "max",
                       xtrafo = function(d) {
                         trafo(d, factor_trafo = function(x)
                               model.matrix(~ x - 1) %*% t(contrMat(table(x), "Tukey")))
                       },
                       ytrafo = function(d) {
                         trafo(d, numeric_trafo = rank, block = .$data$samples)
                       })

    FriedmanPairwiseTestResult$new(t)
  }
})



FriedmanGlobalTestResult <- proto(TestResult, expr = {
  ## Wraps the coin test object of the global Friedman test; pvalue()
  ## and statistic() are coin generics.
  new <- function(., test) {
    .$proto(test = test)
  }

  getPvalue <- function(.) {
    pvalue(.$test)
  }

  getStatistic <- function(.) {
    statistic(.$test)
  }
})



FriedmanPairwiseTestResult <- proto(TestResult, expr = {
  ## Wraps the coin symmetry-test object of the pairwise Friedman
  ## comparisons.
  new <- function(., test) {
    .$proto(test = test)
  }

  getPvalue <- function(.) {
    ## With only two algorithms there is a single comparison (format it
    ## ourselves); otherwise report single-step adjusted p-values.
    if ( nlevels(.$test@statistic@x$algorithms) == 2 )
      .$.pvalue()
    else
      pvalue(.$test, method = "single-step")
  }

  getStatistic <- function(.) {
    if ( nlevels(.$test@statistic@x$algorithms) == 2 )
      .$.statistic()
    else
      statistic(.$test, type = "linear")
  }

  ## Shared formatting for the two-algorithms case: a one-cell matrix
  ## whose row name is the "b - a" comparison label (factored out of
  ## the previously duplicated .pvalue/.statistic bodies).
  .labeledMatrix <- function(., value) {
    ret <- as.matrix(value)
    rownames(ret) <- paste(rev(levels(.$test@statistic@x$algorithms)),
                           collapse = " - ")
    colnames(ret) <- ""

    ret
  }

  .pvalue <- function(.) {
    .$.labeledMatrix(pvalue(.$test))
  }

  .statistic <- function(.) {
    .$.labeledMatrix(statistic(.$test))
  }
})



### Implementation -- Lmer test: #####################################

#' @rdname TestProcedure
#' @export
LmerTest <- proto(TestProcedure, expr = {

  requirements <- function(.) {
    ## 'lmer' (lme4) and 'glht'/'mcp' (multcomp) are called unqualified
    ## below, hence require (attach) rather than requireNamespace.
    stopifnot(require("lme4"))
    stopifnot(require("multcomp"))

    TRUE
  }

  new <- function(., data) {
    ## One performance measure at a time; unlike FriedmanTest, several
    ## datasets are allowed and enter the model explicitly.
    stopifnot(nlevels(data$performances[, drop = TRUE]) == 1)

    ## Single dataset: random intercept per benchmark sample.
    ## Several datasets: algorithm x dataset interaction plus samples
    ## nested within datasets as random effects.
    model <- {
      if ( nlevels(data$datasets[, drop = TRUE]) == 1 )
        lmer(value ~ algorithms + (1 | samples), data = data)
      else
        lmer(value ~ algorithms * datasets + (1 | datasets/samples), data = data)
    }

    .$proto(model = model)
  }

  globalTest <- function(.) {
    ## Global test on all non-intercept fixed effects (contrast matrix
    ## K = identity without the intercept row).
    K <- diag(length(fixef(.$model)))[-1, , drop = FALSE]
    rownames(K) <- names(fixef(.$model))[-1]

    t <- glht(.$model, linfct = K)

    LmerGlobalTestResult$new(t)
  }

  pairwiseTest <- function(.) {
    ## All pairwise algorithm comparisons (Tukey contrasts):
    t <- glht(.$model, linfct = mcp(algorithms = "Tukey"))

    LmerPairwiseTestResult$new(t)
  }
})



LmerGlobalTestResult <- proto(TestResult, expr = {
  ## Wraps the multcomp 'glht' object of the global mixed-effects test;
  ## both accessors evaluate the global chi-squared test.
  new <- function(., test) {
    .$proto(test = test)
  }

  getPvalue <- function(.) {
    as.numeric(summary(.$test, test = Chisqtest())$test$pvalue)
  }

  getStatistic <- function(.) {
    ## NOTE(review): the statistic is read from component 'fstat' of
    ## the Chisqtest summary -- confirm against multcomp that this is
    ## the intended component for the chi-squared case.
    as.numeric(summary(.$test, test = Chisqtest())$test$fstat)
  }
})



LmerPairwiseTestResult <- proto(TestResult, expr = {
  ## Wraps the multcomp 'glht' object of the pairwise comparisons.
  new <- function(., test) {
    .$proto(test = test)
  }

  ## Shared formatting (factored out of the previously duplicated
  ## getPvalue/getStatistic bodies): a column matrix with the
  ## comparison labels as row names and an empty column name.
  .labeledMatrix <- function(., values, labels) {
    ret <- as.matrix(values)
    rownames(ret) <- labels
    colnames(ret) <- ""

    ret
  }

  getPvalue <- function(.) {
    s <- summary(.$test)
    .$.labeledMatrix(s$test$pvalues, names(s$test$tstat))
  }

  getStatistic <- function(.) {
    s <- summary(.$test)
    .$.labeledMatrix(s$test$tstat, names(s$test$tstat))
  }

  getConfint <- function(., significance) {
    ## NOTE(review): 'level = significance' passes e.g. 0.05 directly
    ## as the confidence level; 1 - significance looks intended --
    ## confirm against the published method before changing.
    confint(.$test, level = significance)$confint
  }
})



### Implementation -- Percentile interval test: ######################

#' @rdname TestProcedure
#' @export
PercintTest <- proto(TestProcedure, expr = {
  ## Test procedure based on bootstrap percentile intervals; needs no
  ## extra packages and provides no global test.
  requirements <- function(.) {
    TRUE
  }

  new <- function(., data) {
    ## One dataset and one performance measure at a time:
    stopifnot(nlevels(data$datasets[, drop = TRUE]) == 1)
    stopifnot(nlevels(data$performances[, drop = TRUE]) == 1)

    .$proto(data = data)
  }

  pairwiseTest <- function(.) {
    PercintPairwiseTestResult$new(data = .$data)
  }
})


PercintPairwiseTestResult <- proto(TestResult, expr = {
  ## Pairwise comparison via bootstrap percentile intervals; no test
  ## statistic or p-value is available, only interval differences.
  new <- function(., data) {
    .$proto(data = data)
  }

  getConfint <- function(., significance) {
    ## Pairwise differences of the per-algorithm percentile intervals.
    ## NOTE(review): lwr/upr are differenced component-wise
    ## (lwr1 - lwr2, upr1 - upr2), not as a conservative interval
    ## difference -- this mirrors the original implementation; confirm
    ## against the method's publication before changing.
    pci <- .$percint(significance)
    pairs <- t(combn(rownames(pci), 2))

    ret <- matrix(nrow = nrow(pairs), ncol = 3)
    rownames(ret) <- apply(pairs, 1, paste, collapse = " - ")
    colnames(ret) <- c("Estimate", "lwr", "upr")

    ## Vectorized over all pairs (replaces an element-wise loop); no
    ## point estimate exists for this procedure:
    ret[, 1] <- NA
    ret[, 2] <- pci[pairs[, 1], 1] - pci[pairs[, 2], 1]
    ret[, 3] <- pci[pairs[, 1], 2] - pci[pairs[, 2], 2]

    ret
  }

  percint <- function(., significance) {
    ## One percentile interval per algorithm over its bootstrap values.
    ci <- t(sapply(split(.$data$value, .$data$algorithms), .$.pci, significance))
    structure(ci, class = c("percint", class(ci)))
  }

  .pci <- function(., x, significance) {
    ## Empirical (significance, 1 - significance) percentile interval
    ## from the sorted bootstrap values.
    s <- sort(x)
    B <- length(x)

    c(lwr = s[ceiling(B * significance)],
      upr = s[ceiling(B * (1 - significance))])
  }
})



plot.percint <- function(x, y = NULL, ...) {
  ## Plot percentile intervals by reusing multcomp's (internal)
  ## confidence-interval plot: wrap the interval matrix into a
  ## glht-confint-like structure with an empty estimate column.
  ## NOTE(review): relies on the non-exported
  ## multcomp:::plot.confint.glht.
  stopifnot(require("multcomp"))

  fake <- list(confint = cbind(Estimate = NA, x))
  multcomp:::plot.confint.glht(fake, main = NA, xlab = NA, ...)
}
#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/testres-analysis.R"


contsigseq <- function(x, significance) {
  ## Start index of the trailing run of significant p-values in a
  ## (sequential) test result; NA when the last test is not
  ## significant.

  ## Make codetools (R CMD check) happy:
  tests <- value <- NULL

  x <- subset(x, subset = tests == "pvalue", select = value)
  x <- x < significance

  ## Walk backwards over the trailing significant tests. The 'i >= 1'
  ## guard fixes a crash in the original ('x[0]' made the while
  ## condition zero-length when every value was significant); isTRUE
  ## additionally treats NA p-values as not significant instead of
  ## erroring.
  i <- length(x)
  while ( i >= 1 && isTRUE(x[i]) )
    i <- i - 1

  if ( i == length(x) )
    return(NA)

  i + 1
}


#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/testres-visualizations.R"


#' Visualization methods for (sequential) test results.
#' @param x An \code{\link{TestResult}} object
#' @param ... Ignored.
#' @return A \code{\link[ggplot2]{ggplot}} object.
#' @method plot TestResult
#' @rdname testres-visualization
#' @importFrom graphics plot
#' @S3method plot TestResult
plot.TestResult <- function(x, ...) {
  ## Line plot of each monitored test measure over the benchmark
  ## samples, faceted by test and dataset.

  ## Make codetools (R CMD check) happy:
  value <- tests <- datasets <- samples <- NULL


  p <- ggplot(x, aes(samples, value)) +
    facet_grid(tests ~ datasets, scale = "free_y") +
    geom_line()

  p
}
#line 1 "d:/RCompile/CRANpkg/local/2.13/benchmark/R/warehouse.R"
#' @include proto.R
{}



#' Benchmark experiment warehouse.
#'
#' \code{warehouse} is the constructor function for a benchmark experiment
#' warehouse.
#'
#' A benchmark experiment warehouse collects all data during a benchmark
#' experiment (\code{\link{benchmark}} is a proof-of-concept implementation).
#' Different views (based on the collected data) provide cleaned parts
#' of the data for further analyses.
#'
#' Implemented views:
#' \enumerate{
#'   \item \code{viewAlgorithmPerformance()}: returns a data frame (S3
#'   class \code{AlgorithmPerformance}) with columns \code{samples,
#'   datasets, algorithms, performances} (factors with the corresponding
#'   levels) and the column \code{value} with the corresponding
#'   performance value.
#'
#'   \item \code{viewDatasetCharacterization()}: returns a data frame
#'   (S3 class \code{DatasetCharacterization}) with columns \code{samples,
#'   datasets, characteristics, value}.
#'
#'   \item \code{viewDatasetBasisCharacterization()}: returns a data
#'   frame (S3 class \code{DatasetBasisCharacterization}) with columns
#'   \code{datasets, characteristics, value}.
#'
#'   \item \code{viewTestResult()}: returns a data frame (S3 class
#'   \code{TestResult}) with columns \code{samples, datasets, tests, value}.
#' }
#'
#' @param datasets Names of the datasets
#' @param B Number of benchmark runs
#' @param algorithms Names of the candidate algorithms
#' @param performances Names of the performance measures
#' @param characteristics Names of the dataset characteristics
#' @param tests Names of the monitored test measures
#' @return Proto object with different views (see Details).
#' @seealso \code{\link{benchmark}}, \code{\link{as.warehouse}}
#' @aliases AlgorithmPerformance DatasetCharacterization
#'   DatasetBasisCharacterization TestResult
#' @export
warehouse <- function(datasets, B,
                      algorithms = NULL,
                      performances = NULL,
                      characteristics = NULL,
                      tests = NULL) {

  ## Recycle B so every dataset has its own number of benchmark runs:
  if ( length(datasets) != length(B) )
    B <- rep(B, length(datasets))


  ## One DatasetList (the raw storage arrays) per dataset:
  a <- mapply(DatasetList, datasets, B,
              MoreArgs = list(algorithms = algorithms,
                              performances = performances,
                              characteristics = characteristics,
                              tests = tests),
              SIMPLIFY = FALSE)
  names(a) <- datasets


  ## Proto object and default data views:
  a <- proto(data = a)

  a$meta <- list(datasets = datasets, B = B,
                 algorithms = algorithms,
                 performances = performances,
                 characteristics = characteristics,
                 tests = tests,
                 algorithm_colors = default_colors(algorithms = algorithms))


  ## Attach only the views whose dimensions were declared. Scalar
  ## condition, hence '&&' (was '&'; now consistent with DatasetList):
  if ( !is.null(algorithms) && !is.null(performances) ) {
    setViewAlgorithmPerformance(a)

    if ( !is.null(tests) )
      setViewTestResult(a)
  }

  if ( !is.null(characteristics) ) {
    setViewDatasetCharacterization(a)
    setViewDatasetBasisCharacterization(a)
  }


  structure(a, class = c("warehouse", class(a)))
}



### Data views: ######################################################

setViewAlgorithmPerformance <- function(object) {
  ## Attach the 'viewAlgorithmPerformance' method to a warehouse proto
  ## object. The view returns a long-format data frame (S3 class
  ## AlgorithmPerformance) with columns samples, datasets, algorithms,
  ## performances, value.

  object$viewAlgorithmPerformance <- function(.,
                                              datasets = NULL,
                                              algorithms = NULL,
                                              performances = NULL) {

    ## NULL arguments mean "everything the warehouse knows about":
    if ( is.null(datasets) )
      datasets <- .$meta$datasets

    if ( is.null(algorithms) )
      algorithms <- .$meta$algorithms

    if ( is.null(performances) )
      performances <- .$meta$performances


    ## Slice the requested algorithms/performances out of each
    ## dataset's (samples x algorithms x performances) storage array:
    view <- lapply(.$data[datasets],
                   function(ds)
                   ds$AlgorithmPerformance[,
                                           algorithms,
                                           performances,
                                           drop = FALSE])
    ## 'varname' labels the list level in the melted result --
    ## presumably read by melt(); TODO confirm against the reshape
    ## package version in use.
    attr(view, "varname") <- "datasets"

    view <- melt(view)
    view$datasets <- as.factor(view$datasets)
    view$samples <- as.factor(view$samples)

    view <- view[, c("samples", "datasets", "algorithms", "performances", "value")]


    structure(view, class = c("AlgorithmPerformance", class(view)),
              algorithm_colors = .$meta$algorithm_colors)
  }

  invisible(NULL)
}



setViewDatasetCharacterization <- function(object) {
  ## Attach the 'viewDatasetCharacterization' method: a long-format
  ## data frame (S3 class DatasetCharacterization) with columns
  ## samples, datasets, characteristics, value.

  object$viewDatasetCharacterization <- function(.,
                                                 datasets = NULL,
                                                 characteristics = NULL,
                                                 basis = TRUE) {

    ## NULL arguments mean "everything the warehouse knows about":
    if ( is.null(datasets) )
      datasets <- .$meta$datasets

    if ( is.null(characteristics) )
      characteristics <- .$meta$characteristics


    ## Slice the requested characteristics out of each dataset's
    ## storage array:
    view <- lapply(.$data[datasets],
                   function(ds)
                   ds$DatasetCharacterization[,
                                              characteristics,
                                              drop = FALSE])
    attr(view, "varname") <- "datasets"

    view <- melt(view)
    view$datasets <- as.factor(view$datasets)
    view$samples <- as.factor(view$samples)

    view <- view[, c("samples", "datasets", "characteristics", "value")]

    ## Optionally append the characterization of the unsampled data as
    ## pseudo-sample "basis"; rbind matches the differing column order
    ## by name.
    if ( basis ) {
        basis <- .$viewDatasetBasisCharacterization(datasets = datasets,
                                                    characteristics = characteristics)
        basis$samples <- "basis"
        view <- rbind(view, basis)
    }


    structure(view, class = c("DatasetCharacterization", class(view)))
  }

  invisible(NULL)
}



setViewDatasetBasisCharacterization <- function(object) {
  ## Attach the 'viewDatasetBasisCharacterization' method: like
  ## viewDatasetCharacterization but for the single characterization of
  ## the unsampled ("basis") datasets, hence no samples column.

  object$viewDatasetBasisCharacterization <- function(.,
                                                      datasets = NULL,
                                                      characteristics = NULL) {

    ## NULL arguments mean "everything the warehouse knows about":
    if ( is.null(datasets) )
      datasets <- .$meta$datasets

    if ( is.null(characteristics) )
      characteristics <- .$meta$characteristics


    view <- lapply(.$data[datasets],
                   function(ds)
                   ds$DatasetBasisCharacterization[,
                                                   characteristics,
                                                   drop = FALSE])
    attr(view, "varname") <- "datasets"

    view <- melt(view)
    view$datasets <- as.factor(view$datasets)

    view <- view[, c("datasets", "characteristics", "value")]


    structure(view, class = c("DatasetBasisCharacterization", class(view)))
  }

  invisible(NULL)
}



setViewTestResult <- function(object) {
  ## Attach the 'viewTestResult' method: a long-format data frame (S3
  ## class TestResult) with columns samples, datasets, tests, value.

  object$viewTestResult <- function(.,
                                    datasets = NULL,
                                    tests = NULL) {

    ## NULL arguments mean "everything the warehouse knows about":
    if ( is.null(datasets) )
      datasets <- .$meta$datasets

    if ( is.null(tests) )
      tests <- .$meta$tests


    view <- lapply(.$data[datasets],
                   function(ds)
                   ds$TestResult[,
                                 tests,
                                 drop = FALSE])
    attr(view, "varname") <- "datasets"

    view <- melt(view)
    view$datasets <- as.factor(view$datasets)

    view <- view[, c("samples", "datasets", "tests", "value")]


    structure(view, class = c("TestResult", class(view)))
  }

  invisible(NULL)
}



### Internal data structures: ########################################

WarehouseArray <- function(B, ..., class) {
  ## Allocate a (samples x ...) array of NAs to collect benchmark
  ## results in. '...' are named dimension levels (e.g.
  ## algorithms = c("a", "b")) and 'class' the S3 class tag.
  d <- list(...)

  ## vapply instead of sapply: type-stable even for empty '...'.
  dim <- c(B, vapply(d, length, integer(1)))
  dimnames <- c(list(samples = NULL), d)

  ## NA_integer_ start value; later assignments of doubles coerce the
  ## whole array as needed.
  a <- array(NA_integer_, dim = dim, dimnames = dimnames)

  structure(a, class = c(class, class(a)))
}



AlgorithmPerformanceArray <- function(B, algorithms, performances) {
  ## (samples x algorithms x performances) storage for raw performance
  ## values.
  WarehouseArray(B, algorithms = algorithms, performances = performances,
                 class = "AlgorithmPerformanceArray")
}



DatasetCharacterizationArray <- function(B, characteristics) {
  ## (samples x characteristics) storage for dataset characterizations.
  WarehouseArray(B, characteristics = characteristics,
                 class = "DatasetCharacterizationArray")
}



TestResultArray <- function(B, tests) {
  ## (samples x tests) storage for monitored test measures.
  WarehouseArray(B, tests = tests,
                 class = "TestResultArray")
}



DatasetList <- function(dataset, B,
                        algorithms = NULL,
                        performances = NULL,
                        characteristics = NULL,
                        tests = NULL) {

  ## Collect only the per-dataset storage arrays that were requested
  ## (i.e., whose dimension levels are non-NULL):
  arrays <- list()

  if ( !is.null(algorithms) && !is.null(performances) )
    arrays$AlgorithmPerformance <-
      AlgorithmPerformanceArray(B, algorithms, performances)

  if ( !is.null(characteristics) ) {
    arrays$DatasetCharacterization <-
      DatasetCharacterizationArray(B, characteristics)
    ## The "basis" characterization exists exactly once per dataset:
    arrays$DatasetBasisCharacterization <-
      DatasetCharacterizationArray(1, characteristics)
  }

  if ( !is.null(tests) )
    arrays$TestResult <- TestResultArray(B, tests)


  structure(arrays, class = c("DatasetList", class(arrays)),
            dataset = dataset)
}



### Internal functions: ##############################################

default_colors <- function(n = length(algorithms), algorithms = NULL) {
  ## Evenly spaced HCL colors replicating ggplot2's default discrete
  ## hue scale (based on ggplot2:::ScaleHue), so benchmark plots match
  ## ggplot2 output; optionally named by algorithm.
  h <- c(0, 360) + 15
  luminance <- 65   # was 'l', shadowing nothing but cryptic
  chroma <- 100     # was 'c', which shadowed base::c

  start <- 1
  direction <- -1

  ## '%%' binds tighter than '*': ((x + start) %% 360) * direction.
  rotate <- function(x) (x + start) %% 360 * direction

  ## Full-circle hue range: drop one slot so first and last hue differ.
  if ( (diff(h) %% 360) < 1 ) {
    h[2] <- h[2] - 360 / n
  }

  ## 'length.out' spelled out (was 'length =', a partial match).
  structure(grDevices::hcl(h = rotate(seq(h[1], h[2], length.out = n)),
                           c = chroma, l = luminance),
            names = algorithms)
}


