"AutoD2" <-
function(series, lags=c(1, nrow(series)/3), step=1, plotit=TRUE, add=FALSE, ...) {
	# Compute AutoD2: the Mahalanobis generalized distance (D2) between a
	# multivariate time series and itself lagged by k, for a range of lags.
	# Returns an object of class 'D2' with components lag and D2 (plus the
	# call, data name, type and time units), optionally plotting the profile.
	#
	# series: a multiple regular time series ('mts' object in R)
	# lags:   c(min, max) range of lags to explore (in observation steps)
	# step:   increment between successive lags
	# plotit: should the D2 profile be plotted?
	# add:    add the curve to an existing plot instead of creating a new one?
	# ...:    further arguments passed to plot() or lines()
	call <- match.call()
	data <- deparse(substitute(series))
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		# Note: the old 'ts' package is part of 'stats' since R 1.9.0,
		# so no package needs to be loaded here any more
		if (!inherits(series, "mts"))
			stop("series must be a multiple regular time series object")
		Unit <- attr(series, "units")
	} else {
		# Rem: cannot test if it is a time series
		Unit <- attr(attr(series, "tspar"), "units")
	}
	UnitTxt <- GetUnitText(series)
	# Test the length of the series, range and step...
	n <- nrow(series)
	if (length(lags) < 2)
		stop("lags must be a vector with 2 values: (min, max)")
	if (lags[1] < 1 || lags[1] > n/3)
		stop("lags must be larger or equal to 1, and smaller or equal to n/3")
	if (lags[2] < 1 || lags[2] > n/3)
		stop("lags must be larger or equal to 1, and smaller or equal to n/3")
	Lags.vec <- seq(lags[1], lags[2], step)
	if (length(Lags.vec) < 2)
		stop("Less than 2 lags. Redefine intervals or step")
	D2.vec <- numeric(length(Lags.vec))

	# Calculate AutoD2 for each lag
	x <- as.matrix(series)
	for (i in seq_along(Lags.vec)) {
		k <- Lags.vec[i]
		g1 <- x[1:(n-k), ]		# first n-k observations
		g2 <- x[(k+1):n, ]		# the same series lagged by k
		# Standardize each column by its standard deviation
		sd1 <- apply(g1, 2, var)^.5
		sd2 <- apply(g2, 2, var)^.5
		g1 <- t(t(g1)/sd1)
		g2 <- t(t(g2)/sd2)
		# Difference between the two mean vectors
		m <- apply(g1, 2, mean) - apply(g2, 2, mean)
		# Center both groups, then pool their covariance matrices
		g1 <- scale(g1)
		g2 <- scale(g2)
		S1 <- t(g1) %*% g1
		S2 <- t(g2) %*% g2
		S <- solve((S1 + S2)/(2*(n-k)-2))	# inverse of the pooled covariance matrix
		D2.vec[i] <- (t(m) %*% S) %*% m		# Mahalanobis generalized distance
	}
	res <- list(lag=Lags.vec, D2=D2.vec)
	res$call <- call
	res$data <- data
	res$type <- "AutoD2"
	res$units.text <- UnitTxt
	attr(res, "units") <- Unit

	# Do we need to plot the graph?
	if (plotit == TRUE) {
		if (add == TRUE) {
			lines(res$lag, res$D2, ...)
		} else {
			plot(res$lag, res$D2, type="b", xlab=paste("lag (", UnitTxt, ")", sep=""), ylab="D2", main=paste("AutoD2 for:", data), ...)
		}
	}
	class(res) <- "D2"
	res 	# Return results
}
"CenterD2" <-
function(series, window=nrow(series)/5, plotit=TRUE, add=FALSE, type="l", level=0.05, lhorz=TRUE, lcol=2, llty=2, ...) {
	# Compute CenterD2: the Mahalanobis generalized distance (D2) between each
	# observation and the centroid of a sliding window of 'window' observations,
	# used to locate discontinuities in a multivariate time series. D2 values
	# above the chi-square critical value at 'level' flag observations that are
	# significantly distant from the window's center.
	#
	# series: a multiple regular time series ('mts' object in R)
	# window: number of observations in the sliding window
	# plotit: should the D2 profile be plotted?
	# add:    add the curve to an existing plot instead of creating a new one?
	# type:   plot type (as in plot())
	# level:  significance level for the chi-square reference line
	# lhorz:  draw the horizontal chi-square significance line?
	# lcol, llty: color and line type of the significance line
	# ...:    further arguments passed to plot() or lines()
	call <- match.call()
	data <- deparse(substitute(series))
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		# Note: the old 'ts' package is part of 'stats' since R 1.9.0
		if (!inherits(series, "mts"))
			stop("series must be a multiple regular time series object")
		Unit <- attr(series, "units")
	} else {
		# Rem: cannot test if it is a time series
		Unit <- attr(attr(series, "tspar"), "units")
	}
	UnitTxt <- GetUnitText(series)
	# Test the length of the series, range and step...
	n <- nrow(series)
	if (window[1] < 1 || window[1] > n)
		stop("window must be larger or equal to 1, and smaller or equal to n")
	Lags.vec <- 1:n
	D2.vec <- Lags.vec			# will be overwritten position by position below
	# Calculate CenterD2 for each observation
	x <- as.matrix(series)
	# D2 of each observation of the initial window against that window
	w <- scale(x[1:window,])
	R <- solve(cor(w))			# inverse correlation matrix of the window
	for (i in 1:window) {
		v <- w[i,]
		D2.vec[i] <- (t(v) %*% R) %*% v
	}
	# Then slide the window one observation at a time and compute the D2 of
	# its last observation against the current window
	for (j in 1:(n-window)) {
		w <- scale(x[j+(1:window),])
		R <- solve(cor(w))
		v <- w[window,]
		D2.vec[window+j] <- (t(v) %*% R) %*% v
	}
	res <- list(lag=Lags.vec, D2=D2.vec)
	res$call <- call
	res$data <- data
	res$type <- "CenterD2"
	res$window <- window
	res$level <- level
	res$chisq <- qchisq(1-level, ncol(series))	# critical value of the test
	res$units.text <- UnitTxt
	attr(res, "units") <- Unit

	# Do we need to plot the graph?
	if (plotit == TRUE) {
		if (add == TRUE) {
			lines(res$lag, res$D2, ...)
		} else {
			plot(res$lag, res$D2, type=type, xlab=paste("lag (", UnitTxt, ")", sep=""), ylab="D2", main=paste("CenterD2 for:", data), ...)
			if (lhorz == TRUE) {
				abline(h=res$chisq, col=lcol, lty=llty)	# significance line
			}
		}
	}
	class(res) <- "D2"
	res 	# Return results
}
"CrossD2" <-
function(series, series2, lags=c(1, nrow(series)/3), step=1, plotit=TRUE, add=FALSE, ...) {
	# Compute CrossD2: the Mahalanobis generalized distance (D2) between two
	# multivariate time series, with the second one lagged by k, for a range
	# of lags. Both series must have identical dimensions.
	# Returns an object of class 'D2' (see AutoD2 for the components).
	#
	# series, series2: two multiple regular time series ('mts' objects in R)
	# lags:   c(min, max) range of lags to explore (in observation steps)
	# step:   increment between successive lags
	# plotit: should the D2 profile be plotted?
	# add:    add the curve to an existing plot instead of creating a new one?
	# ...:    further arguments passed to plot() or lines()
	call <- match.call()
	data1 <- deparse(substitute(series))
	data2 <- deparse(substitute(series2))
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		# Note: the old 'ts' package is part of 'stats' since R 1.9.0
		if (!inherits(series, "mts"))
			stop("series must be a multiple regular time series object")
		if (!inherits(series2, "mts"))
			stop("series2 must be a multiple regular time series object")
		Unit <- attr(series, "units")
	} else {
		# Rem: cannot test if these are multiple time series
		Unit <- attr(attr(series, "tspar"), "units")
	}
	if (nrow(series) != nrow(series2))
		stop("series and series2 must have same row number")
	if (ncol(series) != ncol(series2))
		stop("series and series2 must have same column number")
	UnitTxt <- GetUnitText(series)
	# Test the length of the series, range and step...
	n <- nrow(series)
	if (length(lags) < 2)
		stop("lags must be a vector with 2 values: (min, max)")
	if (lags[1] < 1 || lags[1] > n/3)
		stop("lags must be larger or equal to 1, and smaller or equal to n/3")
	if (lags[2] < 1 || lags[2] > n/3)
		stop("lags must be larger or equal to 1, and smaller or equal to n/3")
	Lags.vec <- seq(lags[1], lags[2], step)
	if (length(Lags.vec) < 2)
		stop("Less than 2 lags. Redefine intervals or step")
	D2.vec <- numeric(length(Lags.vec))

	# Calculate CrossD2 for each lag
	x1 <- as.matrix(series)
	x2 <- as.matrix(series2)
	for (i in seq_along(Lags.vec)) {
		k <- Lags.vec[i]
		g1 <- x1[1:(n-k), ]		# first n-k observations of series
		g2 <- x2[(k+1):n, ]		# observations of series2 lagged by k
		# Standardize each column by its standard deviation
		sd1 <- apply(g1, 2, var)^.5
		sd2 <- apply(g2, 2, var)^.5
		g1 <- t(t(g1)/sd1)
		g2 <- t(t(g2)/sd2)
		# Difference between the two mean vectors
		m <- apply(g1, 2, mean) - apply(g2, 2, mean)
		# Center both groups, then pool their covariance matrices
		g1 <- scale(g1)
		g2 <- scale(g2)
		S1 <- t(g1) %*% g1
		S2 <- t(g2) %*% g2
		S <- solve((S1 + S2)/(2*(n-k)-2))	# inverse of the pooled covariance matrix
		D2.vec[i] <- (t(m) %*% S) %*% m		# Mahalanobis generalized distance
	}
	res <- list(lag=Lags.vec, D2=D2.vec)
	res$call <- call
	res$data <- data1
	res$data2 <- data2
	res$type <- "CrossD2"
	res$units.text <- UnitTxt
	attr(res, "units") <- Unit

	# Do we need to plot the graph?
	if (plotit == TRUE) {
		if (add == TRUE) {
			lines(res$lag, res$D2, ...)
		} else {
			plot(res$lag, res$D2, type="b", xlab=paste("lag (", UnitTxt, ")", sep=""), ylab="D2", main=paste("CrossD2 for:", data1, "versus", data2), ...)
		}
	}
	class(res) <- "D2"
	res 	# Return results
}
"GetUnitText" <-
function(series) {
	# Build a human-readable label for the time unit of a regular time series,
	# combining the series frequency with its "units" attribute, e.g.
	# "1/12 years", or a more natural name for recognized combinations
	# (e.g. "months" for "years" sampled 12 times per year).
	# Returns "" when the series carries no "units" attribute.
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		Unit <- attr(series, "units")
	} else {
		Unit <- attr(attr(series, "tspar"), "units")	# In Splus, "units" is an attribute of "tspar"!!!
	}
	frequency <- frequency(series)
	deltat <- deltat(series)
	# Prefix: "1/f " for an integer frequency f > 1, otherwise the sampling
	# interval deltat; no prefix when there is one observation per unit
	if (frequency == 1) pre <- "" else {
		if (round(frequency) == frequency) pre <- paste("1/", frequency, " ", sep="") else {
			pre <- paste(deltat, " ", sep="")	# fixed: sep="" avoids a double space
		}
	}
	if (is.null(Unit)) UnitTxt <- "" else {
		# Make sure unit is correctly spelled; casefold() is available in both
		# R and S+. Unknown units are kept unchanged (switch default).
		Unit <- switch(casefold(Unit),
			"years"=, "year"=, "y"="years",
			"weeks"=, "week"="weeks",
			"days"=, "day"=, "d"="days",
			"hours"=, "hour"=, "h"="hours",
			"minutes"=, "minute"=, "min"="min",
			"secondes"=, "seconde"=, "sec"="sec",
			Unit)
		UnitTxt <- paste(pre, Unit, sep="")
		# Select some particular cases where unit + frequency combine into a
		# more natural unit name
		if (Unit == "years" & frequency == 12) UnitTxt <- "months"
		if (Unit == "years" & frequency == 24) UnitTxt <- "two-weeks"
		if (Unit == "years" & frequency == 4) UnitTxt <- "quarters"
		if (Unit == "weeks" & frequency == 7) UnitTxt <- "days"
		if (Unit == "days" & frequency == 1/7) UnitTxt <- "weeks"
		if (Unit == "days" & frequency == 24) UnitTxt <- "hours"
		if (Unit == "hours" & frequency == 1/24) UnitTxt <- "days"
		# fixed: this test used 'Unit == "hour"', which is unreachable after
		# the normalization above (the unit is always spelled "hours" here)
		if (Unit == "hours" & frequency == 60) UnitTxt <- "min"
		if (Unit == "min" & frequency == 1/60) UnitTxt <- "hours"
		if (Unit == "min" & frequency == 60) UnitTxt <- "sec"
		if (Unit == "sec" & frequency == 1/60) UnitTxt <- "min"
	}
	UnitTxt
}
"abund" <-
function(x, f = 0.2) {
	# Sort the descriptors (columns) of a data frame according to a weighted
	# mix of their abundance and their frequency of occurrence, to help
	# separate rare from abundant variables. The sort criterion is
	# f * (log-abundance, in % of the most abundant column) +
	# (1 - f) * (% of null values).
	call <- match.call()
	# Keep only the name of the data object, not the data themselves:
	# the original data must not be modified between 'abund' and
	# 'extract.abund'!
	Data <- deparse(substitute(x))
	x <- as.data.frame(x)	# make sure we work on a data frame
	q <- nrow(x)
	# Percentage of non-null (and of null) values in each column
	percnonull <- colSums(x != 0)/q*100
	percnull <- 100 - percnonull
	# Log of the total count in each column, in % of the most abundant one
	nbrind <- log(colSums(x))
	# Columns full of zeros give log(0) = -Inf => reset those to 0
	nbrind[is.infinite(nbrind)] <- 0
	percind <- nbrind/max(nbrind)*100
	# Constrain f within ]0, 1[
	if (!is.numeric(f)) 
		stop("abund: f must be numeric")
	if (f <= 0) {
		if (f != 0) warning("f must not be smaller than zero. Was adjusted to 0.00001")
		f <- 0.00001
	}
	if (f >= 1) {
		if (f != 1) warning("f must not be larger than one. Was adjusted to 0.99999")
		f <- 0.99999
	}
	# Criterion for the sort of 'local abundance', in ascending order
	crit <- f*percind + (1 - f)*percnull
	ord <- sort.list(crit)
	# Reorder everything accordingly and rescale to [0, 100] before cumulating
	crit <- crit[ord]
	percindsrt <- percind[ord]
	percnonullsrt <- percnonull[ord]
	ind.scaled <- (percindsrt - min(percindsrt))/(max(percindsrt) - min(percindsrt))*100
	non.scaled <- (percnonullsrt - min(percnonullsrt))/(max(percnonullsrt) - min(percnonullsrt))*100
	cumsm <- cumsum(ind.scaled - non.scaled)
	# Rescale the cumulated sum between 0 and 100
	cumsm <- (cumsm - min(cumsm))/(max(cumsm) - min(cumsm))*100
	names(cumsm) <- names(percindsrt)
	names(ord) <- names(percindsrt)
	names(crit) <- names(percindsrt)
	# Assemble the result and turn it into an 'abund' object
	res <- list(data=Data, vr=ord, sort=crit, cumsum=cumsm, p.log.ind=percindsrt, p.nonull=percnonullsrt, f=f, call=call)
	class(res) <- "abund"
	res		# Return the result
}
"buysbal" <-
function(x, y=NULL, frequency=12, units="years", datemin=NULL, dateformat="m/d/Y", count=FALSE) {
	# Build a Buys-Ballot table: a matrix crossing years (rows) with seasonal
	# cycles (columns, 'frequency' cycles per year). Each cell holds the mean
	# of the observations falling in that year/cycle, or their count when
	# count=TRUE.
	#
	# x:          a regular time series (when y=NULL), or a vector of times
	# y:          the observations, when x contains the times
	# frequency:  number of cycles per year (e.g. 12 for monthly columns)
	# units:      time units of x, "years" or "days" (days are converted)
	# datemin, dateformat: passed to daystoyears() when units="days"
	# count:      return the number of observations per cell instead of means?
	# Check the frequency argument
	if (!is.numeric(frequency))
		stop("frequency must be a numeric value")
	frequency <- round(frequency)
	if (frequency < 2)
		stop("frequency must be a positive integer larger or equal to 2")
	# If y is not null, we should have two vectors: time in x, and data in y
	if (!is.null(y)) {
		if (!is.vector(x) || !is.vector(y) || length(x) != length(y))
			stop("x and y must be vectors of the same length")
		# Make sure x and y are vectors
		x <- as.vector(x)
		y <- as.vector(y)
	} else {		# We must have a time series in x
		# Require a library in R
		if (exists("is.R") && is.function(is.R) && is.R()) require(ts)
		if (!is.tseries(x))
			stop("x must be a regular time series if y is not provided")
		y <- as.vector(x)
		x <- as.vector(time(x))
	}
	# Verify units argument (currently, only "years" and "days" are accepted)
	UNITS <- c("years", "days")
	units <- pmatch(units, UNITS)
	if (is.na(units)) 
		stop("invalid units value")
	# NOTE(review): pmatch() returns NA (not -1) on an ambiguous match, so
	# this branch looks unreachable -- kept for S+ compatibility; confirm
	if (units == -1) 
		stop("ambiguous units value")
	# 'units' is now an index into UNITS; switch() selects by position here
	x <- switch(units,
		"years"=x,									# No transformation required
		"days"=daystoyears(x, datemin, dateformat))	# Transform in years
	# Create factors for "years" and "cycle"
	years <- floor(x)
	cycle <- floor((x - years+0.00001)*frequency) + 1
	# Rem: we had to add a small value to cope with rounding errors with time series!
	# Check if all cycle values happen
	nocycle <- setdiff(1:frequency, cycle)
	# If at least one value does not happen, we must create dummy entries
	# (NA observations) so that tapply() still produces a full
	# years x frequency table
	if (length(nocycle) > 0) {
		n <- length(nocycle)	
		cycle <- c(cycle, nocycle)
		years <- c(years, years[1:n])
		y <- c(y, rep(NA, n))
	}
	if (count == TRUE) { # Indicate the number of observations in each case
		y[!is.na(y)] <- 1
		tab <- tapply(y, list(years, cycle), FUN="sum", simplify=TRUE, na.rm=TRUE)
		tab[is.nan(tab)] <- 0
		tab[is.na(tab)] <- 0
	} else {			 # Create the Buys-Ballot table
		tab <- tapply(y, list(years, cycle), FUN="mean", simplify=TRUE, na.rm=TRUE)
		# We get mixed combinations of NA and NaN for missing entries => change NaN into NA
		tab[is.nan(tab)] <- NA
	}
	tab
}
"daystoyears" <-
function (x, datemin=NULL, dateformat="m/d/Y") {
	# Convert a vector of dates expressed in days (julian days, or days since
	# an arbitrary origin) into years on a "linear" scale: 1 year = 365.25
	# days, so that the integer part of the result reflects the calendar year.
	#
	# x:          numeric vector of dates in days
	# datemin:    date (character or POSIXt) corresponding to min(x), used to
	#             anchor the series on the calendar; NULL keeps x's own origin
	# dateformat: format of datemin, e.g. "m/d/Y" or "d/m/y" ("Y" = 4-digit
	#             year, "y" = 2-digit year)
	x <- x						# force the promises (old lazy-evaluation safeguard)
	datemin <- datemin
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R,
		defyearorig <- 1970
		# In R, we use POSIXt
		if (length(datemin) > 0 && !any(class(datemin) == "POSIXt")) {	# We must convert
			# To be compatible with chron() and with Splus, we accept formats
			# like "d/m/y", converted here into strptime() codes. Warning!
			# "y" means a 2-digit year ("89"), "Y" a 4-digit year ("1989")!!!
			dateformat <- sub("month", "%B", dateformat)	# Full spelled month
			dateformat <- sub("mon", "%b", dateformat)		# Abbreviated month
			dateformat <- sub("m", "%m", dateformat)		# month - numeric
			dateformat <- sub("d", "%d", dateformat)	    # day
			dateformat <- sub("y", "%y", dateformat)		# year (two digits)
			dateformat <- sub("Y", "%Y", dateformat)		# Year (four digits)
			datemin <- strptime(as.character(datemin), format=dateformat)
		}
		# Julian is adapted from julian.default in lib chron 2.2-19 (this way we don't require chron!)
		"Julian" <- function(x, d, y) {
			if(is.null(origin. <- getOption("chron.origin")))
		            origin. <- c(month = 1, day = 1, year = 1970)	# Default origin in R
		    m <- c(origin.[1], x)               # prepend month of new origin
		    d <- c(origin.[2], d)               # prepend day of new origin
		    y <- c(origin.[3], y)               # prepend year of new origin
		    # code from julian date in the S book (p.269)
		    y <- y + ifelse(m > 2, 0, -1)
		    m <- m + ifelse(m > 2, -3, 9)
		    c <- y %/% 100
		    ya <- y - 100 * c
		    out <- ((146097 * c) %/% 4 + (1461 * ya) %/% 4 + (153 * m + 2) %/% 5 + d + 1721119)
			## now subtract the new origin from all dates
			if(all(origin. == 0))
			    out <- out[-1]
			else
			    out <- out[-1] - out[1]
			# orig according to S algorithm
			out
		}

		if (length(datemin) > 0) {
			dateminval <-  Julian(datemin$mon+1, datemin$mday, datemin$year+1900)
			# now we shift the whole x series so as the minimal day matches dateminval
			x <- x - trunc(min(x, na.rm=TRUE)) + dateminval
		}
	} else {												# We are in Splus
		defyearorig <- 1960
		if (length(datemin) > 0) {
			dateminval <- as.numeric(chron(datemin, "00:00:00", format=c(dateformat, "h:m:s")))
			# now we shift the whole x series so as the minimal day matches dateminval
			x <- x - trunc(min(x, na.rm=TRUE)) + dateminval
		}
	}
	# Days -> years with a "linear scale": 1 year = 365.25 days, 1 month = 1/12 years.
	# The origin year comes from the "chron.origin" option when it is set.
	# Fix: the original read options("chron.origin")$year, which is always
	# NULL (the returned list element is named "chron.origin", not "year"),
	# so a custom origin year was silently ignored.
	origin. <- options("chron.origin")[[1]]
	if (is.null(origin.) || is.na(origin.["year"])) {
		yearorig <- defyearorig
	} else {
		yearorig <- as.numeric(origin.["year"])
	}
	x <- x/365.25 + yearorig
	x
}
"decaverage" <-
function(x, type="additive", order=1, times=1, sides=2, ends="fill", weights=NULL) {
	# Time series decomposition using a (weighted) moving average. Returns a
	# 'tsd' object whose 'series' component contains the smoothed series
	# ('filtered') and the residuals (x - filtered for the additive model,
	# x / filtered for the multiplicative one).
	#
	# x:       a univariate regular time series
	# type:    "additive" or "multiplicative" residuals
	# order:   half-width of the window (2*order+1 values), or "periodic" to
	#          use a window matching the series frequency
	# times:   how many times the moving average is applied successively
	# sides:   2 = centered window, 1 = window over past values only
	# ends:    end-of-series treatment: "NAs", "fill", "circular" or "periodic"
	# weights: explicit window weights (overrides 'order' when provided)
	call <- match.call()
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x <- as.ts(x)
	} else {												# We are in S+
		x <- as.rts(x)
	}
	if (is.matrix(x) && ncol(x) != 1) 
	       stop("only univariate series are allowed")
	if (!is.numeric(times) || times <= 0)
		stop("times must be a positive number")
	if (!is.numeric(sides) || (sides != 1 & sides != 2))
		stop("specify only 1 or 2 for sides")
	# if weights exists, we use it in priority. Otherwise, we build the weights specification
	if (is.null(weights)) {
		if (is.character(order)) {
			if (is.na(pmatch(order, "periodic")))
				stop("order must be a positive number or \"periodic\"")
			freq <- frequency (x)
			# frequency must be at least 2
			if (freq < 2)
				stop("for a periodic smoothing, frequency of the series must be equal or higher than 2")
			order <- freq %/% 2
			weights <- rep(1, 2*order+1)
			if (freq == 2*order) {		# freq is even => weights is odd, and we must give 0.5 for weight at each end
				weights[1] <- 0.5
				weights[length(weights)] <- 0.5
			}
		} else {		# Order is numeric, so it must be a positive number
			if (!is.numeric(order) || order <= 0)
				stop("order must be a positive number or \"periodic\"")
			weights <- rep(1,2*order+1)
		}
	} else {			# weights is defined
		if (length(weights) < 2)
			stop("weights must contain at least 2 elements")
		order <- length(weights) %/% 2
	}
	# Check the type argument
	TYPES <- c("additive", "multiplicative")
		typeindex <- pmatch(type, TYPES)
		if (is.na(typeindex)) 
			stop("invalid type value")
		if (typeindex == -1) 
			stop("ambiguous type value")
		# make sure type is fully spelled
		type <- switch(typeindex,
				"additive"="additive",
				"multiplicative"="multiplicative")
	# Check the ends argument and treat the series accordingly (add calculated arguments at the beginning and at the end)
	ENDS <- c("NAs", "fill", "circular", "periodic")
	endsindex <- pmatch(ends, ENDS)
	if (is.na(endsindex)) 
		stop("invalid ends value")
	if (endsindex == -1) 
		stop("ambiguous ends value")
	# make sure ends is fully spelled
	ends <- switch(endsindex,
				"NAs"="NAs",
				"fill"="fill",
				"circular"="circular",
				"periodic"="periodic")
	# create our own specs component
	specs <- list(method="average", type=type, order=order, times=times, sides=sides, ends=ends, weights=weights)
	# we recuperate units from x
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		units <- attr(x, "units")
	} else {
		units <- attr(attr(x, "tspar"), "units")
	}
	# We define functions that pads elements at end of the vector.
	# padmean pads with the mean of the first/last 'order' values; 'cut' keeps
	# the index range of the original data so the padding is removed after
	# filtering.
	padmean <- function(x, order, sides) {
		n <- length(x)
		if (sides == 2)	{		# pads at each end
			if (order > n) order <- n
			paddedx <- NULL
			paddedx[(1:n)+order] <- x
			paddedx[1:order] <- mean(x[1:order], na.rm=TRUE)
			paddedx[(1:order)+n+order] <- mean(x[(n - order + 1):n], na.rm=TRUE)
			# Rem: we dont change tspar, because we will eliminate these values latter!
			cut <- c(order+1, order+n)
		} else {				# pads only at left
			if (2*order > n) order <- n/2
			paddedx <- NULL
			paddedx[(1:n)+2*order] <- x
			paddedx[1:(2*order)] <- mean(x[1:(2*order)], na.rm=TRUE)
			# Rem: we dont change tspar, because we will eliminate these values latter!
			cut <- c(2*order+1, 2*order+n)
		}
		res <- list(x=paddedx, circular=FALSE, cut=cut)
		res
	}
	# In the next function, we take the equivalent sequence of first year to pad before first year
	# and the equivalent function of last year to pad after last year
	padper <- function(x, sides) {
		n <- length(x)
		f <- frequency(x)
		# We must have at least two complete cycles here!
		if (n < 2*f) {
			warning("you need at least two complete cycles to use ends = \"periodic\"")
			# We don't change the series
			res <- list(x=x, circular=FALSE, cut=c(1,n))
		} else {		# Requirements to calculate padper are met
			if (sides == 2)	{		# pads at each end
				pos <- f %/% 2
				pos0 <- f - pos + 1
				paddedx <- NULL
				paddedx[(1:n)+pos] <- x
				paddedx[1:pos] <- x[pos0:f]
				paddedx[(1:pos)+n+pos] <- x[(1:pos)+n-f]
				# Rem: we dont change tspar, because we will eliminate these values later!
				cut <- c(pos+1, pos+n)
			} else {				# pads only at left
				pos <- (f %/% 2) * 2
				pos0 <- f - pos + 1
				paddedx <- NULL
				paddedx[(1:n)+pos] <- x
				paddedx[1:pos] <- x[pos0:f]
				# Rem: we dont change tspar, because we will eliminate these values later!
				cut <- c(pos+1, pos+n)
			}
			res <- list(x=paddedx, circular=FALSE, cut=cut)
		}
		res
	}
	n <- length(x)
	if (exists("is.R") && is.function(is.R) && is.R())	# We are in R
		require(ts)		# We need ts library
	filtered <- x						# We don't change the initial series, but a copy of it
	filt <- weights/sum(weights)		# Scale down weights
	# Apply the moving average 'times' times in a row
	for (i in 1:times) {
		# Pad elements at ends of vector x
		padx <- switch(endsindex,
				"NAs"=list(x=filtered, circular=FALSE, cut=c(1,n)),		# We don't have to change the series
				"fill"=padmean(filtered, order, sides),					# We take the mean of ends elements and put them at extremes
				"circular"=list(x=filtered, circular=TRUE, cut=c(1,n)),	# We don't change the series, but change circular (done latter)
				"periodic"=padper(filtered, sides))						# We add elements from first and last period at the right place
		circular <- padx$circular
		cut <- padx$cut
		# perform filtering
		filtered <- filter(padx$x, filter=filt, method="convolution", sides=sides, circular=circular)
		# Now we have to cut the vector x according to cut (we don't use the function window for that since we didn't changed tspar!)
		if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
			filtered <- as.ts(as.vector(filtered)[cut[1]:cut[2]])
			tsp(filtered) <- tsp(x)
			filtered<- as.ts(filtered)
		} else {												# We are in S+
			filtered <- as.rts(as.vector(filtered)[cut[1]:cut[2]])
			tspar(filtered) <- tspar(x)
			filtered <- as.rts(filtered)
		}
	}
	# Calculate residuals
	if (type == "additive") {
		residuals <- x - filtered
	} else {
		residuals <- x / filtered
	}
	series <- ts.union(filtered, residuals)
	# create our own 'tsd' structure ('ts' holds the name of the series slot)
	res <- list(ts="series", series=series, weights=weights, units=units, specs=specs, call=call)
	class(res) <- "tsd"		# change the class of the object to 'tsd'
	res
}
"deccensus" <-
function(x, type="multiplicative", trend=FALSE) {			# Only the multiplicative model is allowed. Use loess for an additive seasonal decomposition
	# CENSUS II seasonal decomposition (multiplicative model only).
	# Returns a 'tsd' object containing either the deseasoned series and the
	# seasonal indices (trend=FALSE), or the trend, seasonal and residual
	# components (trend=TRUE), plus the seasonality test values and the
	# amplitude of the random component.
	#
	# x:     a univariate regular time series with an integer number of
	#        periods and no missing values
	# type:  must be "multiplicative" (checked below)
	# trend: extract a trend component as well?
	# But here we also offer the possibility of using an additive model
	call <- match.call()
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		require(ts)
		x <- as.ts(x)
	} else {												# We are in S+
		x <- as.rts(x)
	}
	if (is.matrix(x) && ncol(x) != 1) 
		stop("only univariate series are allowed")
	# Check the type argument
	TYPES <- c("additive", "multiplicative")
		typeindex <- pmatch(type, TYPES)
		if (is.na(typeindex)) 
			stop("invalid type value")
		if (typeindex == -1) 
			stop("ambiguous type value")
		# make sure type is fully spelled
		type <- switch(typeindex,
				"additive"="additive",
				"multiplicative"="multiplicative")
	if (type == "additive")
		stop("'census' method allows only a multiplicative model. Use 'loess' instead.")
	# create our own specs component
	specs <- list(method="census", type=type)
	# we recuperate units from x
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		units <- attr(x, "units")
	} else {
		units <- attr(attr(x, "tspar"), "units")
	}
	# perform filtering
	n <- length(x)
	period <- frequency(x)
    if (period < 2 || n < 3 * period) 
        stop("series is not periodic or has less than three periods")
   # if (period < 2 || n < 4 * period) 
   #     stop("series is not periodic or has less than four periods")
    if (n %% period != 0)
    	stop("the series must have an integer number of periods")
    if(sum(is.na(x)) > 0)
		stop("no missing value allowed for this method")
	
	X <- as.vector(x)
	# 1) This is to test if there is a seasonal fluctuation:
	# each value as a percentage of the mean of its two neighbours,
	# averaged per cycle position
	test <- c(NA, 200*X[2:(n-1)]/(X[1:(n-2)]+X[3:n]), NA)
	test <- split(test, cycle(x))
	test.s <- NULL
	for (i in 1:period)
		test.s[i] <- mean(test[[i]], na.rm=TRUE)
	# If any of the values in test.s is NaN or Inf, impossible to apply the CENSUS II method (sparse data?)
	if (sum(is.na(test.s)) > 0 | sum(test.s == Inf) > 0) {
		warning("The 'census' decomposition is not calculable for one series")
		series <- x
		#dimnames(series)[[2]] <- "serie"		# No decomposition was performed
		Amp <- NULL
	} else {
		# 2) A mobile mean with a window equal to frequency is applied
		if (period %% 2 == 0) { 
			filter1 <- c(1/(2*period), rep(1/period, period-1), 1/(2*period)) 
		} else {
			filter1 <- rep(1/period, period)
		}
		A <- filter(x, filter1, method="convolution")

		# 3) First estimation of S x I (except for 6 terms at the beginning and at the end)
		B <- x / A 			#* 100
		B2 <- as.vector(B)
		nc <- n/period
		dim(B2) <- c(period, nc)	# one column per cycle (e.g. year)
	
		# 4) Regulation of B (order 2 mobile mean accross years), and we add two values at each size first
		S2 <- B2
		nr2 <- period/2
		Sa <- S2[1:nr2, 2:nc]
		Sa.lead <- apply(Sa[,1:2], 1, mean)
		Sa.trail <- apply(Sa[,(nc-2):(nc-1)], 1, mean)
		Sa <- cbind(Sa.lead, Sa.lead, Sa, Sa.trail, Sa.trail)
		Sa <- t(apply(Sa, 1, filter, rep(1/5, 5), method="convolution"))
		S2[1:nr2, 2:nc] <- Sa[,3:(nc+1)]
		Sb <- S2[(nr2+1):period, 1:(nc-1)]
		Sb.lead <- apply(Sb[,1:2], 1, mean)
		Sb.trail <- apply(Sb[,(nc-2):(nc-1)], 1, mean)
		Sb <- cbind(Sb.lead, Sb.lead, Sb, Sb.trail, Sb.trail)
		Sb <- t(apply(Sb, 1, filter, rep(1/5, 5), method="convolution"))
		S2[(nr2+1):period, 1:(nc-1)] <- Sb[,3:(nc+1)]
		S <- S2
		dim(S) <- NULL
		# Detection of extreme values:
		s <- sqrt(apply((B2 - S2)^2, 1, sum, na.rm=TRUE)/(nc-2))
		s2 <- rep(1.96*s, nc)		# 95% threshold per cycle position
		dim(s2) <- c(period, nc)
		is.xtreme <- abs(B2 - S2) > s2
		is.xtreme[is.na(is.xtreme)] <- FALSE
		# Replacement of extreme values by the mean of three successive cycles
		B2.rep <- B2
		B2.rep[, 2:(nc-1)] <- (B2[, 1:(nc-2)] + B2[, 2:(nc-1)] + B2[, 3:nc]) / 3
		B2.rep[1:nr2, 2] <- B2.rep[1:nr2, 3]
		B2.rep[(nr2+1):period, 1] <- B2.rep[(nr2+1):period, 2]
		B2.rep[1:nr2, nc] <- B2.rep[1:nr2, nc-1]
		B2.rep[(nr2+1):period, nc-1] <- B2.rep[(nr2+1):period, nc-2]
		B2[is.xtreme] <- B2.rep[is.xtreme]
		# Replace missing values by values for same month and previous/next year
		B2[1:nr2, 1] <- B2[1:nr2, 2]
		B2[(nr2+1):period, nc] <- B2[(nr2+1):period, nc-1]
		# Centering of values
		B2.cent <- rep(apply(B2, 2, sum), period)
		dim(B2.cent) <- c(nc, period)
		B2.cent <- t(B2.cent)
		S1 <- B2 * (period * 100) / B2.cent
				
		# 5) First approximation of S1 with a mobile mean
		S1.lead <- apply(S1[,1:2], 1, mean)
		S1.trail <- apply(S1[,(nc-1):nc], 1, mean)
		S1 <- cbind(S1.lead, S1.lead, S1, S1.trail, S1.trail)
		S1 <- t(apply(S1, 1, filter, c(1/9, 2/9, 3/9, 2/9, 1/9), method="convolution"))
		S1 <- S1[, 3:(nc+2)]
	
		# 6) First approximation of CI1
		dim(S1) <- NULL
		CI1 <- x / S1
				
		# 7) First approximation of C1 (add 7 values at begin and end and apply Spencer filter)
		C1 <- c(rep(mean(CI1[1:4]), 7), CI1, rep(mean(CI1[(n-3):n]), 7))
		C1 <- filter(C1, c(-3,-6,-5,3,21,46,67,74,67,46,21,3,-5,-6,-3)/320, method="convolution")
		C1 <- as.vector(C1[8:(n+7)])
	
		# 8) First approximation of I1
		I1 <- CI1 / C1*100		#*100
		# Amplitude: mean absolute change of the random component
		Amp <- sum(abs(I1[2:n] - I1[1:(n-1)]))/(n-1)
		if (is.na(Amp)) {
				warning("The 'census' decomposition is not calculable for one series (Amp == NA)")
				series <- x
		} else {
		
			# 9) Second approximation of SI
			D <- x / C1*100			#*100
			B2 <- as.vector(D)
			dim(B2) <- c(period, nc)
			S2.lead <- apply(B2[,1:2], 1, mean)
			S2.trail <- apply(B2[,(nc-1):nc], 1, mean)
			S2 <- cbind(S2.lead, S2.lead, B2, S2.trail, S2.trail)
			S2 <- t(apply(S2, 1, filter, rep(1/5, 5), method="convolution"))
			S2 <- S2[, 3:(nc+2)]
			# Detection of extreme values:
			s <- sqrt(apply((B2 - S2)^2, 1, sum)/(nc-2))
			s2 <- rep(1.96*s, nc)
			dim(s2) <- c(period, nc)
			is.xtreme <- abs(B2 - S2) > s2
			is.xtreme[is.na(is.xtreme)] <- FALSE
			# Replacement of extreme values
			B2.rep <- B2
			B2.rep[, 2:(nc-1)] <- (B2[, 1:(nc-2)] + B2[, 2:(nc-1)] + B2[, 3:nc]) / 3
			B2.rep[, 1] <- B2.rep[, 2]
			B2.rep[, nc] <- B2.rep[, nc-1]
			B2[is.xtreme] <- B2.rep[is.xtreme]
			# Centering of values
			B2.cent <- rep(apply(B2, 2, sum), period)
			dim(B2.cent) <- c(nc, period)
			B2.cent <- t(B2.cent)
			S1 <- B2 * (period * 100) / B2.cent
		
			# 10) Final season indices: smoothing window depends on the
			# amplitude of the random component
			if (Amp > 2) {
				S1.lead <- apply(S1[,1:3], 1, mean)
				S1.trail <- apply(S1[,(nc-2):nc], 1, mean)
				S1 <- cbind(S1.lead, S1.lead, S1.lead, S1, S1.trail, S1.trail, S1.trail)
				S1 <- t(apply(S1, 1, filter, c(1/16, 2/16, 3/16, 4/16, 3/16, 2/16, 1/16), method="convolution"))
				S <- S1[, 4:(nc+3)]
			} else {
				S1.lead <- apply(S1[,1:2], 1, mean)
				S1.trail <- apply(S1[,(nc-1):nc], 1, mean)
				S1 <- cbind(S1.lead, S1.lead, S1, S1.trail, S1.trail)
				S1 <- t(apply(S1, 1, filter, c(1/9, 2/9, 3/9, 2/9, 1/9), method="convolution"))
				S <- S1[, 3:(nc+2)]
			}
			
			# 11) Final deseasoned series CI
			dim(S) <- NULL
			CI <- x / S	*100
		
			# 12) Final cyclic trend component C (Spencer filter again)
			C <- c(rep(mean(CI[1:4]), 7), CI, rep(mean(CI[(n-3):n]), 7))
			C <- filter(C, c(-3,-6,-5,3,21,46,67,74,67,46,21,3,-5,-6,-3)/320, method="convolution")
			C <- as.vector(C[8:(n+7)])
	
			# 13) Final random component I
			I <- CI / C	*100
			# NOTE(review): this recomputes Amp from I1, yielding the same
			# value as in step 8; it probably was meant to use the final
			# component I -- TODO confirm before changing
			Amp <- sum(abs(I1[2:n] - I1[1:(n-1)]))/(n-1)

			# Concatenate series
			if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
				if (trend == FALSE) {
					S <- as.ts(S)
					tsp(S) <- tsp(CI)
					series <- ts.union(CI, S/100)
					dimnames(series)[[2]] <- c("deseasoned", "seasonal")
				} else {
					S <- as.ts(S)
					tsp(S) <- tsp(I)
					C <- as.ts(C)
					tsp(C) <- tsp(I)
					series <- ts.union(C, S/100, I/100)
					dimnames(series)[[2]] <- c("trend", "seasonal", "residuals")
				}
			} else{													# We are in S+
				if (trend == FALSE) {
					S <- as.rts(S)
					tspar(S) <- tspar(CI)
					series <- ts.union(CI, S/100)
					dimnames(series)[[2]] <- c("deseasoned", "seasonal")
				} else {
					S <- as.rts(S)
					tspar(S) <- tspar(I)
					C <- as.rts(C)
					tspar(C) <- tspar(I)
					series <- ts.union(C, S/100, I/100)
					dimnames(series)[[2]] <- c("trend", "seasonal", "residuals")
				}
			}
		}
	}
	# create our own 'tsd' structure ('ts' holds the name of the series slot)
	res <- list(ts="series", series=series, test.seasons=test.s, amplitude=Amp, units=units, model.type="multiplicative", specs=specs, call=call)
	class(res) <- "tsd"		# change the class of the object to 'tsd'
	res
}
"decdiff" <-
function(x, type="additive", lag=1, order=1, ends="fill") {
	# Time series decomposition using differences: the 'filtered' component is
	# diff(x, lag, order) and the residuals are x - filtered (additive model)
	# or x / filtered (multiplicative model). Returns a 'tsd' object.
	#
	# x:     a univariate regular time series
	# type:  "additive" or "multiplicative"
	# lag:   lag of the differences
	# order: number of successive differences
	# ends:  "fill" pads the start with the mean of the first values so the
	#        result keeps the length of x; "NAs" pads with NA; "drop" shortens
	#        the series by lag*order observations
	call <- match.call()
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x <- as.ts(x)
	} else {												# We are in S+
		x <- as.rts(x)
	}
	if (is.matrix(x) && ncol(x) != 1) 
		stop("only univariate series are allowed")
	if (!is.numeric(lag) || lag <= 0)
		stop("lag must be a positive number")
	if (!is.numeric(order) || order <= 0)
		stop("order must be a positive number")
	# Check the type argument
	TYPES <- c("additive", "multiplicative")
	typeindex <- pmatch(type, TYPES)
	if (is.na(typeindex)) 
		stop("invalid type value")
	if (typeindex == -1) 
		stop("ambiguous type value")
	# make sure type is fully spelled
	type <- switch(typeindex,
				"additive"="additive",
				"multiplicative"="multiplicative")
	# Check the ends argument and treat the series accordingly
	ENDS <- c("NAs", "fill", "drop")
	endsindex <- pmatch(ends, ENDS)
	if (is.na(endsindex)) 
		stop("invalid ends value")
	if (endsindex == -1) 
		stop("ambiguous ends value")
	# make sure ends is fully spelled
	ends <- switch(endsindex,
				"NAs"="NAs",
				"fill"="fill",
				"drop"="drop")
	# create our own specs component
	specs <- list(method="diff", type=type, lag=lag, order=order, ends=ends)
	# we recuperate units from x
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		units <- attr(x, "units")
	} else {
		units <- attr(attr(x, "tspar"), "units")
	}
	# Note: the old 'ts' package is part of 'stats' since R 1.9.0, so no
	# require(ts) is needed here any more.
	# The next function pads enough data to the left (either NA or the mean of
	# the first few values) to obtain a series of the same length as x after
	# differencing
	padleft <- function(x, Lag, fill) {
		if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
			x <- window(x, start=start(lag(x, Lag)), end=end(x), extend=TRUE)
		} else {												# We are in S+
			x <-  ts.union(lag(x, Lag), x)[,2]
		}
		if (fill == TRUE)			# We fill padded data with the mean of first few values
			x[1:Lag] <- mean(x[(1:Lag)+Lag], na.rm=TRUE)
		x
	}
	filtered <- switch(endsindex,
					"NAs"=padleft(x, lag*order, fill=FALSE),				# We add NA's in front of the series
					"fill"=padleft(x, lag*order, fill=TRUE),				# We add the mean of first values in front of the series
					"drop"=x)												# We keep x like that
	# perform filtering (fix: 'differences' spelled out instead of relying on
	# partial argument matching with 'difference')
	filtered <- diff(filtered, lag=lag, differences=order)
	# Calculate residuals
	if (type == "additive") {
		residuals <- x - filtered
	} else {
		residuals <- x / filtered
	}
	series <- ts.intersect(filtered, residuals)
	# create our own 'tsd' structure ('ts' holds the name of the series slot)
	res <- list(ts="series", series=series, units=units, specs=specs, call=call)
	class(res) <- "tsd"		# change the class of the object to 'tsd'
	res
}
"decevf" <-
function(x, type="additive", lag=5, axes=1:2) {
	call <- match.call()
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x <- as.ts(x)
	} else {												# We are in S+
		x <- as.rts(x)
	}
	if (is.matrix(x) && ncol(x) != 1) 
		stop("only univariate series are allowed")
	if (!is.numeric(axes) || any(axes <= 0))
		stop("axes must be a vector of positive numbers (ex 1:3)")
	if (!is.numeric(lag) || lag <= 0 || lag < max(axes))
		stop("lag must be a positive number higher or equal to axes max value")
	# Check the type argument
	TYPES <- c("additive", "multiplicative")
		typeindex <- pmatch(type, TYPES)
		if (is.na(typeindex)) 
			stop("invalid type value")
		if (typeindex == -1) 
			stop("ambiguous type value")
		# make sure type is fully spelled
		type <- switch(typeindex,
				"additive"="additive",
				"multiplicative"="multiplicative")
	# create our own specs component
	specs <- list(method="evf", type=type, lag=lag, axes=axes)
	# we recuperate units from x
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		units <- attr(x, "units")
	} else {
		units <- attr(attr(x, "tspar"), "units")
	}
	# perform filtering
	if (exists("is.R") && is.function(is.R) && is.R()) { # We are in R
		require(mva)
		# Create the matrix with lagged series from 0 to lag
		xlagmat <- embed(x, lag)
	} else {	# We are in S+
		x2 <- as.vector(x)
		n <- length(x2)
		m <- n - lag + 1
		xlagmat <- matrix(x2[1:m + rep(lag:1, rep(m, lag)) - 1], m)
	}
	# Perform a pca decomposition of this matrix
	x.pca <- princomp(xlagmat)
	# Rotated vectors are obtained by:
	# sweep(x, 2, x.pca$center) %*% x.pca$loadings == predict(x.pca)
	# original vectors are recalculated with:
	# sweep(predict(x.pca) %*% solve(x.pca$loadings, 2, x.pca$center, FUN="+")
	# for evf, we just keep some of the components in solve(x.pca$loadings)
	invloadings <- solve(x.pca$loadings)		# inverse of loadings matrix, i.e., eigenvectors
	settonul <- is.na(match(1:lag, axes))
	invloadings[settonul,] <- 0					# those are the component we drop
	xlagmat.recalc <- sweep(predict(x.pca) %*% invloadings, 2, x.pca$center, FUN="+")
	# Then we need to take the mean for diagonals to calculated filtered values of initial series
	xmat.recalc <- matrix(NA, nrow= length(x), ncol=lag)
	n <- nrow(xlagmat.recalc)
	for (i in 1:lag)
		xmat.recalc[1:n+(lag-i), i] <- xlagmat.recalc[,i]
	# perform column means to get filtered time series
	filtered <- apply(xmat.recalc, 1, mean, na.rm=TRUE)
	if (exists("is.R") && is.function(is.R) && is.R()) { # We are in R
		filtered <- ts(filtered, start=start(x), frequency=frequency(x))
	} else { # We are in S+
		filtered <- rts(filtered, start=start(x), frequency=frequency(x))
	}
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		require(ts)		# We need ts library
	} else {		# We are in S+
		attr(filtered, "tspar") <- attr(x, "tspar") 	# This is to avoid a warning under S+
	}
	# Calculate residuals
	if (type == "additive") {
		residuals <- x - filtered
	} else {
		residuals <- x / filtered
	}
	series <- ts.union(filtered, residuals)
	# create our own 'tsd' structure
	res <- list(ts="series", series=series, units=units, specs=specs, call=call)
	class(res) <- "tsd"		# change the class of the object to 'tsd'
	res
}
"decloess" <-
function(x, type="additive", s.window=NULL, s.degree=0, t.window=NULL, t.degree=2, robust=FALSE, trend=FALSE) {
	# loess allows only an additive model
	call <- match.call()
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x <- as.ts(x)
	} else {												# We are in S+
		x <- as.rts(x)
	}
	if (is.matrix(x)) 
        stop("only univariate series are allowed")
    # Check the type argument
	TYPES <- c("additive", "multiplicative")
		typeindex <- pmatch(type, TYPES)
		if (is.na(typeindex)) 
			stop("invalid type value")
		if (typeindex == -1) 
			stop("ambiguous type value")
		# multiplicative model not handled (yet)
		if (typeindex == 2) 
			stop("multiplicative model not handle by this function. Use deccensus() instead")
		# make sure type is fully spelled
		type <- switch(typeindex,
				"additive"="additive",
				"multiplicative"="multiplicative")
	if (type == "multiplicative")
		stop("'loess' method allows only an additive model. Use 'census' instead.")
	# create our own specs component
	specs <- list(method="loess", type=type, s.window=s.window, s.degree=s.degree, t.window=t.window, t.degree=t.degree, robust=robust, trend=trend)
	# we recuperate units from x
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		units <- attr(x, "units")
	} else {
		units <- attr(attr(x, "tspar"), "units")
	}
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		require(ts)		# We need ts library
		if (t.degree == 2) t.degree <- 1		# Only 0 or 1 for R
		res.stl <- stl(x, s.window=s.window, s.degree=s.degree, t.window=t.window, t.degree=t.degree, robust=robust)
		if (trend == TRUE) {
			series <- cbind(res.stl$time.series[, "trend"], res.stl$time.series[, "seasonal"], res.stl$time.series[, "remainder"])
			dimnames(series)[[2]] <- c("trend", "seasonal", "residuals")
		} else {			# residuals is trend + remainder in the additive model (otherwise, we recalculate them)
			series <- cbind(res.stl$time.series[, "trend"] + res.stl$time.series[, "remainder"], res.stl$time.series[, "seasonal"])
			dimnames(series)[[2]] <- c("deseasoned", "seasonal")
		}
		# create our own 'tsd' structure
		res <- list(ts="series", series=series, weights=res.stl$weights, units=units, specs=specs, call=call)
	} else {												# We are in S+
		if (trend == TRUE)
			warning("S+ cannot calculate trend with this method!")
		if (t.degree == 0) t.degree <- 1		# Only 1 or 2 for S+
		res.stl <- stl(x, ss.window=s.window, ss.degree=s.degree, s.window=t.window, s.degree=t.degree, ss.robust=robust)
		deseasoned <- as.rts(res.stl$remainder)
		if (type == "additive") {	# This is the way residuals are returned in S+
			seasonal <- as.rts(res.stl$seasonal)
		} else {					# We have to recalculate them
			seasonal <- x / deseasoned
		}
		series <- ts.union(deseasoned, seasonal)
		# create our own 'tsd' structure
		res <- list(ts="series", series=series, weights=res.stl$weights, units=units, specs=specs, call=call)
	}
	class(res) <- "tsd"		# change the class of the object to 'tsd'
	res
}
"decmedian" <-
function(x, type="additive", order=1, times=1, ends="fill") {
	call <- match.call()
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x <- as.ts(x)
	} else {												# We are in S+
		x <- as.rts(x)
	}
	if (is.matrix(x) && ncol(x) != 1) 
		stop("only univariate series are allowed")
	if (!is.numeric(order) || order <= 0)
		stop("order must be a positive number")
	if (!is.numeric(times) || times <= 0)
		stop("times must be a positive number")
	# Check the type argument
	TYPES <- c("additive", "multiplicative")
		typeindex <- pmatch(type, TYPES)
		if (is.na(typeindex)) 
			stop("invalid type value")
		if (typeindex == -1) 
			stop("ambiguous type value")
		# make sure type is fully spelled
		type <- switch(typeindex,
				"additive"="additive",
				"multiplicative"="multiplicative")
	# Check the ends argument and treat the series accordingly
	ENDS <- c("NAs", "fill")
	endsindex <- pmatch(ends, ENDS)
	if (is.na(endsindex)) 
		stop("invalid ends value")
	if (endsindex == -1) 
		stop("ambiguous ends value")
	# make sure ends is fully spelled
	ends <- switch(endsindex,
				"NAs"="NAs",
				"fill"="fill")
	if (endsindex == 1) na.rm <- FALSE else na.rm <- TRUE
	# create our own specs component
	specs <- list(method="median", type=type, order=order, times=times, ends=ends)
	# we recuperate units from x
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		units <- attr(x, "units")
	} else {
		units <- attr(attr(x, "tspar"), "units")
	}
	# perform filtering
	filtmedian <- function(x, n, order, term, na.rm) {
		X <- NULL
		X[(1:n) + order] <- x
		X[1:order] <- NA
		X[(1:order)+n+order] <- NA
		f <- NULL
		for (i in (1:n))
			f[i] <- median(X[(1:term) + i - 1], na.rm=na.rm)
		f
	}
	term <- 2*order + 1
	n <- length(x)
	filtered <- x						# We don't change the initial series, but a copy of it
	for (i in 1:times)
		filtered <- filtmedian(filtered, n=n, order=order, term=term, na.rm=na.rm)
	filtered <- ts(filtered, start=start(x), frequency=frequency(x))
	# Calculate residuals
	if (type == "additive") {
		residuals <- x - filtered
	} else {
		residuals <- x / filtered
	}
	if (exists("is.R") && is.function(is.R) && is.R())	# We are in R
		require(ts)		# We need ts library
	series <- ts.union(filtered, residuals)
	# create our own 'tsd' structure
	res <- list(ts="series", series=series, units=units, specs=specs, call=call)
	class(res) <- "tsd"		# change the class of the object to 'tsd'
	res
}
"decreg" <-
function(x, xreg, type="additive") {
	call <- match.call()
	if (is.matrix(x) && ncol(x) != 1) 
		stop("only univariate series are allowed")
	if (length(x) != length(xreg))
		stop("x and xreg must have same row number")	
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		require(ts)		# We need ts library
		x <- as.ts(x)
		xreg <- as.ts(xreg)
		# Make sure "tsp" attributes are the same for both series
		attr(xreg, "tsp") <- attr(x, "tsp")
	#		stop("time series must have same time scale")
	} else {												# We are in S+
		x <- as.rts(x)
		xreg <- as.rts(xreg)
		# Make sure "tspar" attributes are the same for both series
		attr(xreg, "tspar") <- attr(x, "tspar")
	}
	# Check the type argument
	TYPES <- c("additive", "multiplicative")
		typeindex <- pmatch(type, TYPES)
		if (is.na(typeindex)) 
			stop("invalid type value")
		if (typeindex == -1) 
			stop("ambiguous type value")
		# make sure type is fully spelled
		type <- switch(typeindex,
				"additive"="additive",
				"multiplicative"="multiplicative")
	# create our own specs component
	specs <- list(method="reg", type=type, xreg=xreg)
	# we recuperate units from x
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		units <- attr(x, "units")
	} else {
		units <- attr(attr(x, "tspar"), "units")
	}
	model <- xreg
	# Calculate residuals
	if (type == "additive") {
		residuals <- x - model
	} else {
		residuals <- x / model
	}
	series <- ts.union(model, residuals)
	# create our own 'tsd' structure
	res <- list(ts="series", series=series, units=units, specs=specs, call=call)
	class(res) <- "tsd"		# change the class of the object to 'tsd'
	res
}
"disjoin" <-
function(x) {
	# x must be a factor data, for instance obtained using cut()
	# Can be tested only in R
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		if (is.null(class(x)) || class(x) != "factor")
			stop("x must be a variable of class 'factor', use cut() to create classes")
	}
	n <- length(x)
	nf <- length(levels(x))
	pos <- split(1:n, cut(Z, breaks=cuts))
	for (i in 1:nf) {
		temp <- rep(0, n)
		temp[pos[[i]]] <- 1
		if (i == 1) res <- temp else res <- cbind(res, temp)
	}
	dimnames(res) <- list(names(x), levels(x))
	res
}
"disto" <-
function(x, max.dist=nrow(x)/4, plotit=TRUE, disto.data=NULL) {
	if (is.null(disto.data)) {	# Calculate distogram
		call <- match.call()
		data <- deparse(substitute(x))
		if (exists("is.R") && is.function(is.R) && is.R())	# We are in R
			require(mva)
		x <- as.matrix(x)
		if (is.null(ncol(x)) || ncol(x) < 2)
			stop("There must be at least two columns (series) in the dataset")
		n <- nrow(x)
		if (is.null(n) || n < 10)
			stop("There must be at least 10 observations in the series")
		max.dist <- round(max.dist)
		if (max.dist < 0) max.dist <- round(n/3)
		if( max.dist >= n) max.dist <- n-1
		distance <- dist(1:n)
		x2 <- x^2
		val <- dist((x2 / apply(x2, 1, sum))^.5)^2
		val <- data.frame(distance=as.numeric(distance), distogram=as.numeric(val))
    	# Calculate mean values for each distance
    	res <- rep(0, max.dist)
    	for (i in 1:max.dist) {
    		res[i] <- mean(val[val$distance == i,]$distogram, na.rm=TRUE)/2	
    	}
    	res <- list(distance=1:max.dist, distogram=res)
    	res <- as.data.frame(res)
    	attr(res, "data") <- data
    	attr(res, "call") <- call
    } else {		# Use disto.data instead
    	res <- disto.data
    }
    if (plotit == TRUE) {	# plot the distogram
    	plot(res$distance, res$distogram, type="l", xlab="distance", ylab="delta", main=paste("Distogram for:", attr(res, "data")))
    }
    res
}
"escouf" <-
function(x, level=1, verbose=TRUE) {
	call <- match.call()
	# Rem: we could decide to store the initial data into res$data
	# To free memory, we will just store a call to these data
	# The drawback is that initial data should not be modified
	# between 'ecouf' and 'extract.escouf'!!!
	Data <- deparse(substitute(x))
	# Calculate the trace of a matrix (sum of its diagonal elements)
	Trace <- function(x) {sum(c(x)[1 + 0:(min(dim(x)) - 1) * (dim(x)[1] + 1)], na.rm=TRUE)}
		
	x <- as.data.frame(x)	# We want to be sure to work on a data frame!
	Names <- names(x)
	p <- ncol(x)
	Rvmax <- 0
	vt <- 1:p					# Variable to test
	vr <- NULL					# Held variables
	vrt <- NULL					# Temporarily held variables
	RV <- NULL					# Final held variables
	for (i in 1:p) {			# Loop on the number of variables
		for (j in 1:(p-i+1)) {	# loop on variables
			if (!is.null(vr)) {	# New table
				x2 <- cbind(x, x[vr], x[vt[j]])
			} else {
				x2 <- cbind(x, x[vt[j]])
			}	
			Rtot <- cor(x2)		# Correlations table
			Ryy <- Rtot[1:p, 1:p]
			Rxx <- Rtot[(p+1):(p+i), (p+1):(p+i)]
			Rxy <- Rtot [(p+1):(p+i), 1:p]
			Ryx <- t(Rxy)
			rv <- Trace(Ryx %*% Rxy)/sqrt(Trace(Ryy %*% Ryy)*Trace(Rxx %*% Rxx))	# rv calculation
			if (rv>Rvmax) {
				Rvmax <- rv		# Test on rv
				vrt <- vt[j]	# Temporarily held variable
			}
		}
		vr[i] <- vrt
		vt <- vt[vt!=vr[i]]		# Reidentify variables to test
		RV[i] <- Rvmax			# Final held variable
		if (verbose==TRUE) {
			vrStr <- format(c(vr[i], 111))[1]
			cat("Variable", vrStr, "incorporated, RV =", Rvmax, "\n")
			if (exists("is.R") && is.function(is.R) && is.R() && R.Version()$os == "Win32") {	# We are in R Windows
				flush.console()}
		}
		if (Rvmax>level) break	# Stop iteration (level reached)
	}
	names(vr) <- Names[vr]		# Gives variable names to vr
	names(RV) <- Names[vr]		# ... and to RV
	res <- list(data=Data, vr=vr, RV=RV, calc.level=level, vars=c(p, length(vr)), call=call)		# Create a list containing the result
	class(res) <- "escouf"		# and turn it into an 'escouf' object
	res							# Return the result
}
"extract" <-
function (e, n, ...)
	UseMethod("extract", e, n, ...)
"extract.abund" <-
function(e, n, left=TRUE, ...) {
	if (missing(n)) n <- e$n
	if (is.null(n))
		stop("You must provide a value n for extraction!")
	p <- length(e$p.log.ind)
	# Verify n
	if (n < 0) n <- 0
	if (n > p) n <- p
	if (left == TRUE) { 			# Extract left part
		Res <- as.data.frame(eval(parse(text=e$data))[,e$vr][,1:n])
	} else {						# Extract right part
		Res <- as.data.frame(eval(parse(text=e$data))[,e$vr][,-(1:n)])
	}
	Res
}
"extract.escouf" <-
function(e, n, level=e$level, ...) {
	if (missing(n)) n <- NULL
	if (!is.null(n)) {		# We want to extract n variables
		# We calculate the corresponding level
		n <- abs(round(n))			# Make sure n is a positive integer!
		if (n>length(e$RV)-1) {		# We extract all variables
			level <- 1
		} else {					# We calculate the level
			level <-(e$RV[n]+e$RV[n+1])/2
		}
	}
	if (is.null(level)) {		# look if object$level exist
		if (is.null(e$level)) {
			stop("You must provide a level value for extraction!")
		} else {
			level <- e$level
		}
	}
	# Check the validity of level
	if (level>1 || level<0) stop("level must be a value between 0 and 1!")
	# level must not be lower than e$calc.level, otherwise we don't have enough information!
	if (level>e$calc.level) stop("level is higher that the one used in calculation, unable to fully extract Escoufier's matrix at this level!")
	Res <- eval(parse(text=e$data))[e$vr[e$RV<level]]
	Res
}
"extract.regul" <-
function(e, n, series=NULL, ...) {
	if (missing(n)) n <- ncol(e$y)
	nc <- ncol(e$y)
	if (is.null(nc)) nc <- 1		# if ncol() return null, we have just a single vector
	# if series is provided, we use it in priority
	if (is.null(series)) {
		if (n > nc) {
			if (nc > 1) warning(paste("Only", nc, "series exist in the object. Extract all series."))
			n <- nc
		}
		# We create a series value that correspond to the extraction of n first series
		series <- 1:n
	}
	if (nc == 1) {
		warning("Only one series in the object. Extract it.")
		y <- e$y[[1]]
	} else {			# Use series to determine which series to extract
		y <- as.matrix(e$y)[, series]
	}		
	# The treatment is different in R and in S+
	# In R, we create a 'ts' object, in S+, we create a 'rts' object
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		res <- ts(y, start=e$tspar$start, frequency=e$tspar$frequency)
		attr(res, "units") <- e$units
	} else {												# We are in S+
		res <- rts(y, start=e$tspar$start, frequency=e$tspar$frequency, units=e$units)
	}
	res
}
"extract.tsd" <-
function(e, n, series=NULL, components=NULL, ...) {
	if (missing(n)) n <- length(e$ts)
	ns <- length(e$ts)
	# if series is provided, we use it in priority
	if (is.null(series)) {
		if (n > ns) {
			warning(paste("Only", ns, "series exist in the object. Extract all series."))
			n <- ns
		}
		# We create a series value that correspond to the extraction of n first series
		series <- 1:n
	} else {					# If series is provided, we test it 
		if (is.character(series)) {
			names <- e$ts
			series <- pmatch(series, names, nomatch=0)
		} else {
			if (sum(series) > 0) series <- match(series, 1:ns, nomatch=0) else series <- c(1:ns)[match(-1:-ns, series, nomatch=0) == 0]
		}
		series <- series[series != 0]
		if (length(series) < 1)
			stop("series argument is invalid, or series does not exist in this object")
	}
	# Extract the series
	if (length(series) == 1) {
		if (ns == 1) {
			if (is.null(components)) res <- e$series else {
				if (is.character(components)) {
					names <- dimnames(e$series)[[2]]
					comp <- pmatch(components, names, nomatch=0)
				} else {
					if (sum(components) > 0) comp <- match(components, 1:ncol(e$series), nomatch=0) else comp <- c(1:ncol(e$series))[match(-1:-ncol(e$series), components, nomatch=0) == 0]
				}
				comp <- comp[comp != 0]
				if (length(comp) < 1)
					stop("No such components in the series")
				res <- e$series[, comp]
			}
		} else {
			if (is.null(components)) res <- e$series[[series]] else {
				if (is.character(components)) {
					names <- dimnames(e$series[[series]])[[2]]
					comp <- pmatch(components, names, nomatch=0)
				} else {
					if (sum(components) > 0) comp <- match(components, 1:ncol(e$series[[series]]), nomatch=0) else comp <- c(1:ncol(e$series[[series]]))[match(-1:-ncol(e$series[[series]]), components, nomatch=0) == 0]
				}
				comp <- comp[comp != 0]
				if (length(comp) < 1)
					stop("No such components in the series")
				res <- e$series[[series]][, comp]
			}
		}
	} else {
		res <- NULL
		for (i in series) {
			if (is.null(components)) ser <- e$series[[i]] else {
				if (is.character(components)) {
					names <- dimnames(e$series[[i]])[[2]]
					comp <- pmatch(components, names, nomatch=0)
				} else {
					if (sum(components) > 0) comp <- match(components, 1:ncol(e$series[[i]]), nomatch=0) else comp <- c(1:ncol(e$series[[i]]))[match(-1:-ncol(e$series[[i]]), components, nomatch=0) == 0]
				}
				comp <- comp[comp != 0]
				if (length(comp) > 0) {
					ser <- e$series[[i]][, comp]
					names <- dimnames(e$series[[i]])[[2]]
					names <- names[comp]
					if (is.null(res)) {
						res <- ser
						cnames <- paste(e$ts[i], ".", names, sep="")
					} else {
						res <- cbind(res, ser)
						cnames <- c(cnames, paste(e$ts[i], ".", names, sep=""))
					}
				}
			}
		}
		if (is.null(res))
			stop("nothing to extract!")
		dimnames(res)[[2]] <- cnames
	}
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		res <- as.ts(res)
		attr(res, "units") <- e$units
	} else {												# We are in S+
		res <- as.rts(res)
		attr(attr(res, "tspar"), "units") <- e$units
	}
	res
}
"extract.turnogram" <-
function(e, n, level=e$level, FUN=e$fun, drop=0, ...) {
	if (missing(n)) n <- NULL
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		data <- as.ts(eval(parse(text=e$data)))
		data <- window(data, start=start(data) + drop)
		if (level == 1) {	# Simply return the original time series
			res <- data
		} else {
			if (is.null(n) || n > length(data)) n <- length(data)
							
			# Check the validity of level
			if (level < 1 || level > n/3) stop("level must be a value between 1 and n/3!")
			res <- aggregate(data, nfrequency=frequency(data)/level, FUN=FUN)
		}
		if (NROW(res) < 10)
			warning("The extracted series contains very few data (n < 10)")
	} else {	# We are in S+
		data <- as.rts(eval(parse(text=e$data)))
		data <- window(data, start=start(data) + drop)
		if (level == 1) {	# Simply return the original time series
			res <- data
		} else {
			if (is.null(n) || n > length(data)) n <- length(data)
			# Check the validity of level
			if (level < 1 || level > n/3) stop("level must be a value between 1 and n/3!")
			res <- aggregate(data, nf=frequency(data)/level, fun=FUN)
		}
		if (length(res) < 10)
		warning("The extracted series contains very few data (n < 10)")
	}
	res
}
"extract.turnpoints" <-
function(e, n, no.tp=0, peak=1, pit=-1, ...) {
	if (missing(n)) n <- length(e)
	res <- rep(no.tp, length.out=e$n)
	res[e$pos[e$peaks]] <- peak
	res[e$pos[e$pits]] <- pit
	# Keep only the first n points
	if (n < length(res) & n > 0) res <- res[1:n]
	res
}
"first" <-
function(x, na.rm=FALSE) {
	if (na.rm) 
		x <- x[!is.na(x)]
	x[1]
}
"hist.regul" <-
function(x, nclass=30, col=c(4, 5, 2), xlab=paste("Time distance in", x$units, "with start =", min(x$x), ", n = ", length(x$x), ", deltat =", x$tspar$deltat), ylab=paste("Frequency, tol =", x$specs$tol), main="Number of matching observations", plotit=TRUE, ...) {
	# The next function actually draw the histogram
	regul.hist <- function(X, Col, Xlab, Ylab, Main, PlotIt, ...) {
		# Prepare the vector of data
		if (X$specs$tol.type == "none")
			stop("tol.type was 'none', all observations were interpolated!")
		Tol <- X$specs$tol
		if (Tol == 0) HT <- 1.001 else HT <- 101*Tol/100
		Data <- abs(X$match.dist)
		Data[is.infinite(Data)] <- HT 			# Inf are replaced by a value higher than Tol
		Data[Data == 0] <- -0.00001				# For putting exact matching values in a separate category
		# Don't draw, but get vectors of results
		res <- hist(Data, nclass=nclass, plot=FALSE)
		classes <- res$breaks[2:length(res$breaks)]
		ncl <- length(classes)
		classes[ncl] <- Inf
		counts <- res$counts
		counts <- counts[counts != 0]
		lc <- length(counts)
		counts2 <- NULL
		for (i in 1:lc) {
			counts2[i] <- sum(counts[1:i])
		}
		names(counts2) <- names(counts)
		# Create a vector for colors, so as the first and last classes are drawn in a different color
		cols <- NULL
		cols[1] <- Col[1]
		if (ncl > 2) cols[2:(ncl-2)] <- Col[2]
		cols[ncl] <- Col[3]
		# Actually draw the histogram
		if (PlotIt == TRUE)
			hist(Data, nclass=nclass, col=cols, xlab=Xlab, ylab=Ylab, main=Main)
		counts2
	}
	invisible(regul.hist(x, col, xlab, ylab, main, plotit, ...))
}
"identify.abund" <-
function(x, label.pts=FALSE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) {
	p <- length(x$p.log.ind)
	Xcoords <- (1:p)
	if (label.pts == FALSE) {			# We want to identify a break
		n <- identify(c(Xcoords, Xcoords, Xcoords), c(x$p.log.ind, x$cumsum, x$p.nonull), n=1, plot=FALSE)
		if (length(n)==0)	# Operation aborted!
			stop("No position indicated on the graph!")
		if (n > p) n <- n - p		# We didn't clicked on the first curve
		if (n > p) n <- n - p		# We didn't clicked on the second curve either
		cat("Number of variables extracted:", n, "on a total of", length(x$p.log.ind), "\n")
		# And eventually draw the lines
		lines.abund(x, n, lvert, lvars, col, lty, ...)
	} else {						# We just want to label points in the graph
		vr <- as.character(x$vr)
		labels <- c(vr, vr, vr)
		n <- identify(c(Xcoords, Xcoords, Xcoords), c(x$p.log.ind, x$cumsum, x$p.nonull), labels=labels)
		n[n > p ] <- n[n > p] - p			# We didn't clicked on the first curve
		n[n > p ] <- n[n > p] - p			# We didn't clicked on the second curve either
		n <- x$vr[unique(n)]
		print(n)
	}
	# We return the value n obtained
	invisible(n)
}
"identify.escouf" <-
function(x, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) {
	# We suppose both the RV graph and the RV.diff graph are drawn
	# So, we will use points of both graphs!
	# Calculate RV'
	n <- length(x$RV) - 1
	RVd <- x$RV[2:(n+1)] - x$RV[1:n]
	# Scale RV' to the same range as RV
	RVds <- (RVd-min(RVd))/max(RVd)*(max(x$RV)-min(x$RV))+min(x$RV)
	Xcoords <- (1:n)+0.5
	pos <- identify(c(Xcoords, Xcoords), c(x$RV[1:n], RVds), n=1, plot=FALSE)
	if (length(pos)==0)	# Operation aborted!
		stop("No position indicated on the graph!")
	if (pos>n) pos <- pos-n		# We clicked on the RV.diff curve
	# Now we calculate the corresponding level
	level <- (x$RV[pos]+x$RV[pos+1])/2
	cat("Level:", level, "\n")
	# And eventually draw the lines
	lines.escouf(x, level, lhorz, lvert, lvars, col, lty, ...)
	# We return the level obtained
	invisible(level)
}
"identify.local.trend" <-
function(x, ...) {
	x2 <- x
	class(x2) <- NULL
	x2 <- as.ts(x2)
	pos <- identify(x2, ...)
	# Calculate trends for the successive segments using slope and k
	n <- length(pos)
	k <- attr(x, "k")
	if (n > 1) {
		x2.val <- x2[pos]
		slopes <- (x2.val[2:n] - x2.val[1:(n-1)]) / (pos[2:n] - pos[1:(n-1)]) + k
	} else {
		slopes <- NA
	}
	res <- list(pos=pos, trends=slopes, k=k)
	res
}
"identify.regul" <-
function(x, series=1, col=3, label="#", ...) {
	labels <- rep(label, length.out=length(x$xini))
	i <- series
	if (i > ncol(x$yini))
		stop("This series does not exist")
	n <- identify(x$xini, x$yini[,i], labels=labels, col=col, ...)
	n.vec <- rep(0, length.out=length(x$xini))
	n.vec[n] <- 1
	n.vec
}
"identify.turnogram" <-
function(x, lvert=TRUE, col=2, lty=2, ...) {
	n <- length(x$interval)
	if (x$type == "Complete") {
		pos <- identify(c(x$interval, x$interval, x$interval), c(x$info, x$info.min, x$info.max), n=1, plot=FALSE)
	} else {
		pos <- identify(x$interval, x$info, n=1, plot=FALSE)
	}
	if (length(pos)==0)	# Operation aborted!
		stop("No position indicated on the graph!")
	if (pos > n) pos <- pos - n		# We clicked on either min or max curve
	if (pos > n) pos <- pos - n		# We clicked on max curve
	# Now we calculate the corresponding level
	level <- x$interval[pos]
	cat("Level      :", level, "\n")
	cat("Information:", x$info[pos], "\n")
	cat("Probability:", 2^-abs(x$info[pos]), "\n")
	cat("Nbr of obs.:", x$n[pos], "\n")
	cat("Turnpoints :", x$turns[pos], "\n")
	# And eventually draw the lines
	if (lvert == TRUE) abline(v=level, col=col, lty=lty, ...)
	# We return the level obtained
	invisible(level)
}
"is.tseries" <-
function(x) {
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		require(ts)
		is.ts(x)
	} else {												# We are in S+
		is.ts(x) | is.rts(x)
	}
}
"last" <-
function(x, na.rm=FALSE) {
	if (na.rm) 
		x <- x[!is.na(x)]
	x[length(x)]
}
"lines.abund" <-
function(x, n=x$n, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) {
	# The following function actually draws the lines
	abund.lines <- function(X, N, Lvert, Lvars, Col, Lty, ...) {
		if (is.null(N)) { 	# Missing n argument
			stop("You must provide a value for n!")
		} else {				# We draw the lines
			p <- length(X$p.log.ind)
			# Verify N
			if (N < 0) N <- 0
			if (N > p) N <- p
			if (Lvert==TRUE)		# We draw a vertical line
				lines(c(N+0.5, N+0.5), c(-10,110), lty=Lty, col=Col)
			if (Lvars==TRUE)		# We change colors of selected variables labels
				if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
					axis(1, 1:N, labels=as.character(X$vr[1:N]), col.axis=Col)
				} else {	# We are in S+, axis color is set by col
					axis(1, 1:N, labels=as.character(X$vr[1:N]), col=Col)
				}
		}
	}
	invisible(abund.lines(x, n, lvert, lvars, col, lty, ...))
}
"lines.escouf" <-
function(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) {
	# The next function actually draw the lines
	escouf.lines <- function(X, Level, Lhorz, Lvert, Lvars, Col, Lty, ...) {
		if (is.null(Level)) { 	# Missing level argument
			stop("You must provide a value for level!")
		} else {				# We draw the lines
			n <- length(X$RV)
			if (Lhorz==TRUE)		# We draw also an horizontal line
				lines(c(1, n), c(Level, Level), lty=Lty, col=Col)
			# How many variables do we keep?
			nvars <- length(X$RV[X$RV<Level])
			if (Lvert==TRUE)		# We draw also a vertical line
				lines(c(nvars+0.5, nvars+0.5), c(-0.1,1.5), lty=Lty, col=Col)
			if (Lvars==TRUE)		# We change also colors of selected variables labels
				if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
					axis(1, 1:nvars, labels=as.character(X$vr[1:nvars]), col.axis=Col)
				} else {	# We are in S+, axis color is set by col
					axis(1, 1:nvars, labels=as.character(X$vr[1:nvars]), col=Col)
				}
		}
	}
	invisible(escouf.lines(x, level, lhorz, lvert, lvars, col, lty, ...))
}
"lines.regul" <-
function(x, series=1, col=3, lty=1, plot.pts=TRUE,...) {
	# The next function actually draw the lines
	regul.lines <- function(X, Series, Col, Lty, Plot.pts, ...) {
		i <- Series
		if (i > ncol(X$y))
			stop("This series does not exist")
		# Calculate the time span
		xlbr <- min(X$x, na.rm=TRUE)
		xubr <- max(X$x, na.rm=TRUE)
		# Calculate the y span
		ylbr <- min(X$y[,i], na.rm=TRUE)
		yubr <- max(X$y[,i], na.rm=TRUE)
		# Trace the regulated series (but without NA values)
		xv <- X$x
		yv <- X$y[,i]
		xv <- xv[!is.na(yv)]
		yv <- yv[!is.na(yv)]
		lines(xv, yv, col=Col, lty=Lty)
		if (Plot.pts == TRUE) {					# plot points of regular series
			points(xv, yv, col=Col, pch="+")
			# Indicate matching points
			points(X$x[is.finite(X$match.dist)], X$y[is.finite(X$match.dist), i], col=Col, pch="O")
		}
		# Indicate spanning of regulated series
		lines(c(xlbr, xlbr), c(ylbr, yubr/3*2), col=Col, lty=2, type="l")
		lines(c(xubr, xubr), c(ylbr, yubr/3*2), col=Col, lty=2, type="l")
	}
	invisible(regul.lines(x, series, col, lty, plot.pts, ...))
}
"lines.stat.slide" <-
function(x, stat="mean", col=3, lty=1, ...) {
	# The next function actually draws the lines
	stat.slide.lines <- function(X, Stat, Col, Lty, ...) {
		# Verify if Stat is among possible values
		STATS <- c("min", "max", "median", "mean", "pos.median", "pos.mean", "geo.mean", "pen.mean")
		stat.idx <- pmatch(Stat, STATS)
		if (is.na(stat.idx)) 
			stop("invalid stat value")
		if (stat.idx == -1) 
			stop("ambiguous stat value")
		ysld <- switch(stat.idx,
				"min"=unlist(X$stat["min",]),
				"max"=unlist(X$stat["max",]),
				"median"=unlist(X$stat["median",]),
				"mean"=unlist(X$stat["mean",]),
				"pos.median"=unlist(X$stat["pos.median",]),
				"pos.mean"=unlist(X$stat["pos.mean",]),
				"geo.mean"=unlist(X$stat["geo.mean",]),
				"pen.mean"=unlist(X$stat["pen.mean",]))
		# Verify that there is something in ysld
		if ( sum(is.na(ysld)) == length(ysld))
			stop(paste(Stat, "was not calculated in x!"))
		# Construct x and y vectors for the sliding statistics
		xsld <- sort(rep(X$xcut,2))
		yn <- length(ysld)
		ysld[2:(2*yn+1)] <- ysld[floor(seq(1,yn+0.5, by=0.5))]
		ysld[1] <- min(X$x,na.rm=TRUE)
		ysld[2*yn+2] <- min(X$x,na.rm=TRUE)
		lines(xsld, ysld, type="l", col=Col, lty=Lty, ...)
	}
	invisible(stat.slide.lines(x, stat, col, lty, ...))
}
"lines.turnpoints" <-
function(x, max=TRUE, min=TRUE, median=TRUE, col=c(4, 4, 2), lty=c(2, 2, 1), ...) {
	# The next function actually draws the graph
	turnpoints.lines <- function(X, Max, Min, Median, Col, Lty, ...) {
		x.peaks <- X$pos[X$peaks]
		y.peaks <- X$points[X$peaks]
		y.peaks.approx <- approx(x.peaks, y.peaks, X$pos, method="linear")$y
		x.pits <- X$pos[X$pits]
		y.pits <- X$points[X$pits]
		y.pits.approx <- approx(x.pits, y.pits, X$pos, method="linear")$y
		y.median <- y.pits.approx + (y.peaks.approx - y.pits.approx) / 2
		if (Max)
			lines(x.peaks, y.peaks, col=Col[1], lty=Lty[1], ...)
		if (Min)
			lines(x.pits, y.pits, col=Col[2], lty=Lty[2], ...)
		if (Median)
			lines(X$pos, y.median, col=Col[3], lty=Lty[3], ...)
	}
	col <- rep(col, length.out=3)
	lty <- rep(lty, length.out=3)
	invisible(turnpoints.lines(x, max, min, median, col, lty, ...))	
}
"local.trend" <-
function(x, k=mean(x), plotit=TRUE, type="l", cols=1:2, ltys=2:1, xlab="Time", ylab="cusum", ...) {
	call <- match.call()
	Data <- deparse(substitute(x))
	if (!is.null(ncol(x)))
		stop("only univariate series are allowed")
	if (length(x) < 3)
		stop("you need at least 3 values in the series")
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		require(ts)
		x <- as.ts(x)
	} else {												# We are in S+
		x <- as.rts(x)
	}
	x2 <- cumsum(x-k)
	# put x at the same scale as x2
	xmin <- min(x)
	xmax <-max(x)
	x2min <- min(x2)
	x2max <-max(x2)
	x <- (x - xmin) / (xmax - xmin) * (x2max - x2min) + x2min
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x2.ts <- ts(x2, frequency=frequency(x), start=start(x))
	} else {												# We are in S+
		x2.ts <- rts(x2, frequency=frequency(x), start=start(x))
	}
	if (plotit == TRUE) {
		if (length(cols) < 2) cols <- rep(cols, 2)
		if (length(ltys) < 2) ltys <- rep(ltys, 2)
		plot(x, type=type, col=cols[1], lty=ltys[1], xlab=xlab, ylab=ylab, ...)
		lines(x2.ts, col=cols[2], lty=ltys[2])
	}
	res <- x2.ts
	attr(res, "k") <- k
	attr(res, "data") <- Data
	attr(res, "call") <- call
	class(res) <- c("local.trend", class(x2.ts))		# turn it into a 'local.trend' object
	res
}
"match.tol" <-
function(x, table, nomatch=NA, tol.type="both", tol=0) {
	# replace interpolated/extrapolated value yout(i) by a real observation if there is one in the time scale
	# xout(i) +/- tol if tol.type = "center"
	# xout(i) + tol if tol.type = "right"
	# xout(i) - tol if tol.type = "left"
	# nothing if tol.type = "none"
	# and by an exact matching if tol = 0 and tol.type != "none"
	# If, after rounding, there are several candidates for matching, only the closest value is kept
	# The matrix algorithm used here work only for round fraction of deltat for tol!! (tol = deltat, tol = deltat/2, etc...)
	
	# Right match (in case of several matches, the closest is the first one, reported correctly by match)
	match.tol.right <- function(x, table, tol, nomatch=NA) {
		if (tol == 0) {
			posr <- match(x, table, nomatch)
		} else {			# Warning: tol must be a round fraction of deltat
			xornd <- round((x - min(x, na.rm=TRUE)) / tol)
			xrnd <- floor((table - min(x, na.rm=TRUE)) / tol)
			posr <- match(xornd, xrnd, nomatch)
		}
		posr
	}
	# Left match (in case of several matches, the closest is the last one => apply match on the reversed vector)
	match.tol.left <- function(x, table, tol, nomatch=NA) {
		if (tol == 0) {
			posl <- match(x, table, nomatch)
		} else {			# Warning: tol must be a round fraction of deltat
			xornd <- round((x - min(x, na.rm=TRUE)) / tol)
			xrnd <- ceiling((table - min(x, na.rm=TRUE)) / tol)
			xol <- length(xornd)
			xl <- length(xrnd)
			posl <- match(xornd[xol:1], xrnd[xl:1], nomatch)
			# posl is reversed and indices are changed to correspond to a usual match, but keeping last one in case of ex aequos
			posl <- xl + 1 - posl[xol:1]
		}
		posl
	}
	# match on both sides. We search for tol on left and on right
	match.tol.both <- function(x, table, tol, nomatch=NA) {
		if (tol == 0) {
			posr <- match(x, table, nomatch)
		} else {			# Warning: tol must be a round fraction of deltat
			# Seach right
			xornd <- round((x - min(x, na.rm=TRUE)) / tol)
			xrnd <- floor((table - min(x, na.rm=TRUE)) / tol)
			posr <- match(xornd, xrnd, nomatch)
			# Search left
			xrnd <- ceiling((table - min(x, na.rm=TRUE)) / tol)
			xol <- length(xornd)
			xl <- length(xrnd)
			posl <- match(xornd[xol:1], xrnd[xl:1], nomatch)
			# posl is reversed and indices are changed to correspond to a usual match, but keeping last one in case of ex aequos
			posl <- xl + 1 - posl[xol:1]
			# we keep only posr, but replace NA by values of posl, and also in case of both left, and right matches
			# we compare which one is closest. If left match is closest than right match, we replace also in posr
			repl <- (is.na(posr)) | ((x - table[posl]) < (table[posr] - x))		# In case of same distance, we keep right match
			repl[is.na(repl)] <- FALSE
			posr[repl] <- posl[repl]
		}
		posr
	}
				
	# match.tol starts here
	table <- sort(table)					# Make sure table is sorted in increasing order
	table <- table[!is.na(table)]			# Eliminate missing values
	n <- length(table)
	# does x be regularly spaced?
	spaces <- x[2:length(x)] - x[1:(length(x)-1)]
	if (max(spaces) - min(spaces) > max(spaces)/100)
		stop("x must be a regularly spaced vector for match.tol!")
	# We verify also that tol is a round fraction of the space in x
	space <- mean(spaces)
	if (is.null(tol) || tol == 0) {
		tol <- 0
		tol2 <- 0
	} else {
		tol2 <- abs(tol)
		if (tol2 > space) tol2 <- space else {
			tol2 <- space/round(space/tol2)
		}
	}
	if (tol2 != tol)
		cat(paste("'tol' was adjusted to", tol2, "\n\n"))
	# match according to tol.type
	TOL.TYPES <- c("left", "both", "right", "none")
	tol.idx <- pmatch(tol.type, TOL.TYPES)
	if (is.na(tol.idx)) 
		stop("invalid tol.type value")
	if (tol.idx == -1) 
		stop("ambiguous tol.type value")
	# calculate the matching vector
	pos <- switch(tol.idx,
		  	"left"=match.tol.left(x, table, tol2),
		  	"both"=match.tol.both(x, table, tol2),
			"right"=match.tol.right(x, table, tol2),
			"none"=match(x, table, nomatch=NA))
	pos
}
"pennington" <-
function(x, calc="all", na.rm=FALSE) {
	# Calculation of Pennington's mean
	pen.mean <- function(n, m, MeanLogVal, VarLogVal) {
		# Calculation of Gm(t)
		Gmt <- function(m, t) {
			# This function calculate a single j term for the sum
			SumTerm <- function(m, t, j) {ST <- exp((2*j-1)*log(m-1)+j*log(t)-j*log(m)-sum(log(c(2:j-1)*2-1+m))-sum(log(c(1:j)))); ST}
			# Sum is calculated iteratively to a precision of 0.01%
			# If t=0, Gmt=1. m must not be 0 or 1, but this is treated in the body of the main function
			if (t==0) Sum <- 1 else {
				Sum <- 1+t*(m-1)/m	# we already put the first terms in Sum
				it <- 1		# iteration count (will start at 2)
				del <- 1	# iterative adjustment
				while (del>0.0001 && (it <- it+1)<20) {
					Sum2 <- Sum+SumTerm(m, t, it)
					del <- abs((Sum-Sum2)/Sum)
					Sum <- Sum2
				}
			}
			Sum
		}
		
		# No non-zero values => return 0
		if (m==0) PenMean <- 0 else {
			# Only one non-zero value => return x1/n
			if (m==1) PenMean <- exp(MeanLogVal)/n else {
				# Calculation with the full formula
				PenMean <- m/n*exp(MeanLogVal)*Gmt(m,VarLogVal/2)
			}
		}
		PenMean
	}
	
	# Calculation of Pennington's variance
	pen.var <- function(n, m, MeanLogVal, VarLogVal) {
		# Calculation of Gm(t)
		Gmt <- function(m, t) {
			# This function calculate a single j term for the sum
			SumTerm <- function(m, t, j) {ST <- exp((2*j-1)*log(m-1)+j*log(t)-j*log(m)-sum(log(c(2:j-1)*2-1+m))-sum(log(c(1:j)))); ST}
			# Sum is calculated iteratively to a precision of 0.01%
			# If t=0, Gmt=1. m must not be 0 or 1, but this is treated in the body of the main function
			if (t==0) Sum <- 1 else {
				Sum <- 1+t*(m-1)/m	# we already put the first terms in Sum
				it <- 1		# iteration count (will start at 2)
				del <- 1	# iterative adjustment
				while (del>0.0001 && (it <- it+1)<20) {
					Sum2 <- Sum+SumTerm(m, t, it)
					del <- abs((Sum-Sum2)/Sum)
					Sum <- Sum2
					# cat(it, Sum, "\n")	# To get all iterations
				}
			}
			Sum
		}

		# No non-zero values => return 0
		if (m==0) PenVar <- 0 else {
			# Only one non-zero value => return x1^2/n
			if (m==1) PenVar <- (exp(MeanLogVal))^2/n else {
				# Calculation with the full formula
				PenVar <- m/n*exp(2*MeanLogVal)*(Gmt(m,2*VarLogVal)-(m-1)/(n-1)*Gmt(m,(m-2)/(m-1)*VarLogVal))
			}
		}
		PenVar
	}
		
	# Calculation of Pennington's variance of the mean
	pen.mean.var <- function(n, m, MeanLogVal, VarLogVal) {
		# Calculation of Gm(t)
		Gmt <- function(m, t) {
			# This function calculate a single j term for the sum
			SumTerm <- function(m, t, j) {ST <- exp((2*j-1)*log(m-1)+j*log(t)-j*log(m)-sum(log(c(2:j-1)*2-1+m))-sum(log(c(1:j)))); ST}
			# Sum is calculated iteratively to a precision of 0.01%
			# If t=0, Gmt=1. m must not be 0 or 1, but this is treated in the body of the main function
			if (t==0) Sum <- 1 else {
				Sum <- 1+t*(m-1)/m	# we already put the first terms in Sum
				it <- 1		# iteration count (will start at 2)
				del <- 1	# iterative adjustment
				while (del>0.0001 && (it <- it+1)<20) {
					Sum2 <- Sum+SumTerm(m, t, it)
					del <- abs((Sum-Sum2)/Sum)
					Sum <- Sum2
					# cat(it, Sum, "\n")	# To get all iterations
				}
			}
			Sum
		}

		# No non-zero values => return 0
		if (m==0) PenMeanVar <- 0 else {
			# Only one non-zero value => return x1^2/n
			if (m==1) PenMeanVar <- (exp(MeanLogVal))^2/n else {
				# Calculation with the full formula
				PenMeanVar <- m/n*exp(2*MeanLogVal)*(m/n*(Gmt(m,VarLogVal/2))^2-(m-1)/(n-1)*Gmt(m,(m-2)/(m-1)*VarLogVal))
			}
		}
		PenMeanVar
	}
		
	
	# This is the core part of pennington!
	# If na.rm=FALSE and some missing data, must return NA
	if (na.rm==FALSE & length(x[!is.na(x)])<length(x)) IsNa <- TRUE else IsNa <- FALSE
	# N is the nbr of values (excluding missing ones)
	N <- sum(as.numeric(!is.na(x)))
	# If no remaining values after eliminating missing values, return NA
	if (N==0) IsNa <- TRUE
	# End of tests => either return NA or further calculate
	if (IsNa==TRUE) {
		if (calc=="all") Result <- unlist(list(mean=NA, var=NA, mean.var=NA)) else Result <- NA
	} else {		# Calculate
		# LogVal contains log of all non-zero and non-missing values
		LogVal <- log(x[!is.na(x) & x>0])
		# M is the number of non-zero values
		M <- length(LogVal)
		# Calculation of mean and variance
		MeanLogVal <- mean(LogVal)
		VarLogVal <- var(LogVal)
		# Depending on the calc arg, we calculate the mean, var or var.mean
		Result <- switch(calc,
			mean = pen.mean(N, M, MeanLogVal, VarLogVal),
			var = pen.var(N, M, MeanLogVal, VarLogVal),
			mean.var = pen.mean.var(N, M, MeanLogVal, VarLogVal),
			all = unlist(list(mean=pen.mean(N, M, MeanLogVal, VarLogVal), var=pen.var(N, M, MeanLogVal, VarLogVal), mean.var=pen.mean.var(N, M, MeanLogVal, VarLogVal))))
	}
	Result	
}
"pgleissberg" <-
function(n, k, lower.tail=TRUE, two.tailed=FALSE) {
	# Make sure n and k have same length
	if (length(n) > length(k)) k <- rep(k, length.out=length(n))
	if (length(n) < length(k)) n <- rep(n, length.out=length(k))
	# Calculate Gleissberg probability for each (n, k) pair
	res <- rep(0, length(n))
	OK <- (n >= 3 & k >= 0 & k <= n-2)
	if (sum(OK) > 0) {
		ncalc <- n[OK]
		kcalc <- k[OK]
		rescalc <- rep(0, length(ncalc))
		Norm <- (ncalc > 50)
		if (sum(Norm) > 0) {
			# Normal approximation of Gleissberg distribution
			nnorm <- ncalc[Norm]
			knorm <- kcalc[Norm]
			Mean <- 2 / 3 * (nnorm - 2)
			Var <- (16 * nnorm - 29) / 90
			if (two.tailed == TRUE) {	# two-sided probability
				resnorm <- pnorm(knorm, Mean, sqrt(Var))
				rightpart <- resnorm > 0.5
				resnorm[rightpart] <- 1 - resnorm[rightpart]
				resnorm <- 2 * resnorm
			} else {					# one-sided probability
				if (lower.tail == TRUE) {
					resnorm <- pnorm(knorm, Mean, sqrt(Var))
				} else {
					resnorm <- 1 - pnorm(knorm, Mean, sqrt(Var))
				}
			}
			rescalc[Norm] <- resnorm
		}
		if (sum(!Norm) > 0) {
			# Calculate exact Gleissberg distribution
			# This is normally loaded from gleissberg.table
			# but if it fails, it can be recalculated with:
			"gleissberg.calc" <- function() {
				n <- 50
				k <- 48
				Gleiss <- matrix(0, n - 2, k + 1)
				N <- nrow(Gleiss)
				K <- ncol(Gleiss)
				Gleiss[,1] <- 2
				Gleiss[1, 2] <- 4
				for (n in 2:N) {
					Gleiss[n, 2] <- 2*Gleiss[n-1, 2] + 2*Gleiss[n-1, 1]
					for (k in 3:K) {
						for (n in (k-1):N) {
							Gleiss[n, k] <- k*Gleiss[n-1, k] + 2*Gleiss[n-1,k-1] + (n-k+2)*Gleiss[n-1, k-2]
						}
					}
				}
				Gleiss <- Gleiss / gamma(4:51)		# gamma(n + 1) is equivalent to n!
				# This is the probability, giving any (n, k) pair... but we want a table of right-tailed cumulated probabilities
				Gleiss <- t(apply(t(Gleiss), 2, cumsum))
				if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
					assign(".gleissberg.table", Gleiss, env = .GlobalEnv)	
				} else {												# We are in S+
					assign(".gleissberg.table", Gleiss, where = 0)
				}
				invisible(NULL)
			}
			
			# Determination of Gleissberg probability
			ng <- ncalc[!Norm]
			kg <- kcalc[!Norm]
			if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
				if (length(objects(envir=.GlobalEnv, all.names=TRUE, pattern=".gleissberg.table")) == 0) {	# Table not found
					try(data(gleissberg.table))
					if (length(objects(envir=.GlobalEnv, all.names=TRUE, pattern=".gleissberg.table")) == 0) {	# Table still not found
						cat("Creating Gleissberg distribution table...\n\n")
						if (R.Version()$os == "Win32") {flush.console()}
						gleissberg.calc()
					}
				}
			} else {												# We are in S+
				if (exists(".gleissberg.table") == FALSE) {	# Table not found
					try(data(gleissberg.table))
					if (exists(".gleissberg.table") == FALSE) {	# Table still not found
						cat("Creating Gleissberg distribution table...\n\n")
						gleissberg.calc()
					}
				}
			}
			.gleissberg.table <- as.matrix(.gleissberg.table)
			if (two.tailed == TRUE) { 	# two-sided probability
				# As Gleissberg distribution is asymmetric, we have to calculate both sides independently
				mu <- 2 / 3 * (ng - 2)
				delta <- abs(mu - kg)
				resg1 <- .gleissberg.table[ng - 2, mu - delta + 1]
				resg2 <- 1 - .gleissberg.table[ng - 2, mu + delta + 1]
				resg <- resg1 + resg2
				if (!is.null(ncol(resg))) resg <- diag(resg)
			} else {					# one-sided probability
				resg <- .gleissberg.table[ng - 2, kg + 1]
				if (!is.null(ncol(resg))) resg <- diag(resg)
				if (lower.tail == FALSE) resg <- 1 - resg
				}
			rescalc[!Norm] <- resg
		}
		res[OK] <- rescalc
	}
	res
}
"plot.abund" <-
function(x, n=x$n, lvert=TRUE, lvars=TRUE, lcol=2, llty=2, all=TRUE, dlab=c("cumsum", "% log(ind.)", "% non-zero"), dcol=c(1,2,4), dlty=c(par("lty"), par("lty"), par("lty")), dpos=c(1.5, 20), type="l", xlab="variables", ylab="abundance", main=paste("Abundance sorting for:",x$data, "with f =", round(x$f, 4)), ...) {
	# The following function actually draws the graph
	abund.graph <- function(X, N, Lvert, Lvars, Lcol, Llty, All, Dlab, Dcol, Dlty, Dpos, Type, Xlab, Ylab, Main, ...) {
		p <- length(X$p.log.ind)
		plot(X$p.log.ind, type="n", ylim=c(0, 100), xlab=Xlab, ylab=Ylab, main=Main, xaxs="i", xaxt="n", ...)
		axis(1, 1:p, labels=as.character(X$vr))
		# Do we plot all lines or not?
		if (All == FALSE) {
			lines(1:p, X$cumsum, col=Dcol[1], lty=Dlty[1], type=Type)
			# Since there is only one line, we don't need a legend!
		} else {
			lines(1:p, X$p.log.ind, col=Dcol[2], lty=Dlty[2], type=Type)
			lines(1:p, X$p.nonull, col=Dcol[3], lty=Dlty[3], type=Type)
			lines(1:p, X$cumsum, col=Dcol[1], lty=Dlty[1], type=Type)
			# Draw the legend
			legend(Dpos[1], Dpos[2], Dlab, col=Dcol, lty=Dlty)
		}
		if (is.null(N)==FALSE) { # We draw the lines
			# Verify N
			if (N < 0) N <- 0
			if (N > p) N <- p
			if (Lvert==TRUE)		# We draw a vertical line
				lines(c(N+0.5, N+0.5), c(-10,110), lty=Llty, col=Lcol)
			if (Lvars==TRUE) {		# We change colors of selected variables labels
				if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
					axis(1, 1:N, labels=as.character(X$vr[1:N]), col.axis=Lcol)
				} else {	# We are in S+, axis color is set by col
					axis(1, 1:N, labels=as.character(X$vr[1:N]), col=Lcol)
				}
			}
		}
	}
	invisible(abund.graph(x, n, lvert, lvars, lcol, llty, all, dlab, dcol, dlty, dpos, type, xlab, ylab, main, ...))
}
"plot.escouf" <-
function(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, lcol=2, llty=2, diff=TRUE, dlab="RV' (units not shown)", dcol=4, dlty=par("lty"), dpos=0.8, type="s", xlab="variables", ylab="RV", main=paste("Escoufier's equivalent vectors for:",x$data), ...) {
	# The next function actually draw the graph
	escouf.graph <- function(X, Level, Lhorz, Lvert, Lvars, Lcol, Llty, Diff, Dlab, Dcol, Dlty, Dpos, Type, Xlab, Ylab, Main, ...) {
		n <- length(X$RV)
		plot(X$RV, type=Type, xlab=Xlab, ylab=Ylab, main=Main, xaxs="i", xaxt="n", ...)
		axis(1, 1:length(X$RV), labels=as.character(X$vr))
		# Do we plot also RV.diff?
		if (Diff==TRUE) {
			# Calculate RV.diff
			RV.diff <- X$RV[2:n] - X$RV[1:(n-1)]
			# Scale RV.diff to the same range as RV
			RVd <- RV.diff
			RVds <- (RVd-min(RVd))/max(RVd)*(max(X$RV)-min(X$RV))+min(X$RV)
			# Plot the line
			lines((1:(n-1))+0.5, RVds, col=Dcol, lty=Dlty)
			# Draw the label
			xPos <- n*Dpos
			yInd <- round(xPos); if (yInd<length(RVds)) yInd <- length(RVds)
			if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
				text(xPos, RVds[yInd], Dlab, pos=3, col=Dcol)
			} else {	# We are in S+, and the pos option does not exist for the text function!
				yoffset <- (max(X$RV)-min(X$RV))/20
				text(xPos, RVds[yInd]+yoffset, Dlab, col=Dcol)
			}
		}
		if (is.null(Level)==FALSE) { # We draw the lines
			if (Lhorz==TRUE)		# We draw also a horizontal line
				lines(c(1, n), c(Level, Level), lty=Llty, col=Lcol)
			# How many variables do we keep?
			nvars <- length(X$RV[X$RV<Level])
			if (Lvert==TRUE)		# We draw also a vertical line
				lines(c(nvars+0.5, nvars+0.5), c(-0.1,1.5), lty=Llty, col=Lcol)
			if (Lvars==TRUE)		# We change also colors of selected variables labels
				if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
					axis(1, 1:nvars, labels=as.character(X$vr[1:nvars]), col.axis=Lcol)
				} else {	# We are in S+, axis color is set by col
					axis(1, 1:nvars, labels=as.character(X$vr[1:nvars]), col=Lcol)
				}
		}
			
	}
	invisible(escouf.graph(x, level, lhorz, lvert, lvars, lcol, llty, diff, dlab, dcol, dlty, dpos, type, xlab, ylab, main, ...))
}
"plot.regul" <-
function(x, series=1, col=c(1,2), lty=c(par("lty"), par("lty")), plot.pts=TRUE, leg=FALSE, llab=c("initial", x$specs$methods[series]), lpos=c(1.5, 10), xlab=paste("Time (", x$units, ")", sep=""), ylab="Series", main=paste("Regulation of", names(x$y)[series]), ...) {
	# The next function actually draw the graph
	regul.graph <- function(X, Series, Col, Lty, Plot.pts, Leg, Llab, Lpos, Xlab, Ylab, Main, ...) {
		i <- Series
		if (i > ncol(X$y))
			stop("This series does not exist")
		# Calculate the total time span
		xlbi <- min(X$xini, na.rm=TRUE)
		xubi <- max(X$xini,na.rm=TRUE)
		xlbr <- min(X$x, na.rm=TRUE)
		xubr <- max(X$x, na.rm=TRUE)
		xlb <- min(xlbi, xlbr)
		xub <- max(xubi, xubr)
		xspan <- c(xlb, xub)
		# Calculate the y span
		ylbi <- min(X$yini[,i], na.rm=TRUE)
		yubi <- max(X$yini[,i], na.rm=TRUE)
		ylbr <- min(X$y[,i], na.rm=TRUE)
		yubr <- max(X$y[,i], na.rm=TRUE)
		ylb <- min(ylbi, ylbr)
		yub <- max(yubi, yubr)
		yspan <- c(ylb, yub)
		plot(xspan, yspan, type="n", xlab=Xlab, ylab=Ylab, main=Main, ...)
		# Trace the initial series
		lines(X$xini, X$yini[,i], col=Col[1], lty=Lty[1])
		# Trace the regulated series (but without NA values)
		xv <- X$x
		yv <- X$y[,i]
		xv <- xv[!is.na(yv)]
		yv <- yv[!is.na(yv)]
		lines(xv, yv, col=Col[2], lty=Lty[2])
		if (Plot.pts == TRUE) {					# plot points of regular series
			points(xv, yv, col=Col[2], pch="+")
			# Indicate matching points
			points(X$x[is.finite(X$match.dist)], X$y[is.finite(X$match.dist), i], col=Col[2], pch="O")
		}
		# Indicate respective spanning of initial and regulated series
		lines(c(xlbi, xlbi), c(ylb+yub/3, yub), col=Col[1], lty=2, type="l")
		lines(c(xubi, xubi), c(ylb+yub/3, yub), col=Col[1], lty=2, type="l")
		lines(c(xlbr, xlbr), c(ylb, yub/3*2), col=Col[2], lty=2, type="l")
		lines(c(xubr, xubr), c(ylb, yub/3*2), col=Col[2], lty=2, type="l")
		# If Leg is TRUE, print a legend
		if (Leg == TRUE) {
			legend(Lpos[1], Lpos[2], Llab, col=Col, lty=Lty)
		}
	}
	invisible(regul.graph(x, series, col, lty, plot.pts, leg, llab, lpos, xlab, ylab, main, ...))
}
"plot.stat.slide" <-
function(x, stat="mean", col=c(1,2), lty=c(par("lty"), par("lty")), leg=FALSE, llab=c("series", stat), lpos=c(1.5, 10), xlab="time", ylab="y", main=paste("Sliding statistics"), ...) {
	# The next function actually draws the graph
	stat.slide.graph <- function(X, Stat, Col, Lty, Leg, Llab, Lpos, Xlab, Ylab, Main, ...) {
		# Verify if Stat is among possible values
		STATS <- c("min", "max", "median", "mean", "pos.median", "pos.mean", "geo.mean", "pen.mean")
		stat.idx <- pmatch(Stat, STATS)
		if (is.na(stat.idx)) 
			stop("invalid stat value")
		if (stat.idx == -1) 
			stop("ambiguous stat value")
		ysld <- switch(stat.idx,
				"min"=unlist(X$stat["min",]),
				"max"=unlist(X$stat["max",]),
				"median"=unlist(X$stat["median",]),
				"mean"=unlist(X$stat["mean",]),
				"pos.median"=unlist(X$stat["pos.median",]),
				"pos.mean"=unlist(X$stat["pos.mean",]),
				"geo.mean"=unlist(X$stat["geo.mean",]),
				"pen.mean"=unlist(X$stat["pen.mean",]))
		# Verify that there is something in ysld
		if ( sum(is.na(ysld)) == length(ysld))
			stop(paste(Stat, "was not calculated in x!"))
		plot(X$x, X$y, type="l", col=Col[1], lty=Lty[1], xlab=Xlab, ylab=Ylab, main=Main, ...)
		# Construct x and y vectors for the sliding statistics
		xsld <- sort(rep(X$xcut,2))
		yn <- length(ysld)
		ysld[2:(2*yn+1)] <- ysld[floor(seq(1,yn+0.5, by=0.5))]
		ysld[1] <- min(X$x,na.rm=TRUE)
		ysld[2*yn+2] <- min(X$x,na.rm=TRUE)
		lines(xsld, ysld, type="l", col=Col[2], lty=Lty[2], ...)
		# If Leg is TRUE, print a legend
		if (Leg == TRUE) {
			legend(Lpos[1], Lpos[2], Llab, col=Col, lty=Lty)
		}
		
	}
	invisible(stat.slide.graph(x, stat, col, lty, leg, llab, lpos, xlab, ylab, main, ...))
}
"plot.tsd" <-
function (x, series=1, stack=TRUE, resid=TRUE, col=par("col"), lty=par("lty"), labels=dimnames(X)[[2]], leg = TRUE, lpos=c(0,0), xlab="time", ylab="series", main=paste("Series decomposition by", x$specs$method, "-", x$specs$type), ...) {
    ser <- x$ts
    if (length(ser) == 1) {		# We have the decomposition of a single series in the object
    	if (series != 1)
    		stop("the series does not exist in this object")
    	sers <- unclass(x$series)
    } else {					# We have the decomposition of several series in the object
    	if (series < 1 || series > length(ser))
    		stop("the series does not exist in this object")
    	sers <- unclass(x$series[[series]])
    }
    model.type <- x$specs$type		# Additive or multiplicative
    if (is.null(model.type))
    	model.type <- "additive"	# By default
    ncomp <- ncol(sers)
    if (model.type == "additive") {
    	series <- drop(sers %*% rep(1, ncomp))
    	# Equivalent to: series <- apply(sers, 1, sum)
    } else {
    	series <- apply(sers, 1, prod)
    }
    X <- cbind(series = series, sers)
    # Does the last series represent residuals?
    if (dimnames(sers)[[2]][ncomp] == "residuals") {
    	if (resid == TRUE) nplot <- ncomp + 1 else nplot <- ncomp
    } else {		# No residuals
    	nplot <- ncomp + 1
    	resid <- FALSE
    }
    col <- rep(col, nplot)
    lty <- rep(lty, nplot)
    if (stack == TRUE) {
    	oldpar <- par("mar", "oma", "mfrow", "tck")
    	on.exit(par(oldpar))
    	par(mar = c(0, 6, 0, 6), oma = c(6, 0, 4, 0), tck = -0.01)
    	par(mfrow = c(nplot, 1))
    	for (i in 1:nplot) {
    		plot.type <- if (i < nplot | resid == FALSE) "l" else if (model.type == "additive") "h" else "p"
    		
    		# Solution using segments for residuals based on 1
    		#x.1 <- rnorm(1000,10)  # random numbers around 10
			#plot(x.1,type='n')
			#segments(seq(x.1),10,seq(x.1),x.1)  # plot segments basing on 10
    		
        	plot(X[, i], type = plot.type, xlab = "", ylab = "", axes = FALSE, col = col[i], lty = lty[i], ...)
        	if (i == nplot & resid == TRUE & min(X[, i]) < 0 & max(X[, i]) > 0) 
            	if (model.type == "additive") abline(h = 0) else abline(h = 1)
            box()
        	right <- i%%2 == 0
        	axis(2, labels = !right)
        	axis(4, labels = right)
        	mtext(labels[i], 2, 3)
        }
    	axis(1, labels = TRUE)
    	axis(3, labels = FALSE)
    	mtext(xlab, 1, 3)
    } else {		# Stack is false
		X <- as.ts(X)
    	if (resid == TRUE) {
			X <- as.ts(X)
		} else {
			n <- ncol(X) - 1	
			X <- as.ts(X[,1:n])	
		}
		if (exists("is.R") && is.function(is.R) && is.R()) {			# We are in R
    		ts.plot(X, gpars=list(col=col, lty=lty, xlab=xlab, ylab=ylab, main=main))
    	} else {														# We are in S+
			ts.plot(X, col=col, lty=lty, xlab=xlab, ylab=ylab, main=main)	
		}
    	if (leg == TRUE) legend(lpos[1], lpos[2], labels[1:n], col=col[1:n], lty=lty[1:n])
    }
    invisible()
}
"plot.turnogram" <-
function(x, level=0.05, lhorz=TRUE, lvert=TRUE, lcol=2, llty=2, xlog=TRUE, xlab=paste("interval (", x$units.text, ")", sep=""), ylab="I (bits)", main=paste(x$type, "turnogram for:",x$data), sub=paste(x$fun, "/", x$proba), ...) {
	# The next function actually draws the graph
	turnogram.graph <- function(X, Level, Xlog, Lhorz, Lvert, Lcol, Llty, Xlab, Ylab, Main, Sub, ...) {
		Ilevel <- -log(Level, base=2)
		if (Xlog == TRUE) xlogstr <- "x" else xlogstr <- ""
		if (X$proba == "two-tailed probability") {
			imin <- -1.1*Ilevel
			two.tailed <- TRUE
		} else {
			imin <- 0
			two.tailed <- FALSE
		}
		if (X$type == "Simple") {
			yrange.dat <- c(X$info, imin, 1.1*Ilevel)
			yrange <- c(min(yrange.dat), max(yrange.dat))
			plot(X$interval, X$info, type="l", log=xlogstr, ylim=yrange, xlab=Xlab, ylab=Ylab, main=Main, sub=Sub, ...)
		} else {
			yrange <- c(min(c(X$info.min, imin)), max(c(X$info.max, 1.1*Ilevel)))
			plot(X$interval, X$info, type="l", log=xlogstr, ylim=yrange, xlab=Xlab, ylab=Ylab, main=Main, sub=Sub, ...)
			lines(X$interval, X$info.min)
			lines(X$interval, X$info.max)
		}
		if (Lhorz == TRUE) {
			if (two.tailed == TRUE) {
				abline(h=0)
				abline(h=-Ilevel, lty=Llty, col=Lcol)
			}
			abline(h=Ilevel, lty=Llty, col=Lcol)
		}
		if (Lvert == TRUE) abline(v=X$level, lty=Llty, col=Lcol)
	}
	invisible(turnogram.graph(x, level, xlog, lhorz, lvert, lcol, llty, xlab, ylab, main, sub, ...))
}
"plot.turnpoints" <-
function(x, level=0.05, lhorz=TRUE, lcol=2, llty=2, type="l", xlab="data number", ylab=paste("I (bits), level = ", level*100, "%", sep=""), main=paste("Information (turning points) for:",x$data), ...) {
	# The next function actually draws the graph
	turnpoints.graph <- function(X, Level, Lhorz, Lcol, Llty, Type, Xlab, Ylab, Main, Sub, ...) {
		plot(X$tppos, X$info, type=Type, xlab=Xlab, ylab=Ylab, main=Main, ...)
		abline(h=-log(Level, base=2), lty=Llty, col=Lcol)
	}
	invisible(turnpoints.graph(x, level[1], lhorz, lcol, llty, type, xlab, ylab, main, ...))
}
"print.abund" <-
function(x, ...) {
	cat("\nSorting of descriptors according to abundance for:", x$data, "\n\n")
	cat("Coefficient f:", x$f, "\n")
	if (!is.null(x$n)) {						# How many variables do we keep?
		cat("Extraction of: ", x$n, " variable(s) from a total of ", length(x$vr), "\n", sep="")
	} else {
		cat(length(x$vr), " variables sorted\n", sep="")
	}
	cat("\n")
	invisible(x)
}
"print.escouf" <-
function(x, ...) {
	cat("\nEscoufier's method of equivalent vectors for:", x$data, "\n\n")
	cat("Calculation level:", x$calc.level, "\n")
	cat(x$vars[2], "variable(s) calculated on a total of", x$vars[1], "\n")
	if (!is.null(x$level)) {
		# How many variables do we keep at this level?
		nvars <- length(x$RV[x$RV<x$level])
		cat("Extraction level : ", x$level, " = ", nvars, " variable(s)\n", sep="")
	}
	cat("\n")
	invisible(x)
}
"print.regul" <-
function(x, ...) {
	if (is.null(names(x$y))) {		# Only one vector is regulated
		cat("Regulation using method :", x$specs$methods, "\n")
	} else {						# y is a data.frame
		cat("Regulation of, by \"method\" :\n")
		methods <- x$specs$methods
		names(methods) <- names(x$y)
		print(methods)
	}
	cat("\nArguments for \"methods\" :\n")
	args <- NULL
	args[1] <- x$specs$tol.type
	args[2] <- x$specs$tol
	args[3] <- x$specs$rule
	args[4] <- x$specs$f
	args[5] <- x$specs$periodic
	args[6] <- x$specs$window
	args[7] <- x$specs$split
	names(args) <- c("tol.type", "tol", "rule", "f", "periodic", "window", "split")
	print(args)
	if (x$specs$rule == 1) {
		cat("\n", sum(x$match.dist == 1/0), "interpolated values on", length(x$match.dist), "(", sum(x$match.dist == -1/0), "NAs padded at ends )\n")
	} else {			# We allowed extrapolation
		cat("\n", sum(x$match.dist == 1/0), "interpolated and", sum(x$match.dist == -1/0), "extrapolated values on", length(x$match.dist), "\n")
	}		# Rem: 1/0 stands for Inf and -1/0 stands for -Inf
	cat("\nTime scale :\n")
	tsp <- NULL
	if (length(x$tspar$start) == 1) {
		tsp[1] <- x$tspar$start[1]
	} else {
		tsp[1] <- x$tspar$start[1] + (x$tspar$start[2] - 1) * x$tspar$frequency
	}
	tsp[2] <- x$tspar$deltat
	tsp[3] <- x$tspar$frequency
	names(tsp) <- c("start", "deltat", "frequency")
	print(tsp)
	cat("Time units :", x$units, "\n\n")
	cat("call :", deparse(x$call), "\n")
	invisible(x)
}
"print.specs.regul" <-
function(x, ...) {
	print(unlist(unclass(x)), quote=FALSE)
	invisible(x)
}
"print.specs.tsd" <-
function(x, ...) {
	print(unlist(unclass(x)), quote=FALSE)
	invisible(x)
}
"print.stat.slide" <-
function(x, ...) {
	print(x$stat)
	invisible(x)
}
"print.summary.abund" <-
function(x, ...) {
	cat("\nSorting of descriptors according to abundance for:", x$data, "\n\n")
	cat("Coefficient f:", x$f, "\n")
	if (!is.null(x$n)) {						# How many variables do we keep?
		cat("Extraction of: ", x$n, " variable(s) from a total of ", length(x$vr), "\n", sep="")
	} else {
		cat(length(x$vr), " variables sorted\n", sep="")
	}
	cat("\nNumber of individuals (% of most abundant in log):\n")
	print(x$p.log.ind)
	cat("\nPercent of non-zero values:\n")
	print(x$p.nonull)
	invisible(x)
}
"print.summary.escouf" <-
function(x, ...) {
	cat("\nEscoufier's method of equivalent vectors for:", x$data, "\n\n")
	cat("Calculation level:", x$calc.level, "\n")
	cat(x$vars[2], "variable(s) calculated on a total of", x$vars[1], "\n")
	if (!is.null(x$level)) {
		# How many variables do we keep at this level?
		nvars <- length(x$RV[x$RV<x$level])
		cat("Extraction level : ", x$level, " = ", nvars, " variable(s)\n", sep="")
	}
	cat("RV:\n")
	print(x$RV)
	invisible(x)
}
"print.summary.regul" <-
function(x, ...) {
	if (is.null(names(x$y))) {		# Only one vector is regulated
		cat("Regulation using method :", x$specs$methods, "\n")
		dat <- as.data.frame(cbind(x$x, x$y, x$match.dist))
		names(dat) <- c("Time", "Series", "Regul")
	} else {						# y is a data.frame
		cat("Regulation of, by \"method\" :\n")
		methods <- x$specs$methods
		names(methods) <- names(x$y)
		print(methods)
		dat <- as.data.frame(cbind(x$x, x$y, x$match.dist))
		Names <- NULL
		nc <- ncol(x$y) + 2
		Names[1] <- "Time"
		Names[2:(nc-1)] <- names(x$y)
		Names[nc] <- "Match.obs"
		names(dat) <- Names
	}
	cat("\nArguments for \"methods\" :\n")
	args <- NULL
	args[1] <- x$specs$tol.type
	args[2] <- x$specs$tol
	args[3] <- x$specs$rule
	args[4] <- x$specs$f
	args[5] <- x$specs$periodic
	args[6] <- x$specs$window
	args[7] <- x$specs$split
	names(args) <- c("tol.type", "tol", "rule", "f", "periodic", "window", "split")
	print(args)
	if (x$specs$rule == 1) {
		cat("\n", sum(x$match.dist == 1/0), "interpolated values on", length(x$match.dist), "(", sum(x$match.dist == -1/0), "NAs padded at ends )\n")
	} else {			# We allowed extrapolation
		cat("\n", sum(x$match.dist == 1/0), "interpolated and", sum(x$match.dist == -1/0), "extrapolated values on", length(x$match.dist), "\n")
	}		# Rem: 1/0 stands for Inf and -1/0 stands for -Inf
	cat("\nTime scale :\n")
	tsp <- NULL
	if (length(x$tspar$start) == 1) {
		tsp[1] <- x$tspar$start[1]
	} else {
		tsp[1] <- x$tspar$start[1] + (x$tspar$start[2] - 1) * x$tspar$frequency
	}
	tsp[2] <- x$tspar$deltat
	tsp[3] <- x$tspar$frequency
	names(tsp) <- c("start", "deltat", "frequency")
	print(tsp)
	cat("Time units :", x$units, "\n\n")
	cat("call :", deparse(x$call), "\n\n")
	print(dat)
	invisible(x)
}
"print.summary.tsd" <-
function(x, ...) {
    cat(" Call:\n")
    cat(" ")
    dput(x$call)
    ser <- x$ts
    if (length(ser) == 1) {		# We have the decomposition of a single series in the object
    	cat("\nComponents\n")
    	print(x$series)
    } else {					# We have the decomposition of several series in the object
    	cat("\nSeries:\n")
		print(x$ts)
		cat("\n")
    	for (i in 1:length(ser)) {
    		cat("\nComponents for", ser[i], "\n")
    		print(x$series[[i]][1:10,])
    	}
    }
	invisible(x)
}
"print.summary.turnogram" <-
function(x, ...) {
	cat(x$type, "turnogram for:", x$data, "\n\n")
	cat("options      :", x$fun, "/", x$proba, "\n")
	cat("call         :", deparse(x$call), "\n")
	maxinfo <- max(x$info)
	pos <- x$info == maxinfo
	cat("max. info.   : ", max(x$info), " at interval ", x$interval[pos], " (P = ", 2^-abs(max(x$info)), ": ", x$turns[pos], " turning points for ", x$n[pos], " observations)", "\n", sep="")
	cat("extract level: ", x$level, " (", x$units.text, ")\n\n", sep="")
	if (x$type == "Complete") {
		data <- list(interval=x$interval, n=x$n, turns=x$turns, turns.min=x$turns.min, turns.max=x$turns.max, info=x$info, info.min=x$info.min, info.max=x$info.max)
	} else {
		data <- list(interval=x$interval, n=x$n, turns=x$turns, info=x$info)
	}
	print(as.data.frame(data))
	invisible(x)
}
"print.summary.turnpoints" <-
function(x, ...) {
	cat("Turning points for:", x$data, "\n\n")
	cat("nbr observations  :", x$n, "\n")
	cat("nbr ex-aequos     :", sum(x$exaequos), "\n")
	if (x$firstispeak) {
		cat("nbr turning points:", x$nturns, "(first point is a peak)\n")
		typep <- c("peak", "pit")
	} else {
		cat("nbr turning points:", x$nturns, "(first point is a pit)\n")
		typep <- c("pit", "peak")
	}
	cat("E(p) =", 2 / 3 * (x$n - 2), "Var(p) =", (16 * x$n - 29) / 90, "(theoretical)\n")
	cat("\n")
	# construct the table summarizing all turning points
	typepts <- rep(typep, length.out=x$nturns)
	tablepts <- as.data.frame(list(point=x$tppos, type=typepts, proba=x$proba, info=x$info))
	print(tablepts)
	invisible(x)
}
"print.tsd" <-
function(x, ...) {
    cat(" Call:\n")
    cat(" ")
    dput(x$call)
    ser <- x$ts
    if (length(ser) == 1) {		# We have the decomposition of a single series in the object
    	cat("\nComponents\n")
    	print(dimnames(x$series)[[2]])
    } else {					# We have the decomposition of several series in the object
    	cat("\nSeries:\n")
		print(x$ts)
		cat("\n")
		for (i in 1:length(ser)) {
			cat("\nComponents for", ser[i], "\n")
			print(dimnames(x$series[[i]])[[2]])
    	}
	}
	invisible(x)
}
"print.turnogram" <-
function(x, ...) {
	cat(x$type, "turnogram for:", x$data, "\n\n")
	cat("options      :", x$fun, "/", x$proba, "\n")
	cat("intervals    :", min(x$interval), "..", max(x$interval), x$units.text, "/ step =", x$interval[2] - x$interval[1], "\n")
	cat("nbr of obs.  :", max(x$n), "..", min(x$n), "\n")
	maxinfo <- max(x$info)
	pos <- x$info == maxinfo
	cat("max. info.   : ", max(x$info), " at interval ", x$interval[pos], " (P = ", 2^-abs(max(x$info)), ": ", x$turns[pos], " turning points for ", x$n[pos], " observations)", "\n", sep="")
	cat("extract level: ", x$level, " (", x$units.text, ")\n\n", sep="")
	invisible(x)
}
"print.turnpoints" <-
function(x, ...) {
	cat("Turning points for:", x$data, "\n\n")
	cat("nbr observations  :", x$n, "\n")
	cat("nbr ex-aequos     :", sum(x$exaequos), "\n")
	if (x$firstispeak) {
		cat("nbr turning points:", x$nturns, "(first point is a peak)\n")
	} else {
		cat("nbr turning points:", x$nturns, "(first point is a pit)\n")
	}
	cat("E(p) =", 2 / 3 * (x$n - 2), "Var(p) =", (16 * x$n - 29) / 90, "(theoretical)\n")
	cat("\n")
	invisible(x)
}
"regarea" <-
function (x, y = NULL, xmin=min(x), n=length(x), deltat=(max(x)-min(x))/(n-1), rule=1, window=deltat, interp=FALSE, split=100) {
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x <- xy.coords(x, y)
		y <- x$y
		x <- x$x
	} else {												# We are in S+
		x <- x
		y <- y
	}
	# Make sure data are sorted in increasing order according to x
	srt <- sort.list(x)
	x <- x[srt]
	y <- y[srt]
	# Check arguments
	if (!is.numeric(x) || !is.numeric(y)) 
		stop("regarea: x and y must be numeric")
    nx <- length(x)
    if (nx != length(y)) 
        stop("x and y must have equal lengths")
    if (nx < 2) 
        stop("regarea requires at least two values to interpolate")
    # Eliminate entries with missing values
    ok <- !(is.na(x) | is.na(y))
    x <- x[ok]
    y <- y[ok]
    nx <- length(x)
    if (nx < 2) 
        stop("regarea requires at least two non-missing values to interpolate")
    if (window <= 0)
	    stop("the window must be a positive number")
	# The next tree lines must be activated if one want at least one observation INSIDE each window (but not necessary)
	#largestgap <- max(x[2:nx]-x[1:(nx-1)])
	#if (window <= largestgap)		# The window must contain at least one value everywhere!
    #	stop(paste("the window must be wider than the largest gap in the series (", largestgap, ")", sep=""))
    if (n <= 0) 
    	stop("regarea requires n >= 1")
    if (deltat <= 0) 
    	stop("regarea requires deltat > 0")
    xout <- 0:(n-1) * deltat + xmin							# vector of xout regular sequence
    xmax <- xout[n]
    if (!is.numeric(split)) 
		stop("regarea: split must be numeric")
	split <- round(split)
	if (split < 1) split <- 1								# split must be a positive integer!
    # Misc values required for calculations
    halfwin <- window/2
    xlb <- xmin - halfwin - 1								# first area must extend at least below lowest window
    xlb <- min(xlb, x[1])
    xub <- xmax + halfwin + 1								# last area must extend at least above highest window
    xub <- max(xub, x[nx])
	# all x must be positive => shift the scale to meet this requirement
    if (xlb <= 0) {
    	shift <- (-xlb + 1)
    	xout <- xout + shift
    	x <- x + shift
    	xlb <- xlb + shift
    	xub <- xub + shift
    } else shift <- 0
    # vector containing lower bound (x1i) of areas for each data xi
    x1 <- NULL
    x1[1] <- xlb											# Make sure the first area include (xmin - window/2)
    x1[2:nx] <- (x[1:(nx-1)]+x[2:nx])/2						# Areas start in the middle of two data
    # vector containing upper bound (x2i) of areas for each data xi
    x2 <- NULL
    x2[1:(nx-1)] <- x1[2:nx]								# One upper bound is equal to the next lower bond
    x2[nx] <- xub											# Make sure the last area include (xmax + window/2)
    # matrix containing lower bound of each window for xout
    u1 <- xout - halfwin
	# To avoid too large matrices, and a lot of wasted calculations, the series is split is shorter sub-series
    # that are analyzed sequencially
    seq <- ceiling(n/split)
    yout <- NULL
    for (i in 1:seq) {
    	# we select submatrices corresponding to the current sequence to analyse
    	sn <- split
    	sl <- (i - 1) * split + 1
    	su <- i * split
    	if (su > n) {										# When the last sequence is partial only (less than n elements remains)
    		su <- n
    		sn <- su - sl + 1
    	}
    	smin <- xout[sl] - halfwin
    	smax <- xout[su] + halfwin
    	slx <- sum(x1 < smin)
    	sux <- sum(x2 < smax) + 1
    	snx <- sux - slx + 1
    	# submatrices xs1 and xs2 are extracted and duplicated sn times accross columns
    	xs1 <- rep(x1[slx:sux], sn)
     	dim(xs1) <- c(snx, sn)
    	xs2 <- rep(x2[slx:sux], sn)
		dim(xs2) <- c(snx, sn)
    	# submatrices us1 and us2 are extracted and duplicated across snx rows
    	us1 <- rep(u1[sl:su], snx)
    	dim(us1) <- c(sn, snx)
    	us1 <- t(us1)
    	us2 <- us1 + window
    	# keep only maximal value of xs1 or us1
    	us1 <- pmax(xs1, us1)
    	# keep only minimal value of xs2 or us2
    	us2 <- pmin(xs2, us2)
    	# fi = u2i - u1i
    	xs1 <- us2 - us1
    	# negative values for f indicate areas totally outside the window => f is reset to 0 for them
    	xs1[xs1<0] <- 0
    	# approximation of yout (for the submatrix) is the sum of areas (y * f, in columns) divided by the window length
    	yout[sl:su] <- apply((y[slx:sux] * xs1), 2, sum)/window
    }
    # If interp is false, we replace matching values of yout by y for each xout that match an x
    if (interp == FALSE) {
    	pos <- match(xout, x, nomatch=NA)
    	youtmatch <- y[pos]											# Any matching value in the right place in yout, the rest is NAs
    	posmatch <- !is.na(pos)
    	yout[posmatch] <- youtmatch[posmatch]						# and they replace extrapolated values in yout
    }
    # If rule == 1, we still must replace values outside {x[1], x[nx]} by NA
    if (rule == 1)
    	yout[xout < x[1] | xout > x[nx]] <- NA
    # Restore initial xout values
    xout <- xout - shift
    # And finally, we return the list of reguled data (x, y)
    list(x = xout, y = yout)
}
"regconst" <-
function (x, y=NULL, xmin=min(x), n=length(x), deltat=(max(x)-min(x))/(n-1), rule=1, f=0) {
	# We use approx() for the calculations
	# but we first need to construct xout
	if (n <= 0) 
    	stop("regconst requires n >= 1")
    if (deltat <= 0) 
    	stop("regconst requires deltat > 0")
	xout <- 0:(n-1) * deltat + xmin
	# In S+, missing values are not allowed, but in R it is OK
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		# Leave the series like this
	} else {												# We are in S+
		# Eliminate missing values
		ok <- !(is.na(x) | is.na(y))
		x <- x[ok]
    	y <- y[ok]
	}
	# Make sure data are sorted in increasing order according to x
	srt <- sort.list(x)
	x <- x[srt]
	y <- y[srt]
	res <- approx(x, y, xout, method="constant", rule=rule, f=f)
	res
}
"reglin" <-
function (x, y=NULL, xmin=min(x), n=length(x), deltat=(max(x)-min(x))/(n-1), rule=1) {
	# We use approx() for the calculations
	# but we first need to construct xout
	if (n <= 0) 
    	stop("reglin requires n >= 1")
    if (deltat <= 0) 
    	stop("reglin requires deltat > 0")
	xout <- 0:(n-1) * deltat + xmin
	# In S+, missing values are not allowed, but in R it is OK
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		# Leave the series like this
	} else {												# We are in S+
		# Eliminate missing values
		ok <- !(is.na(x) | is.na(y))
		x <- x[ok]
	   	y <- y[ok]
	}
	# Make sure data are sorted in increasing order according to x
	srt <- sort.list(x)
	x <- x[srt]
	y <- y[srt]
	res <- approx(x, y, xout, method="linear", rule=rule)
	res
}
"regspline" <-
function (x, y=NULL, xmin=min(x), n=length(x), deltat=(max(x)-min(x))/(n-1), rule=1, periodic=FALSE) {
	# We use spline() for the calculations
	# but we first need to calculate xmax
	if (n <= 0) 
    	stop("regspline requires n >= 1")
    if (deltat <= 0) 
    	stop("regspline requires deltat > 0")
	xmax <- (n-1) * deltat + xmin
	# ... and eliminate missing values
	ok <- !(is.na(x) | is.na(y))
	x <- x[ok]
    y <- y[ok]
	# Make sure data are sorted in increasing order according to x
	srt <- sort.list(x)
	x <- x[srt]
	y <- y[srt]
	# the call to the function spline() is different in S+ and in R!
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		if (periodic == TRUE) {
			res <- spline(x, y, n=n, method="periodic", xmin=xmin, xmax=xmax)
		} else {
			res <- spline(x, y, n=n, method="fmm", xmin=xmin, xmax=xmax)
		}
		# Rem: there is also a method="natural" in R, but we don't use it here!
	} else {												# We are in S+
		# In S+ the spline function does not really care of xmin, xmax and n!!!
		# So, we need to interpolate a lot more points, and use approx() in it
		# To get the regulated vector we really requested
		restmp <- spline(x, y, n=50*n, periodic=periodic, xmin=xmin, xmax=xmax)
		xout <- 0:(n-1)*deltat + xmin
		res <- approx(restmp$x, restmp$y, xout, method="linear", rule=2)
	}
	# The spline interpolations sometimes return interpolated values lower than the minimum
	# or higher than the maximum. This is eliminated by assigning them minimum or maximum value
	# For instance, a regulated count of species could return ngative values => set the the
	# minimum count (usually 0)
	ymin <- min(y)
	ymax <- max(y)
	res$y[res$y < ymin] <- ymin
	res$y[res$y > ymax] <- ymax
	# If rule == 1, we still must replace values outside {x[1], x[nx]} by NA
    if (rule == 1)
    	res$y[res$x < x[1] | res$x > x[length(x)]] <- NA	
	res
}
"regul" <-
function (x, y = NULL, xmin=min(x), n=length(x), units="days", frequency=NULL, deltat=1/frequency, datemin=NULL, dateformat="m/d/Y", tol=NULL, tol.type="both", methods="linear", rule=1, f=0, periodic=FALSE, window=(max(x)-min(x))/(n-1), split=100, specs=NULL) {
	call <- match.call()
	# Do we have specs?
	if (!is.null(specs)) {
		# Verify it is an objct of the right class 'specs.regul'
		specs.class <- class(specs)
		if (is.null(specs.class) || specs.class != "specs.regul")
			stop("specs must be a 'specs.regul' object.")
		# For each argument we look if it was not explicitly given (in this case, we take value in specs)
		arg.names <- names(call)
		if (pmatch("xmin", arg.names, 0) == 0)
			xmin <- specs$xmin
		if (pmatch("n", arg.names, 0) == 0)
			n <- specs$n
		if (pmatch("frequency", arg.names, 0) == 0)
			frequency <- specs$frequency
		if (pmatch("deltat", arg.names, 0) == 0)
			deltat <- specs$deltat
		if (pmatch("units", arg.names, 0) == 0)
			units <- specs$units
		if (pmatch("datemin", arg.names, 0) == 0)
			datemin <- specs$datemin
		if (pmatch("dateformat", arg.names, 0) == 0)
			dateformat <- specs$dateformat
		if (pmatch("tol", arg.names, 0) == 0)
			tol <- specs$tol
		if (pmatch("tol.type", arg.names, 0) == 0)
			tol.type <- specs$tol.type
		if (pmatch("methods", arg.names, 0) == 0)
			methods <- specs$methods
		if (pmatch("rule", arg.names, 0) == 0)
			rule <- specs$rule
		if (pmatch("f", arg.names, 0) == 0)
			f <- specs$f
		if (pmatch("periodic", arg.names, 0) == 0)
			periodic <- specs$periodic
		if (pmatch("window", arg.names, 0) == 0)
			window <- specs$window
		if (pmatch("split", arg.names, 0) == 0)
			split <- specs$split	
	}
	# Evaluate arguments now
	x <- x
	y <- y
	xmin <- xmin
	n <- n
	units <- units
	frequency <- frequency
	deltat <- deltat
	# tol <- tol		# Must be evaluated only after recalculation of deltat!
	tol.type <- tol.type
	datemin <- datemin
	dateformat <- dateformat
	methods <- methods
	rule <- rule
	f <- f
	periodic <- periodic
	window <- window
	split <- split
	# Verify arguments that are not verified further down
	# x, y, xmin, tol.type, methods, periodic, window, split are verified elsewhere
	# we don't care for units and rule
	# Set frequency and deltat correctly
	if (is.null(frequency)) {
		if (is.null(deltat))
			deltat <- 1					# value by default when none is provided = 1
			frequency <- 1/deltat
		} else {							# frequency provided. Make sure deltat is its inverse
			deltat <- 1/frequency
	}
	# deltat must be a strictly positive value
	if (deltat <= 0)
		stop("deltat must be a positive number")
	# Make sure tol is defined
	tol <- tol
	if (is.null(tol)) tol <- 0
	# n must be a positive integer
	if (n <= 0)
		stop("n must be a positive number")
	if (n != round(n))
		stop("n must be an integer number")
	# create specs
	specs <- list(xmin=xmin, n=n, frequency=frequency, deltat=deltat, units=units, datemin=datemin, dateformat=dateformat, tol=tol, tol.type=tol.type, methods=methods, rule=rule, f=f, periodic=periodic, window=window, split=split)
	# Make sure data are sorted in increasing order with time
	srt <- sort.list(x)
	x <- x[srt]
	if (is.null(ncol(y))) y <- y[srt] else y <- y[srt,]
	# If datemin is provided, we shift x accordingly
	if (length(datemin)>0) {
		dateminval <- yearstodays(daystoyears(1, datemin, dateformat))
		xminval <- trunc(min(x, na.rm=TRUE))
		x <- x - xminval + dateminval
		xmin <- xmin - xminval + dateminval
	}
	# If units is "daystoyears", there is a special treatment to transform the time-scale!
	if (units == "daystoyears") {
		# We have days as units. We want years with a "linear scale", i.e.: 1 year = 365.25 days, 1 month = 1/12 years
		# We want also the integer value reflect exactly the current year, i.e.: 1997.xxx for dates in the year 1997
		if(is.null(yearorig <- options("chron.origin")$year)) {
			if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
				yearorig <- 1970
			} else {												# We are in Splus
				yearorig <- 1960
			}
		}
		x <- x/365.25 + yearorig
		xmin <- xmin/365.25 + yearorig
		cat(paste("A 'tol' of", tol, "in 'days' is", tol/365.25, "in 'years'\n"))
		tol <- tol/365.25
		window <- window/365.25
		units <- "years"					# and we reset units to "years"
	}
	# Verify the methods argument
	METHODS <- c("constant", "linear", "spline", "area")
	# repeat methods a certain times to make sure it match at least the number of columns to regulate
	nser <- ncol(y)
	if (is.null(nser)) nser <- 1		# when y is a vector
	if (length(methods) < nser)
		methods <- rep(methods, length.out=nser)	
	methindex <- NULL
	for (i in 1:length(methods)) {
		methindex[i] <- pmatch(methods[i], METHODS)
		if (is.na(methindex[i])) 
			stop(paste("invalid regulation method:", methods[i]))
		if (methindex[i] == -1) 
			stop(paste("ambiguous regulation method:", methods[i]))
	}
	# Regulate each column of the matrix in turn
	# If y is a vector, only one call is required
	if (is.vector(y)) {
		# Choose the method
		res <- switch(methindex[1],
		    	"constant"=regconst(x, y, xmin=xmin, n=n, deltat=deltat, rule=rule, f=f),
		    	"linear"=reglin(x, y, xmin=xmin, n=n, deltat=deltat, rule=rule),
				"spline"=regspline(x, y, xmin=xmin, n=n, deltat=deltat, rule=rule, periodic=periodic),
				"area"=regarea(x, y, xmin=xmin, n=n, deltat=deltat, rule=rule, window=window, interp=TRUE, split=split))
		xout <- res$x
		yout <- res$y
		yout <- as.data.frame(yout)
		names(yout) <- "Series"
	} else {
		# If x is not a vector, it is treated as a data frame
		# A result will be returned in a data frame with corresponding columns
		y <- as.data.frame(y)
		# We keep the same column headers
		NamesV <- names(y)
		yout <- NULL
		# Calculation is performed alternatively on each column
		for (i in 1:nser) {
			res <- switch(methindex[i],
		    		"constant"=regconst(x, y[,i], xmin=xmin, n=n, deltat=deltat, rule=rule, f=f),
		    		"linear"=reglin(x, y[,i], xmin=xmin, n=n, deltat=deltat, rule=rule),
					"spline"=regspline(x, y[,i], xmin=xmin, n=n, deltat=deltat, rule=rule, periodic=periodic),
					"area"=regarea(x, y[,i], xmin=xmin, n=n, deltat=deltat, rule=rule, window=window, interp=TRUE, split=split))
			# The next if condition to avoid error at the first deltat
			if (is.null(yout)==TRUE) yout <- data.frame(res$y) else yout <- cbind(yout, res$y)
		}
		# Change names of columns to match the original data frame
		names(yout) <- NamesV
		# Get xout
		xout <- res$x
	}
	# make sure methods are fully spelled
	methods <- NULL
	for (i in 1:nser) {
		methods[i] <- switch(methindex[i],
			    	"constant"="constant",
			    	"linear"="linear",
					"spline"="spline",
					"area"="area")
	}
	# make sure yini is a date frame
	yini <- as.data.frame(y)
	if (ncol(yini) == 1) names(yini) <- "Initial"
	# now we calculate match according to tol.type
	TOL.TYPES <- c("left", "both", "right", "none")
	tol.idx <- pmatch(tol.type, TOL.TYPES)
	if (is.na(tol.idx)) 
		stop("invalid tol.type value")
	if (tol.idx == -1) 
		stop("ambiguous tol.type value")
	# make sure tol.type is fully spelled
	tol.type <- switch(tol.idx,
			    "left"="left",
			    "both"="both",
				"right"="right",
				"none"="none")
	if (is.null(tol)) tol <- 0 else tol <- abs(tol)
	pos <- match.tol(xout, x, tol.type=tol.type, tol=tol)
	# We must recalculate tol here, to keep the right value
	if (tol > deltat) tol2 <- deltat else tol2 <- deltat/round(deltat/tol)
	# we replace corresponding values for each series in yout by indexed values of the same series in y
	repl <- yini[pos, ]
	yout <- as.matrix(yout)			# The next instruction does not work on data frames in R!
	yout[!is.na(repl)] <- repl[!is.na(repl)]
	yout <- as.data.frame(yout)
	# which regular date match observations? Put distance for a matching observation, Inf for an interpolated observation, -Inf for an extrapolated observation
	match.dist <- xout - x[pos]
	match.dist[is.na(match.dist)] <- Inf
	match.dist[(xout < min(x, na.rm=TRUE) | xout > max(x, na.rm=TRUE)) & match.dist == Inf] <- -Inf
	# create tspar
	# start is a numeric vector of 2 numbers indicating integer and fractional part of start.
	# its combination must be a multiple of deltat => adjust to the closest neighbour
	dateini <- xout[1]
	intini <- trunc(dateini)									# Integer part of start
	# decini must be lower than frequency
	if (intini != dateini & frequency > 1) {
		decini <- floor((dateini - intini) * frequency) + 1			# "Decimal" part of start must be a number times of frequency for 'ts' objects
	} else {
		decini <- NULL
	}
	tspar <- list("start"=c(intini, decini), "deltat"=deltat, "frequency"=frequency)
	# create the list containing the results
	res <- list(x=xout, y=yout, xini=x, yini=yini, tspar=tspar, units=units, match.dist=match.dist, specs=specs, call=call)
	class(res) <- "regul"										# and turn it into a 'regul' object
	res															# Return the result
}
"regul.adj" <-
function(x, xmin=min(x), frequency=NULL, deltat=(max(x, na.rm=TRUE)-min(x, na.rm=TRUE))/(length(x)-1), tol=deltat, tol.type="both", nclass=50, col=c(4, 5, 2), xlab=paste("Time distance"), ylab=paste("Frequency"), main="Number of matching observations", plotit=TRUE, ...) {
	xmin <- xmin
	frequency <- frequency
	deltat <- deltat
	x <- sort(x)				# Make sure x is sorted in increasing order
	x <- x[!is.na(x)]			# Eliminate missing values
	n <- length(x)
	if (is.null(frequency)) {
		if (is.null(deltat)) {
			stop("You must define at least one of frequency or deltat")
		} else {
			frequency <- 1/deltat
		}
	} else {
		deltat <- 1/frequency
	}
	# We verify also that tol is a round fraction of deltat
	if (is.null(tol) || tol == 0) tol2 <- 0 else {
		tol2 <- abs(tol)
		if (tol2 > deltat) tol2 <- deltat else {
			tol2 <- deltat/round(deltat/tol2)
		}
	}
	# We calculate highest n, so as there is no extrapolation
	if (max(x) < xmin) {		# There is no solution!
		nx <- 0
		stop("xmin is higher than max value in x!")
	} else {					# We calculate nx
		nx <- floor((max(x) - xmin)/deltat) + 1
	}
	# create xout vector
	xout <- 0:(nx-1) * deltat + xmin
	# calculate the matching vector
	pos <- match.tol(xout, x, tol.type=tol.type, tol=tol2)
	# which regular date match observations? Put distance for a matching observation, Inf for an interpolated observation, -Inf for an extrapolated observation
	match.dist <- xout - x[pos]
	match.dist[is.na(match.dist)] <- Inf
	match.dist[(xout < min(x) | xout > max(x)) & match.dist == Inf] <- -Inf
	# Number of matches
	match <- sum(is.finite(match.dist))
	exact.match <- sum(match.dist == 0)
	# Construct the params vector
	params <- c(xmin, nx, deltat, tol2)
	names(params) <- c("xmin", "n", "deltat", "tol")
	# Draw the graph, if plot is TRUE
	if (plotit == TRUE) {
		if (tol2 == 0) HT <- 1.001 else HT <- 101*tol2/100
		Data <- abs(match.dist)
		Data[is.infinite(Data)] <- HT 			# Inf are replaced by a value higher than Tol
		Data[Data == 0] <- -0.00001				# For putting exact matching values in a separate category
		# Don't draw, but get vectors of results
		res <- hist(Data, nclass=nclass, plot=FALSE)
		classes <- res$breaks[2:length(res$breaks)]
		ncl <- length(classes)
		classes[ncl] <- Inf
		counts <- res$counts
		names(counts) <- classes
		# Create a vector for colors, so as the first and last classes are drawn in a different color
		cols <- NULL
		cols[1] <- col[2]
		if (sum(Data == -0.00001) > 0) cols[1] <- col[1]
		if (ncl > 2) cols[2:(ncl-1)] <- col[2]
		cols[ncl] <- col[3]
		# Actually draw the histogram
		hist(Data, nclass=nclass, col=cols, xlab=xlab, ylab=ylab, main=main)
		counts <- counts[counts != 0]
		lc <- length(counts)
		counts2 <- NULL
		for (i in 1:lc) {
			counts2[i] <- sum(counts[1:i])
		}
		names(counts2) <- names(counts)
		res <- list(params=params, match=match, exact.match=exact.match, match.counts=counts2)
	} else {
		res <- list(params=params, match=match, exact.match=exact.match)
	}
	res
}
"regul.screen" <-
function(x, weight=NULL, xmin=min(x), frequency=NULL, deltat=(max(x, na.rm=TRUE)-min(x, na.rm=TRUE))/(length(x)-1), tol=deltat/5, tol.type="both") {

	regul.screen.calc <- function(x, weight, xmin, deltat, tol, tol.type) {
		xmin <- xmin
		deltat <- deltat
		# We verify that tol is a round fraction of deltat
		if (is.null(tol) || tol == 0) tol2 <- 0 else {
			tol2 <- abs(tol)
			if (tol2 > deltat) tol2 <- deltat else {
				tol2 <- deltat/round(deltat/tol2)
			}
		}
		# We calculate highest n, so as there is no extrapolation
		if (max(x) < xmin) {		# There is no solution!
			nx <- 0
			stop("xmin is higher than max value in x!")
		} else {					# We calculate nx
			nx <- floor((max(x) - xmin)/deltat) + 1
		}
		# create xout vector
		xout <- 0:(nx-1) * deltat + xmin
		# calculate the matching vector
		pos <- match.tol(xout, x, tol.type=tol.type, tol=tol2)
		# which regular date match observations? Put distance for a matching observation, Inf for an interpolated observation, -Inf for an extrapolated observation
		match.dist <- xout - x[pos]
		match.dist[is.na(match.dist)] <- Inf
		match.dist[(xout < min(x) | xout > max(x)) & match.dist == Inf] <- -Inf
		# Weighted number of matches
		weight.vec <- weight[pos]
		match.vec <- as.numeric(is.finite(match.dist))
		match <- sum(match.vec * weight.vec, na.rm=TRUE)
		exact.match.vec <- as.numeric(match.dist == 0)
		exact.match <- sum(exact.match.vec * weight.vec,na.rm=TRUE)
		res <- list(tol=tol2, nx=nx, match=match, exact.match=exact.match)
		res
	}
	
	# regul.screen starts here
	if (!is.numeric(x))
		stop ("x must be a numerical vector")
	if (length(x) < 2)
		stop ("x must contain at least two values")
	keep <- !is.na(x)			# Eliminate missing values
	x <- x[keep]
	if (length(x) < 2)
		stop ("x must contain at least two non missing values")
	if (is.null(weight)) {										# create a uniform weight vector of 1's
		weight <- rep(1, length.out=length(x))
	} else {													# eliminate values corresponding to missing data in x
		weight <- weight[keep]
	}
	weight[is.na(weight)] <- 0									# replace missing values in weight by 0 (corresp. value in x will be ignored)
	weight[weight < 0] <- 0										# do the same for negative values
	if (length(weight) != length(x))
		stop("x and weight must be vectors of equal length")
	# make sure x is sorted in increasing order
	srt <- sort.list(x)
	x <- x[srt]
	weight <- weight[srt]
	if (is.null(deltat)) {
		if (is.null(frequency)) {
			stop("You must define at least one of frequency or deltat")
		} else {
			deltat <- 1/frequency
		}
	}
	if (is.null(tol) | sum(is.na(tol)) > 0) tol <- deltat/5		# Default value
	# We need a tol value for each deltat
	tol <- rep(tol, length.out=length(deltat))
	# We loop on the various values of deltat, and then on the different values of xmin
	# and report three tables: 1) nx, 2) total.matches, 3) total.exact.matches
	nx <- length(xmin)
	nd <- length(deltat)
	rnames <- paste("x=", xmin, sep="")
	cnames <- paste("d=", deltat, sep="")
	dnames <- list(rnames, cnames)
	ttol <- NULL
	tnx <- matrix(nrow=nx, ncol=nd, dimnames=dnames)
	ttotal <- matrix(nrow=nx, ncol=nd, dimnames=dnames)
	texact <- matrix(nrow=nx, ncol=nd, dimnames=dnames)
	for (i in 1:nx) {
		for (j in 1:nd) {
			res <- regul.screen.calc(x, weight, xmin=xmin[i], deltat=deltat[j], tol=tol[j], tol.type=tol.type)
			ttol[j] <- res$tol
			tnx[i,j] <- res$nx
			ttotal[i,j] <- res$match
			texact[i,j] <- res$exact.match
		}
	}
	names(ttol) <- cnames
	res <- list(tol=ttol, n=tnx, nbr.match=ttotal, nbr.exact.match=texact)
	res
}
"specs" <-
function(x, ...)
	UseMethod("specs", x, ...)
"specs.regul" <-
function(x, ...) {
	structure(x$specs, class="specs.regul")
}
"specs.tsd" <-
function(x, ...) {
	structure(x$specs, class="specs.tsd")
}
"stat.desc" <-
function(x, basic=TRUE, desc=TRUE, norm=FALSE, p=.95) {
	# This function performs all calculations on a single vector
	# Missing data allowed and stripped out before calculations
	stat.desc.vec <- function(x, basic, desc, norm, p) {
		# If x is a	list, we transform it into a vector
		x <- unlist(x)
		if (!is.numeric(x)) {	# Not a numeric vector!
			Nbrval <- NA; Nbrnull <- NA; Nbrna <-NA
			Median <- NA; Mean <- NA; StdDev <- NA
			if (basic==TRUE) {
				Res1 <- list(nbr.val=NA, nbr.null=NA, nbr.na=NA, min=NA, max=NA, range=NA, sum=NA)
			} else Res1 <- NULL
			if (desc==TRUE) {
				CIMean <- NA; names(CIMean) <- p		# To get CI.mean.pValue
				Res2 <- list(median=NA, mean=NA, SE.mean=NA, CI.mean=NA, var=NA, std.dev=NA, coef.var=NA)
			} else Res2 <- NULL
			if (norm==TRUE) {
				Res3 <- list(skewness=NA, skew.2SE=NA, kurtosis=NA, kurt.2SE=NA, normtest.W=NA, normtest.p=NA)
			} else Res3 <- NULL
		} else {			# Vector contains numbers, we can perform calcs
			Nbrna <- sum(as.numeric(is.na(x)))
			# We could use na.rm=TRUE everywhere, but it is faster
			# to remove all missing values once at the beginning
			x <- x[!is.na(x)]
			Nbrval <- length(x)
			Nbrnull <- sum(as.numeric(x==0))
			if (basic==TRUE) {
				Min <- min(x)
				Max <- max(x)
				Range <- Max-Min
				Sum <- sum(x)
				Res1 <- list(nbr.val=Nbrval, nbr.null=Nbrnull, nbr.na=Nbrna, min=Min, max=Max, range=Range, sum=Sum)
			} else Res1 <- NULL
			Median <- median(x); names(Median) <- NULL	# To correct a bug!?
			Mean <- mean(x)
			Var <- var(x)
			StdDev <- sqrt(Var)
			SEMean <- StdDev/sqrt(Nbrval)
			if (desc==TRUE) {
				CIMean <- qt((0.5+p/2), (Nbrval-1))*SEMean
				# (0.5+p/2) because we need a two-sided t distribution for the CI!!!
				names(CIMean) <- p
				CoefVar <- StdDev/Mean
				Res2 <- list(median=Median, mean=Mean, SE.mean=SEMean, CI.mean=CIMean, var=Var, std.dev=StdDev, coef.var=CoefVar)
			} else Res2 <- NULL
			if (norm==TRUE) {
				Skew <- sum((x-mean(x))^3)/(length(x)*sqrt(var(x))^3)			# From e1071 R library
				Kurt <- sum((x-mean(x))^4)/(length(x)*var(x)^2) - 3				# Idem
				SE <- sqrt(6*Nbrval*(Nbrval-1)/(Nbrval-2)/(Nbrval+1)/(Nbrval+3))
				# +/- sqrt(6/Nbrval) for Nbrval>100, see Sokal & Rohlf, p. 139
				Skew.2SE <- Skew/(2*SE)
				# If abs(Skew.2SE)>1 then Skew is signific., see Systat 9 p. I-212
				SE <- sqrt(24*Nbrval*((Nbrval-1)^2)/(Nbrval-3)/(Nbrval-2)/(Nbrval+3)/(Nbrval+5))
				# +/- sqrt(24/Nbrval) for Nbrval > 150, same ref.
				Kurt.2SE <- Kurt/(2*SE)
				# Same remark as for Skew.2SE!
				if (exists("is.R") && is.function(is.R) && is.R()) {
					# This is the Shapiro-Wilk test of normality
					require(ctest)		# For Kolmogorov-Smirnov or Shapiro-Wilk normality tests in R
					Ntest <- shapiro.test(x)
					Ntest.W <- Ntest$statistic; names(Ntest.W) <- NULL
					Ntest.p <- Ntest$p.value
				} else {	# We are in Splus
					# No normality test currently available!
					Ntest.W <- NA; Ntest.p <- NA
				}
				Res3 <- list(skewness=Skew, skew.2SE=Skew.2SE, kurtosis=Kurt, kurt.2SE=Kurt.2SE, normtest.W=Ntest.W, normtest.p=Ntest.p)
			} else Res3 <- NULL
		}
		# We collect all results together
		Res <- unlist(c(Res1, Res2, Res3))
		# If basic, desc & norm are all false, we return just minimal calculations
		if (length(Res)==0) Res <- unlist(list(nbr.val=Nbrval, nbr.null=Nbrnull, nbr.na=Nbrna, median=Median, mean=Mean, std.dev=StdDev))
		Res
	}
	
	# This is the body of stat.desc
	Basic <- basic; Desc <- desc; Norm <- norm; P <- p
	# If x is a vector, stat.desc returns a vector with results
	# TO DO: allow also time series (ts) => is.data.frame, or stat.desc.vec?
	if (is.vector(x)) stat.desc.vec(x, Basic, Desc, Norm, P) else {
		# If x is not a vector, it is treated as a data frame
		# A result will be returned in a data frame with corresponding columns
		x <- as.data.frame(x)
		# We keep the same column headers
		NamesV <- names(x)
		StatM <- NULL
		# Calculation is performed alternatively on each column
		for (i in 1:ncol(x)) {
			StatV <- stat.desc.vec(x[i], Basic, Desc, Norm, P)
			# The next if condition to avoid error at the first step
			if (is.null(StatM)==TRUE) StatM <- data.frame(StatV) else StatM <- cbind(StatM, StatV)
		}
		# We change names of columns to match the original data frame
		names(StatM) <- NamesV
		StatM
	}
}
"stat.pen" <-
function(x, basic=FALSE, desc=FALSE) {
	# This function performs all calculations on a single vector
	# Missing data allowed and stripped out before calculations
	stat.pen.vec <- function(x, basic, desc) {
		# If x is a	list, we transform it into a vector
		x <- unlist(x)
		if (!is.numeric(x)) {	# Not a numeric vector!
			Nbrval <- NA; Nbrnull <- NA; Nbrna <-NA
			Median <- NA; Mean <- NA; StdDev <- NA
			if (basic==TRUE) {
				Res1 <- list(nbr.val=NA, nbr.null=NA, perc.numm=NA, nbr.na=NA)
			} else Res1 <- NULL
			if (desc==TRUE) {
				Res2 <- list(median=NA, mean=NA, var=NA, std.dev=NA, pos.median=NA, pos.mean=NA, pos.var=NA, pos.std.dev=NA, geo.mean=NA)
			} else Res2 <- NULL
			Res3 <- list(pen.mean=NA, pen.var=NA, pen.std.dev=NA, pen.mean.var=NA)
		} else {			# Vector contains numbers, we can perform calcs
			Nbrna <- sum(as.numeric(is.na(x)))
			# We could use na.rm=TRUE everywhere, but it is faster
			# to remove all missing values once at the beginning
			x <- x[!is.na(x)]
			Nbrval <- length(x)
			Nbrnull <- sum(as.numeric(x==0))
			if (basic==TRUE) {
				Percnull <- Nbrnull/Nbrval*100
				Res1 <- list(nbr.val=Nbrval, nbr.null=Nbrnull, percnull=Percnull, nbr.na=Nbrna)
			} else Res1 <- NULL
			if (desc==TRUE) {
				Median <- median(x); names(Median) <- NULL	# To correct a bug!?
				Mean <- mean(x)
				Var <- var(x)
				StdDev <- sqrt(Var)
				xpos <- x[x>0]
				if (length(xpos)==0) {	# No positive values!
					# If at least one zero, everything is 0, else everything is NA
					if (NbrNull>0) {
						PosMedian <- 0; PosMean <- 0; PosVar <- 0; PosStdDev <- 0; GeoMean <- 0
					} else {
						PosMedian <- NA; PosMean <- NA; PosVar <- NA; PosStdDev <- NA; GeoMean <- NA 
					}
				} else {
					PosMedian <- median(xpos); names(PosMedian) <- NULL
					PosMean <- mean(xpos)
					PosVar <- var(xpos)
					PosStdDev <- sqrt(PosVar)
					GeoMean <- exp(mean(log(xpos)))
				}
				Res2 <- list(median=Median, mean=Mean, var=Var, std.dev=StdDev, pos.median=PosMedian, pos.mean=PosMean, pos.var=PosVar, pos.std.dev=PosStdDev, geo.mean=GeoMean)
			} else Res2 <- NULL
			Pen <- pennington(x, calc="all")
			names(Pen) <- NULL
			PMean <- Pen[1]
			PVar <- Pen[2]
			PStdDev <- sqrt(PVar)
			PMeanVar <- Pen[3]
			Res3 <- list(pen.mean=PMean, pen.var=PVar, pen.std.dev=PStdDev, pen.mean.var=PMeanVar)
		}
		# We collect all results together
		Res <- unlist(c(Res1, Res2, Res3))
		Res
	}
	
	# This is the body of stat.pen
	Basic <- basic; Desc <- desc
	# If x is a vector, stat.pen returns a vector with results
	if (is.vector(x)) stat.pen.vec(x, Basic, Desc) else {
		# If x is not a vector, it is treated as a data frame
		# A result will be returned in a data frame with corresponding columns
		x <- as.data.frame(x)
		# We keep the same column headers
		NamesV <- names(x)
		StatM <- NULL
		# Calculation is performed alternatively on each column
		for (i in 1:ncol(x)) {
			StatV <- stat.pen.vec(x[i], Basic, Desc)
			# The next if condition to avoid error at the first step
			if (is.null(StatM)==TRUE) StatM <- data.frame(StatV) else StatM <- cbind(StatM, StatV)
		}
		# We change names of columns to match the original data frame
		names(StatM) <- NamesV
		StatM
	}
}
"stat.slide" <-
function(x, y, xcut=NULL, xmin=min(x), n=NULL, frequency=NULL, deltat=1/frequency, basic=FALSE, desc=FALSE, norm=FALSE, pen=FALSE, p=.95) {
	# This function performs all calculations on a single bloc
	# Missing data allowed and stripped out before calculations
	stat.slide.cut <- function(y, xmin, xmax, basic, desc, norm, pen, p) {
		# If y is a	list, we transform it into a vector
		y <- unlist(y)
		if (!is.numeric(y)) {	# Not a numeric vector!
			Nbrval <- NA; Nbrnull <- NA; Nbrna <-NA
			Median <- NA; Mean <- NA; StdDev <- NA
			if (basic==TRUE) {
				Res1 <- list(nbr.val=NA, nbr.null=NA, nbr.na=NA, min=NA, max=NA, range=NA, sum=NA)
			} else Res1 <- NULL
			if (desc==TRUE) {
				CIMean <- NA; names(CIMean) <- p		# To get CI.mean.pValue
				Res2 <- list(median=NA, mean=NA, SE.mean=NA, CI.mean=NA, var=NA, std.dev=NA, coef.var=NA)
			} else Res2 <- NULL
			if (norm==TRUE) {
				Res3 <- list(skewness=NA, skew.2SE=NA, kurtosis=NA, kurt.2SE=NA, normtest.W=NA, normtest.p=NA)
			} else Res3 <- NULL
			if (pen==TRUE) {
				Res4 <- list(pos.median=NA, pos.mean=NA, geo.mean=NA, pen.mean=NA, pen.var=NA, pen.std.dev=NA, pen.mean.var=NA)
			} else Res4 <- NULL
		} else {			# Vector contains numbers, we can perform calcs
			# First two entries are xmin and xmax for each cut
			Res0 <- list(xmin=xmin, xmax=xmax)
			Nbrna <- sum(as.numeric(is.na(y)))
			# We could use na.rm=TRUE everywhere, but it is faster
			# to remove all missing values once at the beginning
			y <- y[!is.na(y)]
			Nbrval <- length(y)
			Nbrnull <- sum(as.numeric(y==0))
			if (basic==TRUE) {
				Min <- min(y)
				Max <- max(y)
				Range <- Max-Min
				Sum <- sum(y)
				Res1 <- list(nbr.val=Nbrval, nbr.null=Nbrnull, nbr.na=Nbrna, min=Min, max=Max, range=Range, sum=Sum)
			} else Res1 <- NULL
			Median <- median(y); names(Median) <- NULL	# To correct a bug!?
			Mean <- mean(y)
			Var <- var(y)
			StdDev <- sqrt(Var)
			SEMean <- StdDev/sqrt(Nbrval)
			if (desc==TRUE) {
				CIMean <- qt((0.5+p/2), (Nbrval-1))*SEMean
				# (0.5+p/2) because we need a two-sided t distribution for the CI!!!
				names(CIMean) <- p
				CoefVar <- StdDev/Mean
				Res2 <- list(median=Median, mean=Mean, SE.mean=SEMean, CI.mean=CIMean, var=Var, std.dev=StdDev, coef.var=CoefVar)
			} else Res2 <- NULL
			if (norm==TRUE) {
				Skew <- sum((y-mean(y))^3)/(length(y)*sqrt(var(y))^3)			# From e1071 R library
				Kurt <- sum((y-mean(y))^4)/(length(y)*var(y)^2) - 3				# Idem
				SE <- sqrt(6*Nbrval*(Nbrval-1)/(Nbrval-2)/(Nbrval+1)/(Nbrval+3))
				# +/- sqrt(6/Nbrval) for Nbrval>100, see Sokal & Rohlf, p. 139
				Skew.2SE <- Skew/(2*SE)
				# If abs(Skew.2SE)>1 then Skew is signific., see Systat 9 p. I-212
				SE <- sqrt(24*Nbrval*((Nbrval-1)^2)/(Nbrval-3)/(Nbrval-2)/(Nbrval+3)/(Nbrval+5))
				# +/- sqrt(24/Nbrval) for Nbrval > 150, same ref.
				Kurt.2SE <- Kurt/(2*SE)
				# Same remark as for Skew.2SE!
				if (exists("is.R") && is.function(is.R) && is.R()) {
					# This is the Shapiro-Wilk test of normality
					require(ctest)		# For Shapiro-Wilk normality tests in R
					Ntest <- shapiro.test(y)
					Ntest.W <- Ntest$statistic; names(Ntest.W) <- NULL
					Ntest.p <- Ntest$p.value
				} else {	# We are in Splus
					# No normality test currently available!
					Ntest.W <- NA; Ntest.p <- NA
				}
				Res3 <- list(skewness=Skew, skew.2SE=Skew.2SE, kurtosis=Kurt, kurt.2SE=Kurt.2SE, normtest.W=Ntest.W, normtest.p=Ntest.p)
			} else Res3 <- NULL
			if (pen==TRUE) {
				ypos <- y[y>0]
				PosMedian <- median(ypos); names(PosMedian) <- NULL
				PosMean <- mean(ypos)
				GeoMean <- exp(mean(log(ypos)))
				Pen <- pennington(y, calc="all")
				names(Pen) <- NULL
				PMean <- Pen[1]
				PVar <- Pen[2]
				PStdDev <- sqrt(PVar)
				PMeanVar <- Pen[3]
				Res4 <- list(pos.median=PosMedian, pos.mean=PosMean, geo.mean=GeoMean, pen.mean=PMean, pen.var=PVar, pen.std.dev=PStdDev, pen.mean.var=PMeanVar)
			} else Res4 <- NULL
		}
		# We collect all results together
		Res <- unlist(c(Res0, Res1, Res2, Res3, Res4))
		# If basic, desc, norm & pen are all false, we return just minimal calculations
		if (length(Res)==2) Res <- unlist(list(xmin=xmin, xmax=xmax, nbr.val=Nbrval, nbr.null=Nbrnull, nbr.na=Nbrna, min=min(y), max=max(y), median=Median, mean=Mean, std.dev=StdDev))
		Res
	}
	
	# This is the body of stat.slide
	call <- match.call()
	Basic <- basic; Desc <- desc; Norm <- norm; Pen <- pen; P <- p
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		x <- xy.coords(x, y)
		y <- x$y
		x <- x$x
	} else {												# We are in S+
		x <- x
		y <- y
	}
	# Make sure data are sorted in increasing order according to x
	srt <- sort.list(x)
	x <- x[srt]
	y <- y[srt]
	# Check arguments
	if (!is.numeric(x) || !is.numeric(y)) 
		stop("stat.slide: x and y must be numeric")
	nx <- length(x)
	if (nx != length(y)) 
	    stop("x and y must have equal lengths")
	if (nx < 3) 
	    stop("stat.slide requires at least three values for x and y")
	# Eliminate entries with missing values
	ok <- !(is.na(x) | is.na(y))
	x <- x[ok]
	y <- y[ok]
	nx <- length(x)
	if (nx < 3) 
	    stop("stat.slide requires at least three non-missing values for x and y")
	# The different sequences are provided in xcut, or are calculated from xmin, frequency/deltat and n
	if (is.null(xcut)) {									# calculate regular sequences
		if (is.null(deltat) | deltat <= 0) 
	    	stop("stat.slide requires deltat > 0")
		# if n is not defined, then it is calculated
		if (is.null(n)) {
			n <- ceiling((max(x)-min(x))/deltat)
		}
		xcut <- 0:n * deltat + xmin							# vector of xcut regular sequence
	}
	StatM <- NULL
	ncut <- length(xcut) - 1
	# Calculation is performed alternatively on each bloc
	for (i in 1:ncut) {
		StatV <- stat.slide.cut(y[x >= xcut[i] & x < xcut[i+1]], xcut[i], xcut[i+1], Basic, Desc, Norm, Pen, P)
		# The next if condition to avoid error at the first step
		if (is.null(StatM)==TRUE) StatM <- data.frame(StatV) else StatM <- cbind(StatM, StatV)
	}
	# We change names of columns to represent the cuts
	NamesV <- paste("[", xcut[1:ncut], ",", xcut[2:(ncut+1)], "[", sep="")
	names(StatM) <- NamesV
	StatM
	res <- list(stat=StatM, x=x, y=y, xcut=xcut, call=call)			# Create a list containing the result
	class(res) <- "stat.slide"								# and turn it into a 'stat.slide' object
	res
}
"summary.abund" <-
function(object, ...)
	structure(object, class=c("summary.abund", class(object)))
"summary.escouf" <-
function(object, ...)
	structure(object, class=c("summary.escouf", class(object)))
"summary.regul" <-
function(object, ...)
	structure(object, class=c("summary.regul", class(object)))
"summary.tsd" <-
function(object, ...)
	structure(object, class=c("summary.tsd", class(object)))
"summary.turnogram" <-
function(object, ...)
	structure(object, class=c("summary.turnogram", class(object)))
"summary.turnpoints" <-
function(object, ...)
	structure(object, class=c("summary.turnpoints", class(object)))
"trend.test" <-
function(tseries, R=1) {
	Call <- deparse(substitute(tseries))
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		require(ts)
		x <- as.ts(as.matrix(tseries))
		Names <- colnames(x)
	} else {												# We are in S+
		x <- as.rts(tseries)
		Names <- names(x)
	}
	if (R < 2) {	# Simple test
		if (is.matrix(x) == TRUE) {	# Multiple time series	
			n <- ncol(x)
			Time <- time(x)
			res <- NULL
			res[[1]] <- list(statistics=1)
			for (i in 1:n) {
				res[[i]] <- cor.test(x[,i], Time, alternative="two.sided", method="spearman")
				res[[i]]$data.name <- paste(Names[i], " and time(", Names[i], ")", sep="")
			}
			names(res) <- Names
		} else {							# Single time series
			res <- cor.test(x, time(x), alternative="two.sided", method="spearman")
			res$data.name <- paste(Call, " and time(", Call, ")", sep="")
		}
	} else {		# Bootstrap test
		# Spearman's rho for a single time series
		test.trend <- function(Tseries) {
			rho <- cor(rank(Tseries), rank(time(Tseries)))
			rho
		}
		# Spearman's rho used for multiple time series
		test.trends <- function(Tseries) {
			data.rank <- apply(Tseries, 2, rank)
			rhos <- apply(data.rank, 2, cor, rank(time(Tseries)))
			rhos
		}	
		if (exists("is.R") && is.function(is.R) && is.R()) {		# We are in R
			require(boot)
			
			if (is.matrix(x) == TRUE && ncol(x) > 1) {
				res <- tsboot(x, test.trends, R = R, sim = "fixed", l = 1)
			} else {
				dim(x) <- NULL
				res <- tsboot(x, test.trend, R = R, sim = "fixed", l = 1)
			}
			boot.t <- res$t
			boot.t0 <- res$t0
			boot.R <- res$R
		} else {		# We are in S+
			if (is.matrix(x) == TRUE) {
				res <- bootstrap(as.matrix(x), test.trends, B = R)
			} else {
				res <- bootstrap(as.vector(x), test.trend, B = R)
            }
			boot.t <- res$replicates
			boot.t0 <- res$observed
			boot.R <- res$B	
		}
		# Calculate P-value associated with the bootstrap test
		n <- ncol(boot.t)
		if (is.null(n)) {		# Single test
			if (boot.t0 > 0) {	# Count larger values
				P <- sum(boot.t > boot.t0) / boot.R
			} else {			# Count smaller values
				P <- sum(boot.t < boot.t0) / boot.R
			}
		} else {				# Multiple tests
			P <- NULL
			if (boot.t0 > 0) {	# Count larger values
				for (i in 1:n)
					P[i] <- sum(boot.t[,i] > boot.t0[i]) / boot.R
			} else {			# Count smaller values
				for (i in 1:n)
					P[i] <- sum(boot.t[,i] < boot.t0[i]) / boot.R
			}
			names(P) <- dimnames(boot.t)[[2]]
			res$p.value <- P
		}
	}
	res
}
"tsd" <-
function (x, specs=NULL, method="loess", type=if(method == "census") "multiplicative" else "additive", lag=1, axes=1:5, order=1, times=1, sides=2, ends="fill", weights=NULL, s.window=NULL, s.degree=0, t.window=NULL, t.degree=2, robust=FALSE, trend=FALSE, xreg=NULL) {
	call <- match.call()
    if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
    	x <- as.ts(x)
    } else {												# We are in S+
		x <- as.rts(x)
	}
  	# Do we have specs?
 	if (!is.null(specs)) {
 		# Verify it is an object of the right class 'specs.tsd'
 		specs.class <- class(specs)
 		if (is.null(specs.class) || specs.class != "specs.tsd")
 			stop("specs must be a 'specs.tsd' object.")
 		# For each argument we look if it was not explicitly given (in this case, we take value in specs)
 		arg.names <- names(call)
 		if (pmatch("method", arg.names, 0) == 0)
 			method <- specs$method
 		if (pmatch("type", arg.names, 0) == 0)
 			type <- specs$type
 		if (pmatch("lag", arg.names, 0) == 0)
 			lag <- specs$lag
 		if (pmatch("axes", arg.names, 0) == 0)
 			axes <- specs$axes
 		if (pmatch("order", arg.names, 0) == 0)
 			order <- specs$order
 		if (pmatch("times", arg.names, 0) == 0)
 			times <- specs$times
 		if (pmatch("sides", arg.names, 0) == 0)
 			sides <- specs$sides
 		if (pmatch("ends", arg.names, 0) == 0)
 			ends <- specs$ends
 		if (pmatch("weights", arg.names, 0) == 0)
 			weights <- specs$weights
 		if (pmatch("s.window", arg.names, 0) == 0)
 			s.window <- specs$s.window
 		if (pmatch("s.degree", arg.names, 0) == 0)
 			s.degree <- specs$s.degree
 		if (pmatch("t.window", arg.names, 0) == 0)
 			t.window <- specs$t.window
 		if (pmatch("t.degree", arg.names, 0) == 0)
 			t.degree <- specs$t.degree
 		if (pmatch("robust", arg.names, 0) == 0)
 			robust <- specs$robust
 		if (pmatch("trend", arg.names, 0) == 0)
 			trend <- specs$trend
 		if (pmatch("xreg", arg.names, 0) == 0)
 			xreg <- specs$xreg
 	}
 	# Evaluate arguments now
 	method <- method
 	type <- type
 	lag <- lag
 	axes <- axes
 	order <- order
 	times <- times
 	sides <- sides
 	ends <- ends
 	weights <- weights
 	s.window <- s.window
 	s.degree <- s.degree
 	t.window <- t.window
 	t.degree <- t.degree
 	robust <- robust
 	trend <- trend
 	xreg <- xreg
 	nser <- ncol(x)
	if (is.null(nser)) nser <- 1		# when x is a single vector
 	# Verify the method argument
	METHODS <- c("diff", "average", "median", "evf", "reg", "census", "loess")
	methindex <- pmatch(method, METHODS)
	if (is.na(methindex)) 
		stop(paste("invalid decomposition method:", method))
	if (methindex == -1) 
		stop(paste("ambiguous decomposition method:", method))
	# make sure method is fully spelled
	method <- switch(methindex,
				"diff"="diff",
				"average"="average",
				"median"="median",
				"evf"="evf",
				"reg"="reg",
				"census"="census",
				"loess"="loess")
	# Verify the type argument
	TYPES <- c("additive", "multiplicative")
		typeindex <- pmatch(type, TYPES)
		if (is.na(typeindex)) 
			stop(paste("invalid decomposition type:", type))
		if (typeindex == -1) 
			stop(paste("ambiguous decomposition type:", type))
		# make sure type is fully spelled
		type <- switch(typeindex,
					"additive"="additive",
					"multiplicative"="multiplicative")
	# Create a specs list
	specs <- list(method=method, type=type, lag=lag, axes=axes, order=order, times=times, sides=sides, ends=ends, weights=weights, s.window=s.window, s.degree=s.degree, t.window=t.window, t.degree=t.degree, robust=robust, trend=trend, xreg=xreg)
	# Decompose each series in turn
	# If only one series, then only one call is required
	if (nser == 1) {
		# Choose the method
		res <- switch(methindex,
		    	"diff"=decdiff(x, type=type, lag=lag, order=order, ends=ends),
		    	"average"=decaverage(x, type=type, order=order, times=times, sides=sides, ends=ends, weights=weights),
				"median"=decmedian(x, type=type, order=order, times=times, ends=ends),
				"evf"=decevf(x, type=type, lag=lag, axes=axes),
				"reg"=decreg(x, xreg=xreg, type=type),
				"census"=deccensus(x, type=type, trend=trend),
				"loess"=decloess(x, type=type, s.window=s.window, s.degree=s.degree, t.window=t.window, t.degree=t.degree, robust=robust, trend=trend))
		# res is already a 'tsd' object, we just have to change specs to make sure all args are included
		res$specs <- specs
	} else {
		# If x has multiple series, we must deal with each one in turm
		res <- NULL
		# We keep the names of the series in ts
		res$ts <- dimnames(x)[[2]]
		# We initiate series
		res$series <- list(1)
		if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
			res$units <- attr(x, "units")
		} else {											# We are in Splus
			res$units <- attr(attr(x, "tspar"), "units")
		}
		res$specs <- specs
		res$call <- call
		# Calculation is performed alternatively on each column
		for (i in 1:nser) {
			res1 <- switch(methindex,
		    	"diff"=decdiff(x[,i], type=type, lag=lag, order=order, ends=ends),
				"average"=decaverage(x[,i], type=type, order=order, times=times, sides=sides, ends=ends, weights=weights),
				"median"=decmedian(x[,i], type=type, order=order, times=times, ends=ends),
				"evf"=decevf(x[,i], type=type, lag=lag, axes=axes),
				"reg"=decreg(x[,i], xreg[,i], type=type),
				"census"=deccensus(x[,i], type=type, trend=trend),
				"loess"=decloess(x[,i], type=type, s.window=s.window, s.degree=s.degree, t.window=t.window, t.degree=t.degree, robust=robust, trend=trend))
			# Add the series in res
			res$series[[i]] <- res1$series
			# If we get weights, add them too
			res$weights <- res1$weights
		}
	}
	class(res) <- "tsd"
	res
}
"tseries" <-
function(x) {
	if (is.null(class(x)) && class(x) != "regul" && class(x) != "tsd")
		stop("x must be a 'regul' or a 'tsd' object")
	if (class(x) == "regul") {
		if (ncol(x$y) == 1) y <- x$y[[1]] else y <- as.matrix(x$y)
		# The treatment is different in R ans in S+
		# In R, we create a 'ts' object, in S+, we create a 'rts' object
		if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
			res <- ts(y, start=x$tspar$start, frequency=x$tspar$frequency)
			attr(res, "units") <- x$units
		} else {												# We are in S+
			res <- rts(y, start=x$tspar$start, frequency=x$tspar$frequency, units=x$units)
		}
	}
	if (class(x) == "tsd") {
		if (length(x$ts) == 1) {		# We have a decomposition of a single series
			# x$series is already a ts or rts object
			res <- x$series
		} else {						# We have the decomposition of several series
			# bind all series together
			res <- x$series[[1]]
			cnames <- paste(x$ts[1], ".", dimnames(x$series[[1]])[[2]], sep="")
			for (i in 2:length(x$ts)) {
				res2 <- x$series[[i]]
				cnames <- c(cnames, paste(x$ts[i], ".", dimnames(x$series[[i]])[[2]], sep=""))
				res <- cbind(res, res2)
			}
			dimnames(res)[[2]] <- cnames
		}
		if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
			attr(res, "units") <- x$units
		} else {												# We are in Splus
			attr(attr(res, "tspar"), "units") <- x$units
		}
	}
	res
}
"turnogram" <-
function(series, intervals=c(1, length(series)/5), step=1, complete=FALSE, two.tailed=TRUE, FUN=mean, plotit=TRUE, level=0.05, lhorz=TRUE, lvert=FALSE, xlog=TRUE) {
	call <- match.call()
	data <- deparse(substitute(series))
	fun <- deparse(substitute(FUN))
	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
		if (is.null(class(series)) || class(series) != "ts")
			stop("series must be a single regular time series")
		Unit <- attr(series, "units")
	} else {												# We are in Splus
		# Cannot test if this is a time series in Splus
		Unit <- attr(attr(series, "tspar"), "units")
	}
	UnitTxt <- GetUnitText(series)
	# Determine if there are many successive sequences of zeros
	X <- as.vector(series)
	N <- length(X)
	strips <- c(X[1]-1, X[1:(N-1)]) != X | X != 0
  	StrippedSeries <- X[strips]
  	# Test the length of the series, range and step...
  	n <- length(X)
  	ns <- length(StrippedSeries)
  	if (n > ns + 0.1 * n)
  		warning("There are more than 10% of data as successive zeros, turnogram could be biased!")
    if (length(intervals) < 2)
    	stop("Interval must be a vector with 2 values: (min, max)")
    if (intervals[1] < 1 || intervals[1] > n/3)
    	stop("Interval must be larger or equal to 1, and smaller or equal to n/3")
    if (intervals[2] < 1 || intervals[2] > n/3)
    	stop("Interval must be larger or equal to 1, and smaller or equal to n/3")	
    Inter.vec <- seq(intervals[1], intervals[2], step)
    if (length(Inter.vec) < 2)
    	stop("Less than 2 intervals. Redefine intervals or step")
	n.vec <- nturns.vec <- nturns.min.vec <- nturns.max.vec <- Info.vec <- Info.min.vec <- Info.max.vec <- Inter.vec
    
    # Calculate the first step for the turnogram
	turnogram.step1 <- function(Series, Interval, Complete, Fun) {
		if (Complete == FALSE) {		# We just start intervals from the first observation
			if (length(Series) %% Interval !=0) Series <- c(Series, rep(NA, Interval - (length(Series) %% Interval)))
			dim(Series) <- c(Interval, length(Series) %/% Interval)
			x <- apply(Series, 2, Fun, na.rm=TRUE)		# Because there is much chance to get some NAs appended at the end of the series!
			n <- length(x)
			Nturns <- turnpoints(x)$nturns
			res <- list(n=n, nturns=Nturns)
		} else {					# We calculate each possible interval
			n <- Nturns.vec <- NULL
			for (j in Interval:1) {
				Ser <- Series[j:length(Series)]
				if (length(Ser) %% Interval !=0) Ser <- c(Ser, rep(NA, Interval - (length(Ser) %% Interval)))
				dim(Ser) <- c(Interval, length(Ser) %/% Interval)
				x <- apply(Ser, 2, Fun, na.rm=TRUE)
				Nturns.vec[j] <- turnpoints(x)$nturns	
				n[j] <- length(x)
			}
			res <- list(n=n, nturns=Nturns.vec)
		}
		res
	}
    
    # Calculate all first steps (n and nturns) for the turnogram
	if (complete == FALSE) {
		for (i in 1:length(Inter.vec)) {
			res <- turnogram.step1(X, Inter.vec[i], Complete=complete, Fun=FUN)
			n.vec[i] <- res$n
			nturns.vec[i] <- res$nturns
		}
		# Calculate I (bits of information) according to either Gleissberg (n <= 50), or normal approximations (n > 50)
		Info.vec <- -log(pgleissberg(n.vec, nturns.vec, two.tailed=two.tailed), base=2)
		if (two.tailed == TRUE) {	# We have to change sign if k > mu.
			rightpart <- nturns.vec > 2 * (n.vec - 2) / 3
			Info.vec[rightpart] <- -Info.vec[rightpart]
		}
		# By default the extraction level is set to the interval corresponding to the maximum info value
		Level <- Inter.vec[match(max(Info.vec), Info.vec)]
		res <- list(interval=Inter.vec, n=n.vec, turns=nturns.vec, info=Info.vec, level=Level)
	} else {
		for (i in 1:length(Inter.vec)) {
			res <- turnogram.step1(X, Inter.vec[i], Complete=complete, Fun=FUN)
			n.vec[i] <- max(res$n)		# To change this!!!
			nturns <- res$nturns
			nturns.vec[i] <- mean(nturns)
			nturns.min.vec[i] <- min(nturns)
			nturns.max.vec[i] <- max(nturns)
			infos <- -log(pgleissberg(res$n, nturns, two.tailed=two.tailed), base=2)
			if (two.tailed == TRUE) {	# We have to change sign if k > mu.
				rightpart <- nturns > 2 * (res$n - 2) / 3
				infos[rightpart] <- -infos[rightpart]
			}
			Info.vec[i] <- mean(infos)
			Info.min.vec[i] <- min(infos)
			Info.max.vec[i] <- max(infos)
			# By default the extraction level is set to the interval corresponding to the maximum info value
			Level <- Inter.vec[match(max(Info.vec), Info.vec)]
			res <- list(interval=Inter.vec, n=n.vec, turns=nturns.vec, turns.min=nturns.min.vec, turns.max=nturns.max.vec, info=Info.vec, info.min=Info.min.vec, info.max=Info.max.vec, level=Level)
		}
	}
	as.data.frame(res)
	res$call <- call
	res$data <- data
	if (complete == TRUE) res$type <- "Complete" else res$type <- "Simple"
	res$fun <- fun
	if (two.tailed == TRUE) res$proba <- "two-tailed probability" else res$proba <- "one-tailed probability"
	res$units.text <- UnitTxt
	attr(res, "units") <- Unit
	
	# Do we need to plot the graph for the turnogram?
	if (plotit == TRUE) {
		Ilevel <- -log(level, base=2)
		if (xlog == TRUE) xlogstr <- "x" else xlogstr <- ""
		if (two.tailed == TRUE) imin <- -1.1*Ilevel else imin <- 0
		subtext <- paste(fun, "/", res$proba)
		if (complete == FALSE) {
			yrange.dat <- c(res$info, imin, 1.1*Ilevel)
			yrange <- c(min(yrange.dat), max(yrange.dat))
			plot(res$interval, res$info, type="l", log=xlogstr, ylim=yrange, xlab=paste("interval", UnitTxt), ylab="I (bits)", main=paste("Simple turnogram for:", data), sub=subtext)
		} else {
			yrange <- c(min(c(res$info.min, imin)), max(c(res$info.max, 1.1*Ilevel)))
			plot(res$interval, res$info, type="l", log=xlogstr, ylim=yrange, xlab=paste("interval (", UnitTxt, ")", sep=""), ylab="I (bits)", main=paste("Complete turnogram for:", data), sub=subtext)
			lines(res$interval, res$info.min)
			lines(res$interval, res$info.max)
		}
		if (lhorz == TRUE) {
			if (two.tailed == TRUE) {
				abline(h=0)
				abline(h=-Ilevel, lty=2, col=2)
			}
			abline(h=Ilevel, lty=2, col=2)
		}
		if (lvert == TRUE) abline(v=Level, lty=2, col=2)
	}
	class(res) <- "turnogram"
	res 	# Return results
}
"turnpoints" <-
function(x) {
	data <- deparse(substitute(x))
	if (is.null(ncol(x)) == FALSE)
		stop("Only one series can be treated at a time")
	if (exists("is.R") && is.function(is.R) && is.R())	# We are in R
		require(ts)
    x <- as.vector(x)
    n <- length(x)
    diffs <- c(x[1]-1, x[1:(n-1)]) != x
    uniques <- x[diffs]
    n2 <- length(uniques)
    poss <- (1:n)[diffs]
    exaequos <- c(poss[2:n2], n+1) - poss - 1
    if (n2 < 3) {			#We need at least 3 unique values!!!
    	warning("Less than 3 unique values, no calculation!")
    	nturns <- NA
    	firstispeak <- FALSE
    	peaks <- rep(FALSE, n2)
    	pits <- rep(FALSE, n2)
    	tppos <- NA
    	proba <- NA
    	info <- NA
    } else {
    	if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
    		ex <- embed(uniques, 3)	# Works only in R!
    		peaks <- c(FALSE, max.col(ex) == 2, FALSE)
    		pits <- c(FALSE, max.col(-ex) == 2, FALSE)
    	} else {												# We are in S+
    		m <- n2 - 2
    		ex <- matrix(uniques[1:m + rep(3:1, rep(m, 3)) - 1], m)
    		peaks <- c(FALSE, apply(ex, 1, max, na.rm=TRUE) == ex[, 2], FALSE)
    		pits <- c(FALSE, apply(ex, 1, min, na.rm=TRUE) == ex[, 2], FALSE)
    	}
    	tpts <- peaks | pits
    	if (sum(tpts) == 0) {	# No turning point
    		nturns <- 0
    		firstispeak <- FALSE
			peaks <- rep(FALSE, n2)
			pits <- rep(FALSE, n2)
			tppos <- NA
			proba <- NA
    	    info <- NA
    	} else {
    		tppos <- (poss + exaequos)[tpts] 	# This way, we consider the last element of duplicates, as in PASSTEC 2000
    		tptspos <- (1:n2)[tpts] 
    		firstispeak <- tptspos[1] == (1:n2)[peaks][1] 
    		nturns <- length(tptspos)
    		if (nturns < 2) {
    			inter <- n2 + 1
    			posinter1 <- tptspos[1]
    		} else {
    			inter <- c(tptspos[2:nturns], n2) - c(1, tptspos[1:(nturns-1)]) + 1
    			posinter1 <- tptspos - c(1, tptspos[1:(nturns-1)])
    		}
    		posinter2 <- inter - posinter1
			posinter <- pmax(posinter1, posinter2)
			proba <- 2 / (inter * gamma(posinter) * gamma(inter - posinter + 1))
    		info <- -log(proba, base=2)
    	}
    }
    res <- list(data=data, n=n, points=uniques, pos=(poss + exaequos), exaequos=exaequos, nturns=nturns, firstispeak= firstispeak, peaks=peaks, pits=pits, tppos=tppos, proba=proba, info=info)
    class(res) <- "turnpoints"
    res
}
"vario" <-
function(x, max.dist=length(x)/3, plotit=TRUE, vario.data=NULL) {
	if (is.null(vario.data)) {	# Calculate variogram
		call <- match.call()
		data <- deparse(substitute(x))
		if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
			require(ts)
			require(mva)
			x <- as.ts(x)
		} else {												# We are in S+
			x <- as.rts(x)
		}
		n <- length(x)
		if (n < 10) # Need at least 10 data
			stop("There must be at least 10 observations in the series")
		max.dist <- round(max.dist)
		if (max.dist < 0) max.dist <- round(n/3)
		if( max.dist >= n) max.dist <- n-1
		distance <- dist(1:n)
		val <- outer(x, x, function(X, Y) ((X - Y)^2)/2)
    	val <- val[lower.tri(val)]
    	val <- data.frame(distance=as.numeric(distance), semivario=val)
    	# Calculate mean values for each distance
    	res <- rep(0, max.dist)
    	for (i in 1:max.dist) {
    		res[i] <- mean(val[val$distance == i,]$semivario, na.rm=TRUE)	
    	}
    	res <- list(distance=1:max.dist, semivario=res)
    	res <- as.data.frame(res)
    	attr(res, "data") <- data
    	attr(res, "call") <- call
    } else {		# Use vario.data instead
    	res <- vario.data
    }
    if (plotit == TRUE) {	# plot the variogram
    	plot(res$distance, res$semivario, type="l", xlab="distance", ylab="gamma", main=paste("Semi-variogram for:", attr(res, "data")))
    }
    res
}
"yearstodays" <-
function (x, xmin=NULL) {
	x <- x
	xmin <- xmin
	if (!is.null(xmin)) {		# xmin is given
		x <- x * 365.25
		x <- x - min(x, na.rm=TRUE) + xmin
	} else {					# xmin is not given, we construct julian dates (compatibles with chron, dates,...)
		if(is.null(yearorig <- options("chron.origin")$year))
			if (exists("is.R") && is.function(is.R) && is.R()) {	# We are in R
				yearorig <- 1970
			} else {												# We are in Splus
				yearorig <- 1960
			}
		x <- (x - yearorig) * 365.25
	}
	x
}
