R : Copyright 2005, The R Foundation for Statistical Computing
Version 2.1.1  (2005-06-20), ISBN 3-900051-07-0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for a HTML browser interface to help.
Type 'q()' to quit R.

> ### *
> ###
> attach(NULL, name = "CheckExEnv")
> assign(".CheckExEnv", as.environment(2), pos = length(search())) # base
> ## add some hooks to label plot pages for base and grid graphics
> setHook("plot.new", ".newplot.hook")
> setHook("persp", ".newplot.hook")
> setHook("grid.newpage", ".gridplot.hook")
>
> assign("cleanEx",
+        function(env = .GlobalEnv) {
+            rm(list = ls(envir = env, all.names = TRUE), envir = env)
+            RNGkind("default", "default")
+            set.seed(1)
+            options(warn = 1)
+            delayedAssign("T", stop("T used instead of TRUE"),
+                          assign.env = .CheckExEnv)
+            delayedAssign("F", stop("F used instead of FALSE"),
+                          assign.env = .CheckExEnv)
+            sch <- search()
+            newitems <- sch[! sch %in% .oldSearch]
+            for(item in rev(newitems))
+                eval(substitute(detach(item), list(item=item)))
+            missitems <- .oldSearch[! .oldSearch %in% sch]
+            if(length(missitems))
+                warning("items ", paste(missitems, collapse=", "),
+                        " have been removed from the search path")
+        },
+        env = .CheckExEnv)
> assign("..nameEx", "__{must remake R-ex/*.R}__", env = .CheckExEnv) # for now
> assign("ptime", proc.time(), env = .CheckExEnv)
> grDevices::postscript("wle-Examples.ps")
> assign("par.postscript", graphics::par(no.readonly = TRUE), env = .CheckExEnv)
> options(contrasts = c(unordered = "contr.treatment", ordered = "contr.poly"))
> options(warn = 1)
> library('wle')
>
> assign(".oldSearch", search(), env = .CheckExEnv)
> assign(".oldNS", loadedNamespaces(), env = .CheckExEnv)
> cleanEx(); ..nameEx <- "binary"
>
> ### * binary
>
> flush(stderr()); flush(stdout())
>
> ### Name: binary
> ### Title: Convert decimal base number to binary base
> ### Aliases: binary
> ### Keywords: arith
>
> ### ** Examples
>
> binary(2)
$binary
[1] 0 1

$dicotomy
[1] FALSE  TRUE

> binary(10,dim=5)
$binary
[1] 0 1 0 1 0

$dicotomy
[1] FALSE  TRUE FALSE  TRUE FALSE

>
>
>
> cleanEx(); ..nameEx <- "mle.aic"
>
> ### * mle.aic
>
> flush(stderr()); flush(stdout())
>
> ### Name: mle.aic
> ### Title: Akaike Information Criterion
> ### Aliases: mle.aic
> ### Keywords: regression
>
> ### ** Examples
>
> library(wle)
>
> data(hald)
>
> cor(hald)
           [,1]       [,2]       [,3]        [,4]        [,5]
[1,]  1.0000000  0.7307175  0.8162526 -0.53467068 -0.82130504
[2,]  0.7307175  1.0000000  0.2285795 -0.82413376 -0.24544511
[3,]  0.8162526  0.2285795  1.0000000 -0.13924238 -0.97295500
[4,] -0.5346707 -0.8241338 -0.1392424  1.00000000  0.02953700
[5,] -0.8213050 -0.2454451 -0.9729550  0.02953700  1.00000000
>
> result <- mle.aic(y.hald~x.hald)
>
> summary(result,num.max=10)

Call:
mle.aic(formula = y.hald ~ x.hald)

Akaike Information Criterion (AIC):
      (Intercept) x.hald1 x.hald2 x.hald3 x.hald4   aic
 [1,]           1       1       1       0       0 62.83
 [2,]           1       1       1       0       1 63.17
 [3,]           1       1       1       1       0 63.19
 [4,]           1       1       0       1       1 63.65
 [5,]           0       1       1       1       1 63.94
 [6,]           1       1       1       1       1 65.15
 [7,]           1       1       0       0       1 65.64
 [8,]           1       0       1       1       1 67.49
 [9,]           1       0       0       1       1 82.52
[10,]           0       1       1       0       1 84.03

Printed the first 10 best models

>
>
>
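A hedged cross-check, outside the transcript above: the subset ranking produced by mle.aic can be compared against ordinary least squares with the textbook AIC. The additive constant may differ from the aic column printed here, so only the relative ordering of submodels is comparable.

    ## Refit the top-ranked subset {x1, x2} and the full model by OLS and
    ## compare their stats::AIC values; the {x1, x2} model should rank better.
    data(hald)
    fit.best <- lm(y.hald ~ x.hald[, 1] + x.hald[, 2])
    fit.full <- lm(y.hald ~ x.hald)
    AIC(fit.best, fit.full)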
> cleanEx(); ..nameEx <- "mle.cp"
>
> ### * mle.cp
>
> flush(stderr()); flush(stdout())
>
> ### Name: mle.cp
> ### Title: Mallows Cp
> ### Aliases: mle.cp
> ### Keywords: regression
>
> ### ** Examples
>
> library(wle)
>
> data(hald)
>
> cor(hald)
           [,1]       [,2]       [,3]        [,4]        [,5]
[1,]  1.0000000  0.7307175  0.8162526 -0.53467068 -0.82130504
[2,]  0.7307175  1.0000000  0.2285795 -0.82413376 -0.24544511
[3,]  0.8162526  0.2285795  1.0000000 -0.13924238 -0.97295500
[4,] -0.5346707 -0.8241338 -0.1392424  1.00000000  0.02953700
[5,] -0.8213050 -0.2454451 -0.9729550  0.02953700  1.00000000
>
> result <- mle.cp(y.hald~x.hald)
>
> summary(result)

Call:
mle.cp(formula = y.hald ~ x.hald)

Mallows Cp:
     (Intercept) x.hald1 x.hald2 x.hald3 x.hald4    cp
[1,]           1       1       1       0       0 2.678
[2,]           1       1       1       0       1 3.018
[3,]           1       1       1       1       0 3.041
[4,]           1       1       0       1       1 3.497
[5,]           0       1       1       1       1 3.793
[6,]           1       1       1       1       1 5.000

Printed the first 6 best models

>
> plot(result)
>
>
>
> cleanEx(); ..nameEx <- "mle.cv"
>
> ### * mle.cv
>
> flush(stderr()); flush(stdout())
>
> ### Name: mle.cv
> ### Title: Cross Validation Selection Method
> ### Aliases: mle.cv
> ### Keywords: regression
>
> ### ** Examples
>
> library(wle)
>
> data(hald)
>
> cor(hald)
           [,1]       [,2]       [,3]        [,4]        [,5]
[1,]  1.0000000  0.7307175  0.8162526 -0.53467068 -0.82130504
[2,]  0.7307175  1.0000000  0.2285795 -0.82413376 -0.24544511
[3,]  0.8162526  0.2285795  1.0000000 -0.13924238 -0.97295500
[4,] -0.5346707 -0.8241338 -0.1392424  1.00000000  0.02953700
[5,] -0.8213050 -0.2454451 -0.9729550  0.02953700  1.00000000
>
> result <- mle.cv(y.hald~x.hald)
>
> summary(result)

Call:
mle.cv(formula = y.hald ~ x.hald)

Cross Validation selection criteria:
      (Intercept) x.hald1 x.hald2 x.hald3 x.hald4      cv
 [1,]           1       1       1       0       0   9.851
 [2,]           1       1       1       0       1  12.260
 [3,]           1       1       0       0       1  12.450
 [4,]           1       1       1       1       0  12.900
 [5,]           1       1       0       1       1  13.660
 [6,]           0       1       1       1       1  14.810
 [7,]           1       0       1       1       1  18.020
 [8,]           1       1       1       1       1  24.430
 [9,]           1       0       0       1       1  30.090
[10,]           0       1       1       0       1  35.540
[11,]           1       0       1       1       0  70.170
[12,]           1       0       0       0       1 108.000
[13,]           1       0       1       0       0 109.600
[14,]           0       0       1       0       1 132.100
[15,]           0       0       1       1       1 145.000
[16,]           1       0       1       0       1 154.900
[17,]           1       1       0       0       0 156.400
[18,]           0       1       1       1       0 166.600
[19,]           1       0       0       1       0 235.800
[20,]           1       1       0       1       0 245.200

Printed the first 20 best models

>
>
>
> cleanEx(); ..nameEx <- "mle.stepwise"
>
> ### * mle.stepwise
>
> flush(stderr()); flush(stdout())
>
> ### Name: mle.stepwise
> ### Title: Stepwise, Backward and Forward selection methods
> ### Aliases: mle.stepwise
> ### Keywords: regression
>
> ### ** Examples
>
>
> library(wle)
>
> data(hald)
>
> cor(hald)
           [,1]       [,2]       [,3]        [,4]        [,5]
[1,]  1.0000000  0.7307175  0.8162526 -0.53467068 -0.82130504
[2,]  0.7307175  1.0000000  0.2285795 -0.82413376 -0.24544511
[3,]  0.8162526  0.2285795  1.0000000 -0.13924238 -0.97295500
[4,] -0.5346707 -0.8241338 -0.1392424  1.00000000  0.02953700
[5,] -0.8213050 -0.2454451 -0.9729550  0.02953700  1.00000000
>
> result <- mle.stepwise(y.hald~x.hald)
>
> summary(result)

Call:
mle.stepwise(formula = y.hald ~ x.hald)

Forward selection procedure

F.in: 4

Last 3 iterations:
     (Intercept) x.hald1 x.hald2 x.hald3 x.hald4
[1,]           1       0       0       0       1  22.800
[2,]           1       1       0       0       1 108.200
[3,]           1       1       1       0       1   5.026

>
>
>
>
> cleanEx(); ..nameEx <- "plot.mle.cp"
>
> ### * plot.mle.cp
>
> flush(stderr()); flush(stdout())
>
> ### Name: plot.mle.cp
> ### Title: Plot the Mallows Cp
> ### Aliases: plot.mle.cp
> ### Keywords: regression
>
> ### ** Examples
>
> library(wle)
>
> data(hald)
>
> result <- mle.cp(y.hald~x.hald)
>
> plot(result,num.max=7)
>
>
>
>
> cleanEx(); ..nameEx <- "plot.wle.cp"
>
> ### * plot.wle.cp
>
> flush(stderr()); flush(stdout())
>
> ### Name: plot.wle.cp
> ### Title: Plot the Weighted Mallows Cp
> ### Aliases: plot.wle.cp
> ### Keywords: regression robust
>
> ### ** Examples
>
> library(wle)
> x.data <- c(runif(60,20,80),runif(5,73,78))
> e.data <- rnorm(65,0,0.6)
> y.data <- 8*log(x.data+1)+e.data
> y.data[61:65] <- y.data[61:65]-4
> z.data <- c(rep(0,60),rep(1,5))
> plot(x.data, y.data, xlab="X", ylab="Y")
> xx.data <- cbind(x.data, x.data^2, x.data^3, log(x.data+1))
> result <- wle.cp(y.data~xx.data)
> plot(result,num.max=15)
>
>
>
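The cp values tabulated by mle.cp earlier in this file follow, up to implementation details, the classical identity Cp = RSS_p / s2_full + 2p - n. A minimal sketch of the hand computation for the top-ranked subset {x1, x2} of the hald data, assuming that textbook definition:

    ## Classical Mallows Cp for the {x1, x2} submodel of the hald data.
    data(hald)
    full <- lm(y.hald ~ x.hald)
    sub  <- lm(y.hald ~ x.hald[, 1] + x.hald[, 2])
    s2   <- summary(full)$sigma^2          # residual variance of the full model
    n    <- length(y.hald)                 # 13 observations
    p    <- length(coef(sub))              # 3 estimated coefficients
    sum(resid(sub)^2) / s2 + 2 * p - n     # about 2.678, matching the table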
> cleanEx(); ..nameEx <- "plot.wle.lm"
>
> ### * plot.wle.lm
>
> flush(stderr()); flush(stdout())
>
> ### Name: plot.wle.lm
> ### Title: Plots for the Linear Model
> ### Aliases: plot.wle.lm
> ### Keywords: regression robust
>
> ### ** Examples
>
> library(wle)
>
> data(artificial)
>
> result <- wle.lm(y~x1+x2+x3, data=artificial, boot=40, group=6, num.sol=2)
>
> result

Call:
wle.lm(formula = y ~ x1 + x2 + x3, data = artificial, boot = 40,
    group = 6, num.sol = 2)

Coefficients:
     (Intercept)      x1      x2       x3
[1,]    -0.19670 0.08969 0.03875 -0.05298
[2,]    -0.94351 0.15653 0.18912  0.18152

Scale estimate: 0.5561 0.6688
Number of solutions 2

>
> plot.wle.lm(result) # all plots, default behavior
>
> plot.wle.lm(result, roots=1) # only first root, one plot for window
>
> par(mfcol=c(2,2))
> plot.wle.lm(result, roots=1) # only first root, as usual
>
> plot.wle.lm(result, roots=2, which=1, which.main=0)
> # only second root, only residual vs fitted values plot
>
> plot.wle.lm(result, which=1)
> # main plot + residual vs fitted values plot for each root
>
> par(mfcol=c(3,2))
> plot.wle.lm(result, which=1)
> # main plot + residual vs fitted values plot for each root all in the same window
>
>
>
>
> graphics::par(get("par.postscript", env = .CheckExEnv))
> cleanEx(); ..nameEx <- "wle.aic"
>
> ### * wle.aic
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.aic
> ### Title: Weighted Akaike Information Criterion
> ### Aliases: wle.aic
> ### Keywords: regression robust
>
> ### ** Examples
>
> library(wle)
>
> x.data <- c(runif(60,20,80),runif(5,73,78))
> e.data <- rnorm(65,0,0.6)
> y.data <- 8*log(x.data+1)+e.data
> y.data[61:65] <- y.data[61:65]-4
> z.data <- c(rep(0,60),rep(1,5))
>
> plot(x.data,y.data,xlab="X",ylab="Y")
>
> xx.data <- cbind(x.data,x.data^2,x.data^3,log(x.data+1))
> colnames(xx.data) <- c("X","X^2","X^3","log(X+1)")
>
> result <- wle.aic(y.data~xx.data,boot=10,group=10,num.sol=2)
>
> summary(result)

Call:
wle.aic(formula = y.data ~ xx.data, boot = 10, group = 10, num.sol = 2)

Weighted Akaike Information Criterion (WAIC):
      (Intercept) xx.dataX xx.dataX^2 xx.dataX^3 xx.datalog(X+1)   waic
 [1,]           1        0          0          0               1  92.27
 [2,]           0        1          0          0               1  92.66
 [3,]           0        0          1          0               1  93.38
 [4,]           0        0          1          1               1  93.62
 [5,]           0        1          0          1               1  93.91
 [6,]           0        1          1          0               1  94.06
 [7,]           1        0          0          1               1  94.25
 [8,]           1        0          1          0               1  94.27
 [9,]           1        1          0          0               1  94.27
[10,]           0        0          0          1               1  94.31
[11,]           1        1          1          1               1  94.57
[12,]           1        1          1          0               0  94.89
[13,]           0        1          1          1               1  95.14
[14,]           1        0          1          1               1  95.39
[15,]           1        1          0          1               1  95.65
[16,]           1        1          1          0               1  95.85
[17,]           1        1          0          1               0  96.30
[18,]           1        1          1          1               0  96.61
[19,]           0        0          0          0               1  97.46
[20,]           1        0          1          1               0 107.70

Printed the first 20 best models

>
> result <- wle.aic(y.data~xx.data+z.data,boot=10,group=10,num.sol=2)
>
> summary(result)

Call:
wle.aic(formula = y.data ~ xx.data + z.data, boot = 10, group = 10,
    num.sol = 2)

Weighted Akaike Information Criterion (WAIC):
      (Intercept) xx.dataX xx.dataX^2 xx.dataX^3 xx.datalog(X+1) z.data  waic
 [1,]           1        0          0          0               1      1 101.0
 [2,]           0        1          0          0               1      1 101.4
 [3,]           0        0          1          0               1      1 102.2
 [4,]           0        0          1          1               1      1 102.2
 [5,]           0        1          0          1               1      1 102.5
 [6,]           0        1          1          0               1      1 102.7
 [7,]           1        1          1          1               1      1 102.9
 [8,]           1        0          0          1               1      1 102.9
 [9,]           1        0          1          0               1      1 103.0
[10,]           1        1          0          0               1      1 103.0
[11,]           0        0          0          1               1      1 103.1
[12,]           1        1          1          0               0      1 103.4
[13,]           0        1          1          1               1      1 103.6
[14,]           1        0          1          1               1      1 103.9
[15,]           1        1          0          1               1      1 104.2
[16,]           1        1          1          0               1      1 104.5
[17,]           1        1          0          1               0      1 104.7
[18,]           1        1          1          1               0      1 105.2
[19,]           0        0          0          0               1      1 106.1
[20,]           1        0          1          1               0      1 116.0

Printed the first 20 best models

>
>
>
>
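For contrast with the weighted criterion above, a non-robust baseline can be run on the same contaminated sample. A sketch, assuming the generated objects are still in the workspace; results vary with the random draw, and the five shifted points typically distort the classical selection:

    ## Classical AIC-based search via stats::step on the contaminated data;
    ## data.frame() converts the matrix colnames to syntactically valid names.
    dat <- data.frame(y = y.data, xx.data, z = z.data)
    step(lm(y ~ ., data = dat), direction = "both", trace = 0)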
method="WLS") > res Call: wle.aic.ar(x = rocky, order = c(6, 0), group = 50, group.start = 30, method = "WLS") Weighted Akaike Information Criterion (WAIC): ar sar include.mean waic [1,] 0 0 1 530.8 [2,] 6 0 1 537.7 [3,] 3 0 0 859.3 [4,] 5 0 0 859.9 [5,] 4 0 0 861.1 [6,] 3 0 1 861.3 [7,] 6 0 0 861.9 [8,] 5 0 1 861.9 [9,] 2 0 0 862.5 [10,] 4 0 1 863.1 [11,] 2 0 1 864.5 [12,] 1 0 0 873.3 [13,] 1 0 1 875.3 Printed the first 13 best models > plot(res$full.model$weights) > > > > cleanEx(); ..nameEx <- "wle.ar" > > ### * wle.ar > > flush(stderr()); flush(stdout()) > > ### Name: wle.ar > ### Title: Fit Autoregressive Models to Time Series - Preliminary Version > ### Aliases: wle.ar wle.ar.ao wle.ar.start wle.ar.step wle.ar.matrix > ### Keywords: robust ts > > ### ** Examples > > data(lh) > wle.ar(x=lh, order=c(3,0), group=30) $coef ar1 ar2 ar3 intercept 0.57515582 -0.07746981 -0.19993021 1.63941419 $sigma2.coef ar1 ar2 ar3 intercept 0.01666993 0.02203779 0.01322118 0.07534020 $sigma2 [1] 0.1851104 $arma ar ma sar sma period diff sdiff 3 0 0 0 1 0 0 $resid Time Series: Start = 1 End = 48 Frequency = 1 1 2 3 4 5 6 0.76058581 -0.61978816 -0.43386060 -0.15402810 -0.13899693 -0.69697532 7 8 9 10 11 12 0.40038515 -0.12621441 0.01580331 -0.43928368 -0.23621181 -0.37744509 13 14 15 16 17 18 0.12987398 -0.59319091 1.03562028 0.29937932 -0.17213503 -0.10465482 19 20 21 22 23 24 0.14418818 -0.29451183 -0.22193019 -0.34517114 0.55236538 0.32697816 25 26 27 28 29 30 -0.49583877 -0.19005157 -0.01175463 0.72505328 0.14743398 0.01715682 31 32 33 34 35 36 0.31212517 -0.10336879 0.38670749 -0.01682718 -0.55852716 -0.26894857 37 38 39 40 41 42 -0.49790091 -0.61057486 0.11145374 0.86111164 0.40516051 0.52304427 43 44 45 46 47 48 0.37845449 0.14850286 -0.09490716 1.37396376 0.08756118 0.21836916 $resid.without.ao Time Series: Start = 1 End = 48 Frequency = 1 [1] 0.76058581 -0.61978816 -0.43386060 -0.15402810 -0.13899693 -0.69697532 [7] 0.40038515 -0.12621441 0.01580331 -0.43928368 -0.23621181 -0.37744509 [13] 0.12987398 -0.59319091 1.03562028 0.29937932 -0.17213503 -0.10465482 [19] 0.14418818 -0.29451183 -0.22193019 -0.34517114 0.55236538 0.32697816 [25] -0.49583877 -0.19005157 -0.01175463 0.72505328 0.14743398 0.01715682 [31] 0.31212517 -0.10336879 0.38670749 -0.01682718 -0.55852716 -0.26894857 [37] -0.49790091 -0.61057486 0.11145374 0.86111164 0.40516051 0.52304427 [43] 0.37845449 0.14850286 -0.09490716 1.37396376 0.08756118 0.21836916 $resid.with.ao Time Series: Start = 1 End = 48 Frequency = 1 [1] 0.76058581 -0.61978816 -0.43386060 -0.15402810 -0.13899693 -0.69697532 [7] 0.40038515 -0.12621441 0.01580331 -0.43928368 -0.23621181 -0.37744509 [13] 0.12987398 -0.59319091 1.03562028 0.29937932 -0.17213503 -0.10465482 [19] 0.14418818 -0.29451183 -0.22193019 -0.34517114 0.55236538 0.32697816 [25] -0.49583877 -0.19005157 -0.01175463 0.72505328 0.14743398 0.01715682 [31] 0.31212517 -0.10336879 0.38670749 -0.01682718 -0.55852716 -0.26894857 [37] -0.49790091 -0.61057486 0.11145374 0.86111164 0.40516051 0.52304427 [43] 0.37845449 0.14850286 -0.09490716 1.37396376 0.08756118 0.21836916 $x.ao Time Series: Start = 1 End = 48 Frequency = 1 [1] 2.4 2.4 2.4 2.2 2.1 1.5 2.3 2.3 2.5 2.0 1.9 1.7 2.2 1.8 3.2 3.2 2.7 2.2 2.2 [20] 1.9 1.9 1.8 2.7 3.0 2.3 2.0 2.0 2.9 2.9 2.7 2.7 2.3 2.6 2.4 1.8 1.7 1.5 1.4 [39] 2.1 3.3 3.5 3.5 3.1 2.6 2.1 3.4 3.0 2.9 $call wle.ar(x = lh, order = c(3, 0), group = 30) $series [1] "lh" $weights 1 2 3 4 5 6 7 8 0.8770214 0.8513499 0.9842693 0.9685031 0.9562108 0.9763833 0.9122962 0.9478814 9 10 11 12 13 14 
> cleanEx(); ..nameEx <- "wle.ar"
>
> ### * wle.ar
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.ar
> ### Title: Fit Autoregressive Models to Time Series - Preliminary Version
> ### Aliases: wle.ar wle.ar.ao wle.ar.start wle.ar.step wle.ar.matrix
> ### Keywords: robust ts
>
> ### ** Examples
>
> data(lh)
> wle.ar(x=lh, order=c(3,0), group=30)
$coef
        ar1         ar2         ar3   intercept
 0.57515582 -0.07746981 -0.19993021  1.63941419

$sigma2.coef
       ar1        ar2        ar3  intercept
0.01666993 0.02203779 0.01322118 0.07534020

$sigma2
[1] 0.1851104

$arma
    ar     ma    sar    sma period   diff  sdiff
     3      0      0      0      1      0      0

$resid
Time Series:
Start = 1
End = 48
Frequency = 1
          1           2           3           4           5           6
 0.76058581 -0.61978816 -0.43386060 -0.15402810 -0.13899693 -0.69697532
          7           8           9          10          11          12
 0.40038515 -0.12621441  0.01580331 -0.43928368 -0.23621181 -0.37744509
         13          14          15          16          17          18
 0.12987398 -0.59319091  1.03562028  0.29937932 -0.17213503 -0.10465482
         19          20          21          22          23          24
 0.14418818 -0.29451183 -0.22193019 -0.34517114  0.55236538  0.32697816
         25          26          27          28          29          30
-0.49583877 -0.19005157 -0.01175463  0.72505328  0.14743398  0.01715682
         31          32          33          34          35          36
 0.31212517 -0.10336879  0.38670749 -0.01682718 -0.55852716 -0.26894857
         37          38          39          40          41          42
-0.49790091 -0.61057486  0.11145374  0.86111164  0.40516051  0.52304427
         43          44          45          46          47          48
 0.37845449  0.14850286 -0.09490716  1.37396376  0.08756118  0.21836916

$resid.without.ao
Time Series:
Start = 1
End = 48
Frequency = 1
 [1]  0.76058581 -0.61978816 -0.43386060 -0.15402810 -0.13899693 -0.69697532
 [7]  0.40038515 -0.12621441  0.01580331 -0.43928368 -0.23621181 -0.37744509
[13]  0.12987398 -0.59319091  1.03562028  0.29937932 -0.17213503 -0.10465482
[19]  0.14418818 -0.29451183 -0.22193019 -0.34517114  0.55236538  0.32697816
[25] -0.49583877 -0.19005157 -0.01175463  0.72505328  0.14743398  0.01715682
[31]  0.31212517 -0.10336879  0.38670749 -0.01682718 -0.55852716 -0.26894857
[37] -0.49790091 -0.61057486  0.11145374  0.86111164  0.40516051  0.52304427
[43]  0.37845449  0.14850286 -0.09490716  1.37396376  0.08756118  0.21836916

$resid.with.ao
Time Series:
Start = 1
End = 48
Frequency = 1
 [1]  0.76058581 -0.61978816 -0.43386060 -0.15402810 -0.13899693 -0.69697532
 [7]  0.40038515 -0.12621441  0.01580331 -0.43928368 -0.23621181 -0.37744509
[13]  0.12987398 -0.59319091  1.03562028  0.29937932 -0.17213503 -0.10465482
[19]  0.14418818 -0.29451183 -0.22193019 -0.34517114  0.55236538  0.32697816
[25] -0.49583877 -0.19005157 -0.01175463  0.72505328  0.14743398  0.01715682
[31]  0.31212517 -0.10336879  0.38670749 -0.01682718 -0.55852716 -0.26894857
[37] -0.49790091 -0.61057486  0.11145374  0.86111164  0.40516051  0.52304427
[43]  0.37845449  0.14850286 -0.09490716  1.37396376  0.08756118  0.21836916

$x.ao
Time Series:
Start = 1
End = 48
Frequency = 1
 [1] 2.4 2.4 2.4 2.2 2.1 1.5 2.3 2.3 2.5 2.0 1.9 1.7 2.2 1.8 3.2 3.2 2.7 2.2 2.2
[20] 1.9 1.9 1.8 2.7 3.0 2.3 2.0 2.0 2.9 2.9 2.7 2.7 2.3 2.6 2.4 1.8 1.7 1.5 1.4
[39] 2.1 3.3 3.5 3.5 3.1 2.6 2.1 3.4 3.0 2.9

$call
wle.ar(x = lh, order = c(3, 0), group = 30)

$series
[1] "lh"

$weights
        1         2         3         4         5         6         7         8
0.8770214 0.8513499 0.9842693 0.9685031 0.9562108 0.9763833 0.9122962 0.9478814
        9        10        11        12        13        14        15        16
0.9980096 0.9816906 0.9987265 0.9901146 0.9473250 0.8583436 0.6223688 0.9959195
       17        18        19        20        21        22        23        24
0.9837913 0.9585056 0.9463381 0.9894398 0.9983096 0.9832950 0.9884632 0.9889650
       25        26        27        28        29        30        31        32
0.9618264 0.9953712 0.9970886 0.9083033 0.9516891 0.9988067 0.9825639 0.9608227
       33        34        35        36        37        38        39        40
0.9129801 0.9997837 0.9804700 0.9996453 0.9617339 0.8332837 0.9843228 0.8415040
       41        42        43        44        45        46        47        48
0.9241724 0.9952801 0.9353634 0.9538838 0.9800901 0.2406930 0.9784559 0.7477231

$weights.with.ao
 [1] 0.8770201 0.8513511 0.9842691 0.9685028 0.9562109 0.9763832 0.9122955
 [8] 0.9478819 0.9980095 0.9816905 0.9987265 0.9901147 0.9473258 0.8583447
[15] 0.6223692 0.9959196 0.9837911 0.9585063 0.9463386 0.9894398 0.9983096
[22] 0.9832950 0.9884627 0.9889649 0.9618263 0.9953713 0.9970886 0.9083015
[29] 0.9516894 0.9988068 0.9825638 0.9608232 0.9129796 0.9997837 0.9804706
[36] 0.9996453 0.9617339 0.8332842 0.9843234 0.8415045 0.9241718 0.9952797
[43] 0.9353622 0.9538848 0.9800905 0.2406931 0.9784566 0.7477230

$weights.without.ao
 [1] 0.8770201 0.8513511 0.9842691 0.9685028 0.9562109 0.9763832 0.9122955
 [8] 0.9478819 0.9980095 0.9816905 0.9987265 0.9901147 0.9473258 0.8583447
[15] 0.6223692 0.9959196 0.9837911 0.9585063 0.9463386 0.9894398 0.9983096
[22] 0.9832950 0.9884627 0.9889649 0.9618263 0.9953713 0.9970886 0.9083015
[29] 0.9516894 0.9988068 0.9825638 0.9608232 0.9129796 0.9997837 0.9804706
[36] 0.9996453 0.9617339 0.8332842 0.9843234 0.8415045 0.9241718 0.9952797
[43] 0.9353622 0.9538848 0.9800905 0.2406931 0.9784566 0.7477230

$tot.sol
[1] 1

$not.conv
[1] 0

$ao.position
$ao.position[[1]]
NULL

attr(,"class")
[1] "wle.arima"
>
>
>
> cleanEx(); ..nameEx <- "wle.binomial"
>
> ### * wle.binomial
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.binomial
> ### Title: Robust Estimation in the Binomial Model
> ### Aliases: wle.binomial print.wle.binomial
> ### Keywords: models robust
>
> ### ** Examples
>
> library(wle)
>
> set.seed(1234)
>
> x <- rbinom(20,p=0.2,size=10)
> wle.binomial(x,size=10)

Call:
wle.binomial(x = x, size = 10)

p:
       [,1]
[1,] 0.1791

Number of solutions 1

>
> x <- c(rbinom(20,p=0.2,size=10),rbinom(10,p=0.9,size=10))
> wle.binomial(x,size=10)

Call:
wle.binomial(x = x, size = 10)

p:
       [,1]
[1,] 0.2053

Number of solutions 1

>
>
>
>
> cleanEx(); ..nameEx <- "wle.cp"
>
> ### * wle.cp
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.cp
> ### Title: Weighted Mallows Cp
> ### Aliases: wle.cp
> ### Keywords: regression robust
>
> ### ** Examples
>
> library(wle)
>
> x.data <- c(runif(60,20,80),runif(5,73,78))
> e.data <- rnorm(65,0,0.6)
> y.data <- 8*log(x.data+1)+e.data
> y.data[61:65] <- y.data[61:65]-4
> z.data <- c(rep(0,60),rep(1,5))
>
> plot(x.data,y.data,xlab="X",ylab="Y")
>
> xx.data <- cbind(x.data,x.data^2,x.data^3,log(x.data+1))
> colnames(xx.data) <- c("X","X^2","X^3","log(X+1)")
>
> result <- wle.cp(y.data~xx.data,boot=10,group=10,num.sol=2)
>
> summary(result)

Call:
wle.cp(formula = y.data ~ xx.data, boot = 10, group = 10, num.sol = 2)

Weighted Mallows Cp:
(Intercept) xx.dataX xx.dataX^2 xx.dataX^3 xx.datalog(X+1)
          1        1          1          1               1
wcp
  5

Printed the first 1 best models

>
> plot(result,num.max=15)
>
> result <- wle.cp(y.data~xx.data+z.data,boot=10,group=10,num.sol=2)
>
> summary(result)

Call:
wle.cp(formula = y.data ~ xx.data + z.data, boot = 10, group = 10,
    num.sol = 2)

Weighted Mallows Cp:
(Intercept) xx.dataX xx.dataX^2 xx.dataX^3 xx.datalog(X+1)
          1        1          1          1               1
z.data wcp
     1   6

Printed the first 1 best models

>
> plot(result,num.max=15)
>
>
>
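For contrast with wle.cp above, the classical criterion can be applied to the same contaminated response. A sketch; results depend on the random sample, and without weighting the five shifted observations enter the fit at full strength:

    ## Non-robust Mallows Cp on the contaminated data, using the mle.cp
    ## function demonstrated earlier in this file.
    summary(mle.cp(y.data ~ xx.data))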
<- "wle.cv" > > ### * wle.cv > > flush(stderr()); flush(stdout()) > > ### Name: wle.cv > ### Title: Model Selection by Weighted Cross-Validation > ### Aliases: wle.cv > ### Keywords: regression robust > > ### ** Examples > > library(wle) > > set.seed(1234) > > x.data <- c(runif(60,20,80),runif(5,73,78)) > e.data <- rnorm(65,0,0.6) > y.data <- 8*log(x.data+1)+e.data > y.data[61:65] <- y.data[61:65]-4 > z.data <- c(rep(0,60),rep(1,5)) > > plot(x.data,y.data,xlab="X",ylab="Y") > > xx.data <- cbind(x.data,x.data^2,x.data^3,log(x.data+1)) > colnames(xx.data) <- c("X","X^2","X^3","log(X+1)") > > result <- wle.cv(y.data~xx.data,boot=20,num.sol=2) > > summary(result) Call: wle.cv(formula = y.data ~ xx.data, boot = 20, num.sol = 2) Weighted Cross Validation selection criteria: (Intercept) xx.dataX xx.dataX^2 xx.dataX^3 xx.datalog(X+1) wcv [1,] 0 0 0 1 1 0.3409 [2,] 0 0 1 0 1 0.3411 [3,] 0 1 0 0 1 0.3426 [4,] 1 0 0 0 1 0.3477 [5,] 0 0 0 0 1 0.3519 [6,] 1 0 1 0 1 0.3873 [7,] 0 1 1 0 1 0.3880 [8,] 1 0 0 1 1 0.3887 [9,] 1 1 0 0 1 0.3887 [10,] 0 1 0 1 1 0.3923 [11,] 0 0 1 1 1 0.3981 [12,] 1 1 1 0 0 0.4294 [13,] 1 1 1 1 0 0.4537 [14,] 0 1 1 1 1 0.4595 [15,] 1 1 0 1 0 0.4623 [16,] 1 0 1 1 1 0.4672 [17,] 1 1 0 1 1 0.4730 [18,] 1 1 1 0 1 0.4795 [19,] 1 1 0 0 0 0.5453 [20,] 1 0 1 1 0 0.6071 Printed the first 20 best models > > result <- wle.cv(y.data~xx.data+z.data,boot=20,num.sol=2, + monte.carlo=1000,split=50) > > summary(result) Call: wle.cv(formula = y.data ~ xx.data + z.data, monte.carlo = 1000, split = 50, boot = 20, num.sol = 2) Weighted Cross Validation selection criteria: (Intercept) xx.dataX xx.dataX^2 xx.dataX^3 xx.datalog(X+1) z.data wcv [1,] 0 1 0 0 1 1 0.3987 [2,] 0 0 1 0 1 1 0.3988 [3,] 0 0 0 1 1 1 0.3999 [4,] 1 0 0 0 1 1 0.4011 [5,] 0 0 0 0 1 1 0.4125 [6,] 0 1 1 0 1 1 0.4169 [7,] 0 1 0 1 1 1 0.4175 [8,] 1 0 0 1 1 1 0.4176 [9,] 1 0 1 0 1 1 0.4180 [10,] 0 0 1 1 1 1 0.4187 [11,] 1 1 0 0 1 1 0.4192 [12,] 1 1 1 0 0 1 0.4415 [13,] 1 1 1 0 1 1 0.4462 [14,] 1 1 0 1 1 1 0.4476 [15,] 1 1 1 1 0 1 0.4485 [16,] 1 0 1 1 1 1 0.4489 [17,] 0 1 1 1 1 1 0.4501 [18,] 1 1 0 1 0 1 0.4610 [19,] 1 1 1 1 1 1 0.5028 [20,] 1 0 1 1 0 1 0.5556 Printed the first 20 best models > > > > > cleanEx(); ..nameEx <- "wle.gamma" > > ### * wle.gamma > > flush(stderr()); flush(stdout()) > > ### Name: wle.gamma > ### Title: Robust Estimation in the Gamma model > ### Aliases: wle.gamma print.wle.gamma > ### Keywords: models robust > > ### ** Examples > > library(wle) > > x <- rgamma(n=100, shape=2, scale=2) > > wle.gamma(x) Call: wle.gamma(x = x) Scale: [1] 1.499 Rate: [1] 0.667 Shape: [1] 2.479 Number of solutions 1 > > x <- c(rgamma(n=30, shape=2, scale=2), rgamma(n=100, shape=20, scale=20)) > > wle.gamma(x, boot=10, group=10, num.sol=2) # depending on the sample, one or two roots. Call: wle.gamma(x = x, boot = 10, group = 10, num.sol = 2) Scale: [1] 495.6 Rate: [1] 0.002018 Shape: [1] 0.5887 Number of solutions 1 > > > > > cleanEx(); ..nameEx <- "wle.lm" > > ### * wle.lm > > flush(stderr()); flush(stdout()) > > ### Name: wle.lm > ### Title: Fitting Linear Models using Weighted Likelihood > ### Aliases: wle.lm > ### Keywords: robust regression > > ### ** Examples > > library(wle) > # You can find this data set in: > # Hawkins, D.M., Bradu, D., and Kass, G.V. (1984). > # Location of several outliers in multiple regression data using > # elemental sets. Technometrics, 26, 197-208. 
> cleanEx(); ..nameEx <- "wle.lm"
>
> ### * wle.lm
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.lm
> ### Title: Fitting Linear Models using Weighted Likelihood
> ### Aliases: wle.lm
> ### Keywords: robust regression
>
> ### ** Examples
>
> library(wle)
> # You can find this data set in:
> # Hawkins, D.M., Bradu, D., and Kass, G.V. (1984).
> # Location of several outliers in multiple regression data using
> # elemental sets. Technometrics, 26, 197-208.
> #
> data(artificial)
>
> result <- wle.lm(y.artificial~x.artificial,boot=40,num.sol=3)
>
> summary(result)

Call:
wle.lm(formula = y.artificial ~ x.artificial, boot = 40, num.sol = 3)

Root 1

Weighted Residuals:
    Min      1Q  Median      3Q     Max
-1.2517 -0.4987  0.0000  0.5422  1.3085

Coefficients:
              Estimate Std. Error t value Pr(>|t|)
(Intercept)   -0.94351    0.12792  -7.376 3.46e-10 ***
x.artificial1  0.15653    0.07809   2.005 0.049112 *
x.artificial2  0.18912    0.07168   2.638 0.010382 *
x.artificial3  0.18152    0.05021   3.615 0.000581 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.6688 on 66.03095 degrees of freedom
Multiple R-Squared: 0.9665,     Adjusted R-squared: 0.965
F-statistic: 634.7 on 3 and 66.03095 degrees of freedom,    p-value: 0


Call:
wle.lm(formula = y.artificial ~ x.artificial, boot = 40, num.sol = 3)

Root 2

Weighted Residuals:
    Min      1Q  Median      3Q     Max
-0.9095 -0.3758  0.0000  0.3889  1.0291

Coefficients:
              Estimate Std. Error t value Pr(>|t|)
(Intercept)   -0.19670    0.10419  -1.888    0.064 .
x.artificial1  0.08969    0.06655   1.348    0.183
x.artificial2  0.03875    0.04076   0.951    0.346
x.artificial3 -0.05298    0.03549  -1.493    0.141
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.5561 on 58.6243 degrees of freedom
Multiple R-Squared: 0.04914,    Adjusted R-squared: 0.0004795
F-statistic: 1.01 on 3 and 58.6243 degrees of freedom,      p-value: 0.3949

>
> plot(result)
>
>
>
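The planted outliers in the artificial (Hawkins-Bradu-Kass) data should receive small weights under the robust root. A hedged sketch of how to list them, using the weights accessor documented in the next section; the layout of the returned weights across multiple roots is an assumption here and may require selecting one root:

    ## Observations strongly downweighted by the fit; in the
    ## Hawkins-Bradu-Kass data these should be among the first cases.
    w <- weights(result)
    which(w < 0.5)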
> cleanEx(); ..nameEx <- "wle.lm.summaries"
>
> ### * wle.lm.summaries
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.lm.summaries
> ### Title: Accessing Linear Model Fits for wle.lm
> ### Aliases: coef.wle.lm formula.wle.lm fitted.wle.lm model.frame.wle.lm
> ###   summary.wle.lm summary.wle.lm.root weights.wle.lm print.wle.lm
> ###   print.summary.wle.lm print.summary.wle.lm.root
> ### Keywords: robust regression
>
> ### ** Examples
>
> library(wle)
> # You can find this data set in:
> # Hawkins, D.M., Bradu, D., and Kass, G.V. (1984).
> # Location of several outliers in multiple regression data using
> # elemental sets. Technometrics, 26, 197-208.
> #
> data(artificial)
>
> result <- wle.lm(y.artificial~x.artificial,boot=40,group=6,num.sol=3)
>
> #summary only for the first root
> summary(result,root=1)

Call:
wle.lm(formula = y.artificial ~ x.artificial, boot = 40, group = 6,
    num.sol = 3)

Root 1

Weighted Residuals:
    Min      1Q  Median      3Q     Max
-1.2517 -0.4987  0.0000  0.5422  1.3085

Coefficients:
              Estimate Std. Error t value Pr(>|t|)
(Intercept)   -0.94351    0.12792  -7.376 3.46e-10 ***
x.artificial1  0.15653    0.07809   2.005 0.049112 *
x.artificial2  0.18912    0.07168   2.638 0.010382 *
x.artificial3  0.18152    0.05021   3.615 0.000581 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.6688 on 66.03095 degrees of freedom
Multiple R-Squared: 0.9665,     Adjusted R-squared: 0.965
F-statistic: 634.7 on 3 and 66.03095 degrees of freedom,    p-value: 0

> #summary for all the roots
> summary(result,root="ALL")

Call:
wle.lm(formula = y.artificial ~ x.artificial, boot = 40, group = 6,
    num.sol = 3)

Root 1

Weighted Residuals:
    Min      1Q  Median      3Q     Max
-1.2517 -0.4987  0.0000  0.5422  1.3085

Coefficients:
              Estimate Std. Error t value Pr(>|t|)
(Intercept)   -0.94351    0.12792  -7.376 3.46e-10 ***
x.artificial1  0.15653    0.07809   2.005 0.049112 *
x.artificial2  0.18912    0.07168   2.638 0.010382 *
x.artificial3  0.18152    0.05021   3.615 0.000581 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.6688 on 66.03095 degrees of freedom
Multiple R-Squared: 0.9665,     Adjusted R-squared: 0.965
F-statistic: 634.7 on 3 and 66.03095 degrees of freedom,    p-value: 0


Call:
wle.lm(formula = y.artificial ~ x.artificial, boot = 40, group = 6,
    num.sol = 3)

Root 2

Weighted Residuals:
    Min      1Q  Median      3Q     Max
-0.9095 -0.3758  0.0000  0.3889  1.0291

Coefficients:
              Estimate Std. Error t value Pr(>|t|)
(Intercept)   -0.19670    0.10419  -1.888    0.064 .
x.artificial1  0.08969    0.06655   1.348    0.183
x.artificial2  0.03875    0.04076   0.951    0.346
x.artificial3 -0.05298    0.03549  -1.493    0.141
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.5561 on 58.62431 degrees of freedom
Multiple R-Squared: 0.04914,    Adjusted R-squared: 0.0004795
F-statistic: 1.01 on 3 and 58.62431 degrees of freedom,     p-value: 0.3949

>
>
>
> cleanEx(); ..nameEx <- "wle.normal"
>
> ### * wle.normal
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.normal
> ### Title: Robust Estimation in the Normal Model
> ### Aliases: wle.normal
> ### Keywords: models robust
>
> ### ** Examples
>
> library(wle)
>
> data(cavendish)
>
> result <- wle.normal(cavendish)
>
> result

Call:
wle.normal(x = cavendish)

Location:
[1] 5.469

Scale:
[1] 0.1879

Number of solutions 1

>
> result <- wle.normal(cavendish,boot=20,num.sol=1)
>
> barplot(result$weights,col=2,xlab="Observations",
+         ylab="Weights",ylim=c(0,1),
+         names.arg=seq(1:length(result$weights)))
>
>
>
>
> cleanEx(); ..nameEx <- "wle.normal.mixture"
>
> ### * wle.normal.mixture
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.normal.mixture
> ### Title: Robust Estimation in the Normal Mixture Model
> ### Aliases: wle.normal.mixture wle.normal.mixture.start
> ###   print.wle.normal.mixture
> ### Keywords: robust models
>
> ### ** Examples
>
> library(wle)
> set.seed(1234)
> x <- c(rnorm(150,0,1),rnorm(50,15,2))
> wle.normal.mixture(x,m=2,group=50,group.start=2,boot=5,num.sol=3)

Call:
wle.normal.mixture(x = x, m = 2, boot = 5, group = 50, num.sol = 3,
    group.start = 2)

Location:
        [,1]    [,2]
[1,] 15.1904 -0.1167
[2,] 15.1924 -0.1167

Scale:
       [,1]   [,2]
[1,] 1.9546 0.9294
[2,] 1.9356 0.9298

Proportion:
       [,1]   [,2]
[1,] 0.2370 0.7630
[2,] 0.2215 0.7785

Number of solutions 2

> wle.normal(x,group=2,boot=10,num.sol=3)

Call:
wle.normal(x = x, boot = 10, group = 2, num.sol = 3)

Location:
[1] -0.1171  4.1823

Scale:
[1] 0.9375 7.0449

Number of solutions 2

>
>
>
>
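A quick visual sanity check on the two components recovered above; a sketch that marks the root-1 location estimates from the printed output on a histogram of the simulated sample:

    ## The two dashed lines should fall on the two modes of the data.
    hist(x, breaks = 30, main = "wle.normal.mixture sample")
    abline(v = c(-0.1167, 15.1904), lty = 2)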
> cleanEx(); ..nameEx <- "wle.normal.multi"
>
> ### * wle.normal.multi
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.normal.multi
> ### Title: Robust Estimation in the Normal Multivariate Model
> ### Aliases: wle.normal.multi
> ### Keywords: models multivariate robust
>
> ### ** Examples
>
> library(wle)
>
> data(iris)
>
> smooth <- wle.smooth(dimension=4,costant=4,
+                      weight=0.5,interval=c(0.3,0.7))
>
> x.data <- as.matrix(iris[iris[,5]=="virginica",1:4])
>
> result <- wle.normal.multi(x.data,boot=20,group=21,
+                            num.sol=3,smooth=smooth$root)
>
> result

Call:
wle.normal.multi(x = x.data, boot = 20, group = 21, num.sol = 3,
    smooth = smooth$root)

Location:
Sepal.Length  Sepal.Width Petal.Length  Petal.Width
       6.535        2.964        5.495        2.020

Variance-Covariance matrix:
[[1]]
             Sepal.Length Sepal.Width Petal.Length Petal.Width
Sepal.Length      0.31930     0.07328      0.22985     0.03729
Sepal.Width       0.07328     0.08363      0.05881     0.04542
Petal.Length      0.22985     0.05881      0.23008     0.03834
Petal.Width       0.03729     0.04542      0.03834     0.07090

Number of solutions 1

>
> result <- wle.normal.multi(x.data,boot=20,group=21,
+                            num.sol=1,smooth=smooth$root)
>
> barplot(result$weights,col=2,xlab="Observations",
+         ylab="Weights",ylim=c(0,1),
+         names.arg=seq(1:length(result$weights)))
>
>
>
> cleanEx(); ..nameEx <- "wle.onestep"
>
> ### * wle.onestep
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.onestep
> ### Title: A One-Step Weighted Likelihood Estimator for Linear model
> ### Aliases: wle.onestep
> ### Keywords: robust regression
>
> ### ** Examples
>
> #library(wle)
> #library(lqs)
>
> #data(artificial)
>
> #result.lts <- lqs(y.artificial~x.artificial,
> #                  method = "lts")
>
> #result.wle <- wle.onestep(y.artificial~x.artificial,
> #                          ini.param=result.lts$coefficients,
> #                          ini.scale=result.lts$scale[1])
>
> #result.wle
>
>
>
> cleanEx(); ..nameEx <- "wle.poisson"
>
> ### * wle.poisson
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.poisson
> ### Title: Robust Estimation in the Poisson Model
> ### Aliases: wle.poisson print.wle.poisson
> ### Keywords: models robust
>
> ### ** Examples
>
> library(wle)
>
> set.seed(1234)
>
> x <- rpois(40,5)
> wle.poisson(x)

Call:
wle.poisson(x = x)

lambda:
[1] 4.731

Number of solutions 1

>
> x <- c(rpois(40,5),rpois(10,20))
> wle.poisson(x)

Call:
wle.poisson(x = x)

lambda:
[1] 4.509

Number of solutions 1

>
>
>
>
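For comparison, the non-robust maximum likelihood estimate of lambda is simply the sample mean, which the ten observations from the second component pull far from 5; a sketch:

    ## Plain MLE on the contaminated sample: roughly (40*5 + 10*20)/50 = 8,
    ## against wle.poisson's estimate of about 4.5.
    mean(x)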
> cleanEx(); ..nameEx <- "wle.smooth"
>
> ### * wle.smooth
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.smooth
> ### Title: Bandwidth selection for the normal kernel and normal model.
> ### Aliases: wle.smooth print.wle.smooth
> ### Keywords: robust
>
> ### ** Examples
>
> library(wle)
>
> wle.smooth()

Call:
wle.smooth()

Bandwidth: 0.003111

>
>
>
> cleanEx(); ..nameEx <- "wle.stepwise"
>
> ### * wle.stepwise
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.stepwise
> ### Title: Weighted Stepwise, Backward and Forward selection methods
> ### Aliases: wle.stepwise
> ### Keywords: robust regression
>
> ### ** Examples
>
>
> library(wle)
>
> # You can find this dataset in:
> # Agostinelli, C., (2002). Robust model selection in regression
> # via weighted likelihood methodology, Statistics &
> # Probability Letters, 56, 289-300.
>
> data(selection)
>
> result <- wle.stepwise(ydata~xdata, boot=100, group=6, num.sol=3,
+                        min.weight=0.8, type="Stepwise", method="WLS")
>
> summary(result)

Call:
wle.stepwise(formula = ydata ~ xdata, boot = 100, group = 6,
    num.sol = 3, min.weight = 0.8, type = "Stepwise", method = "WLS")

Stepwise selection procedure

F.in: 4
F.out: 4

Last 1 iterations:
(Intercept) xdataX xdataX^2
          0      0        0 0

>
>
>
> cleanEx(); ..nameEx <- "wle.t.test"
>
> ### * wle.t.test
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.t.test
> ### Title: Weighted Likelihood Student's t-Test
> ### Aliases: wle.t.test print.wle.t.test
> ### Keywords: robust htest
>
> ### ** Examples
>
> library(wle)
>
> set.seed(1234)
>
> x <- rnorm(20,0,1)
> y <- rnorm(20,6,1)
>
> t.test(x,y) # P < 2.2e-16

        Welch Two Sample t-test

data:  x and y
t = -19.7269, df = 35.889, p-value < 2.2e-16
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.256951 -5.090237
sample estimates:
 mean of x  mean of y
-0.2506641  5.4229301

> wle.t.test(x,y,group=5) # P < 2.2e-16

Call:
wle.t.test(x = x, y = y, group = 5)

Weighted t test:

'x' Root 1  'y' Root 1

        Welch Two Sample wt-test for normal distributed data

data:  x and y
wt = -23.8105, df = 30.758, p-value = < 2.2e-16
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.223333 -5.241027
sample estimates:
[1] -0.3404375  5.3917426

Number of 'x' solutions 1
Number of 'y' solutions 1

>
> t.test(x,y=c(y,250)) # P = 0.1527 -- NOT significant anymore

        Welch Two Sample t-test

data:  x and c(y, 250)
t = -1.4867, df = 20.015, p-value = 0.1527
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -41.620311   6.980069
sample estimates:
 mean of x  mean of y
-0.2506641 17.0694572

> wle.t.test(x,y=c(y,250),group=5) # P < 2.2e-16 -- still significant

Call:
wle.t.test(x = x, y = c(y, 250), group = 5)

Weighted t test:

'x' Root 1  'y' Root 1

        Welch Two Sample wt-test for normal distributed data

data:  x and c(y, 250)
wt = -23.842, df = 30.917, p-value = < 2.2e-16
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.223823 -5.242825
sample estimates:
[1] -0.3404385  5.3928853

Number of 'x' solutions 1
Number of 'y' solutions 1

>
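As an aside, a classical rank-based test is also resistant to the single planted outlier and can serve as a cross-check here; a sketch using stats::wilcox.test on the same samples:

    ## Rank-based two-sample test: the value 250 enters only through its
    ## rank, so the group difference remains clearly significant.
    wilcox.test(x, c(y, 250))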
> set.seed(1234)
>
> # three roots for 'x' and three roots for 'y'
> # with nine t-test values
> res <- wle.t.test(x=c(rnorm(40,0,1),rnorm(40,10,1)),
+                   y=c(rnorm(40,0,1),rnorm(40,10,1)),
+                   group=4,num.sol=3,boot=100)
>
> print(res) # print ALL the t-test

Call:
wle.t.test(x = c(rnorm(40, 0, 1), rnorm(40, 10, 1)), y = c(rnorm(40,
    0, 1), rnorm(40, 10, 1)), boot = 100, group = 4, num.sol = 3)

Weighted t test:

'x' Root 1  'y' Root 1

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -0.3382, df = 123.042, p-value = 0.7358
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -0.5545859  0.3927116
sample estimates:
[1] 5.028074 5.342190

'x' Root 1  'y' Root 2

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = 7.3451, df = 65.967, p-value = 3.94e-10
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 1.279824 2.235330
sample estimates:
[1] -0.4877765  5.3421904

'x' Root 1  'y' Root 3

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -7.2909, df = 68.406, p-value = 4.111e-10
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -2.222049 -1.267179
sample estimates:
[1] 9.903944 5.342190

'x' Root 2  'y' Root 1

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -8.8572, df = 67.628, p-value = 6.392e-13
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -2.596932 -1.641865
sample estimates:
[1] -0.4877765  5.3421904

'x' Root 2  'y' Root 2

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -2.6243, df = 65.758, p-value = 0.01079
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.1057280 -0.1501658
sample estimates:
[1] -0.48777650  0.01481665

'x' Root 2  'y' Root 3

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -49.1517, df = 67.45, p-value = < 2.2e-16
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -12.23883 -11.28372
sample estimates:
[1] 9.90394422 0.01481665

'x' Root 3  'y' Root 1

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = 6.8421, df = 70.796, p-value = 2.311e-09
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 1.160076 2.114366
sample estimates:
[1] 9.903944 5.342190

'x' Root 3  'y' Root 2

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = 45.1665, df = 64.112, p-value = < 2.2e-16
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 10.32967 11.28569
sample estimates:
[1] 9.90394422 0.01481665

'x' Root 3  'y' Root 3

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -0.6266, df = 70.12, p-value = 0.5329
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -0.6271653  0.3272845
sample estimates:
[1]  9.903944 10.053885

Number of 'x' solutions 3
Number of 'y' solutions 3

> print(res,x.root=1,y.root=1) # print the test associated to the

Call:
wle.t.test(x = c(rnorm(40, 0, 1), rnorm(40, 10, 1)), y = c(rnorm(40,
    0, 1), rnorm(40, 10, 1)), boot = 100, group = 4, num.sol = 3)

Weighted t test:

'x' Root 1  'y' Root 1

        Welch Two Sample wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -0.3382, df = 123.042, p-value = 0.7358
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -0.5545859  0.3927116
sample estimates:
[1] 5.028074 5.342190

Number of 'x' solutions 3
Number of 'y' solutions 3

> # x.root=1,y.root=1
>
> root.1.1 <- res$test[[1]][[1]] # access to the object associated
>                                # to the x.root=1,y.root=1
>
> names(root.1.1)
 [1] "statistic"   "parameter"   "p.value"     "conf.int"    "estimate"
 [6] "null.value"  "alternative" "method"      "data.name"   "x.weights"
[11] "y.weights"   "x.root"      "y.root"
>
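Each stored test also carries the final weights for both samples (see the x.weights and y.weights components listed above); a sketch of how one might inspect which observations a given root pair downweights:

    ## Weights of the 'x' sample under root pair (1,1); values well below 1
    ## flag the observations that this root treats as discordant.
    plot(root.1.1$x.weights, ylab = "x weights")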
> set.seed(1234)
>
> # one root for the paired wt-test
> wle.t.test(x=c(rnorm(40,0,1),rnorm(40,10,1)),
+            y=c(rnorm(40,0,1),rnorm(40,10,1)),
+            group=4,num.sol=3,boot=100,paired=TRUE)

Call:
wle.t.test(x = c(rnorm(40, 0, 1), rnorm(40, 10, 1)), y = c(rnorm(40,
    0, 1), rnorm(40, 10, 1)), paired = TRUE, boot = 100, group = 4,
    num.sol = 3)

Weighted t test:

'x' Root 1

        Paired wt-test for normal distributed data

data:  c(rnorm(40, 0, 1), rnorm(40, 10, 1)) and c(rnorm(40, 0, 1), rnorm(40, 10, 1))
wt = -2.0945, df = 74.02, p-value = 0.03964
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -0.55679572 -0.01389546
sample estimates:
[1] -0.2853456

Number of 'x' solutions 1

>
>
>
>
> cleanEx(); ..nameEx <- "wle.var.test"
>
> ### * wle.var.test
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.var.test
> ### Title: Weighted F Test to Compare Two Variances
> ### Aliases: wle.var.test
> ### Keywords: robust htest
>
> ### ** Examples
>
>
> set.seed(2345)
>
> x <- rnorm(50,0,1)
> y <- rnorm(50,10,1)
>
> res.x <- wle.normal(x,group=5)
> res.y <- wle.normal(y,group=5)
>
> wle.var.test(res.x, res.y) # Do x and y have the same variance?

        WF test to compare two variances

data:  res.x and res.y
WF = 0.6387, num df = 42.936, denom df = 45.386, p-value = 0.1419
alternative hypothesis: true ratio of variances is not equal to 1
95 percent confidence interval:
 0.3521775 1.1641298
sample estimates:
ratio of variances
         0.6386964

>
> set.seed(2345)
>
> x <- c(rnorm(50,0,1),rnorm(20,10,1))
> y <- c(rnorm(50,10,1),rnorm(10,0,5))
>
> res.x <- wle.normal(x,group=5,num.sol=2)
> res.y <- wle.normal(y,group=5)
>
> res.x

Call:
wle.normal(x = x, group = 5, num.sol = 2)

Location:
[1]  2.71327 -0.04928

Scale:
[1] 4.5264 0.8119

Number of solutions 2

> wle.var.test(res.x, res.y, x.root=1)

        WF test to compare two variances

data:  res.x and res.y
WF = 18.5957, num df = 52.963, denom df = 46.606, p-value < 2.2e-16
alternative hypothesis: true ratio of variances is not equal to 1
95 percent confidence interval:
 10.53441 32.49160
sample estimates:
ratio of variances
          18.59568

> if (res.x$tot.sol>1) wle.var.test(res.x, res.y, x.root=2)

        WF test to compare two variances

data:  res.x and res.y
WF = 0.6006, num df = 43.755, denom df = 46.606, p-value = 0.09167
alternative hypothesis: true ratio of variances is not equal to 1
95 percent confidence interval:
 0.3335395 1.0874394
sample estimates:
ratio of variances
         0.6005765

>
>
>
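The classical F test from stats can be run on the same contaminated samples for contrast; a sketch (without weighting, the contamination inflates the apparent variance ratio):

    ## Non-robust counterpart of wle.var.test, applied to the raw samples.
    var.test(x, y)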
> cleanEx(); ..nameEx <- "wle.vonmises"
>
> ### * wle.vonmises
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.vonmises
> ### Title: von Mises Weighted Likelihood Estimates
> ### Aliases: wle.vonmises print.wle.vonmises
> ### Keywords: robust
>
> ### ** Examples
>
>
> if (require(circular)) {
+ x <- c(rvonmises(n=50, mu=0, kappa=10), rvonmises(n=5, mu=pi/2, kappa=20))
+ wle.vonmises(x, smooth=20, group=5)
+ } else {
+ cat("Please, install the package 'circular' in order to use this function.\n")
+ }
Loading required package: circular
Loading required package: boot
This is version 0.3-2 of circular package
Please report any bugs or comments to claudio@unive.it
The package redefine how function 'var' works
In particular, (try 'methods(var)') notice that 'var.default' is an alias
for the original 'var' function and that a method for data.frame is available.

Attaching package: 'circular'

        The following object(s) are masked from package:stats :

         var

Call:
wle.vonmises(x = x, group = 5, smooth = 20)

mu:
[1] -0.01395

kappa:
[1] 6.635

Number of solutions 1

>
>
>
>
> cleanEx(); ..nameEx <- "wle.wrappednormal"
>
> ### * wle.wrappednormal
>
> flush(stderr()); flush(stdout())
>
> ### Name: wle.wrappednormal
> ### Title: Wrapped Normal Weighted Likelihood Estimates
> ### Aliases: wle.wrappednormal print.wle.wrappednormal
> ### Keywords: robust
>
> ### ** Examples
>
>
> if (require(circular)) {
+ x <- c(rwrappednormal(n=50, mu=0, sd=1), rwrappednormal(n=5, mu=pi/2, sd=0.5))
+ wle.wrappednormal(x, smooth=1/20, group=5)
+ } else {
+ cat("Please, install the package 'circular' in order to use this function.\n")
+ }
Loading required package: circular
Loading required package: boot
This is version 0.3-2 of circular package
Please report any bugs or comments to claudio@unive.it
The package redefine how function 'var' works
In particular, (try 'methods(var)') notice that 'var.default' is an alias
for the original 'var' function and that a method for data.frame is available.

Attaching package: 'circular'

        The following object(s) are masked from package:stats :

         var

Call:
wle.wrappednormal(x = x, group = 5, smooth = 1/20)

mu: 0.2584
rho: 0.6765
sd: 0.884

Number of solutions 1

>
>
>
>
> ### *