R : Copyright 2005, The R Foundation for Statistical Computing
Version 2.1.1  (2005-06-20), ISBN 3-900051-07-0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for a HTML browser interface to help.
Type 'q()' to quit R.

> ### *
> ###
> attach(NULL, name = "CheckExEnv")
> assign(".CheckExEnv", as.environment(2), pos = length(search())) # base
> ## add some hooks to label plot pages for base and grid graphics
> setHook("plot.new", ".newplot.hook")
> setHook("persp", ".newplot.hook")
> setHook("grid.newpage", ".gridplot.hook")
>
> assign("cleanEx",
+        function(env = .GlobalEnv) {
+            rm(list = ls(envir = env, all.names = TRUE), envir = env)
+            RNGkind("default", "default")
+            set.seed(1)
+            options(warn = 1)
+            delayedAssign("T", stop("T used instead of TRUE"),
+                          assign.env = .CheckExEnv)
+            delayedAssign("F", stop("F used instead of FALSE"),
+                          assign.env = .CheckExEnv)
+            sch <- search()
+            newitems <- sch[! sch %in% .oldSearch]
+            for(item in rev(newitems))
+                eval(substitute(detach(item), list(item=item)))
+            missitems <- .oldSearch[! .oldSearch %in% sch]
+            if(length(missitems))
+                warning("items ", paste(missitems, collapse=", "),
+                        " have been removed from the search path")
+        },
+        env = .CheckExEnv)
> assign("..nameEx", "__{must remake R-ex/*.R}__", env = .CheckExEnv) # for now
> assign("ptime", proc.time(), env = .CheckExEnv)
> grDevices::postscript("gbm-Examples.ps")
> assign("par.postscript", graphics::par(no.readonly = TRUE), env = .CheckExEnv)
> options(contrasts = c(unordered = "contr.treatment", ordered = "contr.poly"))
> options(warn = 1)
> library('gbm')
Loading required package: survival
Loading required package: splines
Loading required package: lattice
Loading required package: mgcv
This is mgcv 1.3-1
Loaded gbm 1.5-1
>
> assign(".oldSearch", search(), env = .CheckExEnv)
> assign(".oldNS", loadedNamespaces(), env = .CheckExEnv)
> cleanEx(); ..nameEx <- "calibrate.plot"
>
> ### * calibrate.plot
>
> flush(stderr()); flush(stdout())
>
> ### Name: calibrate.plot
> ### Title: Calibration plot
> ### Aliases: calibrate.plot
> ### Keywords: hplot
>
> ### ** Examples
>
> library(rpart)
> data(kyphosis)
> y <- as.numeric(kyphosis$Kyphosis)-1
> x <- kyphosis$Age
> glm1 <- glm(y~poly(x,2),family=binomial)
> p <- predict(glm1,type="response")
> calibrate.plot(y, p, xlim=c(0,0.6), ylim=c(0,0.6))
>
>
>
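## Editor's sketch, not executed in this transcript: what calibrate.plot is
## checking, done by hand in base R. Bin the predicted probabilities and
## compare each bin's mean prediction with the observed event rate; the
## 10-bin choice and the object names below are illustrative assumptions.
brks <- quantile(p, probs = seq(0, 1, length = 11), na.rm = TRUE)
bin  <- cut(p, breaks = unique(brks), include.lowest = TRUE)
obs  <- tapply(y, bin, mean)    # observed event rate per bin
pred <- tapply(p, bin, mean)    # mean predicted probability per bin
plot(pred, obs, xlab = "Mean predicted", ylab = "Observed rate")
abline(0, 1, lty = 2)           # a perfectly calibrated model lies on this line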
> cleanEx(); ..nameEx <- "gbm"
>
> ### * gbm
>
> flush(stderr()); flush(stdout())
>
> ### Name: gbm
> ### Title: Generalized Boosted Regression Modeling
> ### Aliases: gbm gbm.more gbm.fit
> ### Keywords: models nonlinear survival nonparametric tree
>
> ### ** Examples
>
> # A least squares regression example
> # create some data
>
> N <- 1000
> X1 <- runif(N)
> X2 <- 2*runif(N)
> X3 <- ordered(sample(letters[1:4],N,replace=TRUE),levels=letters[4:1])
> X4 <- factor(sample(letters[1:6],N,replace=TRUE))
> X5 <- factor(sample(letters[1:3],N,replace=TRUE))
> X6 <- 3*runif(N)
> mu <- c(-1,0,1,2)[as.numeric(X3)]
>
> SNR <- 10 # signal-to-noise ratio
> Y <- X1**1.5 + 2 * (X2**.5) + mu
> sigma <- sqrt(var(Y)/SNR)
> Y <- Y + rnorm(N,0,sigma)
>
> # introduce some missing values
> X1[sample(1:N,size=500)] <- NA
> X4[sample(1:N,size=300)] <- NA
>
> data <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6)
>
> # fit initial model
> gbm1 <- gbm(Y~X1+X2+X3+X4+X5+X6,     # formula
+     data=data,                       # dataset
+     var.monotone=c(0,0,0,0,0,0),     # -1: monotone decrease,
+                                      # +1: monotone increase,
+                                      #  0: no monotone restrictions
+     distribution="gaussian",         # bernoulli, adaboost, gaussian,
+                                      # poisson, and coxph available
+     n.trees=3000,                    # number of trees
+     shrinkage=0.005,                 # shrinkage or learning rate,
+                                      # 0.001 to 0.1 usually work
+     interaction.depth=3,             # 1: additive model, 2: two-way interactions, etc.
+     bag.fraction = 0.5,              # subsampling fraction, 0.5 is probably best
+     train.fraction = 0.5,            # fraction of data for training,
+                                      # first train.fraction*N used for training
+     n.minobsinnode = 10,             # minimum total weight needed in each node
+     cv.folds = 5,                    # do 5-fold cross-validation
+     keep.data=TRUE,                  # keep a copy of the dataset with the object
+     verbose=TRUE)                    # print out progress
CV: 1
Iter  TrainDeviance  ValidDeviance  StepSize  Improve
   1         1.9445         1.9168    0.0050   0.0136
   2         1.9299         1.9037    0.0050   0.0136
   3         1.9162         1.8913    0.0050   0.0137
   4         1.9017         1.8780    0.0050   0.0138
   5         1.8873         1.8648    0.0050   0.0137
   6         1.8730         1.8519    0.0050   0.0143
   7         1.8592         1.8396    0.0050   0.0140
   8         1.8465         1.8282    0.0050   0.0121
   9         1.8335         1.8165    0.0050   0.0126
  10         1.8200         1.8043    0.0050   0.0137
 100         1.0187         1.0809    0.0050   0.0057
 200         0.6127         0.7167    0.0050   0.0024
 300         0.4256         0.5539    0.0050   0.0012
 400         0.3307         0.4686    0.0050   0.0005
 500         0.2764         0.4186    0.0050   0.0003
 600         0.2449         0.3873    0.0050   0.0000
 700         0.2240         0.3669    0.0050   0.0001
 800         0.2105         0.3552    0.0050   0.0000
 900         0.2009         0.3452    0.0050  -0.0000
1000         0.1932         0.3397    0.0050  -0.0000
1100         0.1872         0.3356    0.0050  -0.0000
1200         0.1815         0.3340    0.0050  -0.0001
1300         0.1768         0.3323    0.0050  -0.0000
1400         0.1727         0.3318    0.0050  -0.0001
1500         0.1688         0.3318    0.0050  -0.0001
1600         0.1650         0.3307    0.0050  -0.0001
1700         0.1616         0.3318    0.0050  -0.0000
1800         0.1585         0.3312    0.0050  -0.0000
1900         0.1557         0.3309    0.0050  -0.0001
2000         0.1527         0.3312    0.0050  -0.0000
2100         0.1502         0.3320    0.0050  -0.0001
2200         0.1476         0.3321    0.0050  -0.0001
2300         0.1450         0.3330    0.0050  -0.0000
2400         0.1426         0.3337    0.0050  -0.0000
2500         0.1403         0.3346    0.0050  -0.0000
2600         0.1382         0.3345    0.0050  -0.0000
2700         0.1363         0.3352    0.0050  -0.0000
2800         0.1344         0.3357    0.0050  -0.0001
2900         0.1324         0.3361    0.0050  -0.0000
3000         0.1305         0.3361    0.0050  -0.0000
CV: 2
Iter  TrainDeviance  ValidDeviance  StepSize  Improve
   1         1.9199         2.0328    0.0050   0.0138
   2         1.9056         2.0179    0.0050   0.0141
   3         1.8921         2.0032    0.0050   0.0139
   4         1.8783         1.9884    0.0050   0.0141
   5         1.8645         1.9733    0.0050   0.0139
   6         1.8518         1.9594    0.0050   0.0127
   7         1.8393         1.9466    0.0050   0.0128
   8         1.8268         1.9339    0.0050   0.0133
   9         1.8132         1.9190    0.0050   0.0119
  10         1.8008         1.9053    0.0050   0.0119
 100         1.0202         1.0863    0.0050   0.0054
 200         0.6247         0.6741    0.0050   0.0022
 300         0.4410         0.4843    0.0050   0.0012
 400         0.3452         0.3868    0.0050   0.0003
 500         0.2912         0.3354    0.0050   0.0003
 600         0.2601         0.3091    0.0050   0.0002
 700         0.2391         0.2933    0.0050   0.0001
 800         0.2251         0.2851    0.0050   0.0000
 900         0.2146         0.2797    0.0050  -0.0000
1000         0.2065         0.2783    0.0050   0.0000
1100         0.1998         0.2767    0.0050  -0.0000
1200         0.1941         0.2762    0.0050  -0.0001
1300         0.1890         0.2757    0.0050  -0.0001
1400         0.1845         0.2766    0.0050  -0.0000
1500         0.1806         0.2772    0.0050  -0.0001
1600         0.1769         0.2774    0.0050  -0.0001
1700         0.1735         0.2779    0.0050  -0.0000
1800         0.1702         0.2784    0.0050  -0.0001
1900         0.1671         0.2788    0.0050  -0.0000
2000         0.1641         0.2782    0.0050  -0.0001
2100         0.1612         0.2788    0.0050  -0.0001
2200         0.1585         0.2795    0.0050  -0.0000
2300         0.1561         0.2798    0.0050  -0.0000
2400         0.1533         0.2803    0.0050  -0.0001
2500         0.1510         0.2810    0.0050  -0.0001
2600         0.1488         0.2813    0.0050  -0.0000
2700         0.1467         0.2815    0.0050  -0.0001
2800         0.1447         0.2815    0.0050  -0.0001
2900         0.1426         0.2815    0.0050  -0.0001
3000         0.1405         0.2818    0.0050  -0.0000
CV: 3
Iter  TrainDeviance  ValidDeviance  StepSize  Improve
   1         1.9872         1.7427    0.0050   0.0140
   2         1.9735         1.7311    0.0050   0.0141
   3         1.9596         1.7189    0.0050   0.0138
   4         1.9463         1.7070    0.0050   0.0149
   5         1.9327         1.6948    0.0050   0.0132
   6         1.9188         1.6815    0.0050   0.0127
   7         1.9050         1.6694    0.0050   0.0127
   8         1.8920         1.6571    0.0050   0.0137
   9         1.8799         1.6460    0.0050   0.0127
  10         1.8675         1.6341    0.0050   0.0138
 100         1.0646         0.9232    0.0050   0.0055
 200         0.6511         0.5666    0.0050   0.0023
 300         0.4597         0.4019    0.0050   0.0009
 400         0.3595         0.3185    0.0050   0.0007
 500         0.3026         0.2757    0.0050   0.0002
 600         0.2690         0.2540    0.0050   0.0002
 700         0.2476         0.2435    0.0050   0.0000
 800         0.2332         0.2373    0.0050   0.0001
 900         0.2222         0.2353    0.0050   0.0000
1000         0.2143         0.2352    0.0050  -0.0001
1100         0.2077         0.2348    0.0050  -0.0001
1200         0.2022         0.2359    0.0050  -0.0000
1300         0.1972         0.2370    0.0050  -0.0000
1400         0.1931         0.2370    0.0050  -0.0001
1500         0.1889         0.2381    0.0050  -0.0000
1600         0.1851         0.2385    0.0050  -0.0000
1700         0.1816         0.2382    0.0050  -0.0001
1800         0.1783         0.2400    0.0050  -0.0000
1900         0.1752         0.2406    0.0050  -0.0001
2000         0.1721         0.2411    0.0050  -0.0001
2100         0.1695         0.2424    0.0050  -0.0001
2200         0.1667         0.2429    0.0050  -0.0001
2300         0.1641         0.2426    0.0050  -0.0000
2400         0.1618         0.2431    0.0050  -0.0001
2500         0.1592         0.2436    0.0050  -0.0001
2600         0.1569         0.2432    0.0050  -0.0000
2700         0.1548         0.2430    0.0050  -0.0001
2800         0.1526         0.2433    0.0050  -0.0000
2900         0.1502         0.2446    0.0050  -0.0000
3000         0.1482         0.2454    0.0050  -0.0001
CV: 4
Iter  TrainDeviance  ValidDeviance  StepSize  Improve
   1         1.9312         1.9694    0.0050   0.0151
   2         1.9175         1.9557    0.0050   0.0140
   3         1.9039         1.9424    0.0050   0.0138
   4         1.8898         1.9285    0.0050   0.0135
   5         1.8768         1.9159    0.0050   0.0129
   6         1.8630         1.9028    0.0050   0.0139
   7         1.8499         1.8905    0.0050   0.0119
   8         1.8360         1.8772    0.0050   0.0127
   9         1.8229         1.8639    0.0050   0.0134
  10         1.8103         1.8518    0.0050   0.0137
 100         1.0313         1.0654    0.0050   0.0053
 200         0.6366         0.6396    0.0050   0.0024
 300         0.4540         0.4478    0.0050   0.0012
 400         0.3589         0.3524    0.0050   0.0005
 500         0.3043         0.2982    0.0050   0.0003
 600         0.2712         0.2684    0.0050   0.0001
 700         0.2513         0.2533    0.0050  -0.0001
 800         0.2374         0.2453    0.0050  -0.0000
 900         0.2268         0.2395    0.0050  -0.0000
1000         0.2188         0.2381    0.0050  -0.0000
1100         0.2121         0.2376    0.0050   0.0000
1200         0.2063         0.2351    0.0050  -0.0000
1300         0.2014         0.2343    0.0050  -0.0001
1400         0.1969         0.2332    0.0050  -0.0000
1500         0.1929         0.2325    0.0050  -0.0000
1600         0.1895         0.2319    0.0050  -0.0001
1700         0.1860         0.2320    0.0050  -0.0001
1800         0.1829         0.2327    0.0050  -0.0000
1900         0.1799         0.2316    0.0050  -0.0001
2000         0.1769         0.2315    0.0050  -0.0001
2100         0.1741         0.2316    0.0050  -0.0001
2200         0.1712         0.2305    0.0050   0.0000
2300         0.1686         0.2300    0.0050  -0.0001
2400         0.1661         0.2301    0.0050  -0.0001
2500         0.1637         0.2299    0.0050  -0.0001
2600         0.1615         0.2300    0.0050  -0.0001
2700         0.1594         0.2294    0.0050  -0.0000
2800         0.1572         0.2291    0.0050  -0.0000
2900         0.1553         0.2286    0.0050  -0.0001
3000         0.1535         0.2288    0.0050  -0.0001
CV: 5
Iter  TrainDeviance  ValidDeviance  StepSize  Improve
   1         1.9079         2.0652    0.0050   0.0135
   2         1.8953         2.0510    0.0050   0.0130
   3         1.8828         2.0369    0.0050   0.0123
   4         1.8697         2.0229    0.0050   0.0119
   5         1.8570         2.0091    0.0050   0.0139
   6         1.8442         1.9954    0.0050   0.0126
   7         1.8308         1.9807    0.0050   0.0139
   8         1.8184         1.9668    0.0050   0.0110
   9         1.8060         1.9525    0.0050   0.0125
  10         1.7941         1.9392    0.0050   0.0109
 100         1.0268         1.1019    0.0050   0.0054
 200         0.6292         0.6693    0.0050   0.0022
 300         0.4431         0.4740    0.0050   0.0012
 400         0.3469         0.3791    0.0050   0.0006
 500         0.2923         0.3317    0.0050   0.0004
 600         0.2596         0.3057    0.0050   0.0002
 700         0.2390         0.2915    0.0050   0.0001
 800         0.2247         0.2862    0.0050   0.0000
 900         0.2144         0.2819    0.0050  -0.0001
1000         0.2064         0.2804    0.0050  -0.0000
1100         0.1998         0.2804    0.0050  -0.0001
1200         0.1940         0.2813    0.0050  -0.0001
1300         0.1892         0.2830    0.0050  -0.0000
1400         0.1850         0.2841    0.0050  -0.0001
1500         0.1811         0.2849    0.0050  -0.0001
1600         0.1776         0.2851    0.0050  -0.0001
1700         0.1740         0.2858    0.0050  -0.0001
1800         0.1710         0.2855    0.0050  -0.0001
1900         0.1680         0.2869    0.0050  -0.0001
2000         0.1651         0.2876    0.0050  -0.0001
2100         0.1624         0.2881    0.0050  -0.0001
2200         0.1598         0.2886    0.0050  -0.0001
2300         0.1574         0.2885    0.0050  -0.0000
2400         0.1550         0.2880    0.0050  -0.0001
2500         0.1528         0.2884    0.0050  -0.0001
2600         0.1505         0.2901    0.0050  -0.0001
2700         0.1482         0.2908    0.0050  -0.0001
2800         0.1461         0.2913    0.0050  -0.0000
2900         0.1441         0.2924    0.0050  -0.0001
3000         0.1422         0.2937    0.0050  -0.0001
Iter  TrainDeviance  ValidDeviance  StepSize  Improve
   1         1.9384         2.1350    0.0050   0.0145
   2         1.9249         2.1215    0.0050   0.0139
   3         1.9119         2.1085    0.0050   0.0140
   4         1.8983         2.0947    0.0050   0.0131
   5         1.8847         2.0810    0.0050   0.0131
   6         1.8713         2.0675    0.0050   0.0135
   7         1.8586         2.0545    0.0050   0.0130
   8         1.8462         2.0420    0.0050   0.0123
   9         1.8333         2.0288    0.0050   0.0126
  10         1.8204         2.0159    0.0050   0.0131
 100         1.0403         1.2156    0.0050   0.0059
 200         0.6378         0.7848    0.0050   0.0023
 300         0.4505         0.5727    0.0050   0.0010
 400         0.3528         0.4559    0.0050   0.0007
 500         0.2984         0.3869    0.0050   0.0002
 600         0.2662         0.3469    0.0050   0.0000
 700         0.2459         0.3229    0.0050   0.0001
 800         0.2323         0.3062    0.0050   0.0000
 900         0.2229         0.2968    0.0050  -0.0000
1000         0.2153         0.2901    0.0050  -0.0001
1100         0.2091         0.2857    0.0050  -0.0000
1200         0.2041         0.2826    0.0050  -0.0001
1300         0.1997         0.2812    0.0050  -0.0000
1400         0.1953         0.2799    0.0050  -0.0000
1500         0.1920         0.2798    0.0050  -0.0000
1600         0.1887         0.2806    0.0050  -0.0001
1700         0.1855         0.2808    0.0050  -0.0001
1800         0.1825         0.2814    0.0050  -0.0000
1900         0.1797         0.2822    0.0050  -0.0000
2000         0.1769         0.2828    0.0050  -0.0000
2100         0.1744         0.2834    0.0050  -0.0001
2200         0.1720         0.2842    0.0050  -0.0000
2300         0.1697         0.2849    0.0050  -0.0001
2400         0.1673         0.2857    0.0050  -0.0000
2500         0.1650         0.2869    0.0050  -0.0000
2600         0.1630         0.2885    0.0050  -0.0000
2700         0.1610         0.2891    0.0050  -0.0000
2800         0.1589         0.2896    0.0050  -0.0001
2900         0.1569         0.2906    0.0050  -0.0001
3000         0.1550         0.2916    0.0050  -0.0001
>
> # check performance using an out-of-bag estimator
> # OOB underestimates the optimal number of iterations, but performance is competitive
> best.iter <- gbm.perf(gbm1,method="OOB")
Warning in gbm.perf(gbm1, method = "OOB") : OOB generally underestimates
  the optimal number of iterations although predictive performance is
  reasonably competitive. Using cv.folds>0 when calling gbm usually results
  in improved predictive performance.
> print(best.iter)
[1] 806
>
> # check performance using a 50% heldout test set
> best.iter <- gbm.perf(gbm1,method="test")
> print(best.iter)
[1] 1409
>
> # check performance using 5-fold cross-validation
> best.iter <- gbm.perf(gbm1,method="cv")
> print(best.iter)
[1] 1226
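## Editor's sketch, not executed in this transcript: collect the three
## stopping-rule estimates above (OOB 806, test 1409, cv 1226) side by side.
## This assumes gbm.perf() accepts plot.it=FALSE to suppress the performance
## plot; verify with args(gbm.perf) in your version of gbm.
iters <- sapply(c("OOB", "test", "cv"),
                function(m) gbm.perf(gbm1, method = m, plot.it = FALSE))
print(iters)   # OOB is typically the smallest; cv is usually the safest choice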
>
> # plot the performance
> # plot variable influence
> summary(gbm1,n.trees=1)         # based on the first tree
  var  rel.inf
1  X3 70.98875
2  X2 29.01125
3  X1  0.00000
4  X4  0.00000
5  X5  0.00000
6  X6  0.00000
> summary(gbm1,n.trees=best.iter) # based on the estimated best number of trees
  var    rel.inf
1  X3 66.4787449
2  X2 27.5182342
3  X1  3.4947383
4  X4  1.2183214
5  X6  1.1357839
6  X5  0.1541774
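## Editor's sketch, not executed in this transcript: summary.gbm returns the
## relative-influence table as a data frame, so it can be post-processed
## directly. plotit=FALSE to skip the barplot is an assumption; verify with
## args(summary.gbm) in your version of gbm.
ri <- summary(gbm1, n.trees = best.iter, plotit = FALSE)
ri$rel.inf <- round(ri$rel.inf, 2)   # round for reporting
print(ri[ri$rel.inf > 0, ])          # drop variables with no influence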
>
> # compactly print the first and last trees for curiosity
> print(pretty.gbm.tree(gbm1,1))
  SplitVar SplitCodePred LeftNode RightNode MissingNode ErrorReduction Weight
0        2  1.500000e+00        1         5           9      264.82193    250
1        1  7.548380e-01        2         3           4       47.71784    121
2       -1 -9.061111e-03       -1        -1          -1        0.00000     49
3       -1 -2.664664e-03       -1        -1          -1        0.00000     72
4       -1 -5.254961e-03       -1        -1          -1        0.00000    121
5        1  7.105657e-01        6         7           8       60.50796    129
6       -1  8.754679e-04       -1        -1          -1        0.00000     52
7       -1  7.856572e-03       -1        -1          -1        0.00000     77
8       -1  5.042483e-03       -1        -1          -1        0.00000    129
9       -1  5.852038e-05       -1        -1          -1        0.00000    250
     Prediction
0  5.852038e-05
1 -5.254961e-03
2 -9.061111e-03
3 -2.664664e-03
4 -5.254961e-03
5  5.042483e-03
6  8.754679e-04
7  7.856572e-03
8  5.042483e-03
9  5.852038e-05
> print(pretty.gbm.tree(gbm1,gbm1$n.trees))
  SplitVar SplitCodePred LeftNode RightNode MissingNode ErrorReduction Weight
0        5  2.773044e+00        1         8           9      0.8041567    250
1        4  1.752000e+03        2         3           7      0.4174088    236
2       -1 -3.322516e-04       -1        -1          -1      0.0000000     76
3        0  1.097308e-01        4         5           6      0.6029726    160
4       -1 -7.743003e-04       -1        -1          -1      0.0000000     11
5       -1 -7.621223e-06       -1        -1          -1      0.0000000     76
6       -1  3.827509e-04       -1        -1          -1      0.0000000     73
7       -1 -2.714758e-05       -1        -1          -1      0.0000000    236
8       -1  1.206214e-03       -1        -1          -1      0.0000000     14
9       -1  4.192066e-05       -1        -1          -1      0.0000000    250
     Prediction
0  4.192066e-05
1 -2.714758e-05
2 -3.322516e-04
3  1.177769e-04
4 -7.743003e-04
5 -7.621223e-06
6  3.827509e-04
7 -2.714758e-05
8  1.206214e-03
9  4.192066e-05
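## Editor's note on reading the pretty.gbm.tree output above: each row is a
## node. SplitVar is the 0-based index of the splitting variable, with -1
## marking a terminal node. SplitCodePred is the split point for a continuous
## or ordered variable (for a categorical split it indexes into the model's
## c.splits list, which is why it can be large, e.g. 1752 above); for a
## terminal node it is the node's prediction. LeftNode/RightNode/MissingNode
## give the rows of the child nodes, with observations missing the split
## variable sent to MissingNode. ErrorReduction is the improvement from the
## split, Weight is the number of training observations reaching the node,
## and Prediction is the fitted value for observations landing in that node.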
>
> # make some new data
> N <- 1000
> X1 <- runif(N)
> X2 <- 2*runif(N)
> X3 <- ordered(sample(letters[1:4],N,replace=TRUE))
> X4 <- factor(sample(letters[1:6],N,replace=TRUE))
> X5 <- factor(sample(letters[1:3],N,replace=TRUE))
> X6 <- 3*runif(N)
> mu <- c(-1,0,1,2)[as.numeric(X3)]
>
> Y <- X1**1.5 + 2 * (X2**.5) + mu + rnorm(N,0,sigma)
>
> data2 <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6)
>
> # predict on the new data using "best" number of trees
> # f.predict generally will be on the canonical scale (logit,log,etc.)
> f.predict <- predict.gbm(gbm1,data2,best.iter)
>
> # least squares error
> print(sum((data2$Y-f.predict)^2))
[1] 236.3234
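## Editor's sketch, not executed in this transcript: put the squared-error
## total above in context. With N=1000 new observations and noise s.d. sigma,
## roughly N*sigma^2 is the irreducible error floor, so a total near it means
## the fit recovers most of the signal; a mean-only predictor gives an upper
## reference point.
print(sum((data2$Y - mean(data$Y))^2))   # intercept-only baseline
print(N * sigma^2)                       # approximate irreducible error floor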
>
> # create marginal plots
> # plot variable X1,X2,X3 after "best" iterations
> par(mfrow=c(1,3))
> plot.gbm(gbm1,1,best.iter)
> plot.gbm(gbm1,2,best.iter)
> plot.gbm(gbm1,3,best.iter)
> par(mfrow=c(1,1))
> # contour plot of variables 1 and 2 after "best" iterations
> plot.gbm(gbm1,1:2,best.iter)
> # lattice plot of variables 2 and 3
> plot.gbm(gbm1,2:3,best.iter)
> # lattice plot of variables 3 and 4
> plot.gbm(gbm1,3:4,best.iter)
>
> # 3-way plots
> plot.gbm(gbm1,c(1,2,6),best.iter,cont=20)
> plot.gbm(gbm1,1:3,best.iter)
> plot.gbm(gbm1,2:4,best.iter)
> plot.gbm(gbm1,3:5,best.iter)
>
> # do another 100 iterations
> gbm2 <- gbm.more(gbm1,100,
+                  verbose=FALSE) # stop printing detailed progress
>
>
>
> graphics::par(get("par.postscript", env = .CheckExEnv))
> cleanEx(); ..nameEx <- "quantile.rug"
>
> ### * quantile.rug
>
> flush(stderr()); flush(stdout())
>
> ### Name: quantile.rug
> ### Title: Quantile rug plot
> ### Aliases: quantile.rug
> ### Keywords: aplot
>
> ### ** Examples
>
> x <- rnorm(100)
> y <- rnorm(100)
> plot(x,y)
> quantile.rug(x)
>
>
>
> cleanEx(); ..nameEx <- "shrink.gbm"
>
> ### * shrink.gbm
>
> flush(stderr()); flush(stdout())
>
> ### Name: shrink.gbm
> ### Title: L1 shrinkage of the predictor variables in a GBM
> ### Aliases: shrink.gbm
> ### Keywords: methods
>
> ### ** Examples
>
>
>
> cleanEx(); ..nameEx <- "shrink.gbm.pred"
>
> ### * shrink.gbm.pred
>
> flush(stderr()); flush(stdout())
>
> ### Name: shrink.gbm.pred
> ### Title: Predictions from a shrunken GBM
> ### Aliases: shrink.gbm.pred
> ### Keywords: methods
>
> ### ** Examples
>
>
>
> ### *