R : Copyright 2005, The R Foundation for Statistical Computing
Version 2.1.1 (2005-06-20), ISBN 3-900051-07-0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for a HTML browser interface to help.
Type 'q()' to quit R.

> ### *
> ###
> attach(NULL, name = "CheckExEnv")
> assign(".CheckExEnv", as.environment(2), pos = length(search())) # base
> ## add some hooks to label plot pages for base and grid graphics
> setHook("plot.new", ".newplot.hook")
> setHook("persp", ".newplot.hook")
> setHook("grid.newpage", ".gridplot.hook")
> 
> assign("cleanEx",
+        function(env = .GlobalEnv) {
+            rm(list = ls(envir = env, all.names = TRUE), envir = env)
+            RNGkind("default", "default")
+            set.seed(1)
+            options(warn = 1)
+            delayedAssign("T", stop("T used instead of TRUE"),
+                          assign.env = .CheckExEnv)
+            delayedAssign("F", stop("F used instead of FALSE"),
+                          assign.env = .CheckExEnv)
+            sch <- search()
+            newitems <- sch[! sch %in% .oldSearch]
+            for(item in rev(newitems))
+                eval(substitute(detach(item), list(item=item)))
+            missitems <- .oldSearch[! .oldSearch %in% sch]
+            if(length(missitems))
+                warning("items ", paste(missitems, collapse=", "),
+                        " have been removed from the search path")
+        },
+        env = .CheckExEnv)
> assign("..nameEx", "__{must remake R-ex/*.R}__", env = .CheckExEnv) # for now
> assign("ptime", proc.time(), env = .CheckExEnv)
> grDevices::postscript("LogicReg-Examples.ps")
> assign("par.postscript", graphics::par(no.readonly = TRUE), env = .CheckExEnv)
> options(contrasts = c(unordered = "contr.treatment", ordered = "contr.poly"))
> options(warn = 1)
> library('LogicReg')
> 
> assign(".oldSearch", search(), env = .CheckExEnv)
> assign(".oldNS", loadedNamespaces(), env = .CheckExEnv)
> cleanEx(); ..nameEx <- "cumhaz"
> 
> ### * cumhaz
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cumhaz
> ### Title: Cumulative hazard transformation
> ### Aliases: cumhaz
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.testdat)
> #
> # this is not survival data, but it shows the functionality
> yy <- cumhaz(exp(logreg.testdat[,1]), logreg.testdat[, 2])
Loading required package: splines
> # then we would use
> # logreg(resp=yy, cens=logreg.testdat[,2], type=5, ...
> # instead of
> # logreg(resp=logreg.testdat[,1], cens=logreg.testdat[,2], type=4, ...
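> # A minimal sketch of the survival workflow these comments point at,
> # assuming genuine survival data; the names tm, dlt and fit below are
> # illustrative placeholders, not objects shipped with the package:
> # tm  <- exp(logreg.testdat[,1])    # stand-in survival times
> # dlt <- logreg.testdat[,2]         # stand-in censoring indicator
> # yy  <- cumhaz(tm, dlt)            # cumulative hazard transformation
> # fit <- logreg(resp = yy, cens = dlt, bin = logreg.testdat[, 2:21],
> #               type = 5, select = 1, ntrees = 1)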
> 
> 
> 
> cleanEx(); ..nameEx <- "eval.logreg"
> 
> ### * eval.logreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: eval.logreg
> ### Title: Evaluate a Logic Regression tree
> ### Aliases: eval.logreg
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1)
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                           type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> tree1 <- eval.logreg(logreg.savefit1$model$trees[[1]], logreg.savefit1$binary)
> tree2 <- eval.logreg(logreg.savefit1$model$trees[[2]], logreg.savefit1$binary)
> alltrees <- eval.logreg(logreg.savefit1$model, logreg.savefit1$binary)
> 
> 
> 
> cleanEx(); ..nameEx <- "frame.logreg"
> 
> ### * frame.logreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: frame.logreg
> ### Title: Constructs a data frame for one or more Logic Regression models
> ### Aliases: frame.logreg
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1,logreg.savefit2,logreg.savefit6)
> #
> # fit a single model
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                           type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> frame1 <- frame.logreg(logreg.savefit1)
> #
> # a complete sequence
> # myanneal2 <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 0)
> # logreg.savefit2 <- logreg(select = 2, ntrees = c(1,2), nleaves = c(1,7),
> #                           oldfit = logreg.savefit1, anneal.control = myanneal2)
> frame2 <- frame.logreg(logreg.savefit2)
> #
> # a greedy sequence
> # logreg.savefit6 <- logreg(select = 6, ntrees = 2, nleaves = c(1,12), oldfit = logreg.savefit1)
> frame6 <- frame.logreg(logreg.savefit6, msz = 3:5) # restrict the size
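> # The frames built above carry the evaluated trees as columns, so they can
> # be fed into other modeling functions.  A sketch, assuming the response
> # sits in the first column of frame1 (inspect names(frame1) first):
> # refit <- lm(frame1[, 1] ~ ., data = frame1[, -1, drop = FALSE])
> # summary(refit)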
> 
> 
> 
> 
> cleanEx(); ..nameEx <- "logreg"
> 
> ### * logreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: logreg
> ### Title: Logic Regression
> ### Aliases: logreg
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1,logreg.savefit2,logreg.savefit3,logreg.savefit4,
+      logreg.savefit5,logreg.savefit6,logreg.savefit7,logreg.testdat)
> 
> myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 2500, update = 100)
> # in practice we would use 25000 iterations or more - the use of 2500 is only
> # to have the examples run fast
> ## Not run: myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> fit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21], type = 2,
+                select = 1, ntrees = 2, anneal.control = myanneal)
log-temp current score best score acc / rej /sing current parameters
  -1.000   1.4941     1.4941      0(  0)   0  0    2.886  0.000  0.000
  -1.120   1.2097     1.0333     59(  1)  39  1    2.576 -0.654  1.778
  -1.240   1.1512     1.0333     43(  2)  53  2    1.876  0.003  1.912
  -1.360   1.1463     1.0333     44(  0)  56  0    2.185 -0.341  1.905
  -1.480   1.0480     1.0333     46(  4)  50  0    4.159 -0.176 -2.130
  -1.600   1.1504     1.0333     47(  7)  46  0    3.772  0.113 -1.917
  -1.720   1.1300     1.0333     39(  7)  53  1    3.038  0.809 -1.898
  -1.840   1.0457     1.0333     35(  3)  61  1    3.918 -2.141  0.169
  -1.960   1.0414     1.0333     40(  2)  58  0    3.938 -2.152  0.297
  -2.080   1.0482     1.0333     41(  3)  56  0    3.980 -2.134  0.117
  -2.200   1.0486     1.0333     43(  0)  57  0    3.963 -2.133  0.065
  -2.320   1.0376     1.0333     27( 12)  60  1    4.116 -2.118 -0.317
  -2.440   1.0033     1.0033     13(  4)  83  0    4.090 -2.127 -1.090
  -2.560   0.9885     0.9885      5(  8)  87  0    4.115 -2.116 -1.111
  -2.680   0.9885     0.9878      3(  7)  90  0    4.115 -2.116 -1.111
  -2.800   0.9844     0.9844      5(  8)  87  0    4.116 -2.103 -1.122
  -2.920   0.9844     0.9844      0(  3)  97  0    4.116 -2.103 -1.122
  -3.040   0.9834     0.9834      1(  2)  96  1    4.118 -2.107 -1.138
  -3.160   0.9841     0.9834      1( 10)  89  0    4.112 -2.079 -1.080
  -3.280   0.9789     0.9789      2(  9)  89  0    4.116 -2.109 -1.222
  -3.400   0.9783     0.9783      1(  5)  94  0    4.119 -2.118 -1.248
  -3.520   0.9771     0.9771      1(  6)  93  0    4.119 -2.113 -1.248
  -3.640   0.9762     0.9762      1(  7)  92  0    4.105 -2.114 -1.234
  -3.760   0.9733     0.9733      1(  3)  96  0    4.113 -2.119 -1.239
  -3.880   0.9726     0.9726      1(  1)  98  0    4.116 -2.120 -1.224
  -4.000   0.9726     0.9726      0(  6)  92  2    4.116 -2.120 -1.224
> # the best score should be in the 0.97-0.98 range
> plot(fit1)
> # you'll probably see X1-X4 as well as a few noise predictors
> # use logreg.savefit1 for the results with 25000 iterations
> plot(logreg.savefit1)
> print(logreg.savefit1)
score 0.966
-1.3 * (((X14 or (not X5)) and ((not X1) and (not X2))) and (((not X3) or X1) or ((not X20) and (not X2))))
+2.15 * (((not X4) or ((not X13) and (not X11))) and (not X3))
> z <- predict(logreg.savefit1)
> plot(z, logreg.testdat[,1]-z, xlab="fitted values", ylab="residuals")
> # there are some streaks, thanks to the very discrete predictions
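> # A sanity check one could run by hand: if (as we assume here, this being
> # an assumption about the internals rather than documented output) the
> # type = 2 score is the root mean squared residual, it should be
> # reproducible from the predictions:
> # z <- predict(logreg.savefit1)
> # sqrt(mean((logreg.testdat[,1] - z)^2))  # compare with the printed 0.966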
> #
> # a bit less output
> myanneal2 <- logreg.anneal.control(start = -1, end = -4, iter = 1000, update = 0)
> # in practice we would use 25000 iterations or more - the use of 1000 is only
> # to have the examples run fast
> ## Not run: myanneal2 <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 0)
> #
> # fit multiple models
> fit2 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21], type = 2,
+                select = 2, ntrees = c(1,2), nleaves = c(1,7), anneal.control = myanneal2)
The number of trees in these models is 1
The model size is 1
The best model with 1 trees of size 1 has a score of 1.1500
The model size is 2
The best model with 1 trees of size 2 has a score of 1.0481
The model size is 3
The best model with 1 trees of size 3 has a score of 1.0457
The model size is 4
The best model with 1 trees of size 4 has a score of 1.1500
The model size is 5
The best model with 1 trees of size 5 has a score of 1.0481
The model size is 6
The best model with 1 trees of size 6 has a score of 1.1500
The model size is 7
The best model with 1 trees of size 7 has a score of 1.0481
The number of trees in these models is 2
The size for this model is smaller than the number of trees you requested. ( 1 versus 2)
To save CPU time, we will skip this run.
On to the next model...
The model size is 2
The best model with 2 trees of size 2 has a score of 1.1168
The model size is 3
The best model with 2 trees of size 3 has a score of 1.0333
The model size is 4
The best model with 2 trees of size 4 has a score of 1.0271
The model size is 5
The best model with 2 trees of size 5 has a score of 1.0484
The model size is 6
The best model with 2 trees of size 6 has a score of 0.9814
The model size is 7
The best model with 2 trees of size 7 has a score of 0.9875
> # equivalent
> fit2 <- logreg(select = 2, ntrees = c(1,2), nleaves = c(1,7), oldfit = fit1,
+                anneal.control = myanneal2)
The number of trees in these models is 1
The model size is 1
The best model with 1 trees of size 1 has a score of 1.1500
The model size is 2
The best model with 1 trees of size 2 has a score of 1.0481
The model size is 3
The best model with 1 trees of size 3 has a score of 1.0481
The model size is 4
The best model with 1 trees of size 4 has a score of 1.0481
The model size is 5
The best model with 1 trees of size 5 has a score of 1.0457
The model size is 6
The best model with 1 trees of size 6 has a score of 1.0481
The model size is 7
The best model with 1 trees of size 7 has a score of 1.0481
The number of trees in these models is 2
The size for this model is smaller than the number of trees you requested. ( 1 versus 2)
To save CPU time, we will skip this run.
On to the next model...
The model size is 2
The best model with 2 trees of size 2 has a score of 1.1168
The model size is 3
The best model with 2 trees of size 3 has a score of 1.0333
The model size is 4
The best model with 2 trees of size 4 has a score of 1.0484
The model size is 5
The best model with 2 trees of size 5 has a score of 1.0822
The model size is 6
The best model with 2 trees of size 6 has a score of 0.9816
The model size is 7
The best model with 2 trees of size 7 has a score of 1.0142
> plot(fit2)
> print(fit2)
1 trees with 1 leaves: score is 1.15
+1.91 * (not X3)
1 trees with 2 leaves: score is 1.048
+2.13 * ((not X3) and (not X4))
1 trees with 3 leaves: score is 1.048
-2.13 * (X3 or X4)
1 trees with 4 leaves: score is 1.048
+2.13 * ((not X3) and (not X4))
1 trees with 5 leaves: score is 1.046
-2.14 * (X3 or (X4 and X13))
1 trees with 6 leaves: score is 1.048
+2.13 * ((not X3) and (not X4))
1 trees with 7 leaves: score is 1.048
+2.13 * ((not X3) and (not X4))
2 trees with 2 leaves: score is 1.117
-1.89 * X3
+0.904 * (not X4)
2 trees with 3 leaves: score is 1.033
-2.13 * (X3 or X4)
+0.401 * X1
2 trees with 4 leaves: score is 1.048
-2.03 * X4
+2.16 * ((not X3) or X4)
2 trees with 5 leaves: score is 1.082
+1.87 * (not X3)
-1.07 * (((not X1) or X4) and (not X2))
2 trees with 6 leaves: score is 0.982
+2.12 * ((not X4) and (not X3))
-1.21 * (((not X1) and (not X2)) and ((not X7) or (not X20)))
2 trees with 7 leaves: score is 1.014
-0.693 * ((X10 and X4) or ((X19 or (not X2)) and (not X1)))
-2.12 * (X3 or X4)
> # use logreg.savefit2 for the results with 25000 iterations
> plot(logreg.savefit2)
> print(logreg.savefit2)
1 trees with 1 leaves: score is 1.15
-1.91 * X3
1 trees with 2 leaves: score is 1.048
+2.13 * ((not X3) and (not X4))
1 trees with 3 leaves: score is 1.046
+2.14 * ((not X3) and ((not X13) or (not X4)))
1 trees with 4 leaves: score is 1.042
+2.14 * ((((not X13) and (not X11)) or (not X4)) and (not X3))
1 trees with 5 leaves: score is 1.042
+2.14 * ((not X3) and ((not X4) or ((not X13) and (not X11))))
1 trees with 6 leaves: score is 1.042
-2.14 * (X3 or (X4 and (X13 or (not X19))))
1 trees with 7 leaves: score is 1.04
+2.15 * ((((not X4) or (not X13)) and (not X3)) or (((not X6) and X14) and ((not X1) and (not X12))))
2 trees with 2 leaves: score is 1.117
+1.89 * (not X3)
-0.904 * X4
2 trees with 3 leaves: score is 1.033
+0.401 * X1
+2.13 * ((not X3) and (not X4))
2 trees with 4 leaves: score is 0.988
-1.11 * ((not X1) and (not X2))
-2.12 * (X3 or X4)
2 trees with 5 leaves: score is 0.982
+1.22 * ((X2 or X1) or X20)
+2.12 * ((not X4) and (not X3))
2 trees with 6 leaves: score is 0.979
+2.13 * ((not X3) and (not X4))
+1.23 * ((X2 or X1) or (X20 and X3))
2 trees with 7 leaves: score is 0.978
+2.13 * ((not X3) and (not X4))
-1.29 * (((not X7) or ((not X5) or (not X15))) and ((not X1) and (not X2)))
> # After an initial steep decline, the scores only get slightly better
> # for models with more than four leaves and two trees.
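> # To see that decline at a glance, one could plot score against model size
> # for the two-tree models; the scores below are copied by hand from the
> # print(logreg.savefit2) output above, not extracted programmatically:
> # sz <- 2:7
> # sc <- c(1.117, 1.033, 0.988, 0.982, 0.979, 0.978)
> # plot(sz, sc, type = "b", xlab = "number of leaves (2 trees)", ylab = "score")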
> #
> # cross validation
> fit3 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21], type = 2,
+                select = 3, ntrees = c(1,2), nleaves = c(1,7), anneal.control = myanneal2)
The number of trees in these models is 1
The model size is 1
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 1 leaves] CV score: 1.1575 1.1575 1.1038 1.1038
Step 2 of 10 [ 1 trees; 1 leaves] CV score: 1.1619 1.1597 1.0589 1.0813
Step 3 of 10 [ 1 trees; 1 leaves] CV score: 1.1566 1.1587 1.1126 1.0918
Step 4 of 10 [ 1 trees; 1 leaves] CV score: 1.1218 1.1494 1.4067 1.1705
Step 5 of 10 [ 1 trees; 1 leaves] CV score: 1.1096 1.1415 1.4940 1.2352
Step 6 of 10 [ 1 trees; 1 leaves] CV score: 1.1607 1.1447 1.0714 1.2079
Step 7 of 10 [ 1 trees; 1 leaves] CV score: 1.1705 1.1484 0.9667 1.1734
Step 8 of 10 [ 1 trees; 1 leaves] CV score: 1.1653 1.1505 1.0252 1.1549
Step 9 of 10 [ 1 trees; 1 leaves] CV score: 1.1450 1.1499 1.2202 1.1622
Step 10 of 10 [ 1 trees; 1 leaves] CV score: 1.1517 1.1501 1.1583 1.1618
The model size is 2
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 2 leaves] CV score: 1.0552 1.0552 1.0024 1.0024
Step 2 of 10 [ 1 trees; 2 leaves] CV score: 1.0541 1.0546 1.0142 1.0083
Step 3 of 10 [ 1 trees; 2 leaves] CV score: 1.1566 1.0886 1.1126 1.0431
Step 4 of 10 [ 1 trees; 2 leaves] CV score: 1.0366 1.0756 1.1738 1.0758
Step 5 of 10 [ 1 trees; 2 leaves] CV score: 1.0190 1.0643 1.3066 1.1219
Step 6 of 10 [ 1 trees; 2 leaves] CV score: 1.0449 1.0611 1.0987 1.1181
Step 7 of 10 [ 1 trees; 2 leaves] CV score: 1.0577 1.0606 0.9787 1.0982
Step 8 of 10 [ 1 trees; 2 leaves] CV score: 1.0561 1.0600 0.9940 1.0851
Step 9 of 10 [ 1 trees; 2 leaves] CV score: 1.0364 1.0574 1.1725 1.0948
Step 10 of 10 [ 1 trees; 2 leaves] CV score: 1.0521 1.0569 1.0320 1.0886
The model size is 3
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 3 leaves] CV score: 1.0526 1.0526 1.0018 1.0018
Step 2 of 10 [ 1 trees; 3 leaves] CV score: 1.0516 1.0521 1.0126 1.0072
Step 3 of 10 [ 1 trees; 3 leaves] CV score: 1.0662 1.0568 0.8578 0.9574
Step 4 of 10 [ 1 trees; 3 leaves] CV score: 1.0366 1.0517 1.1738 1.0115
Step 5 of 10 [ 1 trees; 3 leaves] CV score: 1.1096 1.0633 1.4940 1.1080
Step 6 of 10 [ 1 trees; 3 leaves] CV score: 1.0449 1.0602 1.0987 1.1065
Step 7 of 10 [ 1 trees; 3 leaves] CV score: 1.0577 1.0599 0.9787 1.0882
Step 8 of 10 [ 1 trees; 3 leaves] CV score: 1.0561 1.0594 0.9940 1.0764
Step 9 of 10 [ 1 trees; 3 leaves] CV score: 1.0339 1.0566 1.1703 1.0869
Step 10 of 10 [ 1 trees; 3 leaves] CV score: 1.0495 1.0559 1.0318 1.0814
The model size is 4
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 4 leaves] CV score: 1.0486 1.0486 1.0027 1.0027
Step 2 of 10 [ 1 trees; 4 leaves] CV score: 1.0475 1.0480 1.0142 1.0084
Step 3 of 10 [ 1 trees; 4 leaves] CV score: 1.0662 1.0541 0.8578 0.9582
Step 4 of 10 [ 1 trees; 4 leaves] CV score: 1.0366 1.0497 1.1738 1.0121
Step 5 of 10 [ 1 trees; 4 leaves] CV score: 1.0140 1.0426 1.3054 1.0708
Step 6 of 10 [ 1 trees; 4 leaves] CV score: 1.1607 1.0623 1.0714 1.0709
Step 7 of 10 [ 1 trees; 4 leaves] CV score: 1.0577 1.0616 0.9787 1.0577
Step 8 of 10 [ 1 trees; 4 leaves] CV score: 1.0561 1.0609 0.9940 1.0497
Step 9 of 10 [ 1 trees; 4 leaves] CV score: 1.0364 1.0582 1.1725 1.0634
Step 10 of 10 [ 1 trees; 4 leaves] CV score: 1.0455 1.0569 1.0319 1.0602
The model size is 5
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 5 leaves] CV score: 1.0502 1.0502 1.0023 1.0023
Step 2 of 10 [ 1 trees; 5 leaves] CV score: 1.0516 1.0509 1.0126 1.0075
Step 3 of 10 [ 1 trees; 5 leaves] CV score: 1.0689 1.0569 0.8567 0.9572
Step 4 of 10 [ 1 trees; 5 leaves] CV score: 1.0366 1.0518 1.1738 1.0113
Step 5 of 10 [ 1 trees; 5 leaves] CV score: 1.0190 1.0453 1.3066 1.0704
Step 6 of 10 [ 1 trees; 5 leaves] CV score: 1.0382 1.0441 1.0992 1.0752
Step 7 of 10 [ 1 trees; 5 leaves] CV score: 1.0525 1.0453 1.0038 1.0650
Step 8 of 10 [ 1 trees; 5 leaves] CV score: 1.0494 1.0458 1.0098 1.0581
Step 9 of 10 [ 1 trees; 5 leaves] CV score: 1.0339 1.0445 1.1703 1.0705
Step 10 of 10 [ 1 trees; 5 leaves] CV score: 1.0521 1.0452 1.0320 1.0667
The model size is 6
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 6 leaves] CV score: 1.0523 1.0523 1.0323 1.0323
Step 2 of 10 [ 1 trees; 6 leaves] CV score: 1.0541 1.0532 1.0142 1.0233
Step 3 of 10 [ 1 trees; 6 leaves] CV score: 1.0689 1.0584 0.8567 0.9677
Step 4 of 10 [ 1 trees; 6 leaves] CV score: 1.0299 1.0513 1.2437 1.0367
Step 5 of 10 [ 1 trees; 6 leaves] CV score: 1.1096 1.0630 1.4940 1.1282
Step 6 of 10 [ 1 trees; 6 leaves] CV score: 1.0449 1.0599 1.0987 1.1233
Step 7 of 10 [ 1 trees; 6 leaves] CV score: 1.0525 1.0589 1.0038 1.1062
Step 8 of 10 [ 1 trees; 6 leaves] CV score: 1.0561 1.0585 0.9940 1.0922
Step 9 of 10 [ 1 trees; 6 leaves] CV score: 1.1450 1.0681 1.2202 1.1064
Step 10 of 10 [ 1 trees; 6 leaves] CV score: 1.0495 1.0663 1.0318 1.0989
The model size is 7
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 7 leaves] CV score: 1.0552 1.0552 1.0024 1.0024
Step 2 of 10 [ 1 trees; 7 leaves] CV score: 1.0541 1.0546 1.0142 1.0083
Step 3 of 10 [ 1 trees; 7 leaves] CV score: 1.0689 1.0594 0.8567 0.9578
Step 4 of 10 [ 1 trees; 7 leaves] CV score: 1.0366 1.0537 1.1738 1.0118
Step 5 of 10 [ 1 trees; 7 leaves] CV score: 1.0140 1.0457 1.3054 1.0705
Step 6 of 10 [ 1 trees; 7 leaves] CV score: 1.0449 1.0456 1.0987 1.0752
Step 7 of 10 [ 1 trees; 7 leaves] CV score: 1.0525 1.0466 1.0038 1.0650
Step 8 of 10 [ 1 trees; 7 leaves] CV score: 1.0554 1.0477 0.9959 1.0564
Step 9 of 10 [ 1 trees; 7 leaves] CV score: 1.0364 1.0464 1.1725 1.0693
Step 10 of 10 [ 1 trees; 7 leaves] CV score: 1.0471 1.0465 1.0319 1.0655
The number of trees in these models is 2
The size for this model is smaller than the number of trees you requested. ( 1 versus 2)
To save CPU time, we will skip this run.
On to the next model...
The model size is 2
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 2 leaves] CV score: 1.0552 1.0552 1.0024 1.0024
Step 2 of 10 [ 2 trees; 2 leaves] CV score: 1.1287 1.0919 1.0371 1.0198
Step 3 of 10 [ 2 trees; 2 leaves] CV score: 1.1301 1.1047 1.0263 1.0220
Step 4 of 10 [ 2 trees; 2 leaves] CV score: 1.0968 1.1027 1.3287 1.0986
Step 5 of 10 [ 2 trees; 2 leaves] CV score: 1.0763 1.0974 1.4746 1.1738
Step 6 of 10 [ 2 trees; 2 leaves] CV score: 1.1194 1.1011 1.1325 1.1669
Step 7 of 10 [ 2 trees; 2 leaves] CV score: 1.1375 1.1063 0.9415 1.1347
Step 8 of 10 [ 2 trees; 2 leaves] CV score: 1.1241 1.1085 1.0894 1.1291
Step 9 of 10 [ 2 trees; 2 leaves] CV score: 1.1046 1.1081 1.2635 1.1440
Step 10 of 10 [ 2 trees; 2 leaves] CV score: 1.1228 1.1095 1.0979 1.1394
The model size is 3
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 3 leaves] CV score: 1.0421 1.0421 0.9821 0.9821
Step 2 of 10 [ 2 trees; 3 leaves] CV score: 1.0404 1.0413 0.9989 0.9905
Step 3 of 10 [ 2 trees; 3 leaves] CV score: 1.0485 1.0437 0.9222 0.9677
Step 4 of 10 [ 2 trees; 3 leaves] CV score: 1.0377 1.0422 1.1853 1.0221
Step 5 of 10 [ 2 trees; 3 leaves] CV score: 1.0080 1.0354 1.2800 1.0737
Step 6 of 10 [ 2 trees; 3 leaves] CV score: 1.0344 1.0352 1.0602 1.0714
Step 7 of 10 [ 2 trees; 3 leaves] CV score: 1.0371 1.0355 1.0354 1.0663
Step 8 of 10 [ 2 trees; 3 leaves] CV score: 1.0413 1.0362 0.9897 1.0567
Step 9 of 10 [ 2 trees; 3 leaves] CV score: 1.0279 1.0353 1.1230 1.0641
Step 10 of 10 [ 2 trees; 3 leaves] CV score: 1.0360 1.0353 1.0413 1.0618
The model size is 4
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 4 leaves] CV score: 1.0326 1.0326 1.0188 1.0188
Step 2 of 10 [ 2 trees; 4 leaves] CV score: 0.9950 1.0138 0.9590 0.9889
Step 3 of 10 [ 2 trees; 4 leaves] CV score: 1.0057 1.0111 0.8454 0.9411
Step 4 of 10 [ 2 trees; 4 leaves] CV score: 0.9699 1.0008 1.1859 1.0023
Step 5 of 10 [ 2 trees; 4 leaves] CV score: 0.9711 0.9949 1.1763 1.0371
Step 6 of 10 [ 2 trees; 4 leaves] CV score: 1.0308 1.0009 1.0918 1.0462
Step 7 of 10 [ 2 trees; 4 leaves] CV score: 1.1217 1.0181 0.9450 1.0317
Step 8 of 10 [ 2 trees; 4 leaves] CV score: 0.9896 1.0146 1.0118 1.0292
Step 9 of 10 [ 2 trees; 4 leaves] CV score: 1.0198 1.0151 1.1362 1.0411
Step 10 of 10 [ 2 trees; 4 leaves] CV score: 1.0327 1.0169 1.0166 1.0387
The model size is 5
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 5 leaves] CV score: 1.0412 1.0412 0.9936 0.9936
Step 2 of 10 [ 2 trees; 5 leaves] CV score: 0.9884 1.0148 0.9573 0.9754
Step 3 of 10 [ 2 trees; 5 leaves] CV score: 1.0047 1.0114 0.8455 0.9321
Step 4 of 10 [ 2 trees; 5 leaves] CV score: 1.0158 1.0125 1.2298 1.0065
Step 5 of 10 [ 2 trees; 5 leaves] CV score: 1.0669 1.0234 1.4205 1.0893
Step 6 of 10 [ 2 trees; 5 leaves] CV score: 0.9872 1.0174 0.9702 1.0695
Step 7 of 10 [ 2 trees; 5 leaves] CV score: 0.9828 1.0124 1.0101 1.0610
Step 8 of 10 [ 2 trees; 5 leaves] CV score: 1.0392 1.0158 1.0072 1.0543
Step 9 of 10 [ 2 trees; 5 leaves] CV score: 1.0261 1.0169 1.1429 1.0641
Step 10 of 10 [ 2 trees; 5 leaves] CV score: 1.0360 1.0188 1.0413 1.0618
The model size is 6
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 6 leaves] CV score: 0.9898 0.9898 0.9457 0.9457
Step 2 of 10 [ 2 trees; 6 leaves] CV score: 0.9877 0.9887 0.9545 0.9501
Step 3 of 10 [ 2 trees; 6 leaves] CV score: 1.0057 0.9944 0.8454 0.9152
Step 4 of 10 [ 2 trees; 6 leaves] CV score: 0.9681 0.9878 1.1430 0.9722
Step 5 of 10 [ 2 trees; 6 leaves] CV score: 0.9711 0.9845 1.1763 1.0130
Step 6 of 10 [ 2 trees; 6 leaves] CV score: 1.0268 0.9916 1.0523 1.0195
Step 7 of 10 [ 2 trees; 6 leaves] CV score: 1.0213 0.9958 1.0587 1.0251
Step 8 of 10 [ 2 trees; 6 leaves] CV score: 0.9817 0.9940 1.0223 1.0248
Step 9 of 10 [ 2 trees; 6 leaves] CV score: 1.0191 0.9968 1.1481 1.0385
Step 10 of 10 [ 2 trees; 6 leaves] CV score: 0.9795 0.9951 1.0421 1.0389
The model size is 7
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 7 leaves] CV score: 1.0346 1.0346 0.9920 0.9920
Step 2 of 10 [ 2 trees; 7 leaves] CV score: 0.9937 1.0142 0.9583 0.9751
Step 3 of 10 [ 2 trees; 7 leaves] CV score: 1.0164 1.0149 0.9209 0.9571
Step 4 of 10 [ 2 trees; 7 leaves] CV score: 0.9681 1.0032 1.1430 1.0036
Step 5 of 10 [ 2 trees; 7 leaves] CV score: 0.9631 0.9952 1.1825 1.0393
Step 6 of 10 [ 2 trees; 7 leaves] CV score: 1.0239 1.0000 1.0342 1.0385
Step 7 of 10 [ 2 trees; 7 leaves] CV score: 0.9827 0.9975 1.0426 1.0391
Step 8 of 10 [ 2 trees; 7 leaves] CV score: 1.0320 1.0018 1.0043 1.0347
Step 9 of 10 [ 2 trees; 7 leaves] CV score: 1.0130 1.0031 1.1265 1.0449
Step 10 of 10 [ 2 trees; 7 leaves] CV score: 1.0499 1.0077 1.0436 1.0448
> # equivalent
> fit3 <- logreg(select = 3, oldfit = fit2)
The number of trees in these models is 1
The model size is 1
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 1 leaves] CV score: 1.1574 1.1574 1.1055 1.1055
Step 2 of 10 [ 1 trees; 1 leaves] CV score: 1.1471 1.1522 1.2046 1.1551
Step 3 of 10 [ 1 trees; 1 leaves] CV score: 1.1633 1.1559 1.0449 1.1183
Step 4 of 10 [ 1 trees; 1 leaves] CV score: 1.1667 1.1586 1.0101 1.0913
Step 5 of 10 [ 1 trees; 1 leaves] CV score: 1.1637 1.1596 1.0464 1.0823
Step 6 of 10 [ 1 trees; 1 leaves] CV score: 1.1566 1.1591 1.1122 1.0873
Step 7 of 10 [ 1 trees; 1 leaves] CV score: 1.1363 1.1559 1.2972 1.1173
Step 8 of 10 [ 1 trees; 1 leaves] CV score: 1.1118 1.1503 1.4788 1.1625
Step 9 of 10 [ 1 trees; 1 leaves] CV score: 1.1457 1.1498 1.2124 1.1680
Step 10 of 10 [ 1 trees; 1 leaves] CV score: 1.1513 1.1500 1.1630 1.1675
The model size is 2
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 2 leaves] CV score: 1.0576 1.0576 0.9803 0.9803
Step 2 of 10 [ 1 trees; 2 leaves] CV score: 1.0421 1.0498 1.1255 1.0529
Step 3 of 10 [ 1 trees; 2 leaves] CV score: 1.0591 1.0529 0.9636 1.0231
Step 4 of 10 [ 1 trees; 2 leaves] CV score: 1.0504 1.0523 1.0543 1.0309
Step 5 of 10 [ 1 trees; 2 leaves] CV score: 1.0581 1.0535 0.9767 1.0201
Step 6 of 10 [ 1 trees; 2 leaves] CV score: 1.0527 1.0533 1.0263 1.0211
Step 7 of 10 [ 1 trees; 2 leaves] CV score: 1.0460 1.0523 1.0928 1.0314
Step 8 of 10 [ 1 trees; 2 leaves] CV score: 1.0217 1.0485 1.2879 1.0634
Step 9 of 10 [ 1 trees; 2 leaves] CV score: 1.0586 1.0496 0.9694 1.0530
Step 10 of 10 [ 1 trees; 2 leaves] CV score: 1.0333 1.0480 1.1990 1.0676
The model size is 3
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 3 leaves] CV score: 1.0576 1.0576 0.9803 0.9803
Step 2 of 10 [ 1 trees; 3 leaves] CV score: 1.0393 1.0484 1.1265 1.0534
Step 3 of 10 [ 1 trees; 3 leaves] CV score: 1.0591 1.0520 0.9636 1.0234
Step 4 of 10 [ 1 trees; 3 leaves] CV score: 1.0504 1.0516 1.0543 1.0312
Step 5 of 10 [ 1 trees; 3 leaves] CV score: 1.0581 1.0529 0.9767 1.0203
Step 6 of 10 [ 1 trees; 3 leaves] CV score: 1.0477 1.0520 1.0488 1.0250
Step 7 of 10 [ 1 trees; 3 leaves] CV score: 1.0421 1.0506 1.1963 1.0495
Step 8 of 10 [ 1 trees; 3 leaves] CV score: 1.0217 1.0470 1.2879 1.0793
Step 9 of 10 [ 1 trees; 3 leaves] CV score: 1.0586 1.0483 0.9694 1.0671
Step 10 of 10 [ 1 trees; 3 leaves] CV score: 1.0333 1.0468 1.1990 1.0803
The model size is 4
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 4 leaves] CV score: 1.1574 1.1574 1.1055 1.1055
Step 2 of 10 [ 1 trees; 4 leaves] CV score: 1.1471 1.1522 1.2046 1.1551
Step 3 of 10 [ 1 trees; 4 leaves] CV score: 1.0591 1.1212 0.9636 1.0912
Step 4 of 10 [ 1 trees; 4 leaves] CV score: 1.0504 1.1035 1.0543 1.0820
Step 5 of 10 [ 1 trees; 4 leaves] CV score: 1.0516 1.0931 0.9909 1.0638
Step 6 of 10 [ 1 trees; 4 leaves] CV score: 1.0527 1.0864 1.0263 1.0575
Step 7 of 10 [ 1 trees; 4 leaves] CV score: 1.1363 1.0935 1.2972 1.0918
Step 8 of 10 [ 1 trees; 4 leaves] CV score: 1.1118 1.0958 1.4788 1.1402
Step 9 of 10 [ 1 trees; 4 leaves] CV score: 1.0578 1.0916 1.1687 1.1433
Step 10 of 10 [ 1 trees; 4 leaves] CV score: 1.0308 1.0855 1.1978 1.1488
The model size is 5
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 5 leaves] CV score: 1.1574 1.1574 1.1055 1.1055
Step 2 of 10 [ 1 trees; 5 leaves] CV score: 1.0421 1.0997 1.1255 1.1155
Step 3 of 10 [ 1 trees; 5 leaves] CV score: 1.0567 1.0854 0.9617 1.0642
Step 4 of 10 [ 1 trees; 5 leaves] CV score: 1.0504 1.0766 1.0543 1.0618
Step 5 of 10 [ 1 trees; 5 leaves] CV score: 1.0516 1.0716 0.9909 1.0476
Step 6 of 10 [ 1 trees; 5 leaves] CV score: 1.0527 1.0685 1.0263 1.0440
Step 7 of 10 [ 1 trees; 5 leaves] CV score: 1.0421 1.0647 1.1963 1.0658
Step 8 of 10 [ 1 trees; 5 leaves] CV score: 1.0217 1.0593 1.2879 1.0936
Step 9 of 10 [ 1 trees; 5 leaves] CV score: 1.0586 1.0592 0.9694 1.0798
Step 10 of 10 [ 1 trees; 5 leaves] CV score: 1.0308 1.0564 1.1978 1.0916
The model size is 6
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 6 leaves] CV score: 1.0576 1.0576 0.9803 0.9803
Step 2 of 10 [ 1 trees; 6 leaves] CV score: 1.0421 1.0498 1.1255 1.0529
Step 3 of 10 [ 1 trees; 6 leaves] CV score: 1.0567 1.0521 0.9617 1.0225
Step 4 of 10 [ 1 trees; 6 leaves] CV score: 1.0455 1.0505 1.0530 1.0301
Step 5 of 10 [ 1 trees; 6 leaves] CV score: 1.0516 1.0507 0.9761 1.0193
Step 6 of 10 [ 1 trees; 6 leaves] CV score: 1.0527 1.0510 1.0263 1.0205
Step 7 of 10 [ 1 trees; 6 leaves] CV score: 1.0351 1.0487 1.1986 1.0459
Step 8 of 10 [ 1 trees; 6 leaves] CV score: 1.0189 1.0450 1.2870 1.0761
Step 9 of 10 [ 1 trees; 6 leaves] CV score: 1.0531 1.0459 0.9682 1.0641
Step 10 of 10 [ 1 trees; 6 leaves] CV score: 1.0282 1.0441 1.1988 1.0775
The model size is 7
         training-now training-ave test-now test-ave
Step 1 of 10 [ 1 trees; 7 leaves] CV score: 1.0576 1.0576 0.9803 0.9803
Step 2 of 10 [ 1 trees; 7 leaves] CV score: 1.0311 1.0443 1.1172 1.0487
Step 3 of 10 [ 1 trees; 7 leaves] CV score: 1.0567 1.0484 0.9617 1.0197
Step 4 of 10 [ 1 trees; 7 leaves] CV score: 1.0504 1.0489 1.0543 1.0283
Step 5 of 10 [ 1 trees; 7 leaves] CV score: 1.0581 1.0508 0.9767 1.0180
Step 6 of 10 [ 1 trees; 7 leaves] CV score: 1.0477 1.0502 1.0488 1.0232
Step 7 of 10 [ 1 trees; 7 leaves] CV score: 1.0460 1.0496 1.0928 1.0331
Step 8 of 10 [ 1 trees; 7 leaves] CV score: 1.0151 1.0453 1.2866 1.0648
Step 9 of 10 [ 1 trees; 7 leaves] CV score: 1.0586 1.0468 0.9694 1.0542
Step 10 of 10 [ 1 trees; 7 leaves] CV score: 1.0308 1.0452 1.1978 1.0685
The number of trees in these models is 2
The size for this model is smaller than the number of trees you requested. ( 1 versus 2)
To save CPU time, we will skip this run.
On to the next model...
The model size is 2
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 2 leaves] CV score: 1.1296 1.1296 1.0298 1.0298
Step 2 of 10 [ 2 trees; 2 leaves] CV score: 1.1060 1.1178 1.2537 1.1418
Step 3 of 10 [ 2 trees; 2 leaves] CV score: 1.1273 1.1210 1.0512 1.1116
Step 4 of 10 [ 2 trees; 2 leaves] CV score: 1.1262 1.1223 1.0670 1.1004
Step 5 of 10 [ 2 trees; 2 leaves] CV score: 1.1236 1.1225 1.0968 1.0997
Step 6 of 10 [ 2 trees; 2 leaves] CV score: 1.1288 1.1236 1.0356 1.0890
Step 7 of 10 [ 2 trees; 2 leaves] CV score: 1.1103 1.1217 1.2191 1.1076
Step 8 of 10 [ 2 trees; 2 leaves] CV score: 1.0842 1.1170 1.4212 1.1468
Step 9 of 10 [ 2 trees; 2 leaves] CV score: 1.1209 1.1174 1.1210 1.1439
Step 10 of 10 [ 2 trees; 2 leaves] CV score: 1.1095 1.1166 1.2228 1.1518
The model size is 3
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 3 leaves] CV score: 1.0499 1.0499 0.9913 0.9913
Step 2 of 10 [ 2 trees; 3 leaves] CV score: 1.0276 1.0388 1.1200 1.0556
Step 3 of 10 [ 2 trees; 3 leaves] CV score: 1.0425 1.0400 0.9795 1.0303
Step 4 of 10 [ 2 trees; 3 leaves] CV score: 1.0341 1.0385 1.0673 1.0395
Step 5 of 10 [ 2 trees; 3 leaves] CV score: 1.0391 1.0386 1.0186 1.0353
Step 6 of 10 [ 2 trees; 3 leaves] CV score: 1.0393 1.0388 1.0091 1.0310
Step 7 of 10 [ 2 trees; 3 leaves] CV score: 1.1079 1.0486 1.2683 1.0649
Step 8 of 10 [ 2 trees; 3 leaves] CV score: 1.0067 1.0434 1.2888 1.0929
Step 9 of 10 [ 2 trees; 3 leaves] CV score: 1.0597 1.0452 0.9818 1.0805
Step 10 of 10 [ 2 trees; 3 leaves] CV score: 1.0262 1.0433 1.1437 1.0868
The model size is 4
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 4 leaves] CV score: 0.9907 0.9907 1.0043 1.0043
Step 2 of 10 [ 2 trees; 4 leaves] CV score: 1.0276 1.0091 1.1200 1.0621
Step 3 of 10 [ 2 trees; 4 leaves] CV score: 0.9964 1.0049 0.9449 1.0231
Step 4 of 10 [ 2 trees; 4 leaves] CV score: 0.9926 1.0018 0.9895 1.0147
Step 5 of 10 [ 2 trees; 4 leaves] CV score: 0.9962 1.0007 0.9474 1.0012
Step 6 of 10 [ 2 trees; 4 leaves] CV score: 1.0757 1.0132 0.9496 0.9926
Step 7 of 10 [ 2 trees; 4 leaves] CV score: 0.9827 1.0088 1.0751 1.0044
Step 8 of 10 [ 2 trees; 4 leaves] CV score: 1.0000 1.0077 1.2857 1.0396
Step 9 of 10 [ 2 trees; 4 leaves] CV score: 1.0514 1.0126 0.9752 1.0324
Step 10 of 10 [ 2 trees; 4 leaves] CV score: 0.9884 1.0102 1.0298 1.0321
The model size is 5
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 5 leaves] CV score: 1.0681 1.0681 1.0846 1.0846
Step 2 of 10 [ 2 trees; 5 leaves] CV score: 0.9766 1.0224 1.0657 1.0752
Step 3 of 10 [ 2 trees; 5 leaves] CV score: 0.9929 1.0126 0.9104 1.0202
Step 4 of 10 [ 2 trees; 5 leaves] CV score: 1.0723 1.0275 0.9886 1.0123
Step 5 of 10 [ 2 trees; 5 leaves] CV score: 0.9929 1.0206 0.9690 1.0037
Step 6 of 10 [ 2 trees; 5 leaves] CV score: 0.9889 1.0153 0.9509 0.9949
Step 7 of 10 [ 2 trees; 5 leaves] CV score: 1.0424 1.0192 1.2026 1.0246
Step 8 of 10 [ 2 trees; 5 leaves] CV score: 0.9627 1.0121 1.1846 1.0446
Step 9 of 10 [ 2 trees; 5 leaves] CV score: 1.1023 1.0221 1.1901 1.0607
Step 10 of 10 [ 2 trees; 5 leaves] CV score: 0.9875 1.0187 1.0629 1.0610
The model size is 6
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 6 leaves] CV score: 1.0956 1.0956 1.1429 1.1429
Step 2 of 10 [ 2 trees; 6 leaves] CV score: 0.9766 1.0361 1.0657 1.1043
Step 3 of 10 [ 2 trees; 6 leaves] CV score: 0.9925 1.0216 0.9434 1.0507
Step 4 of 10 [ 2 trees; 6 leaves] CV score: 1.0234 1.0220 1.0913 1.0608
Step 5 of 10 [ 2 trees; 6 leaves] CV score: 0.9861 1.0149 0.9456 1.0378
Step 6 of 10 [ 2 trees; 6 leaves] CV score: 0.9958 1.0117 0.9498 1.0231
Step 7 of 10 [ 2 trees; 6 leaves] CV score: 1.0381 1.0154 1.1159 1.0364
Step 8 of 10 [ 2 trees; 6 leaves] CV score: 1.0647 1.0216 1.4640 1.0898
Step 9 of 10 [ 2 trees; 6 leaves] CV score: 1.0463 1.0244 0.9614 1.0756
Step 10 of 10 [ 2 trees; 6 leaves] CV score: 0.9884 1.0208 1.0298 1.0710
The model size is 7
         training-now training-ave test-now test-ave
Step 1 of 10 [ 2 trees; 7 leaves] CV score: 1.0543 1.0543 0.9949 0.9949
Step 2 of 10 [ 2 trees; 7 leaves] CV score: 0.9788 1.0166 1.1801 1.0875
Step 3 of 10 [ 2 trees; 7 leaves] CV score: 1.0524 1.0285 1.0262 1.0671
Step 4 of 10 [ 2 trees; 7 leaves] CV score: 0.9861 1.0179 1.0052 1.0516
Step 5 of 10 [ 2 trees; 7 leaves] CV score: 0.9901 1.0124 1.0144 1.0442
Step 6 of 10 [ 2 trees; 7 leaves] CV score: 1.0043 1.0110 0.9689 1.0316
Step 7 of 10 [ 2 trees; 7 leaves] CV score: 0.9878 1.0077 1.1065 1.0423
Step 8 of 10 [ 2 trees; 7 leaves] CV score: 0.9908 1.0056 1.2785 1.0718
Step 9 of 10 [ 2 trees; 7 leaves] CV score: 1.0379 1.0092 0.9994 1.0638
Step 10 of 10 [ 2 trees; 7 leaves] CV score: 0.9792 1.0062 1.0488 1.0623
> plot(fit3)
> # use logreg.savefit3 for the results with 25000 iterations
> plot(logreg.savefit3)
> # 4 leaves, 2 trees should be on top
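> # One way to read the tables above: the last "test-ave" entry for each model
> # size is the cross-validated test score after all 10 folds.  Copying those
> # values by hand from the second fit3 run (two-tree models, sizes 2-7) makes
> # the comparison explicit:
> # cvtest <- c(1.1518, 1.0868, 1.0321, 1.0610, 1.0710, 1.0623)
> # names(cvtest) <- 2:7
> # cvtest   # size 4 has the lowest cross-validated test score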
> # null model test
> fit4 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21], type = 2,
+                select = 4, ntrees = 2, anneal.control = myanneal2)
The model of size 0 has score 1.4941
The best model has score 0.9814
Permutation number 1 out of 25 has score 1.4512
Permutation number 2 out of 25 has score 1.4561
Permutation number 3 out of 25 has score 1.4692
Permutation number 4 out of 25 has score 1.4370
Permutation number 5 out of 25 has score 1.4559
Permutation number 6 out of 25 has score 1.4676
Permutation number 7 out of 25 has score 1.4667
Permutation number 8 out of 25 has score 1.4422
Permutation number 9 out of 25 has score 1.4550
Permutation number 10 out of 25 has score 1.4537
Permutation number 11 out of 25 has score 1.4622
Permutation number 12 out of 25 has score 1.4542
Permutation number 13 out of 25 has score 1.4520
Permutation number 14 out of 25 has score 1.4310
Permutation number 15 out of 25 has score 1.4390
Permutation number 16 out of 25 has score 1.4716
Permutation number 17 out of 25 has score 1.4570
Permutation number 18 out of 25 has score 1.4413
Permutation number 19 out of 25 has score 1.4617
Permutation number 20 out of 25 has score 1.4614
Permutation number 21 out of 25 has score 1.4607
Permutation number 22 out of 25 has score 1.4659
Permutation number 23 out of 25 has score 1.4563
Permutation number 24 out of 25 has score 1.4721
Permutation number 25 out of 25 has score 1.4625
> # equivalent
> fit4 <- logreg(select = 4, anneal.control = myanneal2, oldfit = fit1)
The model of size 0 has score 1.4941
The best model has score 0.9782
Permutation number 1 out of 25 has score 1.4576
Permutation number 2 out of 25 has score 1.4690
Permutation number 3 out of 25 has score 1.4652
Permutation number 4 out of 25 has score 1.4716
Permutation number 5 out of 25 has score 1.4498
Permutation number 6 out of 25 has score 1.4533
Permutation number 7 out of 25 has score 1.4605
Permutation number 8 out of 25 has score 1.4428
Permutation number 9 out of 25 has score 1.4692
Permutation number 10 out of 25 has score 1.4425
Permutation number 11 out of 25 has score 1.4620
Permutation number 12 out of 25 has score 1.4597
Permutation number 13 out of 25 has score 1.4510
Permutation number 14 out of 25 has score 1.4265
Permutation number 15 out of 25 has score 1.4632
Permutation number 16 out of 25 has score 1.4443
Permutation number 17 out of 25 has score 1.4589
Permutation number 18 out of 25 has score 1.4434
Permutation number 19 out of 25 has score 1.4402
Permutation number 20 out of 25 has score 1.4586
Permutation number 21 out of 25 has score 1.4614
Permutation number 22 out of 25 has score 1.4408
Permutation number 23 out of 25 has score 1.4464
Permutation number 24 out of 25 has score 1.4653
Permutation number 25 out of 25 has score 1.4709
> plot(fit4)
> # use logreg.savefit4 for the results with 25000 iterations
> plot(logreg.savefit4)
> # A histogram of the 25 scores obtained from the permutation test. Also shown
> # are the scores for the best scoring model with one logic tree, and the null
> # model (no tree). Since the permutation scores are not even close to the score
> # of the best model with one tree (fit on the original data), there is overwhelming
> # evidence against the null hypothesis that there was no signal in the data.
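> # That informal comparison can be made numeric.  A sketch of a one-sided
> # permutation p-value, with the 25 scores typed in by hand from the first
> # fit4 run above:
> # permscores <- c(1.4512, 1.4561, 1.4692, 1.4370, 1.4559, 1.4676, 1.4667,
> #                 1.4422, 1.4550, 1.4537, 1.4622, 1.4542, 1.4520, 1.4310,
> #                 1.4390, 1.4716, 1.4570, 1.4413, 1.4617, 1.4614, 1.4607,
> #                 1.4659, 1.4563, 1.4721, 1.4625)
> # mean(permscores <= 0.9814)  # fraction of permutations beating the best model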
> fit5 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21], type = 2,
+                select = 5, ntrees = c(1,2), nleaves = c(1,7), anneal.control = myanneal2,
+                nrep = 10, oldfit = fit2)
The model of size 0 has score 1.4941
The number of trees in these models is 1
The model size is 1
Permutation number 1 out of 10 has score 1.130 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.137 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.133 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.138 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.132 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.128 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.135 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.136 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.124 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.135 model size with 1 tree(s)
The model size is 2
Permutation number 1 out of 10 has score 1.041 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.029 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.031 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.033 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.036 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.040 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.042 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.043 model size with 1 tree(s)
The model size is 3
Permutation number 1 out of 10 has score 1.032 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.025 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.030 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.045 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.036 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.028 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.027 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.030 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.031 model size with 1 tree(s)
The model size is 4
Permutation number 1 out of 10 has score 1.032 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.036 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.035 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.042 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.033 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.040 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.099 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.046 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.035 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.040 model size with 1 tree(s)
The model size is 5
Permutation number 1 out of 10 has score 1.066 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.041 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.027 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.057 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.053 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.035 model size with 1 tree(s)
The model size is 6
Permutation number 1 out of 10 has score 1.038 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.033 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.031 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.042 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.042 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.035 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.043 model size with 1 tree(s)
The model size is 7
Permutation number 1 out of 10 has score 1.028 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.049 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.016 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.041 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.045 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.079 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.026 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.038 model size with 1 tree(s)
The number of trees in these models is 2
The size for this model is smaller than the number of trees you requested. ( 1 versus 2)
To save CPU time, we will skip this run.
On to the next model...
The model size is 2
Permutation number 1 out of 10 has score 1.034 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.035 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.035 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.027 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.040 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.033 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.026 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.029 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.047 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.032 model size with 2 tree(s)
The model size is 3
Permutation number 1 out of 10 has score 1.030 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.024 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.045 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.033 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.019 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.025 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.028 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.026 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.033 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.030 model size with 2 tree(s)
The model size is 4
Permutation number 1 out of 10 has score 1.029 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.033 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.030 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.019 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.032 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.031 model size with 2 tree(s)
Permutation number 7 out of 10 has score 0.999 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.038 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.039 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.035 model size with 2 tree(s)
The model size is 5
Permutation number 1 out of 10 has score 1.074 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.084 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.101 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.086 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.122 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.095 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.129 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.109 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.079 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.092 model size with 2 tree(s)
The model size is 6
Permutation number 1 out of 10 has score 0.974 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.030 model size with 2 tree(s)
Permutation number 3 out of 10 has score 0.971 model size with 2 tree(s)
Permutation number 4 out of 10 has score 0.984 model size with 2 tree(s)
Permutation number 5 out of 10 has score 0.978 model size with 2 tree(s)
Permutation number 6 out of 10 has score 0.975 model size with 2 tree(s)
Permutation number 7 out of 10 has score 0.982 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.044 model size with 2 tree(s)
Permutation number 9 out of 10 has score 0.980 model size with 2 tree(s)
Permutation number 10 out of 10 has score 0.982 model size with 2 tree(s)
The model size is 7
Permutation number 1 out of 10 has score 1.007 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.066 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.014 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.037 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.026 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.022 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.016 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.018 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.024 model size with 2 tree(s)
Permutation number 10 out of 10 has score 0.999 model size with 2 tree(s)
> # equivalent
> fit5 <- logreg(select = 5, nrep = 10, oldfit = fit2)
The model of size 0 has score 1.4941
The number of trees in these models is 1
The model size is 1
Permutation number 1 out of 10 has score 1.136 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.130 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.138 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.134 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.135 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.131 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.133 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.136 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.134 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.124 model size with 1 tree(s)
The model size is 2
Permutation number 1 out of 10 has score 1.030 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.029 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.030 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.040 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.027 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.036 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.035 model size with 1 tree(s)
The model size is 3
Permutation number 1 out of 10 has score 1.040 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.026 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.030 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.033 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.025 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.019 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.033 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.046 model size with 1 tree(s)
The model size is 4
Permutation number 1 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.030 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.029 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.030 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.031 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.032 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.025 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.045 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.032 model size with 1 tree(s)
The model size is 5
Permutation number 1 out of 10 has score 1.045 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.038 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.047 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.054 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.046 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.044 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.048 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.028 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.031 model size with 1 tree(s)
The model size is 6
Permutation number 1 out of 10 has score 1.036 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.039 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.040 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.038 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.034 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.043 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.037 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.033 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.076 model size with 1 tree(s)
The model size is 7
Permutation number 1 out of 10 has score 1.040 model size with 1 tree(s)
Permutation number 2 out of 10 has score 1.042 model size with 1 tree(s)
Permutation number 3 out of 10 has score 1.047 model size with 1 tree(s)
Permutation number 4 out of 10 has score 1.029 model size with 1 tree(s)
Permutation number 5 out of 10 has score 1.038 model size with 1 tree(s)
Permutation number 6 out of 10 has score 1.046 model size with 1 tree(s)
Permutation number 7 out of 10 has score 1.042 model size with 1 tree(s)
Permutation number 8 out of 10 has score 1.042 model size with 1 tree(s)
Permutation number 9 out of 10 has score 1.047 model size with 1 tree(s)
Permutation number 10 out of 10 has score 1.037 model size with 1 tree(s)
The number of trees in these models is 2
The size for this model is smaller than the number of trees you requested. ( 1 versus 2)
To save CPU time, we will skip this run.
On to the next model...
The model size is 2
Permutation number 1 out of 10 has score 1.039 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.042 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.045 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.116 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.029 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.040 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.038 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.036 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.040 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.038 model size with 2 tree(s)
The model size is 3
Permutation number 1 out of 10 has score 1.028 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.033 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.028 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.019 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.018 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.036 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.019 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.033 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.030 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.033 model size with 2 tree(s)
The model size is 4
Permutation number 1 out of 10 has score 1.046 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.022 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.027 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.044 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.047 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.027 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.038 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.035 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.041 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.041 model size with 2 tree(s)
The model size is 5
Permutation number 1 out of 10 has score 1.098 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.095 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.086 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.081 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.131 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.087 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.110 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.087 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.118 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.091 model size with 2 tree(s)
The model size is 6
Permutation number 1 out of 10 has score 1.046 model size with 2 tree(s)
Permutation number 2 out of 10 has score 0.982 model size with 2 tree(s)
Permutation number 3 out of 10 has score 0.979 model size with 2 tree(s)
Permutation number 4 out of 10 has score 0.980 model size with 2 tree(s)
Permutation number 5 out of 10 has score 0.983 model size with 2 tree(s)
Permutation number 6 out of 10 has score 0.987 model size with 2 tree(s)
Permutation number 7 out of 10 has score 0.976 model size with 2 tree(s)
Permutation number 8 out of 10 has score 0.980 model size with 2 tree(s)
Permutation number 9 out of 10 has score 1.036 model size with 2 tree(s)
Permutation number 10 out of 10 has score 0.966 model size with 2 tree(s)
The model size is 7
Permutation number 1 out of 10 has score 1.009 model size with 2 tree(s)
Permutation number 2 out of 10 has score 1.028 model size with 2 tree(s)
Permutation number 3 out of 10 has score 1.031 model size with 2 tree(s)
Permutation number 4 out of 10 has score 1.016 model size with 2 tree(s)
Permutation number 5 out of 10 has score 1.031 model size with 2 tree(s)
Permutation number 6 out of 10 has score 1.047 model size with 2 tree(s)
Permutation number 7 out of 10 has score 1.010 model size with 2 tree(s)
Permutation number 8 out of 10 has score 1.018 model size with 2 tree(s)
Permutation number 9 out of 10 has score 0.997 model size with 2 tree(s)
Permutation number 10 out of 10 has score 1.022 model size with 2 tree(s)
> plot(fit5)
> # use logreg.savefit5 for the results with 25000 iterations and 25 permutations
> plot(logreg.savefit5)
> # The permutation scores improve until we condition on a model with two trees and
> # four leaves, and then do not change very much anymore. This indicates that the
> # best model has indeed four leaves.
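> # A compact way to look at these conditional permutation scores is one
> # boxplot per conditioning size.  Sketch with values typed in by hand from
> # the first fit5 run, two-tree models of sizes 2, 3 and 4:
> # p2 <- c(1.034, 1.035, 1.035, 1.027, 1.040, 1.033, 1.026, 1.029, 1.047, 1.032)
> # p3 <- c(1.030, 1.024, 1.045, 1.033, 1.019, 1.025, 1.028, 1.026, 1.033, 1.030)
> # p4 <- c(1.029, 1.033, 1.030, 1.019, 1.032, 1.031, 0.999, 1.038, 1.039, 1.035)
> # boxplot(list(p2, p3, p4), names = c("2", "3", "4"),
> #         xlab = "leaves conditioned on", ylab = "permutation score")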
8 out of 10 has score 0.980 model size with 2 tree(s) Permutation number 9 out of 10 has score 1.036 model size with 2 tree(s) Permutation number 10 out of 10 has score 0.966 model size with 2 tree(s) The model size is 7 Permutation number 1 out of 10 has score 1.009 model size with 2 tree(s) Permutation number 2 out of 10 has score 1.028 model size with 2 tree(s) Permutation number 3 out of 10 has score 1.031 model size with 2 tree(s) Permutation number 4 out of 10 has score 1.016 model size with 2 tree(s) Permutation number 5 out of 10 has score 1.031 model size with 2 tree(s) Permutation number 6 out of 10 has score 1.047 model size with 2 tree(s) Permutation number 7 out of 10 has score 1.010 model size with 2 tree(s) Permutation number 8 out of 10 has score 1.018 model size with 2 tree(s) Permutation number 9 out of 10 has score 0.997 model size with 2 tree(s) Permutation number 10 out of 10 has score 1.022 model size with 2 tree(s) > plot(fit5) > # use logreg.savefit5 for the results with 25000 iterations and 25 permutations > plot(logreg.savefit5) > # The permutation scores improve until we condition on a model with two trees and > # four leaves, and then do not change very much anymore. This indicates that the > # best model has indeed four leaves. > # > # greedy selection > fit6 <- logreg(select = 6, ntrees = 2, nleaves =c(1,12), oldfit = fit1) Model 0 has a score of 1.4941 Model 1 has a score of 1.1500 Model 2 has a score of 1.0481 Model 3 has a score of 1.0333 Model 4 has a score of 0.9885 Model 5 has a score of 0.9823 Model 6 has a score of 0.9814 Model 7 has a score of 0.9803 No further improvement possible > plot(fit6) empty tree empty tree empty tree empty tree > # use logreg.savefit6 for the results with 25000 iterations > plot(logreg.savefit6) empty tree empty tree empty tree empty tree > # > # Monte Carlo Logic Regression > fit7 <- logreg(select = 7, oldfit = fit1, mc.control= + logreg.mc.control(nburn=1000, niter=10000, hyperpars=log(2))) iter(10k) current scr best score acc / rej /sing current parameters 0.000 0.7470 0.7470 0( 0) 0 0 2.886 0.000 0.000 0.010 16.8887 0.7470 30( 1) 69 0 2.949 -0.152 -1.991 0.020 11.1104 0.7470 35( 1) 64 0 3.032 -0.588 -1.991 0.030 0.7470 0.7470 33( 1) 66 0 2.886 -1.988 -1.991 0.040 0.7470 0.7470 16( 0) 84 0 2.886 -1.988 -1.991 0.050 5.8209 0.7470 36( 0) 64 0 2.762 0.226 2.813 0.060 38.9818 0.7470 53( 0) 47 0 2.985 -0.143 -0.122 0.070 16.8892 0.7470 65( 3) 32 0 2.916 -0.120 -1.991 0.080 0.7470 0.7470 20( 0) 80 0 2.886 -1.988 -1.991 0.090 0.7470 0.7470 25( 1) 74 0 2.886 -1.988 -1.991 0.100 5.8220 0.7470 20( 1) 79 0 2.856 0.227 -1.991 0.110 5.8221 0.7470 25( 0) 75 0 2.823 0.147 -1.991 0.120 0.7470 0.7470 27( 1) 72 0 2.886 -1.988 -1.991 0.130 0.7470 0.7470 11( 1) 88 0 2.886 -1.988 -1.991 0.140 0.7470 0.7470 29( 1) 70 0 2.886 -1.988 -1.991 0.150 0.7470 0.7470 11( 0) 89 0 2.886 -1.988 -1.991 0.160 0.7470 0.7470 42( 1) 57 0 2.886 -1.988 -1.991 0.170 11.1212 0.7470 30( 0) 70 0 3.082 -0.130 -0.120 0.180 0.7470 0.7470 36( 0) 64 0 2.886 -1.988 -1.991 0.190 0.7470 0.7470 3( 0) 97 0 2.886 -1.988 -1.991 0.200 0.7470 0.7470 8( 0) 92 0 2.886 -1.988 -1.991 0.210 0.7470 0.7470 26( 0) 74 0 2.886 -1.988 -1.991 0.220 22.8785 0.7470 34( 1) 65 0 2.933 -0.370 -1.991 0.230 5.8222 0.7470 30( 3) 67 0 2.728 0.186 -1.991 0.240 5.8227 0.7470 36( 2) 62 0 2.916 -0.083 -1.991 0.250 5.8227 0.7470 41( 1) 58 0 2.844 0.078 -1.991 0.260 5.8228 0.7470 20( 3) 77 0 2.791 0.105 -1.991 0.270 0.7470 0.7470 31( 2) 67 0 2.886 -1.988 -1.991 0.280 16.8896 0.7470 23( 0) 77 0 2.882 0.005 
-1.991 0.290 5.8225 0.7470 22( 0) 78 0 2.857 0.130 -1.991 0.300 5.8220 0.7470 53( 2) 45 0 3.082 -0.227 -1.991 0.310 0.7470 0.7470 22( 0) 78 0 2.886 -1.988 -1.991 0.320 0.7470 0.7470 12( 0) 88 0 2.886 -1.988 -1.991 0.330 0.7470 0.7470 4( 0) 96 0 2.886 -1.988 -1.991 0.340 33.7251 0.7470 33( 3) 64 0 2.848 0.105 -0.068 0.350 5.8061 0.7470 35( 2) 63 0 2.995 -1.023 -1.991 0.360 0.7470 0.7470 8( 0) 92 0 2.886 -1.988 -1.991 0.370 16.8889 0.7470 24( 1) 75 0 3.018 -0.164 -1.991 0.380 0.7470 0.7470 13( 0) 87 0 2.886 -1.988 -1.991 0.390 0.7470 0.7470 17( 0) 83 0 2.886 -1.988 -1.991 0.400 0.7470 0.7470 18( 0) 82 0 2.886 -1.988 -1.991 0.410 5.6502 0.7470 14( 1) 85 0 1.877 1.912 -1.991 0.420 5.8204 0.7470 28( 1) 71 0 2.933 -0.370 -1.991 0.430 5.8229 0.7470 18( 0) 82 0 2.943 -0.068 -1.991 0.440 0.7470 0.7470 30( 2) 68 0 2.886 -1.988 -1.991 0.450 0.7470 0.7470 0( 0) 100 0 2.886 -1.988 -1.991 0.460 16.8893 0.7470 7( 0) 93 0 2.824 0.089 -1.991 0.470 0.7470 0.7470 28( 0) 72 0 2.886 -1.988 -1.991 0.480 11.1214 0.7470 11( 0) 89 0 2.834 0.102 0.085 0.490 0.7470 0.7470 32( 2) 66 0 2.886 -1.988 -1.991 0.500 0.7470 0.7470 29( 1) 70 0 2.886 -1.988 -1.991 0.510 5.8061 0.7470 8( 0) 92 0 2.995 -1.023 -1.991 0.520 0.7470 0.7470 34( 0) 66 0 2.886 -1.988 -1.991 0.530 5.8229 0.7470 36( 0) 64 0 2.875 0.068 -1.991 0.540 11.1211 0.7470 46( 1) 53 0 2.876 0.133 -1.991 0.550 16.8893 0.7470 28( 0) 72 0 2.916 -0.083 -1.991 0.560 0.7470 0.7470 32( 0) 68 0 2.886 -1.988 -1.991 0.570 0.7470 0.7470 18( 3) 79 0 2.886 -1.988 -1.991 0.580 0.7470 0.7470 5( 0) 95 0 2.886 -1.988 -1.991 0.590 0.7470 0.7470 6( 1) 93 0 2.886 -1.988 -1.991 0.600 0.7470 0.7470 23( 2) 75 0 2.886 -1.988 -1.991 0.610 0.7470 0.7470 7( 0) 93 0 2.886 -1.988 -1.991 0.620 11.1215 0.7470 30( 1) 69 0 2.818 0.101 0.049 0.630 5.8202 0.7470 39( 1) 60 0 2.973 -0.279 2.813 0.640 5.8229 0.7470 25( 0) 75 0 2.875 0.068 -1.991 0.650 0.7470 0.7470 24( 2) 74 0 2.886 -1.988 -1.991 0.660 5.8209 0.7470 23( 1) 76 0 2.987 -0.226 2.813 0.670 0.7470 0.7470 28( 0) 72 0 2.886 -1.988 -1.991 0.680 0.7470 0.7470 33( 0) 67 0 2.886 -1.988 -1.991 0.690 11.1216 0.7470 32( 1) 67 0 2.813 0.086 0.093 0.700 0.7470 0.7470 38( 0) 62 0 2.886 -1.988 -1.991 0.710 0.7470 0.7470 26( 0) 74 0 2.886 -1.988 -1.991 0.720 11.1179 0.7470 35( 2) 63 0 3.064 -0.291 -1.991 0.730 11.1197 0.7470 45( 1) 54 0 3.115 -0.270 -1.991 0.740 5.8227 0.7470 43( 2) 55 0 2.844 0.078 -1.991 0.750 0.7470 0.7470 28( 1) 71 0 2.886 -1.988 -1.991 0.760 0.7470 0.7470 25( 2) 73 0 2.886 -1.988 -1.991 0.770 0.7470 0.7470 21( 0) 79 0 2.886 -1.988 -1.991 0.780 0.7470 0.7470 6( 1) 93 0 2.886 -1.988 -1.991 0.790 0.7470 0.7470 27( 0) 73 0 2.886 -1.988 -1.991 0.800 5.8214 0.7470 8( 0) 92 0 2.841 0.248 -1.991 0.810 0.7470 0.7470 21( 0) 79 0 2.886 -1.988 -1.991 0.820 5.8226 0.7470 12( 0) 88 0 2.953 -0.103 -1.991 0.830 5.8221 0.7470 29( 0) 71 0 2.823 0.147 2.813 0.840 11.1190 0.7470 27( 0) 73 0 2.931 -0.345 -1.991 0.850 5.8202 0.7470 50( 2) 48 0 2.973 -0.279 -1.991 0.860 5.8225 0.7470 38( 0) 62 0 2.857 0.130 -1.991 0.870 0.7470 0.7470 18( 1) 81 0 2.886 -1.988 -1.991 0.880 5.8227 0.7470 30( 2) 68 0 2.844 0.078 -1.991 0.890 11.1158 0.7470 26( 0) 74 0 3.073 -0.419 -0.085 0.900 0.7470 0.7470 33( 1) 66 0 2.886 -1.988 -1.991 0.910 5.8229 0.7470 30( 0) 70 0 2.849 0.055 -1.991 0.920 0.7470 0.7470 19( 1) 80 0 2.886 -1.988 -1.991 0.930 16.8888 0.7470 49( 2) 49 0 2.933 0.345 -0.106 0.940 0.7470 0.7470 38( 2) 60 0 2.886 -1.988 -1.991 0.950 11.1119 0.7470 42( 1) 57 0 1.837 1.102 -1.991 0.960 11.1206 0.7470 41( 1) 58 0 2.813 0.131 -1.991 0.970 5.8221 0.7470 19( 0) 81 
 0.980   0.7470  0.7470   21(  0)   79    0     2.886 -1.988 -1.991
 0.990   0.7470  0.7470    7(  0)   93    0     2.886 -1.988 -1.991
 1.000   5.8202  0.7470    6(  0)   94    0     2.973 -0.279 -1.991
 1.010  11.1212  0.7470   49(  3)   48    0     2.924 -0.047 -1.991
 1.020  16.8894  0.7470   38(  1)   61    0     2.910 -0.065 -1.991
 1.030   0.7470  0.7470   26(  0)   74    0     2.886 -1.988 -1.991
 1.040  16.8893  0.7470   12(  0)   88    0     2.976 -0.111 -1.991
 1.050   0.7470  0.7470   21(  1)   78    0     2.886 -1.988 -1.991
 1.060   0.7470  0.7470   16(  0)   84    0     2.886 -1.988 -1.991
 1.070  10.9486  0.7470   20(  0)   80    0     3.737 -1.912  0.095
 1.080  16.8891  0.7470   28(  1)   71    0     2.947 -0.110 -1.991
 1.090   0.7470  0.7470   46(  3)   51    0     2.886 -1.988 -1.991
 1.100  22.8797  0.7470   40(  0)   60    0     2.931 -0.234 -1.991
> # we need many more iterations for reasonable results
> ## Not run: 
> ##D logreg.savefit7 <- logreg(select = 7, oldfit = fit1, mc.control=
> ##D     logreg.mc.control(nburn=1000, niter=100000, hyperpars=log(2)))
> ## End(Not run)
> #
> plot(fit7)
> # use logreg.savefit7 for the results with 100000 iterations
> plot(logreg.savefit7)
> 
> 
> 
> 
> cleanEx(); ..nameEx <- "logreg.anneal.control"
> 
> ### * logreg.anneal.control
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: logreg.anneal.control
> ### Title: Control for Logic Regression
> ### Aliases: logreg.anneal.control
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> myannealcontrol <- logreg.anneal.control(start = 2, end = -2, iter = 50000, update = 1000)
> 
> 
> 
> cleanEx(); ..nameEx <- "logreg.mc.control"
> 
> ### * logreg.mc.control
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: logreg.mc.control
> ### Title: Control for Logic Regression
> ### Aliases: logreg.mc.control
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> mymccontrol <- logreg.mc.control(nburn = 500, niter = 500000, update = 25000,
+     hyperpars = log(2), output = -2)
> 
> 
> 
> cleanEx(); ..nameEx <- "logreg.myown"
> 
> ### * logreg.myown
> 
> flush(stderr()); flush(stdout())
logreg.myown            package:LogicReg            R Documentation

Writing your own Logic Regression scoring function

Description:

     Help file for writing your own scoring function for 'logreg'!

Usage:

     logreg.myown()

Details:

     You can write your own scoring function for 'logreg'! This may be
     useful if you have a model other than the ones we have already
     programmed in.

     Essentially you need to provide two routines in the file
     My_own_scoring.f:

     (i) a routine My_own_fitting which fits your model: it provides a
     coefficient (beta) for each of the logic trees and a score for
     how good the model is. Low scores are good. (So add a minus sign
     if your score is a log-likelihood.)

     (ii) a routine My_own_scoring which, given the betas, provides
     the score of your model. (If you do not use cross-validation,
     this second routine is not needed, though some dummy routine
     should still be provided to satisfy the compiler.)

     After recompilation, you can fit your model using the option
     'type = 0' in 'logreg'. Below we give an example: a version of
     the My.own functions for conditional logistic regression, which
     is also provided as condlogic.ff with the downloaded files.

     PROGRAMMING DETAILS

     Below is a list of the variables that are passed on. Most of them
     are as you would expect: response, predictors (binary ones and
     continuous ones), number of cases, number of predictors.
     In addition there are two columns, 'dcph' and 'weight', that can
     either be used to pass on an auxiliary variable for each case
     (discrete for 'dcph', continuous for 'weight'), or even some
     overall auxiliary variables, as these numbers are not used
     anywhere else. If you do not need any of these variables, just
     ignore them!

     'prtr': the predictions of the logic trees in the current model:
          an integer matrix of size 'n1' times 'ntr', although only
          the first 'nop' columns contain useful information.

     'rsp': the response variable: a real (single precision) vector of
          length 'n1'.

     'dcph': censoring times: an integer vector of length 'n1'. This
          could be used as an auxiliary (integer) vector, as it is
          just passed on. (There is no check that this is a 0/1
          variable when you use your own scoring function.) For
          example, you could use it to pass on something like cluster
          membership.

     'weight': weights for the cases: a real vector of length 'n1'.
          This could be used as an auxiliary (real) vector, as it is
          just passed on. There is no check that these numbers are
          positive when you choose your own scoring function.

     'ordrs': the order (by response size) of the cases: an integer
          vector of length 'n1'. For the case with the smallest
          response this is 1, for the second smallest 2, and so on.
          Ties are resolved arbitrarily. Always computed, although
          only used for proportional hazards models. Use it as you
          wish.

     'n1': the total number of cases in the data.

     'ntr': the number of logic trees ALLOWED in the model.

     'nop': the number of logic trees in the CURRENT model. The
          subroutines should work if 'nop' is 0.

     'wh': the index of the tree that was edited in the last move,
          i.e. the column of 'prtr' that has changed since the last
          call.

     'nsep': the number of variables that get a separate parameter.
          The subroutines should work if 'nsep' is 0.

     'seps': array of those separate variables: a single precision
          matrix of size 'nsep' times 'n1'. Note that 'seps' and
          'prtr' are stored in different directions.

     From My_own_fitting you should return:

     'betas': a vector of parameters of the model that you fit.
          'betas(0)' should be the parameter for the intercept,
          'betas(1:nsep)' the parameters for the continuous variables
          in 'seps', and 'betas((nsep+1):(nsep+nop))' the parameters
          for the binary trees in 'prtr'. If you have more parameters,
          use 'dcph' or 'weight'; those variables will not be printed,
          however.

     'score': whatever score you assign to your model. Small should be
          good (e.g. deviance or minus the log-likelihood).

     'reject': an indicator of whether to reject the proposed move
          *regardless* of the score (for example when an iteration
          needed to determine the score failed to converge): 0 = the
          move is OK, 1 = reject the move. Set this to 0 if there is
          no such condition. You are allowed to change the values of
          'dcph' and 'weight'.

     For My_own_scoring the additional input is:

     'betas': the coefficients.

     You should return:

     'score': whatever score you assign to your model. Small should be
          good (e.g. deviance or minus the log-likelihood). If the
          model "crashes", you should simply return a very large
          number.

     While we try to prevent models from becoming singular, it is
     possible that a singular or degenerate model is passed on for
     evaluation. In My_own_fitting you can reject such a model with
     'reject = 1'; in My_own_scoring you can pass it on with a very
     large value for 'score'.
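     To make the scoring contract concrete, here is a minimal sketch
     in R of what a My_own_scoring-style routine computes. The real
     routine must be written in Fortran 77, so this is only a
     conceptual analogue under an assumed Gaussian model with
     'nsep = 0' (no separate continuous predictors); the name my.score
     is made up for this illustration.

     # conceptual R analogue of My_own_scoring (assumed Gaussian model)
     my.score <- function(betas, prtr, rsp) {
       # linear predictor: intercept plus one coefficient per logic tree
       mu <- betas[1] + prtr %*% betas[-1]
       # minus the log-likelihood, so that small scores are good
       score <- -sum(dnorm(rsp, mean = mu, log = TRUE))
       # if the model "crashes", return a very large number
       if (!is.finite(score)) score <- 1e30
       score
     }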
     Currently My_own_scoring.f contains empty frames for the scoring
     functions; condlogic.ff contains an example with conditional
     logistic regression. The Logic Regression program is written in
     Fortran 77.

     CONDITIONAL LOGISTIC REGRESSION

     A conditional logistic regression score function is attached as
     an example of how to write your own scoring function for Logic
     Regression. Obviously you can also use it if you have conditional
     logistic data.

     Conditional logistic regression is a common model-fitting
     technique for matched case-control studies, in which each case is
     matched with one or more controls. (In conditional logistic
     regression several cases could be matched to several controls; in
     the implementation provided here only one case can be matched
     with each group of controls.) Conditional logistic regression
     models are parameterized like regular logistic regression models,
     except that the intercept is no longer identifiable. See, for
     example, Breslow and Day, Volume 1 (1980, Statistical Methods in
     Cancer Research, International Agency for Research on Cancer,
     Lyon) for details.

     Conditional logistic regression models are most easily fit using
     a stratified proportional hazards model. (If there is one-to-one
     case-control matching they can also be fit using logistic
     regression, but that method breaks down if there is more than one
     control per case.) Each group of a case and its controls is one
     stratum. All cases get an arbitrary event time of 1.00, and all
     controls get a censoring time of 2.00.

     In our implementation we use the response column to indicate the
     matching. For all controls this column is 0; for a case it is k,
     indicating that the next k records are the matched controls for
     the current case. Thus, we order our cases so that each case is
     followed by its controls. Cases with a negative response are put
     in a stratum -1, which is not used in any computations. This has
     implications for cross-validation; see below.

     In My_own_fitting and My_own_scoring we first allocate various
     vectors (strata, index, censoring variable) that are local, as
     well as some work arrays that are used by our fitting routines.
     (We need to set some of the parameters for that; see the help
     page of 'logreg' for details.) We then define 'idx(j)=j' for
     'j=1,n1', and we define the 'strata' and 'delta' vectors. We use
     slightly modified versions of the proportional hazards routines
     that are already used elsewhere in the Logic Regression program,
     to include stratification. After the model is fitted, we assign
     minus the partial likelihood to 'score(1)' and (for
     My_own_fitting) we pass on the betas. Recompile after replacing
     My_own_scoring.f by condlogic.ff.

     The permutation and null model versions are not directly usable
     (we could do some permutation tests, but they would require more
     programming), but we can use cross-validation. Obviously we
     should keep cases and controls matched. To that end, we would run
     permutation with a negative seed (see 'logreg'), and we would
     take care ourselves that the case-control groups are in a random
     order and that every block has the same number of records. We
     achieve the latter by adding some records with response -1. In
     particular, suppose that we have 19 pairs of case-control data
     (one control per case) and that we want to do 3-fold
     cross-validation. We would permute the sequence of the 19 pairs,
     add two records with response -1 after the 13th pair, and add two
     records with response -1 at the end of the file, so that the
     total data file has 42 records.
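     As a quick check of that bookkeeping, the following base-R sketch
     (my own illustration, not package code) builds the response
     column for the example above, using k = 1 for every case:

     resp <- rep(c(1, 0), 19)                     # each case (k = 1) followed by its single control
     resp <- append(resp, c(-1, -1), after = 26)  # two dummy records after the 13th pair (record 26)
     resp <- c(resp, -1, -1)                      # and two more at the end of the file
     length(resp)                                 # 42 records, i.e. three CV blocks of 14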
Author(s):

     Ingo Ruczinski <ingo@jhu.edu> and Charles Kooperberg
     <clk@fhcrc.org>

References:

     Ruczinski I, Kooperberg C, LeBlanc ML (2003). Logic Regression,
     Journal of Computational and Graphical Statistics, 12, 475-511.

     Ruczinski I, Kooperberg C, LeBlanc ML (2002). Logic Regression -
     methods and software. Proceedings of the MSRI workshop on
     Nonlinear Estimation and Classification (Eds: D. Denison, M.
     Hansen, C. Holmes, B. Mallick, B. Yu), Springer: New York,
     333-344.

     Kooperberg C, Ruczinski I, LeBlanc ML, Hsu L (2001). Sequence
     Analysis using Logic Regression, Genetic Epidemiology, 21,
     S626-S631.

     Selected chapters from the dissertation of Ingo Ruczinski,
     available from

See Also:

     'logreg'

Examples:

     logreg.myown() # displays this help file
     help(logreg.myown) # equivalent
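For orientation, this last sketch (plain R with made-up sizes; linpred
is a hypothetical name) spells out how the 'betas' layout described
under PROGRAMMING DETAILS combines with 'prtr' and 'seps' to form a
linear predictor:

     n1 <- 5; nsep <- 1; nop <- 2
     prtr  <- matrix(rbinom(n1 * nop, 1, 0.5), nrow = n1)  # n1 x ntr matrix of 0/1 tree predictions
     seps  <- matrix(rnorm(nsep * n1), nrow = nsep)        # nsep x n1: stored in the other direction
     betas <- c(0.5, 1.2, -2.1, 0.4)                       # betas(0), betas(1:nsep), betas((nsep+1):(nsep+nop))
     linpred <- betas[1] +
       t(seps) %*% betas[1 + seq_len(nsep)] +              # separate continuous variables
       prtr %*% betas[1 + nsep + seq_len(nop)]             # logic trees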
> 
> ### Name: logreg.myown
> ### Title: Writing your own Logic Regression scoring function
> ### Aliases: logreg.myown
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> logreg.myown() # displays this help file
> help(logreg.myown) # equivalent
> 
> 
> 
> cleanEx(); ..nameEx <- "logreg.savefit1"
> 
> ### * logreg.savefit1
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: logreg.savefit1
> ### Title: Sample results for Logic Regression
> ### Aliases: logreg.savefit1 logreg.savefit2 logreg.savefit3
> ###   logreg.savefit4 logreg.savefit5 logreg.savefit6 logreg.savefit7
> ### Keywords: datasets
> 
> ### ** Examples
> 
> data(logreg.savefit1)
> print(logreg.savefit1$call)
logreg(resp = logreg.testdat[, 1], bin = logreg.testdat[, 2:21],
    type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> data(logreg.savefit2)
> print(logreg.savefit2$call)
logreg(resp = logreg.testdat[, 1], bin = logreg.testdat[, 2:21],
    type = 2, select = 2, ntrees = c(1, 2), nleaves = c(1, 7),
    anneal.control = myanneal2)
> data(logreg.savefit3)
> print(logreg.savefit3$call)
logreg(resp = logreg.testdat[, 1], bin = logreg.testdat[, 2:21],
    type = 2, select = 3, ntrees = c(1, 2), nleaves = c(1, 7),
    anneal.control = myanneal2)
> data(logreg.savefit4)
> print(logreg.savefit4$call)
logreg(resp = logreg.testdat[, 1], bin = logreg.testdat[, 2:21],
    type = 2, select = 4, ntrees = 2, anneal.control = myanneal2)
> data(logreg.savefit5)
> print(logreg.savefit5$call)
logreg(resp = logreg.testdat[, 1], bin = logreg.testdat[, 2:21],
    type = 2, select = 5, ntrees = c(1, 2), nleaves = c(1, 7),
    oldfit = logregsavefit2, anneal.control = myanneal2)
> data(logreg.savefit6)
> print(logreg.savefit6$call)
logreg(select = 6, ntrees = 2, nleaves = c(1, 12), oldfit = logregsavefit1)
> data(logreg.savefit7)
> print(logreg.savefit7$call)
logreg(select = 7, oldfit = logregsavefit1, mc.control = logreg.mc.control(nburn = 1000,
    niter = 1e+05, hyperpars = log(2)))
> 
> 
> 
> cleanEx(); ..nameEx <- "logreg.testdat"
> 
> ### * logreg.testdat
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: logreg.testdat
> ### Title: Test data for Logic Regression
> ### Aliases: logreg.testdat
> ### Keywords: datasets
> 
> ### ** Examples
> 
> data(logreg.testdat)
> 
> 
> 
> cleanEx(); ..nameEx <- "logreg.tree.control"
> 
> ### * logreg.tree.control
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: logreg.tree.control
> ### Title: Control for logreg
> ### Aliases: logreg.tree.control
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> mytreecontrol <- logreg.tree.control(treesize = 16, minmass = 10)
> 
> 
> 
> cleanEx(); ..nameEx <- "logregmodel"
> 
> ### * logregmodel
> 
> flush(stderr()); flush(stdout())
logregmodel            package:LogicReg            R Documentation

Format of class logregmodel

Description:

     This help file contains a description of the format of class
     logregmodel.

Usage:

     logregmodel()

Value:

     An object of class logregmodel has the following components:

     ntrees: the number of trees in the current model.

     nleaves: the number of leaves for the fitted model.

     coef: the coefficients for this model.

     score: the score of the fitted model.

     trees: a list of 'ntrees' objects of class 'logregtree'.

Author(s):

     Ingo Ruczinski <ingo@jhu.edu> and Charles Kooperberg
     <clk@fhcrc.org>
References:

     Ruczinski I, Kooperberg C, LeBlanc ML (2003). Logic Regression,
     Journal of Computational and Graphical Statistics, 12, 475-511.

     Ruczinski I, Kooperberg C, LeBlanc ML (2002). Logic Regression -
     methods and software. Proceedings of the MSRI workshop on
     Nonlinear Estimation and Classification (Eds: D. Denison, M.
     Hansen, C. Holmes, B. Mallick, B. Yu), Springer: New York,
     333-344.

     Kooperberg C, Ruczinski I, LeBlanc ML, Hsu L (2001). Sequence
     Analysis using Logic Regression, Genetic Epidemiology, 21,
     S626-S631.

     Selected chapters from the dissertation of Ingo Ruczinski,
     available from

See Also:

     'logreg', 'plot.logregmodel', 'print.logregmodel', 'logregtree'

Examples:

     logregmodel() # displays this help file
     help(logregmodel) # equivalent
> 
> ### Name: logregmodel
> ### Title: Format of class logregmodel
> ### Aliases: logregmodel
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> logregmodel() # displays this help file
> help(logregmodel) # equivalent
> 
> 
> 
> cleanEx(); ..nameEx <- "logregtree"
> 
> ### * logregtree
> 
> flush(stderr()); flush(stdout())
logregtree            package:LogicReg            R Documentation

Format of class logregtree

Description:

     This help file contains a description of the format of class
     logregtree.

Usage:

     logregtree()

Details:

     When storing trees, we number the locations of the nodes using
     the following scheme (this is an example for a tree with at most
     8 _terminal_ nodes, but the generalization should be obvious):

                                 1
                 2                               3
             4       5                       6       7
            8 9    10 11                   12 13   14 15

     Each node may or may not be present in the current tree. If it is
     present, it can contain an operator ("and" or "or"), in which
     case it has two child nodes, or it can contain a variable, in
     which case the node is a terminal node. It is also possible that
     the node does not exist (as the user only specifies the maximum
     tree size, not the tree size that is actually fitted).

     Output files have one line for each node. Each line contains 5
     numbers:

     1. the node number.
     2. does this node contain an "and" (1), an "or" (2), or a
        variable (3), or is the node empty (0)?

     3. if the node contains a variable, which one is it? E.g. if this
        number is 3 the node contains X3.

     4. if the node contains a variable, does it contain the regular
        variable (0) or its complement (1)?

     5. is the node empty (0) or not (1)? (This information is
        redundant with the second number.)

     Example: the tree

                          AND
              OR                       OR
         OR        OR             X20       OR
       X17 X12   X3 X13c                  X2  X1

     is represented as

      1   1   0   0   1
      2   2   0   0   1
      3   2   0   0   1
      4   2   0   0   1
      5   2   0   0   1
      6   3  20   0   1
      7   2   0   0   1
      8   3  17   0   1
      9   3  12   0   1
     10   3   3   0   1
     11   3  13   1   1
     12   0   0   0   0
     13   0   0   0   0
     14   3   2   0   1
     15   3   1   0   1

Value:

     An object of class logregtree is typically a substructure of an
     object of the class 'logregmodel'. It will typically be the
     result of using the fitting function 'logreg'. An object of class
     logregtree has the following components:

     whichtree: the sequence number of the current tree within the
          model.

     coef: the coefficients of this tree.

     trees: a matrix (data.frame) with five columns; see above for the
          format.

Author(s):

     Ingo Ruczinski <ingo@jhu.edu> and Charles Kooperberg
     <clk@fhcrc.org>

References:

     Ruczinski I, Kooperberg C, LeBlanc ML (2003). Logic Regression,
     Journal of Computational and Graphical Statistics, 12, 475-511.

     Ruczinski I, Kooperberg C, LeBlanc ML (2002). Logic Regression -
     methods and software. Proceedings of the MSRI workshop on
     Nonlinear Estimation and Classification (Eds: D. Denison, M.
     Hansen, C. Holmes, B. Mallick, B. Yu), Springer: New York,
     333-344.

     Selected chapters from the dissertation of Ingo Ruczinski,
     available from

See Also:

     'logreg', 'plot.logregtree', 'print.logregtree', 'logregmodel'

Examples:

     logregtree() # displays this help file
     help(logregtree) # equivalent
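To make the five-column encoding concrete, here is a small
illustrative decoder (my own sketch in R, not a LogicReg function;
decode.node is a made-up name):

     decode.node <- function(r) {
       # r = one row of the tree matrix: node, type, variable, complement, present
       if (r[5] == 0) return(paste("node", r[1], "is empty"))
       txt <- switch(r[2],
         "and",                                             # type 1: operator "and"
         "or",                                              # type 2: operator "or"
         paste0(if (r[4] == 1) "not " else "", "X", r[3]))  # type 3: (possibly complemented) variable
       paste("node", r[1], "contains", txt)
     }
     decode.node(c(6, 3, 20, 0, 1))   # "node 6 contains X20"
     decode.node(c(11, 3, 13, 1, 1))  # "node 11 contains not X13"
     decode.node(c(12, 0, 0, 0, 0))   # "node 12 is empty"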
> 
> ### Name: logregtree
> ### Title: Format of class logregtree
> ### Aliases: logregtree
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> logregtree() # displays this help file
> help(logregtree) # equivalent
> 
> 
> 
> cleanEx(); ..nameEx <- "plot.logreg"
> 
> ### * plot.logreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.logreg
> ### Title: Plots for Logic Regression
> ### Aliases: plot.logreg
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1,logreg.savefit2,logreg.savefit3,logreg.savefit4,
+     logreg.savefit5,logreg.savefit6,logreg.savefit7)
> #
> # fit a single model
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> # the best score should be in the 0.96-0.98 range
> plot(logreg.savefit1)
> #
> # fit multiple models
> # myanneal2 <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 0)
> # logreg.savefit2 <- logreg(select = 2, ntrees = c(1,2), nleaves =c(1,7),
> #                oldfit = logreg.savefit1, anneal.control = myanneal2)
> plot(logreg.savefit2)
> # After an initial steep decline, the scores only get slightly better
> # for models with more than four leaves and two trees.
> #
> # cross validation
> # logreg.savefit3 <- logreg(select = 3, oldfit = logreg.savefit2)
> plot(logreg.savefit3)
> # 4 leaves, 2 trees should give the best test set score
> #
> # null model test
> # logreg.savefit4 <- logreg(select = 4, anneal.control = myanneal2, oldfit = logreg.savefit1)
> plot(logreg.savefit4)
> # A histogram of the 25 scores obtained from the permutation test. Also shown
> # are the scores for the best scoring model with one logic tree, and the null
> # model (no tree). As the permutation scores are not even close to the score
> # of the best model with one tree (fit on the original data), there is strong
> # evidence against the null hypothesis that there was no signal in the data.
> #
> # Permutation tests
> # logreg.savefit5 <- logreg(select = 5, oldfit = logreg.savefit2)
> plot(logreg.savefit5)
> # The permutation scores improve until we condition on a model with two
> # trees and four leaves, and then do not change very much anymore. This
> # indicates that the best model has indeed four leaves.
> #
> # a greedy sequence
> # logreg.savefit6 <- logreg(select = 6, ntrees = 2, nleaves =c(1,12), oldfit = logreg.savefit1)
> plot(logreg.savefit6)
empty tree
empty tree
empty tree
empty tree
> # Monte Carlo Logic Regression
> # logreg.savefit7 <- logreg(select = 7, oldfit = logreg.savefit1, mc.control=
> #     logreg.mc.control(nburn=1000, niter=100000, hyperpars=log(2)))
> plot(logreg.savefit7)
> 
> 
> 
> cleanEx(); ..nameEx <- "plot.logregmodel"
> 
> ### * plot.logregmodel
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.logregmodel
> ### Title: Plots for Logic Regression
> ### Aliases: plot.logregmodel
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1)
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> # plot(logreg.savefit1)
> plot(logreg.savefit1$model) # does the same
> 
> 
> 
> cleanEx(); ..nameEx <- "plot.logregtree"
> 
> ### * plot.logregtree
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.logregtree
> ### Title: A plot of one Logic Regression tree.
> ### Aliases: plot.logregtree
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit2)
> #
> # myanneal2 <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 0)
> # logreg.savefit2 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                type = 2, select = 2, ntrees = c(1,2), nleaves =c(1,7),
> #                anneal.control = myanneal2)
> for(i in 1:logreg.savefit2$nmodels) for(j in 1:logreg.savefit2$alltrees[[i]]$ntrees[1]){
+    plot.logregtree(logreg.savefit2$alltrees[[i]]$trees[[j]])
+    title(main=paste("model",i,"tree",j))
+ }
> 
> 
> 
> cleanEx(); ..nameEx <- "predict.logreg"
> 
> ### * predict.logreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.logreg
> ### Title: Predicted values Logic Regression
> ### Aliases: predict.logreg
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1,logreg.savefit2,logreg.savefit6,logreg.testdat)
> #
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21], type = 2,
> #                select = 1, ntrees = 2, anneal.control = myanneal)
> z1 <- predict(logreg.savefit1)
> plot(z1, logreg.testdat[,1]-z1, xlab="fitted values", ylab="residuals")
> # myanneal2 <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 0)
> # logreg.savefit2 <- logreg(select = 2, nleaves =c(1,7), oldfit = logreg.savefit1,
> #                anneal.control = myanneal2)
> z2 <- predict(logreg.savefit2)
> # logreg.savefit6 <- logreg(select = 6, ntrees = 2, nleaves =c(1,12), oldfit = logreg.savefit1)
> z6 <- predict(logreg.savefit6, msz = 3:5)
> 
> 
> 
> 
> cleanEx(); ..nameEx <- "print.logreg"
> 
> ### * print.logreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: print.logreg
> ### Title: Prints Logic Regression Output
> ### Aliases: print.logreg
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1,logreg.savefit2,logreg.savefit3,logreg.savefit4,
+     logreg.savefit5,logreg.savefit6)
> #
> # fit a single model
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> # the best score should be in the 0.96-0.98 range
> print(logreg.savefit1)
score 0.966
-1.3 * (((X14 or (not X5)) and ((not X1) and (not X2))) and (((not X3) or X1) or ((not X20) and (not X2))))
+2.15 * (((not X4) or ((not X13) and (not X11))) and (not X3))
> #
> # fit multiple models
> # myanneal2 <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 0)
> # logreg.savefit2 <- logreg(select = 2, ntrees = c(1,2), nleaves =c(1,7),
> #                oldfit = logreg.savefit1, anneal.control = myanneal2)
> print(logreg.savefit2)
1 trees with 1 leaves: score is 1.15
-1.91 * X3
1 trees with 2 leaves: score is 1.048
+2.13 * ((not X3) and (not X4))
1 trees with 3 leaves: score is 1.046
+2.14 * ((not X3) and ((not X13) or (not X4)))
1 trees with 4 leaves: score is 1.042
+2.14 * ((((not X13) and (not X11)) or (not X4)) and (not X3))
1 trees with 5 leaves: score is 1.042
+2.14 * ((not X3) and ((not X4) or ((not X13) and (not X11))))
1 trees with 6 leaves: score is 1.042
-2.14 * (X3 or (X4 and (X13 or (not X19))))
1 trees with 7 leaves: score is 1.04
+2.15 * ((((not X4) or (not X13)) and (not X3)) or (((not X6) and X14) and ((not X1) and (not X12))))
2 trees with 2 leaves: score is 1.117
+1.89 * (not X3) -0.904 * X4
2 trees with 3 leaves: score is 1.033
+0.401 * X1 +2.13 * ((not X3) and (not X4))
2 trees with 4 leaves: score is 0.988
-1.11 * ((not X1) and (not X2)) -2.12 * (X3 or X4)
2 trees with 5 leaves: score is 0.982
+1.22 * ((X2 or X1) or X20) +2.12 * ((not X4) and (not X3))
2 trees with 6 leaves: score is 0.979
+2.13 * ((not X3) and (not X4)) +1.23 * ((X2 or X1) or (X20 and X3))
2 trees with 7 leaves: score is 0.978
+2.13 * ((not X3) and (not X4)) -1.29 * (((not X7) or ((not X5) or (not X15))) and ((not X1) and (not X2)))
> # After an initial steep decline, the scores only get slightly better
> # for models with more than four leaves and two trees.
> #
> # cross validation
> # logreg.savefit3 <- logreg(select = 3, oldfit = logreg.savefit2)
> print(logreg.savefit3)
    ntree nleaf train.ave    train.sd  cv/test cv/test.sd
10      1     1 1.1498841 0.016778885 1.167571 0.15626639
20      1     2 1.0478746 0.011226685 1.069748 0.10832190
30      1     3 1.0449851 0.010941456 1.086064 0.08148609
40      1     4 1.0415285 0.011277524 1.073501 0.11002902
50      1     5 1.0407865 0.010747466 1.103262 0.10778810
60      1     6 1.0375888 0.016061698 1.081281 0.08087984
70      1     7 1.0389121 0.012111442 1.083661 0.08286943
80      2     2 1.0742266 0.029866724 1.113556 0.14902853
90      2     3 1.0331042 0.009432286 1.069458 0.09146249
100     2     4 0.9880183 0.009248304 1.028298 0.08375898
110     2     5 0.9819231 0.008823874 1.034018 0.09038718
120     2     6 0.9780617 0.009782010 1.045832 0.09437465
130     2     7 0.9752644 0.008960632 1.059286 0.10264426
> # 4 leaves, 2 trees should give the best test set score
> #
> # null model test
> # logreg.savefit4 <- logreg(select = 4, anneal.control = myanneal2, oldfit = logreg.savefit1)
> print(logreg.savefit4)
Null Score 1.494 ; best score 0.969
Summary 25 Randomized scores
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
  1.398   1.411   1.423   1.421   1.428   1.444
0 randomized scores ( 0 %) are better than the best score
> # A summary of the permutation test
> #
> # Permutation tests
> # logreg.savefit5 <- logreg(select = 5, oldfit = logreg.savefit2)
> print(logreg.savefit5)
25 randomizations
   trees leaves     null     start      best rand:  min 1st Qu median   mean
1      1      1 1.494099 1.1500238 0.9707544      1.1080 1.1140 1.1190 1.1180
2      1      2 1.494099 1.0480632 0.9707544      0.9896 1.0140 1.0190 1.0170
3      1      3 1.494099 1.0456557 0.9707544      0.9987 1.0120 1.0160 1.0150
4      1      4 1.494099 1.0420860 0.9707544      1.0040 1.0080 1.0120 1.0120
5      1      5 1.494099 1.0420860 0.9707544      0.9996 1.0080 1.0110 1.0110
6      1      6 1.494099 1.0420861 0.9707544      0.9867 1.0140 1.0180 1.0160
7      1      7 1.494099 1.0400850 0.9707544      1.0040 1.0120 1.0160 1.0170
8      2      2 1.494099 1.1168472 0.9707544      1.0040 1.0090 1.0180 1.0170
9      2      3 1.494099 1.0333284 0.9707544      0.9783 0.9998 1.0030 1.0030
10     2      4 1.494099 0.9884828 0.9707544      0.9542 0.9667 0.9720 0.9709
11     2      5 1.494099 0.9823242 0.9707544      0.9489 0.9643 0.9677 0.9679
12     2      6 1.494099 0.9791870 0.9707544      0.9524 0.9630 0.9674 0.9650
13     2      7 1.494099 0.9778322 0.9707544      0.9539 0.9670 0.9702 0.9713
   3rd Qu    max % < best
1  1.1210 1.1310        0
2  1.0230 1.0260        0
3  1.0200 1.0250        0
4  1.0160 1.0230        0
5  1.0150 1.0280        0
6  1.0200 1.0290        0
7  1.0220 1.0390        0
8  1.0220 1.0320        0
9  1.0090 1.0150        0
10 0.9755 0.9845       40
11 0.9714 0.9795       68
12 0.9686 0.9757       84
13 0.9779 0.9894       56
> # A table summarizing the permutation tests
> #
> # a greedy sequence
> # logreg.savefit6 <- logreg(select = 6, ntrees = 2, nleaves =c(1,12), oldfit = logreg.savefit1)
> print(logreg.savefit6)
2 trees with 0 leaves: score is 1.494
+0 * 1
2 trees with 1 leaves: score is 1.15
-1.91 * X3
2 trees with 2 leaves: score is 1.048
-2.13 * (X3 or X4)
2 trees with 3 leaves: score is 1.033
-2.13 * (X3 or X4) +0.401 * X1
2 trees with 4 leaves: score is 0.988
-2.12 * (X3 or X4) +1.11 * (X1 or X2)
2 trees with 5 leaves: score is 0.982
-2.12 * (X3 or X4) +1.22 * ((X1 or X20) or X2)
2 trees with 6 leaves: score is 0.981
-2.14 * (X3 or (X4 and X13)) +0 * ((X1 or X20) or X2)
2 trees with 7 leaves: score is 0.98
+0 * (X3 or (X4 and X13)) +0 * ((X1 or X20) or (X2 and (not X4)))
> 
> 
> 
> cleanEx(); ..nameEx <- "print.logregmodel"
> 
> ### * print.logregmodel
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: print.logregmodel
> ### Title: Prints Logic Regression Formula
> ### Aliases: print.logregmodel
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1)
> #
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> print(logreg.savefit1$model)
-1.3 * (((X14 or (not X5)) and ((not X1) and (not X2))) and (((not X3) or X1) or ((not X20) and (not X2))))
+2.15 * (((not X4) or ((not X13) and (not X11))) and (not X3))
> 
> 
> 
> cleanEx(); ..nameEx <- "print.logregtree"
> 
> ### * print.logregtree
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: print.logregtree
> ### Title: Prints Logic Regression Formula
> ### Aliases: print.logregtree
> ### Keywords: logic methods nonparametric tree
> 
> ### ** Examples
> 
> data(logreg.savefit1)
> #
> # myanneal <- logreg.anneal.control(start = -1, end = -4, iter = 25000, update = 1000)
> # logreg.savefit1 <- logreg(resp = logreg.testdat[,1], bin=logreg.testdat[, 2:21],
> #                type = 2, select = 1, ntrees = 2, anneal.control = myanneal)
> print(logreg.savefit1$model$trees[[1]])
(((X14 or (not X5)) and ((not X1) and (not X2))) and (((not X3) or X1) or ((not X20) and (not X2))))
> 
> 
> 
> ### *