R : Copyright 2005, The R Foundation for Statistical Computing
Version 2.1.1 (2005-06-20), ISBN 3-900051-07-0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for a HTML browser interface to help.
Type 'q()' to quit R.

> ### *
> ### > attach(NULL, name = "CheckExEnv") > assign(".CheckExEnv", as.environment(2), pos = length(search())) # base > ## add some hooks to label plot pages for base and grid graphics > setHook("plot.new", ".newplot.hook") > setHook("persp", ".newplot.hook") > setHook("grid.newpage", ".gridplot.hook") > > assign("cleanEx", + function(env = .GlobalEnv) { + rm(list = ls(envir = env, all.names = TRUE), envir = env) + RNGkind("default", "default") + set.seed(1) + options(warn = 1) + delayedAssign("T", stop("T used instead of TRUE"), + assign.env = .CheckExEnv) + delayedAssign("F", stop("F used instead of FALSE"), + assign.env = .CheckExEnv) + sch <- search() + newitems <- sch[! sch %in% .oldSearch] + for(item in rev(newitems)) + eval(substitute(detach(item), list(item=item))) + missitems <- .oldSearch[! .oldSearch %in% sch] + if(length(missitems)) + warning("items ", paste(missitems, collapse=", "), + " have been removed from the search path") + }, + env = .CheckExEnv) > assign("..nameEx", "__{must remake R-ex/*.R}__", env = .CheckExEnv) # for now > assign("ptime", proc.time(), env = .CheckExEnv) > grDevices::postscript("gnm-Examples.ps") > assign("par.postscript", graphics::par(no.readonly = TRUE), env = .CheckExEnv) > options(contrasts = c(unordered = "contr.treatment", ordered = "contr.poly")) > options(warn = 1) > library('gnm') > > assign(".oldSearch", search(), env = .CheckExEnv) > assign(".oldNS", loadedNamespaces(), env = .CheckExEnv) > cleanEx(); ..nameEx <- "Diag" > > ### * Diag > > flush(stderr()); flush(stdout()) > > ### Name: Diag > ### Title: Equality of Two or More Factors > ### Aliases: Diag > ### Keywords: models > > ### ** Examples > > row <- gl(4, 4, 16) > col <- gl(4, 1, 16) > diag4by4 <- Diag(row, col) > matrix(Diag(row, col, binary = TRUE), 4, 4) [,1] [,2] [,3] [,4] [1,] 1 0 0 0 [2,] 0 1 0 0 [3,] 0 0 1 0 [4,] 0 0 0 1 > > > > cleanEx(); ..nameEx <- "Dref" > > ### * Dref > > flush(stderr()); flush(stdout()) > > ### Name: Dref > ### Title: gnm ``plug-in'' Function to Fit Diagonal Reference Terms > ### Aliases: Dref > ### Keywords: models regression nonlinear > > ### ** Examples > > ### Examples from Clifford and Heath paper > ### (Results differ slightly - possible transcription error in > ### published data?) > set.seed(1) > data(voting) > ## reconstruct counts voting Labour/non-Labour > count <- with(voting, percentage/100 * total) > yvar <- cbind(count, voting$total - count) > > ## fit diagonal reference model with constant weights > classMobility <- gnm(yvar ~ Nonlin(Dref(origin, destination)), + family = binomial, data = voting) Running main iterations....... Done > prop.table(exp(coef(classMobility)[2:3])) Dref(origin, destination).origin Dref(origin, destination).destination 0.4372474 0.5627526 > > ## create factors indicating movement in and out of salariat (class 1) > upward <- with(voting, origin != 1 & destination == 1) > downward <- with(voting, origin == 1 & destination != 1) > > ## fit separate weights for the "socially mobile" groups > socialMobility <- gnm(yvar ~ Nonlin(Dref(origin, destination, + formula = ~ 1 + downward + upward)), + family = binomial, data = voting) Running main iterations....... 
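## A minimal sketch, assuming the 'classMobility' fit above: prop.table() on a
## plain vector just divides by its sum, so the origin and destination weights
## reported above are the exponentiated Dref coefficients rescaled to sum to 1.
w <- exp(coef(classMobility)[2:3])
w / sum(w)    # identical to prop.table(exp(coef(classMobility)[2:3]))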
Done > prop.table(exp(coef(socialMobility)[c(4, 7)] + coef(socialMobility)[c(2, 5)])) Dref(origin, destination, formula = ~1 + downward + upward).origin.upwardTRUE 0.3900752 Dref(origin, destination, formula = ~1 + downward + upward).destination.upwardTRUE 0.6099248 > prop.table(exp(coef(socialMobility)[c(3, 6)] + coef(socialMobility)[c(2, 5)])) Dref(origin, destination, formula = ~1 + downward + upward).origin.downwardTRUE 0.6044571 Dref(origin, destination, formula = ~1 + downward + upward).destination.downwardTRUE 0.3955429 > prop.table(exp(coef(socialMobility)[c(2, 5)])) Dref(origin, destination, formula = ~1 + downward + upward).origin.(Intercept) 0.4045022 Dref(origin, destination, formula = ~1 + downward + upward).destination.(Intercept) 0.5954978 > > ## fit separate weights for downwardly mobile groups only > downwardMobility <- gnm(yvar ~ Nonlin(Dref(origin, destination, + formula = ~ 1 + downward)), + family = binomial, data = voting) Running main iterations....... Done > downwardMobility Call: gnm(formula = yvar ~ Nonlin(Dref(origin, destination, formula = ~1 + downward)), family = binomial, data = voting) Coefficients: (Intercept) -1.58578 Dref(origin, destination, formula = ~1 + downward).origin.(Intercept) 0.29561 Dref(origin, destination, formula = ~1 + downward).origin.downwardTRUE 0.90538 Dref(origin, destination, formula = ~1 + downward).destination.(Intercept) 0.70439 Dref(origin, destination, formula = ~1 + downward).destination.downwardTRUE 0.09462 Dref(origin, destination, formula = ~1 + downward).1 -0.48409 Dref(origin, destination, formula = ~1 + downward).2 0.47924 Dref(origin, destination, formula = ~1 + downward).3 -0.40588 Dref(origin, destination, formula = ~1 + downward).4 1.01270 Dref(origin, destination, formula = ~1 + downward).5 1.64207 Deviance: 18.99389 Pearson chi-squared: 17.09983 Residual df: 18 > prop.table(exp(coef(downwardMobility)[c(3, 5)] + + coef(downwardMobility)[c(2, 4)])) Dref(origin, destination, formula = ~1 + downward).origin.downwardTRUE 0.5991644 Dref(origin, destination, formula = ~1 + downward).destination.downwardTRUE 0.4008356 > prop.table(exp(coef(downwardMobility)[c(2, 4)])) Dref(origin, destination, formula = ~1 + downward).origin.(Intercept) 0.3992041 Dref(origin, destination, formula = ~1 + downward).destination.(Intercept) 0.6007959 > > ## Not run: > ##D ### Examples from Van der Slik paper > ##D ### For illustration only - data not publically available > ##D ### Using data in data.frame named 'conformity', with variables > ##D ### MCFM - mother's conformity score > ##D ### FCFF - father's conformity score > ##D ### MOPLM - a factor describing the mother's education with 7 levels > ##D ### FOPLF - a factor describing the father's education with 7 levels > ##D ### AGEM - mother's birth cohort > ##D ### MRMM - mother's traditional role model > ##D ### FRMF - father's traditional role model > ##D ### MWORK - mother's employment > ##D ### MFCM - mother's family conflict score > ##D ### FFCF - father's family conflict score > ##D > ##D set.seed(1) > ##D > ##D ## Models for mothers' conformity score as specified in Figure 1 > ##D A <- gnm(MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM + > ##D Nonlin(Dref(MOPLM, FOPLF)), family = gaussian, data = conformity, > ##D verbose = FALSE) > ##D A > ##D ## Call: > ##D ## gnm(formula = MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM + > ##D ## Nonlin(Dref(MOPLM, FOPLF)), family = gaussian, data = conformity, > ##D ## verbose = FALSE) > ##D ## > ##D ## > ##D ## Coefficients: > ##D ## AGEM MRMM FRMF 
> ##D ## 0.06364 -0.32425 -0.25324 > ##D ## MWORK MFCM Dref(MOPLM, FOPLF).MOPLM > ##D ## -0.06430 -0.06043 0.34389 > ##D ## Dref(MOPLM, FOPLF).FOPLF Dref(MOPLM, FOPLF).1 Dref(MOPLM, FOPLF).2 > ##D ## 0.65611 4.95123 4.86328 > ##D ## Dref(MOPLM, FOPLF).3 Dref(MOPLM, FOPLF).4 Dref(MOPLM, FOPLF).5 > ##D ## 4.86458 4.72342 4.43516 > ##D ## Dref(MOPLM, FOPLF).6 Dref(MOPLM, FOPLF).7 > ##D ## 4.18873 4.43379 > ##D ## > ##D ## Deviance: 425.3389 > ##D ## Pearson chi-squared: 425.3389 > ##D ## Residual df: 576 > ##D > ##D prop.table(exp(coef(A)[6:7])) ## weights as in Table 4 > ##D ## Dref(MOPLM, FOPLF).MOPLM Dref(MOPLM, FOPLF).FOPLF > ##D ## 0.4225734 0.5774266 > ##D > ##D F <- gnm(MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM + > ##D Nonlin(Dref(MOPLM, FOPLF, formula = ~1 + MFCM)), family = gaussian, > ##D data = conformity, verbose = FALSE) > ##D F > ##D ## Call: > ##D ## gnm(formula = MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM + > ##D ## Nonlin(Dref(MOPLM, FOPLF, formula = ~1 + MFCM)), family = gaussian, > ##D ## data = conformity, verbose = FALSE) > ##D ## > ##D ## > ##D ## Coefficients: > ##D ## AGEM > ##D ## 0.05818 > ##D ## MRMM > ##D ## -0.32701 > ##D ## FRMF > ##D ## -0.25772 > ##D ## MWORK > ##D ## -0.07847 > ##D ## MFCM > ##D ## -0.01694 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).MOPLM.(Intercept) > ##D ## 1.03516 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).MOPLM.MFCM > ##D ## -1.77703 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).FOPLF.(Intercept) > ##D ## -0.03516 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).FOPLF.MFCM > ##D ## 2.77703 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).1 > ##D ## 4.82477 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).2 > ##D ## 4.88066 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).3 > ##D ## 4.83969 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).4 > ##D ## 4.74849 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).5 > ##D ## 4.42019 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).6 > ##D ## 4.17956 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).7 > ##D ## 4.40819 > ##D ## > ##D ## Deviance: 420.9022 > ##D ## Pearson chi-squared: 420.9022 > ##D ## Residual df: 575 > ##D > ##D prop.table(exp(coef(F))[c(6,8)]) ## weights for MFCM = 0 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).MOPLM.(Intercept) > ##D ## 0.7446585 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).FOPLF.(Intercept) > ##D ## 0.2553415 > ##D prop.table(exp(coef(F)[c(7,9)] + coef(F)[c(6,8)])) ## weights for MFCM = 1 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).MOPLM.MFCM > ##D ## 0.02977851 > ##D ## Dref(MOPLM, FOPLF, formula = ~1 + MFCM).FOPLF.MFCM > ##D ## 0.97022149 > ## End(Not run) > > > > cleanEx(); ..nameEx <- "MPinv" > > ### * MPinv > > flush(stderr()); flush(stdout()) > > ### Name: MPinv > ### Title: Moore-Penrose Pseudoinverse of a Real-valued Matrix > ### Aliases: MPinv > ### Keywords: array > > ### ** Examples > > A <- matrix(c(1, 1, 0, + 1, 1, 0, + 2, 3, 4), 3, 3) > B <- MPinv(A) > A %*% B %*% A - A # essentially zero [,1] [,2] [,3] [1,] -1.332268e-15 -1.332268e-15 -1.554312e-15 [2,] -1.332268e-15 -1.332268e-15 -1.776357e-15 [3,] -2.775558e-17 -2.775558e-17 -4.440892e-16 > B %*% A %*% B - B # essentially zero [,1] [,2] [,3] [1,] -3.330669e-16 -3.053113e-16 2.220446e-16 [2,] -3.885781e-16 -2.775558e-16 2.220446e-16 [3,] 0.000000e+00 -6.938894e-18 -2.775558e-17 attr(,"rank") [1] 2 > attr(B, "rank") # here 2 [1] 2 > > ## now a symmetric example with diagonal submatrix to `eliminate' > A <- matrix(c(1, 0, 2, + 0, 2, 3, + 2, 3, 4), 3, 3) > B <- 
MPinv(A, eliminate = 1:2) > A %*% B %*% A - A # essentially zero [,1] [,2] [,3] [1,] 0 0 0 [2,] 0 0 0 [3,] 0 0 0 > B %*% A %*% B - B # essentially zero [,1] [,2] [,3] [1,] 0 0 0 [2,] 0 0 0 [3,] 0 0 0 attr(,"rank") [1] 3 > attr(B, "rank") # here 3 [1] 3 > > ## Not run: > ##D ## demo that eliminate can give substantial speed gains > ##D A <- diag(rnorm(100)) > ##D A <- cbind(A, matrix(rnorm(200), 100, 2)) > ##D A <- rbind(A, cbind(t(A[, 101:102]), matrix(c(1, 2, 2, 1), 2, 2))) > ##D system.time(for (i in 1:1000) B <- MPinv(A)) > ##D ## [1] 26.83 10.24 39.76 0.00 0.00 > ##D system.time(for (i in 1:1000) B <- MPinv(A, eliminate = 1:100)) > ##D ## [1] 3.49 1.37 5.04 0.00 0.00 > ## End(Not run) > > > > cleanEx(); ..nameEx <- "Mult" > > ### * Mult > > flush(stderr()); flush(stdout()) > > ### Name: Multiplicative interaction > ### Title: Specify a Multiplicative Interaction in a gnm Model Formula > ### Aliases: Mult Exp > ### Keywords: models regression nonlinear > > ### ** Examples > > set.seed(1) > ## Using 'Mult' with 'Exp' to constrain the first constituent multiplier > data(yaish) > ## Fit the "UNIDIFF" mobility model across education levels > unidiff <- gnm(Freq ~ educ:orig + educ:dest + + Mult(Exp(-1 + educ), orig:dest), + family = poisson, data = yaish) Running start-up iterations.. Running main iterations.................................. Done > > ## Not run: > ##D ## (this example can take quite a while to run) > ##D ## > ##D ## Using 'multiplicity' > 1 > ##D data(wheat) > ##D yield.scaled <- wheat$yield * sqrt(3/1000) > ##D treatment <- factor(paste(wheat$tillage, wheat$summerCrop, wheat$manure, > ##D wheat$N, sep = "")) > ##D bilinear2 <- gnm(yield.scaled ~ year + treatment + > ##D Mult(year, treatment, multiplicity = 2), > ##D family = gaussian, data = wheat) > ## End(Not run) > > > > cleanEx(); ..nameEx <- "MultHomog" > > ### * MultHomog > > flush(stderr()); flush(stdout()) > > ### Name: MultHomog > ### Title: gnm ``plug-in'' Function to Fit Multiplicative Interactions with > ### Homogeneous Effects > ### Aliases: MultHomog > ### Keywords: models regression nonlinear > > ### ** Examples > > set.seed(1) > data(occupationalStatus) > > ## Fit an association model with homogeneous row-column effects > RChomog <- gnm(Freq ~ origin + destination + Diag(origin, destination) + + Nonlin(MultHomog(origin, destination)), family = poisson, + data = occupationalStatus) Running start-up iterations.. Running main iterations....... Done > ## Deviance is 32.56, 34 d.f. > > > > cleanEx(); ..nameEx <- "Nonlin" > > ### * Nonlin > > flush(stderr()); flush(stdout()) > > ### Name: Nonlin > ### Title: Specify a Special Nonlinear Term in a Model Formula > ### Aliases: Nonlin > ### Keywords: models regression nonlinear > > ### ** Examples > > set.seed(1) > data(occupationalStatus) > > ## Fit an association model with homogeneous row-column effects > RChomog <- gnm(Freq ~ origin + destination + Diag(origin, destination) + + Nonlin(MultHomog(origin, destination)), family = poisson, + data = occupationalStatus) Running start-up iterations.. Running main iterations....... 
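## A minimal sketch, assuming the 'RChomog' fit in this section, and assuming
## that the homogeneous multiplicative effects are labelled with "MultHomog"
## in the coefficient names (the Dref and Mult terms elsewhere in this file
## are labelled analogously): origin and destination share a single set of
## scores, so only one block of coefficients should match.
coef(RChomog)[grep("MultHomog", names(coef(RChomog)))]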
Done > > > > cleanEx(); ..nameEx <- "Symm" > > ### * Symm > > flush(stderr()); flush(stdout()) > > ### Name: Symm > ### Title: Symmetric Interaction of Factors > ### Aliases: Symm > ### Keywords: models > > ### ** Examples > > if (require(gtools)) { + row <- gl(4, 4, 16) + col <- gl(4, 1, 16) + symm4by4 <- Symm(row, col) + matrix(symm4by4, 4, 4) + } else cat("Symm requires the gtools package to be installed\n") Loading required package: gtools [,1] [,2] [,3] [,4] [1,] "11" "12" "13" "14" [2,] "12" "22" "23" "24" [3,] "13" "23" "33" "34" [4,] "14" "24" "34" "44" > > > > cleanEx(); ..nameEx <- "backPain" > > ### * backPain > > flush(stderr()); flush(stdout()) > > ### Name: backPain > ### Title: Data on Back Pain Prognosis, from Anderson (1984) > ### Aliases: backPain > ### Keywords: datasets > > ### ** Examples > > set.seed(1) > data(backPain) > > ### Re-express as count data > library(nnet) > .incidence <- class.ind(backPain$pain) > .counts <- as.vector(t(.incidence)) > .rowID <- factor(t(row(.incidence))) > backPain <- backPain[.rowID, ] > backPain$pain <- C(factor(rep(levels(backPain$pain), nrow(.incidence)), + levels = levels(backPain$pain), ordered = TRUE), + treatment) > > ### Fit models described in Table 5 of Anderson (1984) > > ### Logistic family models > noRelationship <- gnm(.counts ~ pain, eliminate = ~ .rowID, + family = "poisson", data = backPain) Running main iterations. Done > > ## stereotype model > oneDimensional <- update(noRelationship, + ~ . + Mult(pain - 1, x1 + x2 + x3 - 1), + iterStart = 3) Running start-up iterations... Running main iterations............ Done > > threeDimensional <- update(noRelationship, ~ . + pain:(x1 + x2 + x3)) Running main iterations. Done > > ### Models to determine distinguishability in stereotype model > .pain <- backPain$pain > > levels(.pain)[2:3] <- paste(levels(.pain)[2:3], collapse = " | ") > fiveGroups <- update(noRelationship, + ~ . + Mult(as.ordered(.pain) - 1, + x1 + x2 + x3 - 1)) Running start-up iterations.. Running main iterations................ Done > > levels(.pain)[4:5] <- paste(levels(.pain)[4:5], collapse = " | ") > fourGroups <- update(fiveGroups) Running start-up iterations.. Running main iterations.................. Done > > levels(.pain)[2:3] <- paste(levels(.pain)[2:3], collapse = " | ") > threeGroups <- update(fourGroups) Running start-up iterations.. Running main iterations....... Iterative weights are not all finite: restarting Running start-up iterations.. Running main iterations......... 
Done > > ### Grouped continuous model, aka proportional odds model > library(MASS) > sixCategories <- polr(pain ~ x1 + x2 + x3, data = backPain) > > ### Obtain number of parameters and log-likelihoods for equivalent > ### multinomial models as presented in Anderson (1984) > logLikMultinom <- function(model){ + object <- get(model) + if (inherits(object, "gnm")) { + l <- logLik(object) + object$eliminate + c(nParameters = attr(l, "df") - object$eliminate, logLikelihood = l) + } + else + c(nParameters = object$edf, logLikelihood = -deviance(object)/2) + } > models <- c("threeDimensional", "oneDimensional", "noRelationship", + "fiveGroups", "fourGroups", "threeGroups", "sixCategories") > t(sapply(models, logLikMultinom)) nParameters logLikelihood threeDimensional 20 -149.5076 oneDimensional 12 -151.5501 noRelationship 5 -171.5287 fiveGroups 11 -151.6092 fourGroups 10 -152.7819 threeGroups 9 -154.3779 sixCategories 8 -1085.8062 > > > > cleanEx(); ..nameEx <- "barley" > > ### * barley > > flush(stderr()); flush(stdout()) > > ### Name: barley > ### Title: Jenkyn's Data on Leaf-blotch on Barley > ### Aliases: barley > ### Keywords: datasets > > ### ** Examples > > data(barley) > set.seed(1) > > ## Fit Wedderburn's logit model with variance proportional to [mu(1-mu)]^2 > logitModel <- glm(y ~ site + variety, family = wedderburn, data = barley) > fit <- fitted(logitModel) > print(sum((barley$y - fit)^2 / (fit * (1-fit))^2)) [1] 71.17401 > ## Agrees with the chi-squared value reported in McCullagh and Nelder > ## (1989, p331), which differs slightly from Wedderburn's reported value. > > ## Fit the biplot model as in Gabriel (1998, p694) > biplotModel <- gnm(y ~ -1 + Mult(site, variety, multiplicity = 2), + family = wedderburn, data = barley) Running start-up iterations.. Running main iterations......................................................... ......................... Done > barleySVD <- svd(matrix(biplotModel$predictors, 10, 9)) > A <- sweep(barleySVD$v, 2, sqrt(barleySVD$d), "*")[, 1:2] > B <- sweep(barleySVD$u, 2, sqrt(barleySVD$d), "*")[, 1:2] > ## These are essentially A and B as in Gabriel (1998, p694), from which > ## the biplot is made by > plot(rbind(A, B), pch = c(levels(barley$site), levels(barley$variety))) > > ## Fit the double-additive model as in Gabriel (1998, p697) > variety.binary <- factor(match(barley$variety, c(2,3,6), nomatch = 0) > 0, + labels = c("rest", "2,3,6")) > doubleAdditive <- gnm(y ~ variety + Mult(site, variety.binary), + family = wedderburn, data = barley) Running start-up iterations.. Running main iterations..................... Done > ## It is unclear why Gabriel's chi-squared statistics differ slightly > ## from the ones produced in these fits. Possibly Gabriel adjusted the > ## data somehow prior to fitting? > > > > cleanEx(); ..nameEx <- "cautres" > > ### * cautres > > flush(stderr()); flush(stdout()) > > ### Name: cautres > ### Title: Data on Class, Religion and Vote in France > ### Aliases: cautres > ### Keywords: datasets > > ### ** Examples > > set.seed(1) > data(cautres) > > ## Fit a "double UNIDIFF" model with the religion-vote and class-vote > ## interactions both modulated by nonnegative election-specific multipliers > doubleUnidiff <- gnm(Freq ~ election:vote + election:class:religion + + Mult(Exp(-1 + election), religion:vote) + + Mult(Exp(-1 + election), class:vote), + family = poisson, data = cautres) Running start-up iterations.. Running main iterations........ 
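## A minimal sketch, assuming the 'doubleUnidiff' fit in this section: the
## election multipliers enter through Exp(), so the (nonnegative) multipliers
## themselves are the exponentiated coefficients; only their ratios -- i.e.
## differences on the log scale, as returned by getContrasts() below -- are
## identified.
exp(coef(doubleUnidiff)[grep("Mult2.*election", names(coef(doubleUnidiff)))])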
Done > ## Deviance should be 133.04 > > ## Examine the multipliers of the class-vote log odds ratios > coefs.of.interest <- grep("Mult2.*election", names(coef(doubleUnidiff))) > coef(doubleUnidiff)[coefs.of.interest] Mult2.Factor1.election1 Mult2.Factor1.election2 Mult2.Factor1.election3 -0.29536858 0.38636833 0.15400528 Mult2.Factor1.election4 0.06645946 > ## Mult2.Factor1.election1 Mult2.Factor1.election2 > ## -0.5724370 0.1092972 > ## Mult2.Factor1.election3 Mult2.Factor1.election4 > ## -0.1230682 -0.2105843 > getContrasts(doubleUnidiff, coefs.of.interest) [[1]] estimate se Mult2.Factor1.election1 -0.36182804 0.2534753 Mult2.Factor1.election2 0.31990886 0.1320022 Mult2.Factor1.election3 0.08754582 0.1446833 Mult2.Factor1.election4 0.00000000 0.0000000 > ## estimate se > ## Mult2.Factor1.election1 -0.3618399 0.2534762 > ## Mult2.Factor1.election2 0.3198951 0.1320034 > ## Mult2.Factor1.election3 0.0875308 0.1446842 > ## Mult2.Factor1.election4 0.0000000 0.0000000 > > ## Same thing but with election 1 as reference category: > getContrasts(doubleUnidiff, rev(coefs.of.interest)) [[1]] estimate se Mult2.Factor1.election4 0.3618280 0.2534753 Mult2.Factor1.election3 0.4493739 0.2473520 Mult2.Factor1.election2 0.6817369 0.2401642 Mult2.Factor1.election1 0.0000000 0.0000000 > ## estimate se > ## Mult2.Factor1.election4 0.3618399 0.2534746 > ## Mult2.Factor1.election3 0.4493707 0.2473524 > ## Mult2.Factor1.election2 0.6817351 0.2401645 > ## Mult2.Factor1.election1 0.0000000 0.0000000 > > > > cleanEx(); ..nameEx <- "checkEstimable" > > ### * checkEstimable > > flush(stderr()); flush(stdout()) > > ### Name: checkEstimable > ### Title: Check Whether One or More Parameter Combinations in a gnm Model > ### are Identified > ### Aliases: checkEstimable > ### Keywords: models regression nonlinear > > ### ** Examples > > data(yaish) > set.seed(1) > > ## Fit the "UNIDIFF" mobility model across education levels > unidiff <- gnm(Freq ~ educ:orig + educ:dest + + Mult(Exp(educ), orig:dest), family = poisson, + data = yaish) Running start-up iterations.. Running main iterations.......................... Done > > ## Check whether Mult1.Factor1.educ4 - Mult1.Factor1.educ5 is estimable > educ4.pos <- grep("Mult1.Factor1.educ4", names(coef(unidiff))) > checkEstimable(unidiff, c(rep(0, educ4.pos - 1), 1, -1, + rep(0, length(coef(unidiff)) - educ4.pos - 1))) [1] TRUE > ## should be TRUE > > ## Check whether Mult1.Factor1.educ4 is estimable > checkEstimable(unidiff, c(rep(0, educ4.pos - 1), 1, 0, + rep(0, length(coef(unidiff)) - educ4.pos - 1))) [1] FALSE > ## should be FALSE -- only *differences* are identified here > > > > cleanEx(); ..nameEx <- "getContrasts" > > ### * getContrasts > > flush(stderr()); flush(stdout()) > > ### Name: getContrasts > ### Title: Estimated Contrasts and Standard Errors for Parameters in a gnm > ### Model > ### Aliases: getContrasts > ### Keywords: models regression nonlinear > > ### ** Examples > > set.seed(1) > data(yaish) > > ## Fit the "UNIDIFF" mobility model across education levels > unidiff <- gnm(Freq ~ educ:orig + educ:dest + + Mult(Exp(-1 + educ), orig:dest), family = poisson, + data = yaish) Running start-up iterations.. Running main iterations.................................. 
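## A minimal sketch, assuming the 'unidiff' fit in this section: the
## educ4 - educ5 contrast checked with checkEstimable() above can also be set
## up by matching coefficient names rather than counting positions.
contrast <- numeric(length(coef(unidiff)))
names(contrast) <- names(coef(unidiff))
contrast[grep("Mult1.Factor1.educ4", names(contrast))] <-  1
contrast[grep("Mult1.Factor1.educ5", names(contrast))] <- -1
## checkEstimable(unidiff, contrast)   # should again be TRUE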
Done > ## Examine the education multipliers (differences on the log scale): > getContrasts(unidiff, grep("Mult1.Factor1", names(coef(unidiff)))) [[1]] estimate se Mult1.Factor1.educ1 2.237862 0.9411152 Mult1.Factor1.educ2 2.021110 0.9435045 Mult1.Factor1.educ3 1.506627 0.9535672 Mult1.Factor1.educ4 1.207738 0.9780882 Mult1.Factor1.educ5 0.000000 0.0000000 > > > > cleanEx(); ..nameEx <- "getModelFrame" > > ### * getModelFrame > > flush(stderr()); flush(stdout()) > > ### Name: getModelFrame > ### Title: Get the Model Frame in Use by 'gnm' > ### Aliases: getModelFrame > ### Keywords: data models regression nonlinear > > ### ** Examples > > ## Create a dummy plug-in function > dummy <- function(...) { + cat("Model frame:\n") + print(getModelFrame()[1:5,]) + stop("Not a valid plug-in function, model can not be estimated") + } > > ## Use data from Vargas et al (2001) > set.seed(1) > data(wheat) > yield.scaled <- wheat$yield * sqrt(3/1000) > treatment <- factor(paste(wheat$tillage, wheat$summerCrop, wheat$manure, + wheat$N, sep = "")) > > ## Add dummy nonlinear term to main effects model - the dummy function > ## gets the model frame used by gnm and prints the first 5 rows > mainEffects <- try(gnm(yield.scaled ~ year + treatment + + Nonlin(dummy(as.numeric(year), N, scale(MTD))), + family = gaussian, data = wheat)) Model frame: yield.scaled year treatment as.numeric(year) N scale(MTD) 1 445.4080 1988 TSM0 1 0 0.06771738 2 366.4812 1988 tSM0 1 0 0.06771738 3 392.3337 1988 TsM0 1 0 0.06771738 4 308.4773 1988 tsM0 1 0 0.06771738 5 325.4567 1988 TSm0 1 0 0.06771738 Error in dummy(as.numeric(year), N, scale(MTD)) : Not a valid plug-in function, model can not be estimated > > > > cleanEx(); ..nameEx <- "gnm" > > ### * gnm > > flush(stderr()); flush(stdout()) > > ### Name: gnm > ### Title: Fitting Generalized Nonlinear Models > ### Aliases: gnm > ### Keywords: models regression nonlinear > > ### ** Examples > > ### Analysis of a 4-way contingency table > set.seed(1) > data(cautres) > print(cautres) , , religion = 1, election = 1 class vote 1 2 3 4 5 6 1 37 19 30 22 38 38 2 5 4 4 8 9 6 , , religion = 2, election = 1 class vote 1 2 3 4 5 6 1 20 20 8 8 33 33 2 11 3 1 6 16 37 , , religion = 3, election = 1 class vote 1 2 3 4 5 6 1 10 22 7 24 43 40 2 15 18 9 30 55 104 , , religion = 4, election = 1 class vote 1 2 3 4 5 6 1 0 2 2 1 3 5 2 4 3 3 4 6 28 , , religion = 1, election = 2 class vote 1 2 3 4 5 6 1 86 52 47 80 113 55 2 4 0 2 20 31 18 , , religion = 2, election = 2 class vote 1 2 3 4 5 6 1 57 35 19 45 90 53 2 13 5 9 24 53 54 , , religion = 3, election = 2 class vote 1 2 3 4 5 6 1 69 106 67 118 212 144 2 29 45 29 118 258 333 , , religion = 4, election = 2 class vote 1 2 3 4 5 6 1 3 9 17 16 23 15 2 8 9 31 78 72 102 , , religion = 1, election = 3 class vote 1 2 3 4 5 6 1 61 22 32 59 79 31 2 11 11 7 21 55 37 , , religion = 2, election = 3 class vote 1 2 3 4 5 6 1 34 27 20 42 79 40 2 13 16 10 37 85 69 , , religion = 3, election = 3 class vote 1 2 3 4 5 6 1 49 60 51 98 145 88 2 34 59 61 159 317 318 , , religion = 4, election = 3 class vote 1 2 3 4 5 6 1 2 7 3 8 12 10 2 6 10 37 70 81 84 , , religion = 1, election = 4 class vote 1 2 3 4 5 6 1 50 30 33 63 73 31 2 4 5 12 17 18 17 , , religion = 2, election = 4 class vote 1 2 3 4 5 6 1 35 37 32 58 89 43 2 11 5 13 24 52 40 , , religion = 3, election = 4 class vote 1 2 3 4 5 6 1 28 98 52 114 186 116 2 18 27 58 122 196 196 , , religion = 4, election = 4 class vote 1 2 3 4 5 6 1 5 16 10 20 44 26 2 3 14 56 91 59 95 > > ## Fit a "double UNIDIFF" model with the 
religion-vote and class-vote > ## interactions both modulated by nonnegative election-specific > ## multipliers. > doubleUnidiff <- gnm(Freq ~ election:vote + election:class:religion + + Mult(Exp(election - 1), religion:vote - 1) + + Mult(Exp(election - 1), class:vote - 1), family = poisson, + data = cautres) Running start-up iterations.. Running main iterations....... Done > > ## Examine the multipliers of the class-vote log odds ratios > coefs.of.interest <- grep("Mult2.*election", names(coef(doubleUnidiff))) > coef(doubleUnidiff)[coefs.of.interest] Mult2.Factor1.election1 Mult2.Factor1.election2 Mult2.Factor1.election3 -1.0451329 -0.3633948 -0.5957576 Mult2.Factor1.election4 -0.6833048 > ## Mult2.Factor1.election1 Mult2.Factor1.election2 > ## -0.5724370 0.1092972 > ## Mult2.Factor1.election3 Mult2.Factor1.election4 > ## -0.1230682 -0.2105843 > > ## Re-parameterize by setting Mult2.Factor1.election4 to zero > getContrasts(doubleUnidiff, coefs.of.interest) [[1]] estimate se Mult2.Factor1.election1 -0.36182805 0.2534754 Mult2.Factor1.election2 0.31991001 0.1320023 Mult2.Factor1.election3 0.08754726 0.1446834 Mult2.Factor1.election4 0.00000000 0.0000000 > ## estimate se > ## Mult2.Factor1.election1 -0.3618399 0.2534762 > ## Mult2.Factor1.election2 0.3198951 0.1320034 > ## Mult2.Factor1.election3 0.0875308 0.1446842 > ## Mult2.Factor1.election4 0.0000000 0.0000000 > > ## Same thing but with election 1 as reference category: > getContrasts(doubleUnidiff, rev(coefs.of.interest)) [[1]] estimate se Mult2.Factor1.election4 0.3618280 0.2534754 Mult2.Factor1.election3 0.4493753 0.2473521 Mult2.Factor1.election2 0.6817381 0.2401644 Mult2.Factor1.election1 0.0000000 0.0000000 > ## estimate se > ## Mult2.Factor1.election4 0.3618399 0.2534746 > ## Mult2.Factor1.election3 0.4493707 0.2473524 > ## Mult2.Factor1.election2 0.6817351 0.2401645 > ## Mult2.Factor1.election1 0.0000000 0.0000000 > > ## Re-fit model with Mult2.Factor1.election1 set to zero > doubleUnidiffConstrained <- + update(doubleUnidiff, constrain = coefs.of.interest[1]) Running start-up iterations.. Running main iterations........................................... Done > > ## Examine the multipliers of the class-vote log odds ratios > coef(doubleUnidiffConstrained)[coefs.of.interest] Mult2.Factor1.election1 Mult2.Factor1.election2 Mult2.Factor1.election3 NA 0.6817194 0.4493564 Mult2.Factor1.election4 0.3618121 > ## ...as using 'getContrasts' (to 4 d.p.). > > > > cleanEx(); ..nameEx <- "gnmControl" > > ### * gnmControl > > flush(stderr()); flush(stdout()) > > ### Name: gnmControl > ### Title: Set Control Parameters for Model Fitting > ### Aliases: gnmControl > ### Keywords: models regression nonlinear > > ### ** Examples > > ## A variation on example(gnm)... > data(cautres) > set.seed(1) > > ## Fit a "double UNIDIFF" model with the religion-vote and class-vote > ## interactions both modulated by nonnegative election-specific > ## multipliers. > > system.time(doubleUnidiff <- + gnm(Freq ~ election:vote + election:class:religion + + Mult(Exp(election - 1), religion:vote - 1) + + Mult(Exp(election - 1), class:vote - 1), + family = poisson, data = cautres, trace = TRUE)) Running start-up iterations Start-up iteration 1. Deviance = 231.6888 Start-up iteration 2. Deviance = 137.292 Running main iterations Iteration 1. Deviance = 134.0682 Iteration 2. Deviance = 133.0473 Iteration 3. Deviance = 133.0431 Iteration 4. Deviance = 133.043 Iteration 5. Deviance = 133.0430 Iteration 6. Deviance = 133.0430 Iteration 7. 
Deviance = 133.0430 Done [1] 2.13 0.55 2.71 0.00 0.00 > > ## repeat with 10 start-up iterations > system.time(doubleUnidiff <- + gnm(Freq ~ election:vote + election:class:religion + + Mult(Exp(election - 1), religion:vote - 1) + + Mult(Exp(election - 1), class:vote - 1), + family = poisson, data = cautres, trace = TRUE, + iterStart = 10)) Running start-up iterations Start-up iteration 1. Deviance = 219.8745 Start-up iteration 2. Deviance = 135.0683 Start-up iteration 3. Deviance = 133.1532 Start-up iteration 4. Deviance = 133.0514 Start-up iteration 5. Deviance = 133.0437 Start-up iteration 6. Deviance = 133.0430 Start-up iteration 7. Deviance = 133.0430 Start-up iteration 8. Deviance = 133.0430 Start-up iteration 9. Deviance = 133.0430 Start-up iteration 10. Deviance = 133.0430 Running main iterations Iteration 1. Deviance = 133.0430 Done [1] 7.29 2.21 9.55 0.00 0.00 > ## ...at convergence by first main iteration, but much slower! > > > > cleanEx(); ..nameEx <- "mentalHealth" > > ### * mentalHealth > > flush(stderr()); flush(stdout()) > > ### Name: mentalHealth > ### Title: Data on Mental Health and Socioeconomic Status > ### Aliases: mentalHealth > ### Keywords: datasets > > ### ** Examples > > set.seed(1) > data(mentalHealth) > > ## Goodman Row-Column association model fits well (deviance 3.57, df 8) > mentalHealth$MHS <- C(mentalHealth$MHS, treatment) > mentalHealth$SES <- C(mentalHealth$SES, treatment) > RC1model <- gnm(count ~ SES + MHS + + Mult(-1 + SES, -1 + MHS), + family = poisson, data = mentalHealth) Running start-up iterations.. Running main iterations..... Done > ## Row scores are parameters coef(RC1model)[10:15] > ## Column scores are coef(RC1model)[16:19] > ## -- both unnormalized in this parameterization of the model > > ## The scores can be normalized as in Agresti's eqn (9.15): > rowProbs <- with(mentalHealth, tapply(count, SES, sum) / sum(count)) > colProbs <- with(mentalHealth, tapply(count, MHS, sum) / sum(count)) > rowScores <- coef(RC1model)[10:15] > colScores <- coef(RC1model)[16:19] > rowScores <- rowScores - sum(rowScores * rowProbs) > colScores <- colScores - sum(colScores * colProbs) > beta1 <- sqrt(sum(rowScores^2 * rowProbs)) > beta2 <- sqrt(sum(colScores^2 * colProbs)) > assoc <- list(beta = beta1 * beta2, + mu = rowScores / beta1, + nu = colScores / beta2) > > > > cleanEx(); ..nameEx <- "occupationalStatus" > > ### * occupationalStatus > > flush(stderr()); flush(stdout()) > > ### Name: occupationalStatus > ### Title: Data on Occupational Status of Fathers and their Sons > ### Aliases: occupationalStatus > ### Keywords: datasets > > ### ** Examples > > set.seed(1) > data(occupationalStatus) > > ## Fit a uniform association model separating diagonal effects > Rscore <- scale(as.numeric(row(occupationalStatus)), scale = FALSE) > Cscore <- scale(as.numeric(col(occupationalStatus)), scale = FALSE) > Uniform <- glm(Freq ~ origin + destination + Diag(origin, destination) + + Rscore:Cscore, family = poisson, data = occupationalStatus) > > ## Fit an association model with homogeneous row-column effects > RChomog <- gnm(Freq ~ origin + destination + Diag(origin, destination) + + Nonlin(MultHomog(origin, destination)), family = poisson, + data = occupationalStatus) Running start-up iterations.. Running main iterations....... 
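## A minimal sketch, assuming the 'Uniform' fit above: with the unit-spaced
## (centred) row and column scores used here, the single Rscore:Cscore
## coefficient is the log of the common local odds ratio implied by the
## uniform association part of the model, for 2 x 2 subtables of adjacent
## categories that do not involve the diagonal cells.
exp(coef(Uniform)["Rscore:Cscore"])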
Done > > ## Fit an association model with separate row and column effects > RC <- gnm(Freq ~ origin + destination + Diag(origin, destination) + + Mult(origin, destination), family = poisson, + data = occupationalStatus) Running start-up iterations.. Running main iterations......... Done > > ## Partition row and column effects > row.and.column.effects <- c(df = Uniform$df.res - RC$df.res, + deviance = Uniform$dev - RC$dev) > homogeneous.row.column.effect <- c(df = Uniform$df.res - RChomog$df.res, + deviance = Uniform$dev - RChomog$dev) > row.column.difference.effect <- row.and.column.effects - + homogeneous.row.column.effect > rbind(homogeneous.row.column.effect, + row.column.difference.effect, + row.and.column.effects) df deviance homogeneous.row.column.effect 6 25.874830 row.column.difference.effect 6 3.411823 row.and.column.effects 12 29.286654 > > > > cleanEx(); ..nameEx <- "se" > > ### * se > > flush(stderr()); flush(stdout()) > > ### Name: se > ### Title: Standard Errors of Linear Parameter Combinations in gnm Models > ### Aliases: se > ### Keywords: models regression nonlinear > > ### ** Examples > > data(yaish) > set.seed(1) > > ## Fit the "UNIDIFF" mobility model across education levels > unidiff <- gnm(Freq ~ educ:orig + educ:dest + + Mult(Exp(-1 + educ), orig:dest), family = poisson, + data = yaish) Running start-up iterations.. Running main iterations.................................. Done > ## Deviance is 208.3 > > ## Get estimate and se for the contrast between educ4 and educ5 in the > ## UNIDIFF multiplier > educ4.pos <- grep("Mult.*educ4", names(coef(unidiff))) > mycontrast <- rep(0, length(coef(unidiff))) > mycontrast[educ4.pos] <- 1 > mycontrast[educ4.pos + 1] <- -1 > se(unidiff, mycontrast) estimate se 1 1.207738 0.9780882 > > ## Get all of the contrasts with educ5 in the UNIDIFF multipliers > getContrasts(unidiff, grep("Mult.*educ", names(coef(unidiff)))) [[1]] estimate se Mult1.Factor1.educ1 2.237862 0.9411152 Mult1.Factor1.educ2 2.021110 0.9435045 Mult1.Factor1.educ3 1.506627 0.9535672 Mult1.Factor1.educ4 1.207738 0.9780882 Mult1.Factor1.educ5 0.000000 0.0000000 > > > > cleanEx(); ..nameEx <- "summary.gnm" > > ### * summary.gnm > > flush(stderr()); flush(stdout()) > > ### Name: summary.gnm > ### Title: Summarize Generalized Nonlinear Model Fits > ### Aliases: summary.gnm print.summary.gnm > ### Keywords: models regression nonlinear > > ### ** Examples > > ## Following on from example(gnm) > data(cautres) > set.seed(1) > > ## Fit model as before > doubleUnidiff <- gnm(Freq ~ election:vote + election:class:religion + + Mult(Exp(election), religion:vote) + + Mult(Exp(election), class:vote), family = poisson, + data = cautres) Running start-up iterations.. Running main iterations........ 
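## A minimal sketch, assuming the 'doubleUnidiff' fit in this section: the
## headline figures of the summary can also be read off the fit directly,
## using the same components accessed for 'Uniform', 'RC' and 'RChomog'
## earlier in this file.
c(deviance = doubleUnidiff$deviance, df.residual = doubleUnidiff$df.residual)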
Done > > ## Summarize results > summary(doubleUnidiff) Call: gnm(formula = Freq ~ election:vote + election:class:religion + Mult(Exp(election), religion:vote) + Mult(Exp(election), class:vote), family = poisson, data = cautres) Deviance Residuals: Min 1Q Median 3Q Max -2.9981359 -0.4986232 0.0003002 0.5388050 2.9312617 Coefficients: (Intercept) election1:vote1 4.000020 -1.466569 election2:vote1 election3:vote1 0.077152 -0.639946 election4:vote1 election1:vote2 0.206824 -1.355217 election2:vote2 election3:vote2 -0.099497 -0.165466 election4:vote2 election1:class1:religion1 0.020459 0.334843 election2:class1:religion1 election3:class1:religion1 -0.377754 0.108638 election4:class1:religion1 election1:class2:religion1 -0.908539 -0.309879 election2:class2:religion1 election3:class2:religion1 -1.017375 -0.732467 election4:class2:religion1 election1:class3:religion1 -1.403933 0.138556 election2:class3:religion1 election3:class3:religion1 -0.929089 -0.517163 election4:class3:religion1 election1:class4:religion1 -1.069882 0.075347 election2:class4:religion1 election3:class4:religion1 -0.080747 0.257632 election4:class4:religion1 election1:class5:religion1 -0.411053 0.539478 election2:class5:religion1 election3:class5:religion1 0.315239 0.786962 election4:class5:religion1 election1:class6:religion1 -0.262278 0.611007 election2:class6:religion1 election3:class6:religion1 -0.102818 0.229047 election4:class6:religion1 election1:class1:religion2 -0.727348 0.086253 election2:class1:religion2 election3:class1:religion2 -0.509488 -0.317582 election4:class1:religion2 election1:class2:religion2 -1.010192 -0.245604 election2:class2:religion2 election3:class2:religion2 -1.149977 -0.454034 election4:class2:religion2 election1:class3:religion2 -1.154621 -1.180246 election2:class3:religion2 election3:class3:religion2 -1.427536 -0.846284 election4:class3:religion2 election1:class4:religion2 -1.054218 -0.714253 election2:class4:religion2 election3:class4:religion2 -0.450165 0.121711 election4:class4:religion2 election1:class5:religion2 -0.409613 0.545272 election2:class5:religion2 election3:class5:religion2 0.295598 0.853259 election4:class5:religion2 election1:class6:religion2 0.143469 0.972992 election2:class6:religion2 election3:class6:religion2 0.144972 0.472548 election4:class6:religion2 election1:class1:religion3 -0.283757 -0.034913 election2:class1:religion3 election3:class1:religion3 0.027717 0.301099 election4:class1:religion3 election1:class2:religion3 -0.884296 0.414528 election2:class2:religion3 election3:class2:religion3 0.395682 0.629938 election4:class2:religion3 election1:class3:religion3 0.073060 -0.569547 election2:class3:religion3 election3:class3:religion3 -0.081637 0.447427 election4:class3:religion3 election1:class4:religion3 -0.088969 0.625917 election2:class4:religion3 election3:class4:religion3 0.816331 1.223007 election4:class4:religion3 election1:class5:religion3 0.673938 1.219114 election2:class5:religion3 election3:class5:religion3 1.505228 1.799424 election4:class5:religion3 election1:class6:religion3 1.156792 1.605226 election2:class6:religion3 election3:class6:religion3 1.533314 1.621939 election4:class6:religion3 election1:class1:religion4 0.984296 -1.760983 election2:class1:religion4 election3:class1:religion4 -1.824929 -1.933560 election4:class1:religion4 election1:class2:religion4 -2.387476 -1.538339 election2:class2:religion4 election3:class2:religion4 -1.359741 -1.186165 election4:class2:religion4 election1:class3:religion4 -1.086333 -1.710308 election2:class3:religion4 
election3:class3:religion4 -0.607457 -0.580431 election4:class3:religion4 election1:class4:religion4 -0.450530 -1.788501 election2:class4:religion4 election3:class4:religion4 -0.055085 -0.033642 election4:class4:religion4 election1:class5:religion4 -0.001076 -1.214903 election2:class5:religion4 election3:class5:religion4 -0.067758 0.119605 election4:class5:religion4 election1:class6:religion4 -0.088799 0.009021 election2:class6:religion4 election3:class6:religion4 0.006700 0.003908 election4:class6:religion4 Mult1.Factor1.(Intercept) 0.006294 0.151095 Mult1.Factor1.election2 Mult1.Factor1.election3 -0.087818 -0.261520 Mult1.Factor1.election4 Mult1.Factor2.(Intercept) -0.328346 0.085669 Mult1.Factor2.religion1:vote1 Mult1.Factor2.religion2:vote1 0.507816 0.308949 Mult1.Factor2.religion3:vote1 Mult1.Factor2.religion4:vote1 -0.037373 -0.830710 Mult1.Factor2.religion1:vote2 Mult1.Factor2.religion2:vote2 -0.872918 -0.260296 Mult1.Factor2.religion3:vote2 Mult1.Factor2.religion4:vote2 0.134649 0.476304 Mult2.Factor1.(Intercept) Mult2.Factor1.election2 -0.278938 0.681737 Mult2.Factor1.election3 Mult2.Factor1.election4 0.449374 0.361825 Mult2.Factor2.(Intercept) Mult2.Factor2.class1:vote1 -0.134164 0.197783 Mult2.Factor2.class2:vote1 Mult2.Factor2.class3:vote1 0.264438 0.124463 Mult2.Factor2.class4:vote1 Mult2.Factor2.class5:vote1 -0.006330 -0.037799 Mult2.Factor2.class6:vote1 Mult2.Factor2.class1:vote2 -0.316888 -0.426220 Mult2.Factor2.class2:vote2 Mult2.Factor2.class3:vote2 -0.446957 -0.116353 Mult2.Factor2.class4:vote2 Mult2.Factor2.class5:vote2 0.036427 0.064481 Mult2.Factor2.class6:vote2 0.222398 Residual deviance: 133.04 on 78 degrees of freedom AIC: 1326.2 Number of iterations: 8 > > > > cleanEx(); ..nameEx <- "termPredictors" > > ### * termPredictors > > flush(stderr()); flush(stdout()) > > ### Name: termPredictors > ### Title: Extract Term Contributions to Predictor > ### Aliases: termPredictors > ### Keywords: models regression > > ### ** Examples > > ## Linear model > G <- gl(4, 6) > x <- 1:24 > y <- rnorm(24, 0, 1) > lmGx <- lm(y ~ G + x) > contrib <- termPredictors(lmGx) > contrib (Intercept) G x 1 -0.03784567 0.0000000 0.002521673 2 -0.03784567 0.0000000 0.005043346 3 -0.03784567 0.0000000 0.007565019 4 -0.03784567 0.0000000 0.010086692 5 -0.03784567 0.0000000 0.012608365 6 -0.03784567 0.0000000 0.015130038 7 -0.03784567 0.5801850 0.017651711 8 -0.03784567 0.5801850 0.020173384 9 -0.03784567 0.5801850 0.022695057 10 -0.03784567 0.5801850 0.025216730 11 -0.03784567 0.5801850 0.027738403 12 -0.03784567 0.5801850 0.030260076 13 -0.03784567 -0.1392898 0.032781749 14 -0.03784567 -0.1392898 0.035303422 15 -0.03784567 -0.1392898 0.037825095 16 -0.03784567 -0.1392898 0.040346768 17 -0.03784567 -0.1392898 0.042868441 18 -0.03784567 -0.1392898 0.045390114 19 -0.03784567 0.1838713 0.047911787 20 -0.03784567 0.1838713 0.050433460 21 -0.03784567 0.1838713 0.052955134 22 -0.03784567 0.1838713 0.055476807 23 -0.03784567 0.1838713 0.057998480 24 -0.03784567 0.1838713 0.060520153 > all.equal(as.numeric(rowSums(contrib)), as.numeric(lmGx$fitted)) #TRUE [1] TRUE > > ## Generalized linear model > y <- cbind(rbinom(24, 10, 0.5), rep(10, 24)) > glmGx <- glm(y ~ G + x, family = binomial) > contrib <- termPredictors(glmGx) > contrib (Intercept) G x 1 -0.6375604 0.0000000 0.01152864 2 -0.6375604 0.0000000 0.02305729 3 -0.6375604 0.0000000 0.03458593 4 -0.6375604 0.0000000 0.04611457 5 -0.6375604 0.0000000 0.05764321 6 -0.6375604 0.0000000 0.06917186 7 -0.6375604 -0.3096256 0.08070050 8 -0.6375604 -0.3096256 
0.09222914 9 -0.6375604 -0.3096256 0.10375778 10 -0.6375604 -0.3096256 0.11528643 11 -0.6375604 -0.3096256 0.12681507 12 -0.6375604 -0.3096256 0.13834371 13 -0.6375604 -0.2337063 0.14987235 14 -0.6375604 -0.2337063 0.16140100 15 -0.6375604 -0.2337063 0.17292964 16 -0.6375604 -0.2337063 0.18445828 17 -0.6375604 -0.2337063 0.19598692 18 -0.6375604 -0.2337063 0.20751557 19 -0.6375604 -0.2394758 0.21904421 20 -0.6375604 -0.2394758 0.23057285 21 -0.6375604 -0.2394758 0.24210149 22 -0.6375604 -0.2394758 0.25363014 23 -0.6375604 -0.2394758 0.26515878 24 -0.6375604 -0.2394758 0.27668742 > all.equal(as.numeric(rowSums(contrib)), + as.numeric(glmGx$linear.predictors)) #TRUE [1] TRUE > > ## Generalized nonlinear model > A <- gl(4, 6) > B <- gl(6, 1, 24) > y <- cbind(rbinom(24, 10, 0.5), rep(10, 24)) > set.seed(1) > gnmAB <- gnm(y ~ A + B + Mult(A - 1, B - 1), family = binomial) Running start-up iterations.. Running main iterations................ Done > contrib <- termPredictors(gnmAB) > contrib (Intercept) A B Mult(A - 1, B - 1) 1 -0.6805419 0.00000000 0.00000000 -0.19389180 2 -0.6805419 0.00000000 -0.07602656 -0.24276197 3 -0.6805419 0.00000000 0.12064571 -0.02454998 4 -0.6805419 0.00000000 0.13285612 0.16693002 5 -0.6805419 0.00000000 0.10196624 0.12289878 6 -0.6805419 0.00000000 -0.11012402 0.17057366 7 -0.6805419 0.06888255 0.00000000 0.22493572 8 -0.6805419 0.06888255 -0.07602656 0.28163048 9 -0.6805419 0.06888255 0.12064571 0.02848067 10 -0.6805419 0.06888255 0.13285612 -0.19365710 11 -0.6805419 0.06888255 0.10196624 -0.14257605 12 -0.6805419 0.06888255 -0.11012402 -0.19788413 13 -0.6805419 -0.20581209 0.00000000 0.24871124 14 -0.6805419 -0.20581209 -0.07602656 0.31139859 15 -0.6805419 -0.20581209 0.12064571 0.03149106 16 -0.6805419 -0.20581209 0.13285612 -0.21412650 17 -0.6805419 -0.20581209 0.10196624 -0.15764622 18 -0.6805419 -0.20581209 -0.11012402 -0.21880032 19 -0.6805419 -0.01229009 0.00000000 -0.26771206 20 -0.6805419 -0.01229009 -0.07602656 -0.33518854 21 -0.6805419 -0.01229009 0.12064571 -0.03389688 22 -0.6805419 -0.01229009 0.13285612 0.23048515 23 -0.6805419 -0.01229009 0.10196624 0.16968994 24 -0.6805419 -0.01229009 -0.11012402 0.23551604 > all.equal(as.numeric(rowSums(contrib)), + as.numeric(gnmAB$predictors)) #TRUE [1] TRUE > > > > cleanEx(); ..nameEx <- "voting" > > ### * voting > > flush(stderr()); flush(stdout()) > > ### Name: voting > ### Title: Data on Social Mobility and the Labour Vote > ### Aliases: voting > ### Keywords: datasets > > ### ** Examples > > ### (Results differ slightly from paper - possible transcription error in > ### published data?) > set.seed(1) > data(voting) > ## reconstruct counts voting Labour/non-Labour > count <- with(voting, percentage/100 * total) > yvar <- cbind(count, voting$total - count) > > ## fit diagonal reference model with constant weights > classMobility <- gnm(yvar ~ Nonlin(Dref(origin, destination)), + family = binomial, data = voting) Running main iterations....... Done > prop.table(exp(coef(classMobility)[2:3])) Dref(origin, destination).origin Dref(origin, destination).destination 0.4372474 0.5627526 > > ## create factors indicating movement in and out of salariat (class 1) > upward <- with(voting, origin != 1 & destination == 1) > downward <- with(voting, origin == 1 & destination != 1) > > ## fit separate weights for the "socially mobile" groups > socialMobility <- gnm(yvar ~ Nonlin(Dref(origin, destination, + formula = ~ 1 + downward + upward)), + family = binomial, data = voting) Running main iterations....... 
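## A minimal sketch, assuming the 'classMobility' and 'socialMobility' fits in
## this section: the constant-weight model is the special case of the
## mobility-specific model in which the 'downward' and 'upward' coefficients
## are zero, so the two fits can be compared informally by their deviances.
c(constantWeights  = classMobility$deviance,
  mobilitySpecific = socialMobility$deviance)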
Done > prop.table(exp(coef(socialMobility)[c(4, 7)] + coef(socialMobility)[c(2, 5)])) Dref(origin, destination, formula = ~1 + downward + upward).origin.upwardTRUE 0.3900752 Dref(origin, destination, formula = ~1 + downward + upward).destination.upwardTRUE 0.6099248 > prop.table(exp(coef(socialMobility)[c(3, 6)] + coef(socialMobility)[c(2, 5)])) Dref(origin, destination, formula = ~1 + downward + upward).origin.downwardTRUE 0.6044571 Dref(origin, destination, formula = ~1 + downward + upward).destination.downwardTRUE 0.3955429 > prop.table(exp(coef(socialMobility)[c(2, 5)])) Dref(origin, destination, formula = ~1 + downward + upward).origin.(Intercept) 0.4045022 Dref(origin, destination, formula = ~1 + downward + upward).destination.(Intercept) 0.5954978 > > ## fit separate weights for downwardly mobile groups only > downwardMobility <- gnm(yvar ~ Nonlin(Dref(origin, destination, + formula = ~ 1 + downward)), + family = binomial, data = voting) Running main iterations....... Done > downwardMobility Call: gnm(formula = yvar ~ Nonlin(Dref(origin, destination, formula = ~1 + downward)), family = binomial, data = voting) Coefficients: (Intercept) -1.58578 Dref(origin, destination, formula = ~1 + downward).origin.(Intercept) 0.29561 Dref(origin, destination, formula = ~1 + downward).origin.downwardTRUE 0.90538 Dref(origin, destination, formula = ~1 + downward).destination.(Intercept) 0.70439 Dref(origin, destination, formula = ~1 + downward).destination.downwardTRUE 0.09462 Dref(origin, destination, formula = ~1 + downward).1 -0.48409 Dref(origin, destination, formula = ~1 + downward).2 0.47924 Dref(origin, destination, formula = ~1 + downward).3 -0.40588 Dref(origin, destination, formula = ~1 + downward).4 1.01270 Dref(origin, destination, formula = ~1 + downward).5 1.64207 Deviance: 18.99389 Pearson chi-squared: 17.09983 Residual df: 18 > prop.table(exp(coef(downwardMobility)[c(3, 5)] + + coef(downwardMobility)[c(2, 4)])) Dref(origin, destination, formula = ~1 + downward).origin.downwardTRUE 0.5991644 Dref(origin, destination, formula = ~1 + downward).destination.downwardTRUE 0.4008356 > prop.table(exp(coef(downwardMobility)[c(2, 4)])) Dref(origin, destination, formula = ~1 + downward).origin.(Intercept) 0.3992041 Dref(origin, destination, formula = ~1 + downward).destination.(Intercept) 0.6007959 > > > > cleanEx(); ..nameEx <- "wedderburn" > > ### * wedderburn > > flush(stderr()); flush(stdout()) > > ### Name: wedderburn > ### Title: Wedderburn Quasi-likelihood Family > ### Aliases: wedderburn > ### Keywords: models > > ### ** Examples > > set.seed(1) > data(barley) ## data from Wedderburn (1974), see ?barley > > ### Fit Wedderburn's logit model with variance proportional to the > ### square of mu(1-mu) > logitModel <- glm(y ~ site + variety, family = wedderburn, data = barley) > fit <- fitted(logitModel) > print(sum((barley$y - fit)^2 / (fit * (1-fit))^2)) [1] 71.17401 > ## Agrees with the chi-squared value reported in McCullagh and Nelder > ## (1989, p331), which differs slightly from Wedderburn's reported value. > > ### Fit the biplot model as in Gabriel (1998, p694) > biplotModel <- gnm(y ~ -1 + Mult(site, variety, multiplicity = 2), + family = wedderburn, data = barley) Running start-up iterations.. Running main iterations......................................................... ......................... 
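## A minimal sketch, assuming the 'logitModel' fit above: the statistic
## computed by hand earlier is the sum of squared Pearson residuals under the
## Wedderburn variance function V(mu) = mu^2 (1 - mu)^2, so it should also be
## reproduced (approximately 71.17) by
sum(residuals(logitModel, type = "pearson")^2)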
Done > barleySVD <- svd(matrix(biplotModel$predictors, 10, 9)) > A <- sweep(barleySVD$v, 2, sqrt(barleySVD$d), "*")[, 1:2] > B <- sweep(barleySVD$u, 2, sqrt(barleySVD$d), "*")[, 1:2] > ## These are essentially A and B as in Gabriel (1998, p694), from which > ## the biplot is made by > plot(rbind(A, B), pch = c(LETTERS[1:9], as.character(1:9), "X")) > > ### Fit the double-additive model as in Gabriel (1998, p697) > variety.binary <- factor(match(barley$variety, c(2,3,6), nomatch = 0) > 0, + labels = c("Rest", "2,3,6")) > doubleAdditive <- gnm(y ~ variety + Mult(site, variety.binary), + family = wedderburn, data = barley) Running start-up iterations.. Running main iterations..................... Done > > > > cleanEx(); ..nameEx <- "wheat" > > ### * wheat > > flush(stderr()); flush(stdout()) > > ### Name: wheat > ### Title: Wheat Yields from Mexican Field Trials > ### Aliases: wheat > ### Keywords: datasets > > ### ** Examples > > set.seed(1) > data(wheat) > > ## Scale yields to reproduce analyses reported in Vargas et al (2001) > yield.scaled <- wheat$yield * sqrt(3/1000) > > ## Reproduce (up to error caused by rounding) Table 1 of Vargas et al (2001) > aov(yield.scaled ~ year*tillage*summerCrop*manure*N, data = wheat) Call: aov(formula = yield.scaled ~ year * tillage * summerCrop * manure * N, data = wheat) Terms: year tillage summerCrop manure N year:tillage Sum of Squares 373242.3 16158.6 18767.6 98625.7 536006.2 21076.9 Deg. of Freedom 9 1 1 1 2 9 year:summerCrop tillage:summerCrop year:manure tillage:manure Sum of Squares 8726.0 207.7 37558.5 1198.1 Deg. of Freedom 9 1 9 1 summerCrop:manure year:N tillage:N summerCrop:N manure:N Sum of Squares 445.8 126894.4 968.1 17782.5 77984.4 Deg. of Freedom 1 18 2 2 2 year:tillage:summerCrop year:tillage:manure Sum of Squares 2861.0 2266.6 Deg. of Freedom 9 9 year:summerCrop:manure tillage:summerCrop:manure year:tillage:N Sum of Squares 1120.8 157.0 4946.1 Deg. of Freedom 9 1 18 year:summerCrop:N tillage:summerCrop:N year:manure:N Sum of Squares 18321.8 2136.4 31368.3 Deg. of Freedom 18 2 18 tillage:manure:N summerCrop:manure:N Sum of Squares 1837.6 186.2 Deg. of Freedom 2 2 year:tillage:summerCrop:manure year:tillage:summerCrop:N Sum of Squares 3251.8 5840.8 Deg. of Freedom 9 18 year:tillage:manure:N year:summerCrop:manure:N Sum of Squares 4431.0 6971.0 Deg. of Freedom 18 18 tillage:summerCrop:manure:N year:tillage:summerCrop:manure:N Sum of Squares 1511.8 3880.6 Deg. of Freedom 2 18 Estimated effects may be unbalanced > treatment <- interaction(wheat$tillage, wheat$summerCrop, wheat$manure, + wheat$N, sep = "") > mainEffects <- glm(yield.scaled ~ year + treatment, family = gaussian, + data = wheat) > bilinear1 <- gnm(yield.scaled ~ year + treatment + Mult(year, treatment), + family = gaussian, data = wheat) Running start-up iterations.. Running main iterations................. Done > anova(mainEffects, bilinear1) Analysis of Deviance Table Model 1: yield.scaled ~ year + treatment Model 2: yield.scaled ~ year + treatment + Mult(year, treatment) Resid. Df Resid. Dev Df Deviance 1 207 279515 2 176 128383 31 151133 > ## Not run: > ##D ## The next two take a lot of iterations to converge... 
> ##D bilinear2 <- gnm(yield.scaled ~ year + treatment + > ##D Mult(year, treatment, multiplicity = 2), > ##D family = gaussian, data = wheat) > ##D bilinear3 <- gnm(yield.scaled ~ year + treatment + > ##D Mult(year, treatment, multiplicity = 3), > ##D family = gaussian, data = wheat) > ##D anova(mainEffects, bilinear1, bilinear2, bilinear3) > ## End(Not run) > > ## Examine the extent to which, say, mTF explains the first bilinear term > bilinear1mTF <- gnm(yield.scaled ~ year + treatment + Mult(1 + mTF, treatment), + family = gaussian, data = wheat) Running start-up iterations.. Running main iterations.... Done > anova(mainEffects, bilinear1mTF, bilinear1) Analysis of Deviance Table Model 1: yield.scaled ~ year + treatment Model 2: yield.scaled ~ year + treatment + Mult(1 + mTF, treatment) Model 3: yield.scaled ~ year + treatment + Mult(year, treatment) Resid. Df Resid. Dev Df Deviance 1 207 279515 2 184 200992 23 78523 3 176 128383 8 72610 > > > > cleanEx(); ..nameEx <- "yaish" > > ### * yaish > > flush(stderr()); flush(stdout()) > > ### Name: yaish > ### Title: Class Mobility by Level of Education in Israel > ### Aliases: yaish > ### Keywords: datasets > > ### ** Examples > > set.seed(1) > data(yaish) > > ## Fit the "UNIDIFF" mobility model across education levels > > unidiff <- gnm(Freq ~ educ:orig + educ:dest + + Mult(Exp(-1 + educ), orig:dest), family = poisson, + data = yaish) Running start-up iterations.. Running main iterations.................................. Done > ## Deviance should be 208.3 > > ## Look at the multipliers of the orig:dest association: > coefs.of.interest <- grep("Mult1.*educ", names(coef(unidiff))) > coef(unidiff)[coefs.of.interest] Mult1.Factor1.educ1 Mult1.Factor1.educ2 Mult1.Factor1.educ3 Mult1.Factor1.educ4 1.08253469 0.86578329 0.35129963 0.05241102 Mult1.Factor1.educ5 -1.15532711 > ## Mult1.Factor1.educ1 Mult1.Factor1.educ2 Mult1.Factor1.educ3 > ## 1.9853067 1.7685557 1.2540751 > ## Mult1.Factor1.educ4 Mult1.Factor1.educ5 > ## 0.9551895 -0.2525413 > > ## Get standard errors for contrasts with educ5 > getContrasts(unidiff, coefs.of.interest) [[1]] estimate se Mult1.Factor1.educ1 2.237862 0.9411152 Mult1.Factor1.educ2 2.021110 0.9435045 Mult1.Factor1.educ3 1.506627 0.9535672 Mult1.Factor1.educ4 1.207738 0.9780882 Mult1.Factor1.educ5 0.000000 0.0000000 > ## estimate se > ## Mult1.Factor1.educ1 2.237848 0.9411054 > ## Mult1.Factor1.educ2 2.021097 0.9434948 > ## Mult1.Factor1.educ3 1.506616 0.9535574 > ## Mult1.Factor1.educ4 1.207731 0.9780784 > ## Mult1.Factor1.educ5 0.000000 0.0000000 > > ## Table of model residuals > residuals(unidiff) , , dest = 1 orig educ 1 2 3 4 5 6 1 0.9140091 0.5359730 -0.4017501 -1.6018427 -0.5767715 0.7642182 2 -1.0926096 0.9579059 1.0487355 -0.6384006 0.1194830 -0.8647577 3 0.6448081 -1.0110783 -0.3767698 -0.4915874 1.0441178 0.1855225 4 -0.6419836 -1.2933472 -0.5381362 2.7408261 -0.3665693 -0.4721383 5 -0.4436674 -0.4131761 0.5043371 0.5630681 -0.2924198 0.1719192 orig educ 7 1 0.02168646 2 -1.14784997 3 -0.41370725 4 1.03628403 5 -0.45199944 , , dest = 2 orig educ 1 2 3 4 5 6 1 0.1753361 0.3669005 0.2159303 -0.6130340 -0.1651020 -0.21311945 2 -1.7743254 0.1142762 -0.1177331 0.7536500 0.5110054 0.05170603 3 0.3237095 -1.0081068 0.4838170 0.6959076 -0.4758566 -0.17091624 4 0.9856423 0.1616641 -1.1198506 -0.7669940 0.1372696 0.74509597 5 1.9071426 1.2097888 -3.0217800 -1.1406759 -0.8291595 1.17544397 orig educ 7 1 0.2372819 2 -0.3256101 3 -0.3703686 4 0.1113051 5 -0.5651469 , , dest = 3 orig educ 1 2 3 4 5 6 1 -0.48911806 
-0.08717674 0.001036406 0.6135807 0.05468338 -0.1801753 2 0.80008289 -2.21850262 -0.186752633 -0.6671387 0.65302920 0.5194778 3 -0.08705864 2.37547320 -0.137537958 -0.6256250 -1.88646339 0.4736683 4 0.62090886 -0.62875373 0.429764635 -1.5931869 0.99504173 -1.2697514 5 -2.00191002 1.66598098 2.021853773 0.4814290 -0.15817365 -2.8926848 orig educ 7 1 -0.6512537 2 -0.1145603 3 1.1631172 4 1.3773067 5 -0.4956896 , , dest = 4 orig educ 1 2 3 4 5 6 1 1.0500847 -0.4747331 0.70500642 -1.0061078 -1.4626065 1.1811550 2 -0.5123242 0.8641086 -1.19144807 0.9747597 0.0700465 -0.4427294 3 -1.3029762 -1.1462354 0.10594040 1.2899438 1.4866751 -2.2413301 4 0.5877151 -0.5612867 0.04433348 0.8673765 -1.1429164 -1.0949290 5 -1.2775312 -0.7030413 -1.22908550 1.9750490 -0.8935360 0.7099450 orig educ 7 1 0.4986745 2 -0.6963397 3 -0.8813331 4 -0.4517546 5 -0.1299412 , , dest = 5 orig educ 1 2 3 4 5 6 1 -0.2764958 -0.8457632 -0.6630255 0.3788845 0.4606149 0.3857962 2 1.1206712 0.4913198 0.0973344 0.6901651 -0.8673578 -0.5784760 3 -1.0874401 0.2456125 1.5981981 -1.1839507 0.3916001 -0.7754415 4 -0.4300783 1.0624194 0.7154555 -1.5650630 -0.2189453 0.6723075 5 1.3014819 -1.7446928 -0.8669408 -3.0275406 1.4534471 0.2977003 orig educ 7 1 0.2534876 2 0.8627814 3 -1.5064033 4 -2.1024743 5 1.4065436 , , dest = 6 orig educ 1 2 3 4 5 6 1 -0.51144163 0.6091101 0.51307965 0.3325794 -0.3050086 -0.58189135 2 0.04233763 -0.6267588 0.01796321 -1.3732476 0.3999653 0.75547733 3 0.94939734 -1.1010637 -2.26939246 0.6196512 0.3228056 1.14711331 4 0.12263708 1.2829150 0.28963460 -1.6431382 -0.1262320 -0.06873082 5 -0.12497649 0.1283145 -0.20686278 0.1989784 0.5605858 -0.42387244 orig educ 7 1 -0.2123710 2 -0.1637650 3 1.2358007 4 -0.7369431 5 -0.2273460 , , dest = 7 orig educ 1 2 3 4 5 1 -2.107342e-08 -6.786088e-01 4.877787e-01 2.752715e-01 -6.695863e-01 2 -2.107342e-08 -5.508599e-01 -5.779032e-01 -5.432169e-01 -5.030379e-01 3 -2.728920e-06 1.081879e+00 -1.073735e+00 -7.401013e-01 9.527869e-01 4 -2.107342e-08 -4.877941e-06 -7.146750e-06 -5.919674e-06 -6.615328e-06 5 -5.093839e-07 -5.532933e-06 -8.281011e-06 -5.106017e-06 -6.227750e-06 orig educ 6 7 1 -1.951934e-01 -2.107342e-08 2 7.818117e-01 -2.107342e-08 3 -1.057564e+00 -2.084795e-06 4 -7.603400e-06 -2.107342e-08 5 -6.959807e-06 -6.395145e-08 > > > > ### *
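## A minimal sketch, assuming the 'unidiff' fit above and that residuals() on
## a gnm fit returns deviance residuals by default, as for "glm" objects: the
## squares of the residuals printed above should then sum to the residual
## deviance of about 208.3 noted at the start of this section.
sum(residuals(unidiff)^2)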