.packageName <- "concord"
# accepts a "score matrix" like:
#
# 		rater1	rater2	...
# object1	1	2
# object2	1	1
# ...
#
# and returns a "count matrix" like:
#
#		1	2	...
# object1	1	1
# object2	2	0
# ...
#
# as used by the *.kappa() functions

scores.to.counts<-function(scores) {
 # Convert an objects x raters "score matrix" into an objects x levels
 # "count matrix" (how many raters assigned each level to each object),
 # as used by the *.kappa() functions.
 # scores: a data frame or matrix of rater scores; anything else stops.
 # Fix: the original tested missing(score.names) on a local variable,
 # which is invalid (missing() only applies to function arguments), so
 # the intended error message could never be reached; use an explicit
 # if / else if / else chain instead.
 if(is.data.frame(scores))
  score.names<-levels(as.factor(as.vector(unlist(scores))))
 else if(is.matrix(scores))
  score.names<-levels(as.factor(as.vector(scores)))
 else stop("scores must be a data frame or matrix")
 score.levels<-as.numeric(score.names)
 nlevels<-length(score.levels)
 nobj<-nrow(scores)
 counts<-matrix(0,nrow=nobj,ncol=nlevels)
 colnames(counts)<-score.names
 for(i in seq_len(nobj)) {
  # count the raters who assigned level j to object i; NAs don't count
  for(j in seq_len(nlevels))
   counts[i,j]<-sum(scores[i,]==score.levels[j],na.rm=TRUE)
 }
 return(counts)
}

cohen.kappa<-function(classif,type=c("score","count")) {
 # Kappa statistics (Cohen; Siegel & Castellan; simple 2*PA-1) for
 # nominally classified data.
 # classif: a score matrix (objects x methods) when type="score", or a
 #  count matrix (objects x categories) when type="count".
 # Returns a "cohen.kappa" object with the kappas, Z scores and
 #  one-sided p values; Cohen's variant is only computed for
 #  type="score" and is NA otherwise, or when the scores contain NAs.
 if(missing(classif))
  stop("Usage: cohen.kappa(classif, type=\"score\")\n")
 # recode non-numeric classifications as consecutive numeric codes
 if(!is.numeric(classif))
  classif<-apply(apply(classif,2,as.factor),2,as.numeric)
 if(type[1] == "score") classif.mat<-scores.to.counts(classif)
 else classif.mat<-as.matrix(classif)
 minclassif<-min(classif.mat)
 # bump the minimum value up to 1 for tabulate
 #if(minclassif < 1) classif.mat<-classif.mat+1-minclassif
 # NOTE(review): minclassif and classdim are computed but never used
 # below (the bump above is commented out)
 classdim<-dim(classif)
 k<-apply(classif.mat,1,sum)
 # check that all the row sums are equal
 if(any(k != k[1])) {
  # stick on an extra column of no-classification counts
  classif.mat<-cbind(classif.mat,max(k)-k)
  # recalculate the row sums
  k<-apply(classif.mat,1,sum)
  # let the user know
  cat("Different row sums, a no-classification category was added.\n\n")
 }
 matdim<-dim(classif.mat)
 N<-matdim[1] # number of data objects
 ncat<-matdim[2] # number of categories
 if(type[1] == "score") {
  if(any(is.na(classif))) {
   cat("Can't use Cohen's method with NAs\n")
   PEc<-NA
  }
  else {
   # Cohen expected agreement: per category, the product over methods
   # of the proportion of objects assigned to it, summed over categories
   pj<-apply(apply(classif,2,tabulate,nbins=ncat)/N,1,prod)
   PEc<-sum(pj)
  }
 }
 else PEc<-NA
 # Siegel & Castellan expected agreement from the pooled category totals
 Cj<-apply(classif.mat,2,sum)
 # NOTE(review): this overwrites the Cohen pj computed above, and the
 # varKc formula below therefore uses the Siegel pj -- verify intended
 pj<-Cj/(N*k[1])
 PEsc<-sum(pj^2)
 # observed agreement PA from within-object pair counts
 Si<-1/(k[1]*(k[1]-1))*sum(classif.mat*(classif.mat-1))
 PA<-(1/N)*sum(Si)
 Ksc<-(PA-PEsc)/(1-PEsc)
 if(type[1] == "score") {
  Kc<-(PA-PEc)/(1-PEc)
  varKc<-(2/(N*k[1]*(k[1]-1)))*
   (PEc-(2*k[1]-3)*PEc^2+2*(k[1]-2)*sum(pj^3))/(1-PEc)^2
  Zc<-Kc/sqrt(varKc)
  pc<-1-pnorm(Zc) # one-sided p from the normal approximation
 }
 else {
  Kc<-NA
  Zc<-NA
  pc<-NA
 }
 varKsc<-(2/(N*k[1]*(k[1]-1)))*
  (PEsc-(2*k[1]-3)*PEsc^2+2*(k[1]-2)*sum(pj^3))/(1-PEsc)^2
 Zsc<-Ksc/sqrt(varKsc)
 psc<-1-pnorm(Zsc)
 # chance-corrected agreement on the simple 2*PA-1 scale
 Kbbc<-2*PA-1
 c.k<-list(kappa.c=Kc,kappa.sc=Ksc,kappa.bbc=Kbbc,
  Zc=Zc,Zsc=Zsc,pc=pc,psc=psc,categories=matdim[2],methods=k[1])
 class(c.k)<-"cohen.kappa"
 return(c.k)
}

print.cohen.kappa<-function(x,...) {
 # Print method for "cohen.kappa" objects: shows the category/method
 # counts, then each kappa variant with its Z and one-sided p value.
 # The Cohen line is suppressed when that kappa was not computable (NA).
 out<-c("Kappa test for nominally classified data\n",
  paste(x$categories,"categories -",x$methods,"methods\n"))
 if(!is.na(x$kappa.c))
  out<-c(out,paste("kappa (Cohen) =",signif(x$kappa.c),", Z =",
   signif(x$Zc),", p =",signif(x$pc),"\n"))
 out<-c(out,
  paste("kappa (Siegel) =",signif(x$kappa.sc),", Z =",signif(x$Zsc),
   ", p =",signif(x$psc),"\n"),
  paste("kappa (2*PA-1) =",signif(x$kappa.bbc),"\n\n"))
 cat(out,sep="")
}

# wtpc calculates weighted percentages using the following formula:
# <weighted pc><-(100/<n methods>)*<n ratings>/<n data objects>
# The format of the data is the same as that used for calculating the
# kappa for nominal data cohen.kappa()

wtpc<-function(x,n.methods,n.objects,type=c("count","score")) {
 # Weighted percentages: (100/n.methods) * <ratings per category> / n.objects.
 # x: a vector, data frame or matrix of rating counts, or a score matrix
 #  that is first converted with scores.to.counts() when type="score".
 # Fixes two defects in the original:
 #  - the type test compared against the whole length-2 default vector
 #   (an error in an if() since R 4.2); use type[1] like cohen.kappa()
 #  - a data frame matched neither is.matrix() branch, so its column
 #   sums were overwritten by the plain-vector sum(); chain with else if
 if(!missing(x) && !missing(n.methods) && !missing(n.objects)) {
  if(type[1] == "score") x<-scores.to.counts(x)
  # per-category sums: columns for data frames/matrices, total for vectors
  if(is.data.frame(x)) sumx<-sapply(x,sum)
  else if(is.matrix(x)) sumx<-apply(x,2,sum)
  else sumx<-sum(x)
  return((100/n.methods)*sumx/n.objects)
 }
 else {
  cat("Usage: wtpc(x,n.methods,n.objects,type=c(\"count\",\"score\"))\n")
  cat("\twhere x is a vector, data frame or matrix of ratings,\n")
  cat("\tif x is scores rather than counts, specify type=score\n")
  cat("\tn.methods is the number of rating methods (e.g. raters)\n")
  cat("\tand n.objects is the number of data objects (e.g. subjects)\n")
 }
}
# tiecorr calculates the correction for tied ranks

tiecorr <- function (rankarray) {
 # Tie correction for rank data: for every tie group of size t in every
 # row, accumulate t^3 - t, and return the grand total.
 # Tied ranks are midranks, so candidate values are scanned in steps of
 # 0.5 from 1 up to the number of columns.
 n.ranks <- dim(rankarray)[2]
 total <- 0
 for (row in seq_len(dim(rankarray)[1])) {
  for (candidate in seq(1, n.ranks, by = 0.5)) {
   n.tied <- sum(candidate == rankarray[row, ])
   if (n.tied > 1) total <- total + (n.tied^3 - n.tied)
  }
 }
 return(total)
}

# calculates a Z score for a zero-sum contrast on the rank array

BEZ <- function (rankarray,lambda) {
 # Z score for a zero-sum contrast applied to the column rank sums of
 # rankarray. lambda holds one coefficient per column; any extra
 # elements beyond the column count are ignored.
 n.rows <- dim(rankarray)[1]
 n.cols <- dim(rankarray)[2]
 lam <- lambda[seq_len(n.cols)]
 L <- sum(lam * apply(rankarray, 2, sum))
 lambda2sum <- sum(lam^2)
 Z <- L / sqrt((n.rows * n.cols * (n.cols + 1) * lambda2sum) / 12)
 # NOTE(review): divides by tiecorr(), which is 0 when there are no
 # ties, yielding an infinite Z -- presumably intended for tied data
 # only; confirm against callers.
 return(Z * sqrt(n.rows / tiecorr(rankarray)))
}

# computes Kendall's W from a matrix of either scores or ranks where
# rows are scoring or ranking methods and columns are data objects

kendall.w <- function (x,lambda=NULL,descending=TRUE,ranks=FALSE) {
 # Kendall's coefficient of concordance (W) for a matrix in which rows
 # are scoring/ranking methods and columns are data objects.
 # lambda: optional contrast coefficients (a vector, or a matrix with
 #  one contrast per row) passed to BEZ() for contrast Z statistics.
 # descending: when converting scores to ranks, rank from highest score.
 # ranks: TRUE if x already contains ranks (rows must sum equally).
 # Returns a "kendall.w" object with W, a p value (tabled for small N,
 # chi-square approximation otherwise), the rank matrix and any zstat.
 if (missing(x))
  stop("Usage: kendall.w(x,lambda=NULL,descending=TRUE,ranks=FALSE)")
 if (!is.data.frame(x) && !is.matrix(x))
  stop("x must be a dataframe or matrix")
 # lookup table for alpha=0.01 critical values for Kendall's W
 # lookup for small N and k starts at [3,3], so use offset of -2 to read
 Wcrit01<-matrix(
  c(NA,NA,NA,NA,NA,.522,.469,.425,.392,.359,.335,.311,.291,.274,.26,.245,.233,.221,
    NA,.768,.644,.553,.491,.429,.39,.351,.328,.306,.284,.262,.24,.227,.216,.204,.193,.182,
    .84,.683,.571,.489,.434,.379,.344,.309,.289,.269,.25,.23,.211,.2,.19,.18,.17,.16,
    .78,.629,.524,.448,.397,.347,.314,.282,.264,.246,.228,.21,.193,.183,.173,.164,.155,.146,
    .737,.592,.491,.419,.371,.324,.293,.263,.246,.229,.212,.195,.179,.169,.16,.152,.144,.136),
    nrow=5,byrow=TRUE)
 # lookup table for alpha=0.05 critical values for Kendall's W
 # lookup for small N and k starts at [3,3], so use offset of -2 to read
 Wcrit05<-matrix(
  c(NA,NA,NA,NA,NA,.376,.333,.3,.275,.25,.232,.214,.2,.187,.176,.166,.158,.15,
    NA,.619,.501,.421,.369,.318,.287,.256,.232,.222,.204,.188,.171,.160,.151,.143,.136,.129,
    .716,.552,.449,.378,.333,.287,.259,.231,.212,.195,.180,.167,.155,.147,.139,.131,.124,.117,
    .66,.512,.417,.351,.305,.267,.24,.215,.201,.186,.172,.158,.145,.137,.13,.123,.116,.109,
    .624,.484,.395,.333,.29,.253,.228,.204,.19,.176,.163,.15,.137,.13,.123,.116,.109,.103),
    nrow=5,byrow=TRUE)
 datadim<-dim(x)
 # build padded column names for the print method, which reads them
 # back from the returned object
 if(is.null(colnames(x))) cnames<-as.character(1:datadim[2])
 else cnames<-colnames(x)
 if(!is.null(lambda)) max.lambda.len<-max(nchar(unlist(lambda)))
 else max.lambda.len<-4
 col.width<-max(nchar(cnames))
 if(col.width <= max.lambda.len) col.width<-max.lambda.len+1
 cnames<-formatC(cnames,width=col.width)
 if(ranks) {
  # check that all rows sum to the same value
  xsums<-rowSums(x)
  if(!all(xsums == xsums[1]))
   stop("Differing row sums - will not compute properly.")
  rank.mat<-x
 }
 else {
  # NOTE(review): meanscore is never used below (and sapply() on a
  # matrix yields per-cell, not per-column, means) -- dead code
  meanscore<-sapply(x,mean)
  # rank within each row; work on the transpose so rank() sees one
  # original row per column
  rank.mat <- t(as.matrix(x))
  if(descending) rank.mat <- max(rank.mat) - rank.mat
  # NOTE(review): this exist.tie is dead -- it is recomputed below
  exist.tie<-0
  for (i in 1:datadim[1]) rank.mat[,i]<-rank(rank.mat[,i])
  rank.mat <- t(rank.mat)
 }
 # ties leave fewer unique values per row than there are entries
 exist.tie<-length(unlist(apply(rank.mat,1,unique)))<length(rank.mat)
 meanranks<-apply(rank.mat,2,mean)
 grandmean<-mean(meanranks)
 if(exist.tie) {
  # subtract the tie correction term from the denominator
  Tj<-tiecorr(rank.mat)
  W<-sum((meanranks-grandmean)^2)/
   ((datadim[2]*(datadim[2]^2-1)-Tj/datadim[1])/12)
 }
 else W<-sum((meanranks-grandmean)^2)/(datadim[2]*(datadim[2]^2-1)/12)
 if(datadim[2] > 7) {
  # large N: chi-square approximation with k(N-1)W ~ X2 on N-1 df
  p.table<-NA
  x2df<-datadim[2]-1
  p.chisq<-pchisq(datadim[1]*(datadim[2]-1)*W,x2df,lower.tail=FALSE)
 }
 else {
  # small N: read the tabled critical values (offset -2, see above)
  p.table<-ifelse(W > Wcrit01[datadim[2]-2,datadim[1]-2],"<0.01",
   ifelse(W > Wcrit05[datadim[2]-2,datadim[1]-2],"<0.05",">0.05"))
  x2df<-NA
  p.chisq<-NA
 }
 if(!is.null(lambda)) {
  ldim <- dim(lambda)
  # vector lambda = a single contrast; matrix = one contrast per row
  if(is.null(ldim)) zstat<-round(BEZ(rank.mat,lambda),3)
  else {
   zstat <- vector("numeric",ldim[1])
   for (i in 1:ldim[1]) {
    zstat[i]<-round(BEZ(rank.mat,lambda[i,]),3)
   }
  }
 }
 else zstat<-NULL
 k.w<-list(W=W,p.table=p.table,p.chisq=p.chisq,x2df=x2df,rank.mat=rank.mat,
  cnames=cnames,meanranks=meanranks,lambda=lambda,zstat=zstat)
 class(k.w)<-"kendall.w"
 return(k.w)
}

print.kendall.w<-function(x,...) {
 # Print method for "kendall.w" objects: W with either the chi-square
 # p value (large N) or the tabled p value (small N), followed by any
 # contrast Z statistics aligned under their lambda coefficients.
 cat("\nKendall's W for ordinal data\n")
 cat("W =",x$W)
 if(is.na(x$p.table)) {
  cat(paste("  p(X2[",x$x2df,"]) =",sep="",collapse=""),x$p.chisq,"\n\n")
 }
 else {
  cat("  p(table) ",x$p.table,"\n\n")
 }
 if(!is.null(x$zstat)) {
  width<-ifelse(is.null(x$cnames),8,max(nchar(x$cnames)))
  cat("Contrasts\n")
  cat(x$cnames)
  cat(paste(rep(" ",7),sep="",collapse=""))
  cat("Z\n")
  if(is.null(dim(x$lambda))) {
   # single contrast: lambda is a plain vector
   cat(formatC(x$lambda,width=width),rep(" ",8-nchar(x$zstat)),x$zstat,"\n")
  }
  else {
   # one contrast per row of the lambda matrix
   for(row in seq_along(x$zstat))
    cat(formatC(x$lambda[row,],width=width),
     rep(" ",8-nchar(x$zstat[row])),x$zstat[row],"\n")
  }
  cat("\n")
 }
}
# calculates the coincidence matrix for the kripp.alpha() function

coincidence.matrix<-function(x) {
 # Build the values x values coincidence matrix for kripp.alpha().
 # x: a raters x objects matrix of classifications (may contain NAs).
 # Returns a list skeleton; the caller fills in statistic/data.level.
 levx<-(levels(as.factor(x)))
 nval<-length(levx)
 # set up a coincidence matrix to hold the match/mismatch data
 cm<-matrix(rep(0,nval*nval),nrow=nval)
 dimx<-dim(x)
 # calculate correction factor (?) for data with missing values:
 # with any NA present, each pair in a unit (column) is weighted by
 # 1/(m-1), where m is that unit's non-missing count; otherwise 1
 vn<-function(datavec) sum(!is.na(datavec))
 if(any(is.na(x))) mc<-apply(x,2,vn)-1
 else mc<-rep(1,dimx[2])
 for(col in 1:dimx[2]) {
  # every unordered pair of raters within this unit
  for(i1 in 1:(dimx[1]-1)) {
   for(i2 in (i1+1):dimx[1]) {
    if(!is.na(x[i1,col]) && !is.na(x[i2,col])) {
     # locate each value among the level labels (values are coerced
     # to character for the comparison)
     index1<-which(levx==x[i1,col])
     index2<-which(levx==x[i2,col])
     # matching (diagonal) pairs count twice, once per ordering
     cm[index1,index2]<-cm[index1,index2]+(1+(index1==index2))/mc[col]
     # keep the matrix symmetric for mismatching pairs
     if(index1 != index2) cm[index2,index1]<-cm[index1,index2]
    }
   }
  }
 }
 # grand total of (weighted) pairable values
 nmv<-sum(apply(cm,2,sum))
 return(list(statistic=NA,coincidence.matrix=cm,data.values=levx,nmatchval=nmv,
  data.level=NA))
}

# calculates Krippendorff's alpha

kripp.alpha<-function(x,method="nominal") {
 # Krippendorff's alpha for a raters x objects matrix of classifications.
 # method: "nominal", "ordinal", "interval" or "ratio"; selects the
 #  difference function used to weight each pair of categories.
 # Returns a "kripp.alpha" object built on the coincidence matrix.
 if(!missing(x)) {
  cm<-coincidence.matrix(x)
  cm$data.level<-method
  dimcm<-dim(cm$coincidence.matrix)
  # upper triangle of the coincidence matrix as a vector
  utcm<-as.vector(cm$coincidence.matrix[upper.tri(cm$coincidence.matrix)])
  # diagonal of the coincidence matrix
  diagcm<-diag(cm$coincidence.matrix)
  # sum of diagonal elements of coincidence matrix
  # NOTE(review): occ (and ncnc below) are computed but not used in
  # the statistic -- dead code
  occ<-sum(diagcm)
  # the marginal sums for the coincidence matrix
  nc<-apply(cm$coincidence.matrix,1,sum)
  # calculate this term to simplify
  ncnc<-sum(nc*(nc-1))
  # need the data values for interval and ratio methods
  # (non-numeric labels become NA, so those methods need numeric data)
  dv<-as.numeric(cm$data.values)
  diff2<-rep(0,length(utcm))
  ncnk<-rep(0,length(utcm))
  ck<-1
  # walk the upper triangle in column-major order so diff2/ncnk line
  # up with utcm
  for(k in 2:dimcm[2]) {
   for(c in 1:(k-1)) {
    ncnk[ck]<-nc[c]*nc[k]
    # squared category difference under the chosen metric
    if(method == "nominal") diff2[ck]<-1
    if(method == "ordinal") {
     # ordinal: half the two end marginals plus all marginals in
     # between, then squared
     diff2[ck]<-nc[c]/2
     if(k > (c+1)) {
      for(g in (c+1):(k-1)) {
       diff2[ck]<-diff2[ck]+nc[g]
      }
     }
     diff2[ck]<-diff2[ck]+nc[k]/2
     diff2[ck]<-diff2[ck]^2
    }
    if(method == "interval") diff2[ck]<-(dv[c]-dv[k])^2
    if(method == "ratio") {
     diff2[ck]<-(dv[c]-dv[k])^2/(dv[c]+dv[k])^2
    }
    ck<-ck+1
   }
  }
  # alpha = 1 - observed disagreement / expected disagreement
  cm$statistic<-1-(cm$nmatchval-1)*sum(utcm*diff2)/sum(ncnk*diff2)
  class(cm)<-"kripp.alpha"
  return(cm)
 }
 else {
  cat("Usage: kripp.alpha(x,method=c(\"nominal\",\"ordinal\",\"interval\",\"ratio\"))\n")
  cat("\twhere x is a classifier by object matrix of classifications\n\n")
 }
}

print.kripp.alpha<-function(x,...) {
 # Print method for "kripp.alpha" objects: show the alpha statistic
 # together with the data level (difference metric) that produced it.
 header<-sprintf("\nKrippendorff's alpha (data level - %s) =",x$data.level)
 cat(header,x$statistic,"\n\n")
}
# mcnemar.mh computes the simple 2x2 McNemar test for marginal
# homogeneity.

mcnemar.mh<-function(x) {
 # Simple 2x2 McNemar test for marginal homogeneity.
 # x: either a 2x2 agreement table, or a raw score matrix (nx2 or 2xn)
 #  which is first cross-tabulated over the two raters.
 # Returns the chi-square statistic and its p value on 1 df, or prints
 # usage information when x is unusable.
 if(is.matrix(x) || is.data.frame(x)) {
  xdim<-dim(x)
  if(length(xdim) == 2) {
   if(any(xdim > 2)) {
    # raw scores: tabulate rater 1 against rater 2
    if(xdim[1] == 2) tab<-as.matrix(table(x[1,],x[2,]))
    else tab<-as.matrix(table(x[,1],x[,2]))
   }
   else {
    tab<-as.matrix(x)
   }
   discordant<-tab[1,2]+tab[2,1]
   stat<-(tab[1,2]-tab[2,1])^2/discordant
   if(discordant < 10)
    warning("low cell counts - consider binomial test")
   return(list(statistic=stat,p=1-pchisq(stat,1)))
  }
  else cat("Dimension higher than 2, cannot compute\n")
 }
 cat("Usage: mcnemar.mh(x)\n")
 cat("\twhere x is an nx2 matrix or data frame of scores\n")
 cat("\tor a 2x2 matrix or data frame of rater agreement on 2 categories\n")
}
page.trend.test<-function(x) {
 # Page's L test for ordered alternatives. x is a matrix of scores with
 # rows as replications and columns as the ordered conditions.
 # Returns the per-row ranks, mean ranks, L, a tabled p value when N and
 # k fall inside the tables, otherwise a normal approximation (Z, pZ).
 # NOTE(review): the returned list carries no class, so the companion
 # print.page.trend.test() is never dispatched automatically.
 if(missing(x))
  stop("Usage: page.trend.test(x)\n\twhere x is a matrix of scores")
 dimx<-dim(x)
 # critical values of L for k=3 conditions; rows are N=2..20 and, as
 # read by the ifelse below, the columns are the .05/.01/.001 values.
 # This one only requires two dimensions
 page.crit3<-
  array(c(28,41,54,66,79,91,104,116,128,141,153,165,178,190,202,215,227,239,251,
  NA,42,55,68,81,93,106,119,131,144,156,169,181,194,206,218,231,243,256,
  NA,NA,56,70,83,96,109,121,134,147,160,172,185,197,210,223,235,248,260),
  c(19,3))
 # critical values for k=4..10, indexed [N-1, k-3, alpha level]
 # the rest require three
 page.crit4plus<-
  array(c(58,84,111,137,163,189,214,240,266,292,317,
  103,150,197,244,291,338,384,431,477,523,570,
  166,244,321,397,474,550,625,701,777,852,928,
  252,370,487,603,719,835,950,1065,1180,1295,1410,
  362,532,701,869,1037,1204,1371,1537,1703,1868,2035,
  500,736,971,1204,1436,1668,1900,2131,2361,2592,2822,
  670,987,1301,1614,1927,2238,2549,2859,3169,3478,3788,
  60,87,114,141,167,193,220,246,272,298,324,
  106,155,204,251,299,346,393,441,487,534,581,
  173,252,331,409,486,563,640,717,793,869,946,
  261,382,501,620,737,855,972,1088,1205,1321,1437,
  376,549,722,893,1063,1232,1401,1569,1736,1905,2072,
  520,761,999,1236,1472,1706,1940,2174,2407,2639,2872,
  696,1019,1339,1656,1972,2288,2602,2915,3228,3541,3852,
  NA,89,117,145,172,198,225,252,278,305,331,
  109,160,210,259,307,355,403,451,499,546,593,
  178,260,341,420,499,577,655,733,811,888,965,
  269,394,516,637,757,876,994,1113,1230,1348,1465,
  388,567,743,917,1090,1262,1433,1603,1773,1943,2112,
  544,790,1032,1273,1512,1750,1987,2223,2459,2694,2929,
  726,1056,1382,1704,2025,2344,2662,2980,3296,3612,3927),
  c(11,7,3))
 # rank the scores within each row
 rankx<-t(apply(x,1,rank))
 mean.ranks<-apply(rankx,2,mean)
 Lval<-NA
 p.table<-NA
 # Page's L: sum over conditions of (rank sum * condition number)
 L<-sum(apply(rankx,2,sum)*1:dimx[2])
 # pick up the tabled critical values when N and k are in range
 if((dimx[1] > 1 && dimx[1] < 13) && (dimx[2] > 3 && dimx[2] < 11))
  Lval<-page.crit4plus[dimx[1]-1,dimx[2]-3,]
 if((dimx[1] > 1 && dimx[1] < 21) && dimx[2] == 3)
  Lval<-page.crit3[dimx[1]-1,]
 # compare L against the .05/.01/.001 critical values (NA if no table)
 p.table<-
  ifelse(L > Lval[1],ifelse(L > Lval[2],ifelse(L > Lval[3],"<=.001","<=.01"),"<=.05"),"NS")
 # print(Lval)
 # if there was no tabled value, calculate the normal approximation
 if(length(Lval)<2) {
  # munum = N*k*(k+1)^2, so the mean of L is munum/4
  munum<-dimx[1]*dimx[2]*(dimx[2]+1)*(dimx[2]+1)
  muL<-munum/4
  cat("muL =",muL,"\n")
  # NOTE(review): sigmaL is displayed but not used by the zL formula
  # below, which has its own closed form -- verify against the source
  sigmaL<-(dimx[1]*dimx[2]*dimx[2]*(dimx[2]*dimx[2]-1)*(dimx[2]*dimx[2]-1))/
   (144*(dimx[2]-1))
  cat("sigmaL =",sigmaL,"\n")
  zL<-((12*L-3*munum)/(dimx[2]*(dimx[2]-1)))*sqrt((dimx[2]-1)/dimx[1])
  pZ<-pnorm(zL,lower.tail=FALSE)
 }
 else {
  zL<-NA
  pZ<-NA
 }
 return(list(ranks=rankx,mean.ranks=mean.ranks,L=L,p.table=p.table,Z=zL,pZ=pZ))
}

print.page.trend.test<-function(x,...) {
 # Print method for page.trend.test() results: shows L, then either the
 # normal-approximation Z and p (when no tabled value was available,
 # signalled by p.table being NA) or the tabled p value.
 # Fixes the original, which referenced the nonexistent x$p.chisq field
 # and ran the Z label straight into the L value with no separator.
 cat("\nPage test for ordered alternatives\n")
 cat("L =",x$L)
 if(is.na(x$p.table)) {
  plabel<-paste("  Z =",x$Z,", p =",x$pZ,sep="",collapse="")
  cat(plabel,"\n\n")
 }
 else cat("  p(table) ",x$p.table,"\n\n")
}

# rater.bias computes a Chi-squared value for a systematic bias
# of one rater compared with another.

rater.bias<-function(x) {
 # Chi-squared test for a systematic bias of one rater relative to
 # another: compares the above-diagonal and below-diagonal totals of
 # the agreement matrix.
 # x: a CxC agreement matrix, or (when its dimensions differ) a raw
 #  nx2 / 2xn score matrix that is first cross-tabulated.
 if(is.matrix(x) || is.data.frame(x)) {
  xdim<-dim(x)
  if(length(xdim) == 2) {
   if(xdim[1] == xdim[2]) {
    agreement<-as.matrix(x)
   }
   else {
    # unequal dimensions: treat as raw scores and tabulate the raters
    if(xdim[1] == 2) agreement<-as.matrix(table(x[1,],x[2,]))
    else agreement<-as.matrix(table(x[,1],x[,2]))
   }
   # display the (possibly tabulated) agreement matrix, as the
   # original did
   print(agreement)
   above<-sum(agreement[upper.tri(agreement)])
   below<-sum(agreement[lower.tri(agreement)])
   chi2<-(above-below)^2/(above+below)
   return(list(statistic=chi2,p=1-pchisq(chi2,1)))
  }
  else cat("Dimension higher than 2, cannot compute\n")
 }
 cat("Usage: rater.bias(x)\n")
 cat("\twhere x is an nx2 or 2xn  matrix of category scores for n objects\n")
 cat("\tor a CxC matrix or data frame of rater agreement on C categories\n")
}
# stuart.maxwell.mh computes the marginal homogeneity test for
# a CxC matrix of assignments of objects to C categories or an
# nx2 or 2xn matrix of category scores for n data objects by two
# raters. The statistic is distributed as Chi-square with C-1
# degrees of freedom.

stuart.maxwell.mh<-function(x) {
 # Stuart-Maxwell marginal homogeneity test for a CxC matrix of
 # assignments (or an nx2 / 2xn score matrix, which is tabulated
 # first). The statistic is chi-square distributed with C-1 df.
 # Fixes two defects in the original:
 #  - the result class was the misspelled "stewart.maxwell", so
 #   print.stuart.maxwell() never dispatched
 #  - dropping perfect-agreement categories could collapse smx to a
 #   vector, breaking the dim() guard; use drop=FALSE
 if(is.matrix(x) || is.data.frame(x)) {
  dimx<-dim(x)
  if(length(dimx) == 2) {
   if(dimx[1] != dimx[2]) {
    # if dimension lengths are unequal, assume it's a score matrix
    if(dimx[1] == 2) smx<-as.matrix(table(x[1,],x[2,]))
    # assume the matrix is nx2
    else smx<-as.matrix(table(x[,1],x[,2]))
   }
   else smx<-as.matrix(x)
   # get the marginals
   rowsums<-apply(smx,1,sum)
   colsums<-apply(smx,2,sum)
   equalsums<-rowsums == colsums
   if(any(equalsums)) {
    # dump any categories with perfect agreement; drop=FALSE keeps
    # the result a matrix even when only one category remains
    smx<-smx[!equalsums,!equalsums,drop=FALSE]
    # bail out if too many categories have disappeared
    if(dim(smx)[1] < 2) stop("Too many equal marginals, cannot compute")
    # get new marginals
    rowsums<-apply(smx,1,sum)
    colsums<-apply(smx,2,sum)
   }
   # use K-1 marginals
   Kminus1<-length(rowsums)-1
   smd<-(rowsums-colsums)[1:Kminus1]
   # covariance matrix of the marginal differences
   smS<-matrix(0,nrow=Kminus1,ncol=Kminus1)
   for(i in 1:Kminus1) {
    for(j in 1:Kminus1) {
     if(i == j) smS[i,j]<-rowsums[i] + colsums[j] - 2 * smx[i,j]
     else smS[i,j]<--(smx[i,j]+smx[j,i])
    }
   }
   # quadratic form d' S^-1 d
   smstat<-t(smd)%*%solve(smS)%*%smd
   p<-1-pchisq(smstat,Kminus1)
   s.m<-list(statistic=smstat,p=p,df=Kminus1)
   class(s.m)<-"stuart.maxwell"
   return(s.m)
  }
  else cat("Dimension higher than 2, cannot compute\n")
 }
 else {
  cat("Usage: stuart.maxwell.mh(x)\n")
  cat("\twhere x is an nx2 matrix or data frame of category scores for n objects\n")
  cat("\tor a CxC matrix or data frame of rater agreement on C categories\n")
 }
}

print.stuart.maxwell<-function(x,...) {
 # Print method for Stuart-Maxwell test results: statistic followed by
 # the chi-square p value with its degrees of freedom.
 cat("\nStuart-Maxwell marginal homogeneity test\n")
 cat(x$statistic,sprintf(" p(x2[%s]) =",x$df),x$p,"\n\n")
}
