R : Copyright 2005, The R Foundation for Statistical Computing
Version 2.1.1 (2005-06-20), ISBN 3-900051-07-0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for a HTML browser interface to help.
Type 'q()' to quit R.

> ### *
> ###
> attach(NULL, name = "CheckExEnv")
> assign(".CheckExEnv", as.environment(2), pos = length(search())) # base
> ## add some hooks to label plot pages for base and grid graphics
> setHook("plot.new", ".newplot.hook")
> setHook("persp", ".newplot.hook")
> setHook("grid.newpage", ".gridplot.hook")
>
> assign("cleanEx",
+   function(env = .GlobalEnv) {
+     rm(list = ls(envir = env, all.names = TRUE), envir = env)
+     RNGkind("default", "default")
+     set.seed(1)
+     options(warn = 1)
+     delayedAssign("T", stop("T used instead of TRUE"),
+       assign.env = .CheckExEnv)
+     delayedAssign("F", stop("F used instead of FALSE"),
+       assign.env = .CheckExEnv)
+     sch <- search()
+     newitems <- sch[! sch %in% .oldSearch]
+     for(item in rev(newitems))
+       eval(substitute(detach(item), list(item=item)))
+     missitems <- .oldSearch[! .oldSearch %in% sch]
+     if(length(missitems))
+       warning("items ", paste(missitems, collapse=", "),
+         " have been removed from the search path")
+   },
+   env = .CheckExEnv)
> assign("..nameEx", "__{must remake R-ex/*.R}__", env = .CheckExEnv) # for now
> assign("ptime", proc.time(), env = .CheckExEnv)
> grDevices::postscript("fields-Examples.ps")
> assign("par.postscript", graphics::par(no.readonly = TRUE), env = .CheckExEnv)
> options(contrasts = c(unordered = "contr.treatment", ordered = "contr.poly"))
> options(warn = 1)
> library('fields')
fields is loaded use help(fields) for an overview of this library
>
> assign(".oldSearch", search(), env = .CheckExEnv)
> assign(".oldNS", loadedNamespaces(), env = .CheckExEnv)
> cleanEx(); ..nameEx <- "BD"
>
> ### * BD
>
> flush(stderr()); flush(stdout())
>
> ### Name: BD
> ### Title: Data frame of the effect of buffer compositions on DNA strand
> ###   displacement amplification. A 4-d regression data set with
> ###   replication. This is a useful test data set for exercising function
> ###   fitting methods.
> ### Aliases: BD
> ### Keywords: datasets
>
> ### ** Examples
>
> # fitting a DNA strand
> # displacement amplification surface to various buffer compositions
> fit<- Tps(BD[,1:4],BD$lnya,scale.type="range")
> surface(fit) # plots fitted surface and contours
plot window will lay out plots in a 2 by 1 matrix
>
>
>
> cleanEx(); ..nameEx <- "Krig.Amatrix"
>
> ### * Krig.Amatrix
>
> flush(stderr()); flush(stdout())
>
> ### Name: Krig.Amatrix
> ### Title: Smoother (or "hat") matrix relating predicted values to the
> ###   dependent (Y) values.
> ### Aliases: Krig.Amatrix
> ### Keywords: spatial
>
> ### ** Examples
>
> # Compute the A matrix or "hat" matrix for a thin plate spline
> # check that this gives the same predicted values
> tps.out<-Tps( ozone$x, ozone$y)
> A<-Krig.Amatrix( tps.out, ozone$x)
> test<- A%*%ozone$y
> # now compare this to predict( tps.out) or tps.out$fitted.values
> # they should be the same
> stats( test- tps.out$fitted.values)
                         [,1]
N                2.000000e+01
mean             2.842171e-15
Std.Dev.         5.832012e-15
min             -7.105427e-15
Q1               0.000000e+00
median           3.552714e-15
Q3               7.105427e-15
max              1.421085e-14
missing values   0.000000e+00
>
>
>
> cleanEx(); ..nameEx <- "Krig"
>
> ### * Krig
>
> flush(stderr()); flush(stdout())
>
> ### Name: Krig
> ### Title: Kriging surface estimate
> ### Aliases: Krig
> ### Keywords: spatial
>
> ### ** Examples
>
> #2-d example
> # fitting a surface to ozone
> # measurements.
Range parameter is 10 (in miles) > > fit <- Krig(ozone$x, ozone$y, exp.cov, theta=10) > > summary( fit) # summary of fit CALL: Krig(x = ozone$x, Y = ozone$y, cov.function = exp.cov, theta = 10) Number of Observations: 20 Number of unique points: 20 Degree of polynomial null space ( base model): 1 Number of parameters in the null space 3 Effective degrees of freedom: 4.6 Residual degrees of freedom: 15.4 MLE sigma 4.202 GCV est. sigma 4.196 MLE rho 2.42 Scale used for covariance (rho) 2.42 Scale used for nugget (sigma^2) 17.66 lambda (sigma2/rho) 7.296 RESIDUAL SUMMARY: min 1st Q median 3rd Q max -7.0210 -2.1780 -0.4694 2.2900 7.3800 COVARIANCE MODEL: exp.cov DETAILS ON SMOOTHING PARAMETER: Method used: GCV Cost: 1 lambda trA GCV GCV.one GCV.model shat 7.296 4.554 22.801 22.801 NA 4.196 Summary of all estimates found for lambda lambda trA GCV shat GCV 7.296 4.554 22.80 4.196 GCV.one 7.296 4.554 22.80 4.196 -Log Profile (REML) 12927.925 3.001 23.07 4.428 > plot(fit) # diagnostic plots of fit plot window will lay out plots in a 2 by 2 matrix > surface( fit, type="C") # look at the surface > out.p<- predict.surface.se( fit) > image(out.p) > > # predict at data > > predict( fit) [1] 38.85986 39.30038 38.66573 39.02891 39.43544 39.78314 39.13259 39.73376 [9] 40.05118 39.66674 40.38644 40.17673 40.67950 40.39894 40.63593 40.03237 [17] 39.45989 39.51825 40.30555 40.35815 > > # predict on a grid ( grid chosen here by defaults) > out<- predict.surface( fit) > persp( out) > > # predict at arbitrary points (10,-10) and (20, 15) > xnew<- rbind( c( 10, -10), c( 20, 15)) > predict( fit, xnew) [1] 38.81907 40.11750 > > # standard errors of prediction based on covariance model. > predict.se( fit, xnew) [1] 1.642566 2.943582 > > # > # Roll your own: using a user defined Gaussian covariance > # > test.cov <- function(x1,x2,theta){exp(-(rdist(x1,x2)/theta)**2)} > # use this and put in quadratic polynomial fixed function > fit.flame<- Krig(flame$x, flame$y, test.cov, m=3, theta=.5) Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV, Krig.fgcv, : GCV search gives a minumum at the endpoints of the grid search Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV.one, Krig.fgcv.one, : GCV search gives a minumum at the endpoints of the grid search > # > # note how range parameter is passed to Krig. > # BTW: GCV indicates an interpolating model (nugget variance is zero) > # > # take a look ... > surface(fit.flame, type="I") > > # > # Thin plate spline fit to ozone data using the radial > # basis function as a generalized covariance function > # > # p=2 is the power in the radial basis function (with a log term added for > # even dimensions) > # If m is the degree of derivative in penalty then p=2m-d > # where d is the dimension of x. p must be greater than 0. > # In the example below p = 2*2 - 2 = 2 > # > # See also the Fields function Tps > > out<- Krig( ozone$x, ozone$y, rad.cov, m=2, p=2, + scale.type="range",decomp="WBW") # these last two options make sure the > # the results with Tps are identical > # A Knot example > > data(ozone2) > y16<- ozone2$y[16,] # there are some missing values! > good<- !is.na( y16) > y<- y16[good] # Krig does not remove missing values. > x<- ozone2$lon.lat[ good,] > > # > # the knots can be arbitrary but just for fun find them with a space > # filling design. 
Here we select 50 from the full set of 147 points > # > xknots<- cover.design( x, 50)$design # select 50 knot points Warning in cover.design(x, 50) : Number of nearst neighbors (nn) reduced to the actual number of candidates > > out<- Krig( x, y, knots=xknots, rad.cov, m=2, p=2,scale.type="range") [1] "Maximum likelihood estimates not found with knots \n \n " > > # Zee plot > surface( out, type="C") > US( add=TRUE) > points( x, col=2) > points( xknots, cex=2, pch="O") > > summary( out) CALL: Krig(x = x, Y = y, cov.function = rad.cov, knots = xknots, m = 2, scale.type = "range", p = 2) Number of Observations: 147 Number of unique points: 147 Degree of polynomial null space ( base model): 1 Number of parameters in the null space 3 Effective degrees of freedom: 14.8 Residual degrees of freedom: 132.2 MLE sigma NA GCV est. sigma 15.03 MLE rho NA Scale used for covariance (rho) 12970 Scale used for nugget (sigma^2) 225.9 lambda (sigma2/rho) 0.01742 RESIDUAL SUMMARY: min 1st Q median 3rd Q max -35.4500 -9.7420 -0.8308 7.6510 49.2300 COVARIANCE MODEL: rad.cov Knot model: 50 knots supplied to define basis functions DETAILS ON SMOOTHING PARAMETER: Method used: GCV Cost: 1 lambda trA GCV GCV.one GCV.model shat 0.01742 14.82910 882.13193 251.25826 NA 15.03036 Summary of all estimates found for lambda lambda trA GCV shat GCV 1.742e-02 14.829 882.1 15.03 GCV.one 1.743e-03 30.396 211.7 12.96 -Log Profile (REML) 1.021e+03 3.001 1585.0 23.28 > # > # note that that trA found by GCV is around 15 so 50>15 knots may be a > # reasonable approximation to the full estimator. > # > surface( out, type='C') > # obs and knots > points( x) > points( xknots, col=2) > > # Correlation model example > > # fit krig surface using a mean and sd function to standardize > # first get stats from 1987 summer Midwest O3 data set > # Compare the function Tps to the call to Krig given above > # fit tps surfaces to the mean and sd points. > # (a shortcut is being taken here just using the lon/lat coordinates) > data(ozone2) > stats.o3<- stats( ozone2$y) > mean.o3<- Tps( ozone2$lon.lat, c( stats.o3[2,])) > sd.o3<- Tps( ozone2$lon.lat, c( stats.o3[3,])) > > # Now use these to fit particular day ( day 16) > > y16<- ozone2$y[16,] # there are some missing values! > good<- !is.na( y16) > > fit<- Krig( ozone2$lon.lat[good,], y16[good],exp.earth.cov, theta=353, + mean.obj=mean.o3, sd.obj=sd.o3) > # > # the finale > surface( fit, type="I") > US( add=TRUE) > title("Estimated ozone surface") > > > > > > cleanEx(); ..nameEx <- "RMprecip" > > ### * RMprecip > > flush(stderr()); flush(stdout()) > > ### Name: RMprecip > ### Title: Monthly total precipitation (mm) for August 1963 in the Rocky > ### Mountain Region > ### Aliases: RMprecip > ### Keywords: datasets > > ### ** Examples > > # this data set was created the > # historical data taken from > # Observed monthly precipitation, min and max temperatures for the coterminous US > # 1895-1997 > # NCAR_pinfill > # see the Geophysical Statistics Project datasets page for the supporting functions > # and details. > > # plot > as.image( RMprecip$y, x=RMprecip$x)-> look > image.plot( look) > US( add=TRUE, col=2, lty=2) > > # > > > > > cleanEx(); ..nameEx <- "Tps" > > ### * Tps > > flush(stderr()); flush(stdout()) > > ### Name: Tps > ### Title: Thin plate spline regression > ### Aliases: Tps > ### Keywords: smooth > > ### ** Examples > > #2-d example > > fit<- Tps(ozone$x, ozone$y) # fits a surface to ozone measurements. > plot(fit) # diagnostic plots of fit and residuals. 
plot window will lay out plots in a 2 by 2 matrix > summary(fit) CALL: Tps(x = ozone$x, Y = ozone$y, cov.function = "Thin plate spline radial basis functions (rad.cov) ") Number of Observations: 20 Number of unique points: 20 Degree of polynomial null space ( base model): 1 Number of parameters in the null space 3 Effective degrees of freedom: 4.5 Residual degrees of freedom: 15.5 MLE sigma 4.099 GCV est. sigma 4.073 MLE rho 205.1 Scale used for covariance (rho) 205.1 Scale used for nugget (sigma^2) 16.8 lambda (sigma2/rho) 0.0819 RESIDUAL SUMMARY: min 1st Q median 3rd Q max -6.803 -1.437 -0.506 1.442 7.790 COVARIANCE MODEL: rad.cov DETAILS ON SMOOTHING PARAMETER: Method used: GCV Cost: 1 lambda trA GCV GCV.one GCV.model shat 0.0819 4.5063 21.4094 21.4094 NA 4.0725 Summary of all estimates found for lambda lambda trA GCV shat GCV 0.0819 4.506 21.41 4.073 GCV.one 0.0819 4.506 21.41 4.073 -Log Profile (REML) 152.2185 3.001 23.06 4.427 > > # predict onto a grid that matches the ranges of the data. > > out.p<-predict.surface( fit) > image( out.p) > surface(out.p) # perspective and contour plots of GCV spline fit plot window will lay out plots in a 2 by 1 matrix > # predict at different effective > # number of parameters > out.p<-predict.surface( fit,df=10) > > #1-d example > out<-Tps( rat.diet$t, rat.diet$trt) # lambda found by GCV > plot( out$x, out$y) > lines( out$x, out$fitted.values) > > # > # compare to the ( much faster) one spline algorithm > # sreg(rat.diet$t, rat.diet$trt) > # > # > # simulation reusing > fit<- Tps( rat.diet$t, rat.diet$trt) > true<- fit$fitted.values > N<- length( fit$y) > temp<- matrix( NA, ncol=50, nrow=N) > sigma<- fit$shat.GCV > for ( k in 1:50){ + ysim<- true + sigma* rnorm(N) + temp[,k]<- predict(fit, y= ysim) + } > matplot( fit$x, temp, type="l") > > # > #4-d example > fit<- Tps(BD[,1:4],BD$lnya,scale.type="range") > surface(fit) plot window will lay out plots in a 2 by 1 matrix > # plots fitted surface and contours > #2-d example using a reduced set of basis functions > r1 <- range(flame$x[,1]) > r2 <-range( flame$x[,2]) > g.list <- list(seq(r1[1], r1[2],6), seq(r2[1], r2[2], 6)) > knots<- make.surface.grid(g.list) > # these knots are a 6X6 grid over > # the ranges of the two flame variables > out<-Tps(flame$x, flame$y, knots=knots, m=3) [1] "Maximum likelihood estimates not found with knots \n \n " > surface( out, type="I") > points( knots) > > > > cleanEx(); ..nameEx <- "US" > > ### * US > > flush(stderr()); flush(stdout()) > > ### Name: US > ### Title: Plot of the US with state boundaries > ### Aliases: US > ### Keywords: hplot > > ### ** Examples > > # Draw map in device color # 3 > US( col=3) > > > > cleanEx(); ..nameEx <- "W.info" > > ### * W.info > > flush(stderr()); flush(stdout()) > > ### Name: W.info > ### Title: Gives indexing imfomration for a wavelet decompostion > ### Aliases: W.info W.i2s W.s2i > ### Keywords: spatial > > ### ** Examples > > > # series of length 64 where the coarsest level is 8 father basis functions. > W.info(64, 8) $m [1] 64 $cut.min [1] 8 $S [1] 1 8 $H [,1] [,2] [1,] 9 16 [2,] 17 32 [3,] 33 64 $L [,1] [1,] 8 [2,] 16 [3,] 32 $Lmax [1] 3 $offset [,1] [1,] 8 [2,] 16 [3,] 32 > > #index for 4th basis location at the 2nd level > > W.s2i( 4, 2, m=64, cut.min=8) [1] 20 > # location and level for the 48th basis function. 
> W.i2s( 48, m=64, cut.min=8) $level [1] 3 $i [1] 16 $m [1] 64 $cut.min [1] 8 > > > > cleanEx(); ..nameEx <- "Wimage.cov" > > ### * Wimage.cov > > flush(stderr()); flush(stdout()) > > ### Name: Wimage.cov > ### Title: Functions for W-transform based covariance models > ### Aliases: Wimage.cov Wimage.sim > ### Keywords: spatial > > ### ** Examples > > > # This example creates a block/diagonal for H that is close to a > # Matern covariance > # The example while lengthy is a practical run through of the process > # of creating a reasonable sparse representation for H. > > M<- 16 # a 16X16 grid of locations > MM<- M**2 > x<- 1:M > y<- 1:M > grid<- list( x=x, y=y) > cut.min<- 4 > # H0 is father and mothers at first level > # with cut.min=4 > > # get indices associated with H0 > I<-rep( 1:cut.min, cut.min) > J<- sort( I) > Wimage.s2i(I, J, flavor=0, level=0, cut.min=cut.min, m=M, n=M, mat=FALSE)-> ind0 > Wimage.s2i(I, J, flavor=1, level=1, cut.min=cut.min, m=M, n=M, mat=FALSE)-> ind1 > Wimage.s2i(I, J, flavor=2, level=1, cut.min=cut.min, m=M, n=M, mat=FALSE)-> ind2 > Wimage.s2i(I, J, flavor=3, level=1, cut.min=cut.min, m=M, n=M, mat=FALSE)-> ind3 > IND<- c( ind0, ind1, ind2, ind3) > NH<- length( IND) # H0 will be NH by NH in size. > > # the Matern covariance function range=3, smoothness=1. > cov.obj<- matern.image.cov( grid=grid, setup=TRUE, theta=3, smoothness=1) > > # find elements of D0= H0**2 using the formula > # D0 = W Sigma W.t where W is the 2-d W transform ( the inverse of W.i above). > # and Sigma is the Matern covariance > # > # the following looping algorithms are a way to avoiding explicitly creating > # gigantic covariance matrices and wavelet basis matrices associated with large > # grids. Of course in this example the grid is small enough (16X16) that the > # matrices could be formed explicitly > # > > D0<- matrix( 0, NH,NH) > # fill in D0 > for ( k in 1:NH){ + tmp<- matrix( 0, M,M) + tmp[IND[k]] <- 1 + hold<- Wtransform.image( tmp, transpose=TRUE, cut.min=cut.min) + hold<- matern.image.cov(Y=hold, cov.obj=cov.obj) + hold<- Wtransform.image(hold, cut.min=cut.min) + D0[k,] <- hold[IND] + } > > # sqrt of D0 > temp<- eigen( D0, symmetric=TRUE) > > # next line should be H0<-temp$vectors > > H0<- temp$vectors %*% diag(sqrt(temp$values))%*% t(temp$vectors) > > # find H1 > H1<- matrix(0,M,M) > for ( k in 1:MM){ + tmp<- matrix( 0, M,M) + tmp[k] <- 1 + hold<- Wtransform.image( tmp, transpose=TRUE, cut.min=cut.min) + hold<- matern.image.cov(Y=hold, cov.obj=cov.obj) + hold<- Wtransform.image(hold, cut.min=cut.min) + H1[k] <- sqrt(hold[k]) } > # remember that H1 has the H0 diagonal values set to 1. > H1[IND] <- 1 > > #OK good to go. Create the H.obj list > H.obj<- list( m=M, n=M, ind0=IND, cut.min=cut.min, H0=H0, H1=H1) > # > # > # > > # mutliply the covariance matrix by a random vector > tmp<- matrix( rnorm( 256), 16,16) > Wimage.cov( tmp, H.obj=H.obj)-> look > > # generate a random field > Wimage.sim(H.obj)-> look > image.plot( look) > > #A check that this really works! > #find the 135 =(9-1)*16 +7 == (7,9) row of covariance matrix > # we know what this should look like. 
> > Wimage.cov( ind= 135, H.obj=H.obj, find.row=TRUE)-> look > image.plot( x,y,look) # the covariance of the grid point (7,9) with > # all other grid points -- a bump centered at (7,9) > > #multiply a vector by just a subset of Sigma > > ind<- sample( 1:256,50) # random set of 50 indices > Y<- rnorm( 50) # random vector of length 50 > Wimage.cov(Y, ind= ind, H.obj=H.obj)[ind]-> look > > # look is now of length 50 -- as expected. > # In R notation, this finding Sigma[ind, ind] > > # OK suppose you really need Sigma[ind,ind] > # e.g. in order for solve( Sigma[ind,ind], u) > # here is a loop to do it. > > Sigma<- matrix( NA, 50,50) > for ( k in 1:50){ + # for each member of subset find row and subset to just the columns identified by ind + Sigma[k,] <- c( Wimage.cov( ind= ind[k], H.obj=H.obj, find.row=TRUE)[ind]) + } > > > > > > > cleanEx(); ..nameEx <- "Wimage.info" > > ### * Wimage.info > > flush(stderr()); flush(stdout()) > > ### Name: Wimage.info > ### Title: Finds key indices related to a 2-d multiresolution > ### Aliases: Wimage.info Wimage.i2s Wimage.s2i > ### Keywords: spatial > > ### ** Examples > > #Find a basis function. > # For a basis on a 64X64 image with cut.min=8 find a > # horizontal basis function at second level of resolution in the (4,4) > # position. ( There are 16X16 horizontal basis functions at the 2 nd level > # > Wimage.s2i( i=3,j=2, level=2, flavor=1, m=64, n=64, cut.min=8)-> ind > tmp<- matrix( 0, 64,64) > tmp[ind] <- 1 > Wtransform.image( tmp, cut.min=8, inv=TRUE)-> look > image.plot( look) > > # A check of Wimage.i2s > Wimage.i2s( ind,m=64, n=64, cut.min=8) $i [1] 3 $j [1] 2 $level [1] 2 $flavor [1] 1 $cut.min [1] 8 $m [1] 64 $n [1] 64 > # should get i=3,j=2, level=2, flavor=1 > > # complete check of functions > Wimage.i2s( 1:512, cut.min=8, m=16, n=32)-> look > Wimage.s2i( look, cut.min=8, m=16, n=32, mat=FALSE)-> look2 > sum( look2 - (1:512)) # sum should be zero [1] 0 > > # W transform of John Lennon image > data(lennon) > m<- nrow(lennon) > n<- ncol(lennon) > look<- Wtransform.image( lennon, cut.min=8) > > # get info > info<- Wimage.info( n, m, cut.min=8) > # Keep only the coarest level of S,H,V,Di basis functions coefficients, > # set the rest to zero. > > tmp<- matrix( 0, m,n) > > # fill in smooths > indm<- info$S[1]: info$S[2] > indn<- info$S[3]: info$S[4] > tmp[ indm, indn] <- look[indm, indn] > > #horizontals > indm<- info$H[1]: info$H[2] > indn<- info$H[3]: info$H[4] > tmp[ indm, indn] <- look[indm, indn] > #verticals > indm<- info$V[1]: info$V[2] > indn<- info$V[3]: info$V[4] > tmp[ indm, indn] <- look[indm, indn] > #diagonals > indm<- info$Di[1]: info$Di[2] > indn<- info$Di[3]: info$Di[4] > tmp[ indm, indn] <- look[indm, indn] > > look2<- Wtransform.image( tmp, cut.min=8, inv=TRUE) > # take a look at (severely) filtered image, Happy Halloween! > set.panel( 2,1) plot window will lay out plots in a 2 by 1 matrix > image( lennon) > image( look2) > > > > > cleanEx(); ..nameEx <- "Wimage.info.plot" > > ### * Wimage.info.plot > > flush(stderr()); flush(stdout()) > > ### Name: Wimage.info.plot > ### Title: Plot to check 2-d multiresolution indexing > ### Aliases: Wimage.info.plot > ### Keywords: spatial > > ### ** Examples > > # > # 64X 128 image coarsest level has 8X16 smooth basis funcitons. > # > # NOTE as a matrix the image plot is upside down! Rows are along X-axis and > # columns on Y > # e.g. the (1,1) element is the lower left corner. 
> # > > Wimage.info.plot( 64, 128, cut.min=8) > > > > > cleanEx(); ..nameEx <- "Wtransform" > > ### * Wtransform > > flush(stderr()); flush(stdout()) > > ### Name: Wtransform > ### Title: Quadratic W wavelet transform for 1-d vectors or rectangular or > ### cylindrical images > ### Aliases: Wtransform.image Wtransform Wtransform.cylinder.image > ### Keywords: spatial > > ### ** Examples > > # W transform of a simple function > x<- seq( 0,1,256) > y<- x*(1-x)**3 + ifelse( x>.3, x,0) > > Wtransform( y)-> out > > # plot the implied wavelet basis functions > ID<- diag( 1, 256) > WQS.basis( 256)-> Wbasis > set.panel(2,2) plot window will lay out plots in a 2 by 2 matrix > matplot( 1:256, Wbasis[,1:8], type="l", lty=c(1,2), col=2) > title("Father") > matplot( 1:256, Wbasis[,9:16], type="l", lty=c(1,2), col=2) > title("Mother") > matplot( 1:256, Wbasis[,17:32], type="l", lty=c(1,2), col=2) > title("Mother scaled by 2") > matplot( 1:256, Wbasis[,33:64], type="l", lty=c(1,2), col=2) > title("Mother scaled by 4") > > set.panel( 1,1) plot window will lay out plots in a 1 by 1 matrix > > # test that the transform works > > # Precise definition of what the transform is doing in terms of > # explicit matrix multiplication all of > # these should be machine zero > # Note that the direct matrix multiplications will be substantially slower > # for large vectors. > # y<- rnorm( 256) > # y<- y /sqrt(mean( y**2)) > > #sqrt(mean( c( Wtransform(y, inv=TRUE) - Wbasis > #sqrt(mean( c(Wtransform(y, inv=TRUE, transpose=TRUE) - t(Wbasis) > #sqrt(mean( c(Wtransform(y) - solve(Wbasis) > #sqrt(mean( c(Wtransform(y, transpose=TRUE) - t(solve(Wbasis)) > > # > ## 2-d examples > # > > # Wtransform of John Lennon image > data(lennon) > look<- Wtransform.image( lennon) > # > ### take a look: > # image.plot( look) > #threshhold > thr<- quantile( abs( look), .95) > temp<- look > temp[abs(look)< thr] <- 0 > look2<- Wtransform.image( temp, inv=TRUE) > # > ### take a look: > # image( look2) # 95 % compressed image > > > # a diagonal detail basis function > # we find this by just multipling W by a unit vector! > > temp<- matrix(0, nrow=32, ncol=32) > temp[8,5]<- 1 > look<- Wtransform.image( temp , inv=TRUE, cut.min=4) > image( look) > title("diagonal detail W-wavelet") > > #just for fun: redo this example for all indices up to 8! > # > #set.panel( 8,8) > #par( mar=c(0,0,0,0)) > #for ( k in (1:8)){ > #for ( j in (1:8)){ > #temp<- matrix( 0 , nx,ny) > #temp[k,j] <- 1 > #Wtransform.image( temp, inv=T, cut.min=cut.min)-> look > #image( look, axes=FALSE, xlab="", ylab="") > #box() > #} > #} > > # examine a basis function to see periodic condition enforces along > # X axis of image. > temp<- matrix(0, nrow=32, ncol=32) > temp[8,5]<- 1 > image( Wtransform.cylinder.image( temp , inv=TRUE, cut.min=4)) > # now along Y-axis > image( Wtransform.cylinder.image( temp , inv=TRUE, cut.min=4, byX=FALSE)) > > # reset panel > set.panel( 1,1) plot window will lay out plots in a 1 by 1 matrix > > > > cleanEx(); ..nameEx <- "add.image" > > ### * add.image > > flush(stderr()); flush(stdout()) > > ### Name: add.image > ### Title: Adds an image to an existing plot. 
> ### Aliases: add.image > ### Keywords: hplot > > ### ** Examples > > plot( 1:10, 1:10, type="n") > data( lennon) > > add.image( 5,4,lennon, col=grey( (0:256)/256)) > # reference lines > xline( 5, col=2) > yline( 4,col=2) > > par( read.only=TRUE)-> save.par Warning in par(args) : parameter "read.only" cannot be set > > # > # add lennon right in the corner beyond the plotting region > # > > par(new=TRUE, plt=c(0,1,0,1), mar=c(0,0,0,0), usr=c(0,1,0,1)) > add.image( 0,0, lennon, adj.x=0, adj.y=0) > > > > > graphics::par(get("par.postscript", env = .CheckExEnv)) > cleanEx(); ..nameEx <- "arrow.plot" > > ### * arrow.plot > > flush(stderr()); flush(stdout()) > > ### Name: arrow.plot > ### Title: Adds arrows to a plot > ### Aliases: arrow.plot > ### Keywords: aplot > > ### ** Examples > > # > # 20 random directions at 20 random points > > x<- runif( 20) > y<- runif( 20) > u<- rnorm( 20) > v<- rnorm( 20) > plot( x,y) > arrow.plot( x,y,u,v) # a default that is unattractive > > plot( x,y, type="n") > arrow.plot( x,y,u,v, arrow.ex=.2, length=.1, col='green', lwd=2) > # thicker lines in green, smaller heads and longer tails. Note length, col and lwd are > # options that the arrows function itself knows about. > > > > > cleanEx(); ..nameEx <- "as.image" > > ### * as.image > > flush(stderr()); flush(stdout()) > > ### Name: as.image > ### Title: Creates image from irregular x,y,z > ### Aliases: as.image > ### Keywords: manip > > ### ** Examples > > # convert precip data to 50X50 image > look<- as.image( RMprecip$y, x= RMprecip$x, nrow=50, ncol=50) > image.plot( look) > > # number of obs in each cell. > image.plot( look$x ,look$y, look$N, col=terrain.colors(50)) > # hot spot is around Denver > > > > cleanEx(); ..nameEx <- "as.surface" > > ### * as.surface > > flush(stderr()); flush(stdout()) > > ### Name: as.surface > ### Title: Creates an "surface" object from grid values. 
> ### Aliases: as.surface > ### Keywords: manip > > ### ** Examples > > > > # Make a perspective of the surface Z= X**2 -Y**2 > # Do this by evaluating quadratic function on a 25 X 25 grid > > grid.l<-list( X= seq( -2,2,,25), Y= seq( -2,2,,25)) > xg<-make.surface.grid( grid.l) > # xg is a 625X2 matrix that has all pairs of X and Y grid values > z<- xg[,1]**2 - xg[,2]**2 > # now fold z in the matrix format needed for persp > out.p<-as.surface( xg, z) > persp( out.p) > # also try plot( out.p) to see the default plot for a surface object > > > > cleanEx(); ..nameEx <- "bplot" > > ### * bplot > > flush(stderr()); flush(stdout()) > > ### Name: bplot > ### Title: boxplot > ### Aliases: bplot > ### Keywords: hplot > > ### ** Examples > > # > set.seed(123) > temp<- matrix( rnorm(12*8), ncol=12) > pos<- c(1:6,9:14) > bplot(temp) > # > bplot( temp, pos=pos, labels=paste( "D",1:12), horizontal=TRUE) > # > bplot( temp, pos=pos, label.cex=0, horizontal=TRUE) > # add an axis > axis( 2) > > > > cleanEx(); ..nameEx <- "bplot.xy" > > ### * bplot.xy > > flush(stderr()); flush(stdout()) > > ### Name: bplot.xy > ### Title: Boxplots for conditional distribution > ### Aliases: bplot.xy > ### Keywords: hplot > > ### ** Examples > > # condition on swim times to see how run times vary > bplot.xy( minitri$swim, minitri$run, N=5) > > # bivariate normal corr= .6 > set.seed( 123) > x<-rnorm( 1000) > y<- .6*x + sqrt( 1- .6**2)*rnorm( 1000) > # > # > bplot.xy( x,y, breaks=seq( -3, 3,,15) ,xlim =c(-4,4), ylim =c(-4,4)) > points( x,y, pch=".", col=3) > > > > cleanEx(); ..nameEx <- "colorbar.plot" > > ### * colorbar.plot > > flush(stderr()); flush(stdout()) > > ### Name: colorbar.plot > ### Title: Plots one or more color scale strips as a symbol > ### Aliases: colorbar.plot > ### Keywords: hplot > > ### ** Examples > > # set up a plot but don't plot points and no "box" > plot( 1:10, (1:10)*10, type="n", bty="n") > # of course this could be anything > > y<- cbind( 1:15, 26:35) Warning in cbind(1:15, 26:35) : number of rows of result is not a multiple of vector length (arg 2) > > colorbar.plot( 2.5, 30, y) > points( 2.5,30, pch="+", cex=2, adj=.5) > # note that strip is still in 1:8 aspect even though plot has very > # different ranges for x and y. > > # adding legend using image.plot > zr<- range( c( y)) > image.plot( legend.only=TRUE, zlim= zr) > # see help(image.plot) to create more room in margin etc. > > zr<- rbind( c(1,20), c(1,100)) # separate ranges for columns of y. > colorbar.plot( 5, 70, y, adj.x=0, zrange= zr) > # some reference lines to show placement > xline( 5, lty=2) # strip starts at x=5 > yline(70, lty=2) # strip is centered around y=7 (because adj.y=.5 by default) > > # many strips on common scale. > > y<- matrix( 1:200, ncol=10) > colorbar.plot( 2, 75, y, horizontal=FALSE, col=rainbow(256)) > > # Xmas strip > y<- cbind( rep( c(1,2),10)) > y[15] <- NA # NA's should work > colorbar.plot( 6, 45, y, adj.y=1,col=c("red", "green")) > text(6,48,"Christmas strip", cex=2) > > # lennon thumbnail > # there are better ways to this ... see add.image for example. 
> data( lennon) > colorbar.plot( 7.5,22, lennon, + strip.width=.25, strip.length=.25, col=grey(seq( 0,1,,256))) > > > > > cleanEx(); ..nameEx <- "cover.design" > > ### * cover.design > > flush(stderr()); flush(stdout()) > > ### Name: cover.design > ### Title: Computes Space-Filling "Coverage" designs using Swapping > ### Algorithm > ### Aliases: cover.design > ### Keywords: spatial > > ### ** Examples > > ## > ## > # first generate candidate set > set.seed(123) # setting seed so that you get the same thing I do! > test.df <- matrix( runif( 600), ncol=3) > > test1.des<-cover.design(R=test.df,nd=10) > > summary( test1.des) Call: cover.design(R = test.df, nd = 10) Number of design points: 10 Number of fixed points: 0 Optimality Criterion: 0.4016 History: step swap.out swap.in new.crit 0 0 0 0.6866 1 48 13 0.5770 2 45 143 0.5374 3 63 163 0.5132 4 35 144 0.5132 5 157 90 0.5024 6 29 47 0.4812 7 159 145 0.4420 8 72 84 0.4396 9 143 184 0.4303 10 163 176 0.4302 11 90 56 0.4301 12 47 149 0.4208 13 145 138 0.4201 14 64 71 0.4016 > plot( test1.des) > > # > candidates<- make.surface.grid( list( seq( 0,5,,20), seq(0,5,,20))) > out<- cover.design( candidates, 15) > > # find 10 more points keeping this original design fixed > > out3<-cover.design( candidates, 10,fixed=out$best.id) > # see what happened > > plot( candidates[,1:2], pch=".") > points( out$design, pch="x") > points( out3$design, pch="o") > > # here is a strange graph illustrating the swapping history for the > # the first design. Arrows show the swap done > # at each pass through the design. > > h<- out$history > cd<- candidates > plot( cd[,1:2], pch=".") > points( out$design, pch="O", col=2) > points( out$start.design, pch="x", col=5) > > arrows( + cd[h[,2],1], + cd[h[,2],2], + cd[h[,3],1], + cd[h[,3],2],length=.1) > text( cd[h[,2],1], + cd[h[,2],2], h[,1], cex=1.0 ) > > > # > # try this out using "Manhattan distance" > # ( distance following a grid of city streets) > > dist.man<- function(x1,x2) { + d<- ncol( x1) + temp<- abs(outer( x1[,1], x2[,1],'-')) + for ( k in 2:d){ + temp<- temp+abs(outer( x1[,k], x2[,k],'-')) + } + temp } > > # use the design from the Euclidean distance as the starting > #configuration. > > cover.design( candidates, 15, DIST=dist.man, start= out3$best.id)-> out2 > # this takes a while ... > plot( out2$design) > points( out3$design, col=2) > > # find a design on the sphere > # > > candidates<- make.surface.grid( list( x=seq( -180,180,,20), y= seq( -85, + 85,,20))) > > out4<-cover.design( candidates, 15, DIST=rdist.earth) > # this takes a while > > plot( candidates, pch="+", cex=2) > points(out4$design, pch="o", cex=2, col="blue") > > # covering based on correlation for 153 ozone stations > # > data( ozone2) > > cor.mat<-cor( ozone2$y, use="pairwise") > > cor.dist<- function( x1,x2) + {matrix( 1-cor.mat[ x1,x2], ncol=length(x2))} > > # > # find 25 points out of the 153 > # here the "locations" are just the index but the distance is > # determined by the correlation function. 
> # > out5<-cover.design(cbind(1:153),25, DIST= cor.dist, scale.type="unscaled") > > plot( ozone2$lon.lat, pch=".") > points( ozone2$lon.lat[out5$best.id,],pch="O", col=4) > # > # this seems a bit strange probably due some funny correlation values > # > > # reset panel > set.panel(1,1) plot window will lay out plots in a 1 by 1 matrix > > > > > cleanEx(); ..nameEx <- "exp.cov" > > ### * exp.cov > > flush(stderr()); flush(stdout()) > > ### Name: exp.cov > ### Title: Exponential, Gaussian and "power" covariance family > ### Aliases: exp.cov exp.cov.simple > ### Keywords: spatial > > ### ** Examples > > # exponential covariance matrix ( marginal variance =1) for the ozone > #locations > out<- exp.cov( ozone$x, theta=100) > > # out is a 20X20 matrix > out2<- exp.cov( ozone$x[6:20,],ozone$x[1:2,], theta=100) > # out2 is 15X2 matrix > # Kriging fit where the nugget variance is found by GCV > fit<- Krig( ozone$x, ozone$y, exp.cov, theta=100) > > > > cleanEx(); ..nameEx <- "fields" > > ### * fields > > flush(stderr()); flush(stdout()) > > ### Name: fields > ### Title: fields - tools for spatial data > ### Aliases: fields > ### Keywords: datasets > > ### ** Examples > > set.panel() plot window will lay out plots in a 1 by 1 matrix > fit<- Tps(ozone$x, ozone$y) # fits a thin plate spline surface to ozone > # measurements. > summary(fit) #summary of the fit CALL: Tps(x = ozone$x, Y = ozone$y, cov.function = "Thin plate spline radial basis functions (rad.cov) ") Number of Observations: 20 Number of unique points: 20 Degree of polynomial null space ( base model): 1 Number of parameters in the null space 3 Effective degrees of freedom: 4.5 Residual degrees of freedom: 15.5 MLE sigma 4.099 GCV est. sigma 4.073 MLE rho 205.1 Scale used for covariance (rho) 205.1 Scale used for nugget (sigma^2) 16.8 lambda (sigma2/rho) 0.0819 RESIDUAL SUMMARY: min 1st Q median 3rd Q max -6.803 -1.437 -0.506 1.442 7.790 COVARIANCE MODEL: rad.cov DETAILS ON SMOOTHING PARAMETER: Method used: GCV Cost: 1 lambda trA GCV GCV.one GCV.model shat 0.0819 4.5063 21.4094 21.4094 NA 4.0725 Summary of all estimates found for lambda lambda trA GCV shat GCV 0.0819 4.506 21.41 4.073 GCV.one 0.0819 4.506 21.41 4.073 -Log Profile (REML) 152.2185 3.001 23.06 4.427 > plot(fit) # diagnostic plots of fit and residuals. plot window will lay out plots in a 2 by 2 matrix > surface( fit, type="I") # image and contour plot of the fitted surface > > > > > cleanEx(); ..nameEx <- "gcv.Krig" > > ### * gcv.Krig > > flush(stderr()); flush(stdout()) > > ### Name: gcv.Krig > ### Title: Finds profile likelihood and GCV estimates of smoothing > ### parameters > ### Aliases: gcv.Krig > ### Keywords: spatial > > ### ** Examples > > > # > Tps( ozone$x, ozone$y)-> obj # default is to find lambda by GCV > summary( obj) CALL: Tps(x = ozone$x, Y = ozone$y, cov.function = "Thin plate spline radial basis functions (rad.cov) ") Number of Observations: 20 Number of unique points: 20 Degree of polynomial null space ( base model): 1 Number of parameters in the null space 3 Effective degrees of freedom: 4.5 Residual degrees of freedom: 15.5 MLE sigma 4.099 GCV est. 
sigma 4.073 MLE rho 205.1 Scale used for covariance (rho) 205.1 Scale used for nugget (sigma^2) 16.8 lambda (sigma2/rho) 0.0819 RESIDUAL SUMMARY: min 1st Q median 3rd Q max -6.803 -1.437 -0.506 1.442 7.790 COVARIANCE MODEL: rad.cov DETAILS ON SMOOTHING PARAMETER: Method used: GCV Cost: 1 lambda trA GCV GCV.one GCV.model shat 0.0819 4.5063 21.4094 21.4094 NA 4.0725 Summary of all estimates found for lambda lambda trA GCV shat GCV 0.0819 4.506 21.41 4.073 GCV.one 0.0819 4.506 21.41 4.073 -Log Profile (REML) 152.2185 3.001 23.06 4.427 > > gcv.Krig( obj)-> out > print( out$lambda.est) # results agree with Tps summary lambda trA GCV shat GCV 0.08190223 4.506280 21.40938 4.072536 GCV.model NA NA NA NA GCV.one 0.08190223 4.506280 21.40938 4.072536 RMSE NA NA NA NA pure error NA NA NA NA -Log Profile (REML) 152.21851072 3.001189 23.06327 4.427461 > > # a simulation example > x<- seq( 0,1,,200) > f<- x**2*( 1-x) > f<- f/sqrt( var( f)) > > Tps( x,f)-> obj # create Krig object Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV, Krig.fgcv, : GCV search gives a minumum at the endpoints of the grid search Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV.one, Krig.fgcv.one, : GCV search gives a minumum at the endpoints of the grid search > > set.seed(123) # let's all use the same seed > sigma<- .1 > > hold<- matrix( NA, ncol=4, nrow=100) > > for( k in 1 :100){ + # look at GCV estimates of lambda + # new data simulated + y<- f + rnorm(200)*sigma + # save GCV estimates + hold[k,]<- gcv.Krig(obj, y=y, give.warnings=FALSE)$lambda.est[1,] + } > plot( hold[,2], hold[,4], xlab="estimated eff. df", ylab="sigma hat") > yline( sigma, col=2) > # note some occaisional flaky behaviour with GCV ( eff df > 20!) > > > > > cleanEx(); ..nameEx <- "grid.list" > > ### * grid.list > > flush(stderr()); flush(stdout()) > > ### Name: grid list > ### Title: Grid list for describing equally spaced grids > ### Aliases: grid list > ### Keywords: misc > > ### ** Examples > > #Given below are some examples of grid.list objects and the results > #when they are used with make.surface.grid. Note that > #make.surface.grid returns a matrix that retains the grid.list > #information as an attribute. 
> > grid.l<- list( 1:3, 2:5) > make.surface.grid(grid.l) [,1] [,2] [1,] 1 2 [2,] 2 2 [3,] 3 2 [4,] 1 3 [5,] 2 3 [6,] 3 3 [7,] 1 4 [8,] 2 4 [9,] 3 4 [10,] 1 5 [11,] 2 5 [12,] 3 5 attr(,"format") what [1,] 1 3 [2,] 2 4 attr(,"surface.info") attr(,"surface.info")$x [1] 1 2 3 attr(,"surface.info")$y [1] 2 3 4 5 attr(,"surface.info")$nx [1] 3 attr(,"surface.info")$ny [1] 4 attr(,"surface.info")$xlab NULL attr(,"surface.info")$ylab NULL attr(,"surface.info")$fixed.variables list() attr(,"surface.info")$nvar [1] 2 attr(,"grid.list") attr(,"grid.list")[[1]] [1] 1 2 3 attr(,"grid.list")[[2]] [1] 2 3 4 5 attr(,"class") [1] "surface.grid" > > grid.l <- list( 1:3, 10, 1:3) > make.surface.grid(grid.l) [,1] [,2] [,3] [1,] 1 10 1 [2,] 2 10 1 [3,] 3 10 1 [4,] 1 10 2 [5,] 2 10 2 [6,] 3 10 2 [7,] 1 10 3 [8,] 2 10 3 [9,] 3 10 3 attr(,"format") what [1,] 1 3 [2,] 3 3 attr(,"surface.info") attr(,"surface.info")$x [1] 1 2 3 attr(,"surface.info")$y [1] 1 2 3 attr(,"surface.info")$nx [1] 3 attr(,"surface.info")$ny [1] 3 attr(,"surface.info")$xlab NULL attr(,"surface.info")$ylab NULL attr(,"surface.info")$fixed.variables attr(,"surface.info")$fixed.variables[[1]] [1] 10 attr(,"surface.info")$nvar [1] 3 attr(,"grid.list") attr(,"grid.list")[[1]] [1] 1 2 3 attr(,"grid.list")[[2]] [1] 10 attr(,"grid.list")[[3]] [1] 1 2 3 attr(,"class") [1] "surface.grid" > > #The next set of examples show how the grid.list can be used to > #control surface plotting and evaluation of an estimated function. > > # first create a test function > X<- 2*cbind( runif(50), runif(50), runif(50)) > dimnames( X)<- list(NULL, c("X1","X2","X3")) > y<- X[,1]**2 + X[,2]**2 + exp(X[,3]) > > # fit an interpolating thin plate spline > out<- Tps( X,y,lambda=1e-8) Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV, Krig.fgcv, : GCV search gives a minumum at the endpoints of the grid search Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV.one, Krig.fgcv.one, : GCV search gives a minumum at the endpoints of the grid search > grid.l<- list( X1="x", X2="y") > surface( out, grid.l, type="I") # surface plot of estimated surface with > # X3= mean value > > grid.l<- list( X1= seq( 0,2,,25), X2=1.0, X3=seq(0,2,,25)) > surface( out,grid.l, type="I") > # surface plot based on a 25X25 grid in X1 an X3 > # over the square [0,2] and [0,2] > # holding X2 equal to 1.0. 
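# A small sketch (not run in this check) of the same evaluation done "by hand":
# build the grid with make.surface.grid, evaluate the fit with predict, and fold
# the result back with as.surface. The 25 point grids and the fixed value
# X2 = 1.0 are illustrative choices, not values used above.
#
# glist<- list( X1= seq( 0,2,,25), X2= 1.0, X3= seq( 0,2,,25))
# xg<- make.surface.grid( glist)      # 625 X 3 matrix of evaluation points
# zhat<- predict( out, xg)            # evaluate the Tps fit at the grid points
# image.plot( as.surface( xg, zhat))  # same surface as surface( out, glist, type="I")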
> # > > > > cleanEx(); ..nameEx <- "image.count" > > ### * image.count > > flush(stderr()); flush(stdout()) > > ### Name: image.count > ### Title: Creates image for a 2-d histogram from irregular locations > ### Aliases: image.count > ### Keywords: smooth > > ### ** Examples > > look<- image.count( RMprecip$x, nrow=32, ncol=32) > image.plot( look) > > > > cleanEx(); ..nameEx <- "image.cov" > > ### * image.cov > > flush(stderr()); flush(stdout()) > > ### Name: image.cov > ### Title: Exponential,Gaussian, "power" and Matern covariance functions > ### for 2-d gridded locations > ### Aliases: exp.image.cov matern.image.cov > ### Keywords: spatial > > ### ** Examples > > # multiply 2-d isotropic exponential with theta=4 by a random vector > junk<- matrix(rnorm(100*100), 100,100) > cov.obj<- exp.image.cov( setup=TRUE, grid=list(x=1:100,y=1:100), theta=8) > result<- exp.image.cov(Y=junk,cov.obj=cov.obj) > > image( matrix( result, 100,100)) > # do it again, no setup needed > # junk2<- matrix(rnorm(100**2, 100,10)) > # result2<- exp.image.cov(Y=junk2, cov.obj=cov.obj) > > > # generate a grid and set of indices based on discretizing the locations > # in the precip dataset > data(RMprecip) > out<-as.image( RMprecip$y, x= RMprecip$x) > ind1<- out$ind > grid<- list( x= out$x, y=out$y) > # there are 750 gridded locations > > # setup to create cov.obj for exponential covariance with range= 1.25 > # p=1 is the default > cov.obj<- exp.image.cov( setup=TRUE, grid=grid, theta=1.25) > > # multiply covariance matrix by an arbitrary vector > junk<- rnorm(750) > result<- exp.image.cov( ind1, ind1, Y= junk,cov.obj=cov.obj) Warning: number of items to replace is not a multiple of replacement length > > # The brute force way would be > # result<- exp.cov( RMprecip$x, RMprecip$x,theta=1.25, C=junk) > > # evaluate the covariance between all grid points and the center grid point > Y<- matrix(0,cov.obj$m, cov.obj$n) > Y[32,32]<- 1 > result<- exp.image.cov( Y= Y,cov.obj=cov.obj) > # covariance surface with respect to the grid point at (32,32) > # > # reshape "vector" as an image > temp<- matrix( result, cov.obj$m,cov.obj$n) > image.plot(cov.obj$grid$x,cov.obj$grid$y, temp) > # or persp( cov.obj$grid$x,cov.obj$grid$y, temp) > > # check out the Matern. > cov.obj<- matern.image.cov( setup=TRUE, grid=grid, theta=1.25, + smoothness=2) > Y<- matrix(0,64,64) > Y[16,16]<- 1 > result<- matern.image.cov( Y= Y,cov.obj=cov.obj) > temp<- matrix( result, cov.obj$m,cov.obj$n) > image.plot( cov.obj$grid$x,cov.obj$grid$y, temp) > # Note we have centered at the location (16,16) for this case > > > > > cleanEx(); ..nameEx <- "image.plot" > > ### * image.plot > > flush(stderr()); flush(stdout()) > > ### Name: image.plot > ### Title: Draws image plot with a legend strip for the color scale. 
> ### Aliases: image.plot > ### Keywords: hplot > > ### ** Examples > > > x<- 1:10 > y<- 1:15 > z<- outer( x,y,"+") > image.plot(x,y,z) > > # or obj<- list( x=x,y=y,z=z); image.plot(obj) > > # now add some points on diagonal with some clipping anticipated > points( 5:12, 5:12, pch="X", cex=3) > > # > #fat (5% of figure) and short (50% of figure) legend strip on the bottom > image.plot( x,y,z,legend.width=.05, legend.shrink=.5, horizontal=TRUE) > > > set.panel() plot window will lay out plots in a 1 by 1 matrix > > # > # Here is a strategy to add a common legend to a panel of plots > # consult the split.screen help file for more explanations > # draw two images top and bottom and add a single legned strip on the right side > # first divide screen into the figure region and strip on the right ot put a > # legend. > > split.screen( rbind(c(0, .8,0,1), c(.8,1,0,1))) [1] 1 2 > > # now divide up the figure region > split.screen(c(2,1), screen=1)-> ind > > zr<- range( 2,35) > # first image > screen( ind[1]) > image( x,y,z, col=tim.colors(), zlim=zr) > > # second image > screen( ind[2]) > image( x,y,z+10, col=tim.colors(), zlim =zr) > > # move to skinny region on right and draw the legend strip > screen( 2) > image.plot( zlim=zr,legend.only=TRUE, smallplot=c(.1,.2, .3,.7), + col=tim.colors()) > > close.screen( all=TRUE) > > # you can always add a legend arbitrarily to any plot; > # note that here the plot is too big for the vertical strip but the > # horizontal fits nicely. > plot( 1:10, 1:10) > image.plot( zlim=c(0,25), legend.only=TRUE) > image.plot( zlim=c(0,25), legend.only=TRUE, horizontal =TRUE) > > # combining the usual image function and adding a legend > # first change margin for some more room > ## Not run: > ##D par( mar=c(10,5,5,5)) > ##D image( x,y,z, col=topo.colors(64)) > ##D image.plot( zlim=c(0,25), nlevel=64,legend.only=TRUE, horizontal=TRUE) > ## End(Not run) > # > # > # sorting difference in formatting between matrix storage and the image > #plot depiction > A<- matrix( 1:48, ncol=6) > # Note that matrix(c(A), ncol=6) == A > image.plot(1:8, 1:6, A) > # add labels to each box > text( c( row(A)), c( col(A)), A) > # and the indices ... > text( c( row(A)), c( col(A))-.25, + paste( "(", c(row(A)), ",",c(col(A)),")", sep=""), col="grey") > > # "columns" of A are horizontal and rows are ordered from bottom to top! > # > # matrix in its usual tabluar form where the rows are y and columns are x > image.plot( t( A[6:1,]), axes=FALSE) > > > > > > > cleanEx(); ..nameEx <- "image.smooth" > > ### * image.smooth > > flush(stderr()); flush(stdout()) > > ### Name: image.smooth > ### Title: Kernel smoother for irregular 2-d data > ### Aliases: image.smooth > ### Keywords: smooth > > ### ** Examples > > # first convert precip data to the 128X128 discretized image format ( with > # missing values to indicate where data is not observed) > # > out<- as.image( RMprecip$y, x= RMprecip$x, nrow=128, ncol=128) > # out$z is the image matrix > > dx<- out$x[2]- out$x[1] > dy<- out$y[2] - out$y[1] > # > look<- image.smooth( out$z, dx=dx, dy=dy, theta= .25) > > # grid scale in degree and so kernel bandwidth is .25 degrees. 
> image.plot( x= out$x, y=out$y, z= look) > points( RMprecip$x) > > # to save on computation, decrease the padding with zeroes > > look<- image.smooth( out$z, dx=dx, dy=dy, theta= .25, Mwidth=32,Nwidth=32) > > # the range of these data is ~ 10 and so 32*( 10/128) = 2.5 > # about 10 standard deviations of the normal kernel so there is still > # lots of room for padding > # a minimal choice might be Mwidth = 4* (.25/dx) 4 SD for the normal > # > # creating weighting object outside the call > # this is useful when one wants to smooth different data sets but on the > # same grid with the same kernel function > # > > wght<- image.smooth.setup( nrow=128, ncol=128, dx=dx, dy=dy, theta= .25, + Mwidth=32, Nwidth=32) > > # > # random fields from smoothing white noise with this filter. > # > look<- image.smooth( matrix( rnorm(128**2), 128,128), wght) > # take a look: image.plot( look) > > > > > cleanEx(); ..nameEx <- "interp.surface" > > ### * interp.surface > > flush(stderr()); flush(stdout()) > > ### Name: interp.surface > ### Title: Fast bilinear interpolator from a grid. > ### Aliases: interp.surface > ### Keywords: spatial > > ### ** Examples > > # > # evaluate an image at a finer grid > # > > data( lennon) > # create the surface object > obj<- list( x= 1:20, y=1:20, z= lennon[ 201:220, 201:220]) > > # sample at 50 equally spaced points > temp<- seq( 1,20,,50) > make.surface.grid( list( temp,temp))-> loc > interp.surface( obj, loc)-> look > # take a look > image.plot( as.surface( loc, look)) > > > > > cleanEx(); ..nameEx <- "krig.image" > > ### * krig.image > > flush(stderr()); flush(stdout()) > > ### Name: krig.image > ### Title: Spatial process estimate for large irregular 2-d dats sets. > ### Aliases: krig.image > ### Keywords: spatial > > ### ** Examples > > # > # fit a monthly precipitation field over the Rocky Mountains > # grid is 64X64 > out<- krig.image( x= RMprecip$x, Y = RMprecip$y, m=64,n=64,cov.function= + exp.image.cov, + lambda=.5, theta=1, kmax=100) Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated > > # > # range parameter for exponential here is .5 degree in lon and lat. > #diagnostic plots. 
> plot( out) plot window will lay out plots in a 2 by 2 matrix > > # look at the surface > image.plot( out$surface) #or just surface( out) > > # > #simulate 4 realizations from the conditional distribution > look<- sim.krig.image( out, nreps=4) Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated > # take a look: plot( look) > > # check out another values of lambda reusing some of the objects from the > # first fit > > out2<- krig.image( RMprecip$x, RMprecip$y, cov.function= exp.image.cov, + lambda=4, + start= out$omega2,cov.obj=out$cov.obj) Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated > # > # some of the obsare lumped together into a singel grid box > # > # find residuals among grid box means and predictions > res<- predict( out2, out2$xM) - out2$yM > #compare with sizes of out2$residuals (raw y data) > > #starting values from first fit in out$omega2 > # covariance and grid information are > # bundled in the cov.obj > ## > > # > ## fitting a thin plate spline. The default here is a linear null space > ## and second derivative type penalty term. 
> ## you will just have to try different values of lambda vary them on > ## log scale to > > out<- krig.image( RMprecip$x, RMprecip$y, cov.function=rad.image.cov, + lambda=1, m=64, n=64, p=2, kmax=300) Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated > > # take a look: image.plot( out$surface) > > # check out different values reuse some of the things to make it quicker > # note addition of kmax argument to increase teh number of iterations > > out2<- krig.image( RMprecip$x, RMprecip$y,cov.function=rad.image.cov, + lambda=.5, start= out$omega2, cov.obj=out$cov.obj, kmax=400) Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated > > # here is something rougher > out3<- krig.image( RMprecip$x, RMprecip$y,cov.function=rad.image.cov, + lambda=1e-2, start= out2$omega2, cov.obj=out$cov.obj,kmax=400, + tol=1e-3) Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated > > # here is something close to an interpolation > out4<- krig.image( RMprecip$x, RMprecip$y,cov.function=rad.image.cov, + lambda=1e-7, start= out3$omega2, cov.obj=out$cov.obj,kmax=500, tol=1e-3) Warning in return(yM, xM, weightsM, uniquerows, shat.rep, shat.pure.error, : multi-argument returns are deprecated iterative solution of linear system Warning in return(beta, delta, rhohat, rho, sigma2, shat.MLE) : multi-argument returns are deprecated > > #compare the the four surfaces: > # but note the differences in scales ( fix zlim to make them the same) > # > # take a look > # set.panel( 2,2) > # image.plot( out$surface) > # points( out$x, pch=".") > > # image.plot( out2$surface) > # image.plot( out3$surface) > # image.plot( out4$surface) > > # some diagnostic plots) > set.panel( 4,4) plot window will lay out plots in a 4 by 4 matrix > plot( out, graphics.reset=FALSE) > plot( out2, graphics.reset=FALSE) > plot( out3, graphics.reset=FALSE) > plot( out4, graphics.reset=FALSE) > set.panel(1,1) plot window will lay out plots in a 1 by 1 matrix > > > > cleanEx(); ..nameEx <- "matern.cov" > > ### * matern.cov > > flush(stderr()); flush(stdout()) > > ### Name: matern.cov > ### Title: Matern covariance function > ### Aliases: matern.cov > ### Keywords: spatial > > ### ** Examples > > # > # Presenting the Matern family: > # the function matern is called by matern.cov > d<- seq( 0,5,,200) > sm<- seq( .5, 8,,5) > temp<- matrix( NA, 200, 5) > for ( k in 1:5){ + temp[,k] <- matern(d, smoothness=sm[k]) + } > matplot( d, temp, type="l", lty=1) > # note differing correlation scales depending on smoothness > > # Matern covariance matrix ( marginal variance =1) for the ozone > # locations > out<- matern.cov( ozone$x, theta=100, smoothness=1.0) > # out is a 20X20 matrix > > out2<- matern.cov( ozone$x[6:20,],ozone$x[1:2,], theta=100, + smoothness=1.0) > > # out2 is 15X2 cross covariance matrix > > # Kriging fit using a Matern covariance and where the nugget and > # sill variances are found by GCV > fit<- Krig( ozone$x, ozone$y, matern.cov, theta=100, smoothness=1.0) 
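# A short follow-up sketch (not run in this check): the Matern-based Krig fit
# can be examined with the same tools used for the exponential fit above.
#
# surface( fit, type="C")             # contour plot of the fitted surface
# out.p<- predict.surface.se( fit)    # prediction standard errors on a default grid
# image.plot( out.p)
# predict.se( fit, rbind( c(10,-10), c(20,15)))  # standard errors at two new points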
> > > > > cleanEx(); ..nameEx <- "ozone" > > ### * ozone > > flush(stderr()); flush(stdout()) > > ### Name: ozone > ### Title: Data set of ozone measurements at 20 Chicago monitoring > ### stations. > ### Aliases: ozone > ### Keywords: datasets > > ### ** Examples > > fit<- Tps(ozone$x, ozone$y) > # fitting a surface to ozone measurements. > surface( fit, type="I") > > > > cleanEx(); ..nameEx <- "ozone2" > > ### * ozone2 > > flush(stderr()); flush(stdout()) > > ### Name: ozone2 > ### Title: Daily 8-hour ozone averages for sites in the Midwest > ### Aliases: ozone2 > ### Keywords: datasets > > ### ** Examples > > data( ozone2) > > # pairwise correlation among all stations > # ( See cover.design to continue this example) > cor.mat<- cor( ozone2$y, use="pairwise") > > #raw data image for day number 16 > good<- !is.na( ozone2$y[16,]) > out<- as.image( ozone2$y[16,good], x=ozone2$lon.lat[good,]) > image.plot( out) > > > > cleanEx(); ..nameEx <- "plot.Krig" > > ### * plot.Krig > > flush(stderr()); flush(stdout()) > > ### Name: plot.Krig > ### Title: Diagnostic and summary plots of the Krig object > ### Aliases: plot.Krig > ### Keywords: spatial > > ### ** Examples > > fit<-Krig(ozone$x, ozone$y,exp.cov, theta=200) > # fitting a surface to ozone > # measurements > plot(fit) plot window will lay out plots in a 2 by 2 matrix > > > > cleanEx(); ..nameEx <- "plot.Wimage" > > ### * plot.Wimage > > flush(stderr()); flush(stdout()) > > ### Name: plot.Wimage > ### Title: Plots 2-d wavelet coefficents by level and type > ### Aliases: plot.Wimage > ### Keywords: hplot > > ### ** Examples > > > old.par<- par(no.readonly=TRUE) # these functions may leave the device in > # with some funny defaults > > # > #multiresolution of John Lennon > data(lennon) > Wtransform.image( lennon, cut.min=8)->look > plot.Wimage( look, cut.min=16) > > # adding information > plot.Wimage( look, cut.min=16, Nlevel=3, graphics.reset=FALSE)-> plot.layout Still in split.screen mode Use: screen.close( all=TRUE) to close this panel after adding to plots > # plot.layout here is a 4X3 matrix with the screen numbers > > # move to the smooth coefficients plot > > screen( plot.layout[1,1]) > plot( c(.5,16.5), c( .5,16.5), type="n",axes=FALSE) > points( 8,8, cex=2, pch="+") > > # NOTE: just points( 8,8) will not work here. This has to do with split.screen not > # reseting the plotting pars correctly. 
> > box(col=6, lwd=2)# just for fun > > close.screen( all=TRUE) > > par( old.par) # reset old settings > > > > > graphics::par(get("par.postscript", env = .CheckExEnv)) > cleanEx(); ..nameEx <- "plot.sreg" > > ### * plot.sreg > > flush(stderr()); flush(stdout()) > > ### Name: plot.sreg > ### Title: Plots smoothing spline regression object > ### Aliases: plot.sreg > ### Keywords: smooth > > ### ** Examples > > fit<-sreg(rat.diet$t,rat.diet$con) > # fit rat data > plot(fit) # plot sreg fit plot window will lay out plots in a 2 by 2 matrix > > > > cleanEx(); ..nameEx <- "plot.surface" > > ### * plot.surface > > flush(stderr()); flush(stdout()) > > ### Name: plot.surface > ### Title: Plots a surface > ### Aliases: plot.surface > ### Keywords: hplot > > ### ** Examples > > fit<- Tps( BD[,1:4], BD$lnya) # fit surface to data > > # make grid list > grid<-list( KCl="x", MgCl2=mean(BD[,2]),KPO4="y", dNTP=mean(BD[,4])) > > # evalute on a grid on two > # variables holding two fixed at their mean levels > out.p<-predict.surface(fit, grid, extrap=TRUE) > > plot.surface(out.p, type="C") # surface plot > > > > cleanEx(); ..nameEx <- "poisson.cov" > > ### * poisson.cov > > flush(stderr()); flush(stdout()) > > ### Name: poisson.cov > ### Title: Poisson spherical covariance function > ### Aliases: poisson.cov > ### Keywords: spatial > > ### ** Examples > > # plot of covariance > > x<- make.surface.grid( list( x=seq( -180,180,,40), y= seq( -85,85,,40))) > x0<- matrix( c(0,0), ncol=2) > look<- poisson.cov( x,x0, eta=.5) > image.plot(as.surface(x,look)) > > > > cleanEx(); ..nameEx <- "predict.Krig" > > ### * predict.Krig > > flush(stderr()); flush(stdout()) > > ### Name: predict.Krig > ### Title: Evaluation of Krig spatial process estimate. > ### Aliases: predict.Krig > ### Keywords: spatial > > ### ** Examples > > Krig(ozone$x,ozone$y,exp.cov, theta=50) ->fit > predict( fit) # gives predicted values at data points [1] 38.49391 38.91631 38.37986 38.89197 39.08970 39.81358 38.65230 39.55112 [9] 39.72397 39.68370 40.73275 40.15051 40.88835 40.61397 41.03108 40.26902 [17] 39.70742 39.32884 40.93054 40.76059 > > grid<- make.surface.grid( list( seq( -40,40,,15), seq( -40,40,,15))) > > look<- predict(fit,grid) # evaluate on a grid of points > > # some useful graphing functions > out.p<- as.surface( grid, look) # reformat into $x $y $z image-type object > contour( out.p) > > # refit with 10 degrees of freedom in surface > > look<- predict(fit,grid, df=15) > > # re fit with random data and lambda found by GCV > look<- predict( fit, grid, y= rnorm( 20), gcv=TRUE) Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV, Krig.fgcv, : GCV search gives a minumum at the endpoints of the grid search Warning in Krig.find.gcvmin(info, lambda.grid, gcv.grid$GCV.one, Krig.fgcv.one, : GCV search gives a minumum at the endpoints of the grid search > > # NOTE: look is a list now look$predicted predicted values and look$lambda > # the value of lambda > > out.p<-as.surface( grid, look$predicted) > contour( out.p) > > > > cleanEx(); ..nameEx <- "predict.se.Krig" > > ### * predict.se.Krig > > flush(stderr()); flush(stdout()) > > ### Name: predict.se.Krig > ### Title: Standard errors of predictions for Krig spatial process estimate > ### Aliases: predict.se.Krig > ### Keywords: spatial > > ### ** Examples > > # > # Note: in these examples predict.se will default to predict.se.Krig using > # a Krig object > > fit<- Krig(ozone$x,ozone$y,exp.cov, theta=10) # krig fit > predict.se.Krig(fit) # std errors of predictions [1] 1.640636 
> cleanEx(); ..nameEx <- "predict.se.Krig"
> 
> ### * predict.se.Krig
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.se.Krig
> ### Title: Standard errors of predictions for Krig spatial process estimate
> ### Aliases: predict.se.Krig
> ### Keywords: spatial
> 
> ### ** Examples
> 
> #
> # Note: in these examples predict.se will default to predict.se.Krig using
> # a Krig object
> 
> fit<- Krig(ozone$x,ozone$y,exp.cov, theta=10) # krig fit
> predict.se.Krig(fit) # std errors of predictions
 [1] 1.640636 1.660596 1.627399 1.784598 1.577245 2.167278 1.597545 1.655090
 [9] 1.811672 2.064568 2.252637 1.877326 2.597958 2.124358 2.225172 2.566069
[17] 1.963677 1.730222 2.480966 2.169103
> 
> # make a grid of X's
> xg<-make.surface.grid( list(seq(-27,34,,40),seq(-20,35,,40)))
> out<- predict.se.Krig(fit,xg) # std errors of predictions
> 
> # at the grid points; out is a vector of length 1600
> # reshape the grid points into a 40X40 matrix etc.
> out.p<-as.surface( xg, out)
> image.plot( out.p)
> 
> # this is equivalent to the single step function
> # (but the default is not to extrapolate beyond the data)
> out<- predict.surface.se( fit)
> image.plot( out)
> 
> # Investigate misspecification
> #
> # first call Krig to create the Krig object.
> #
> Krig( ozone$x, ozone$y, cov.function=exp.cov, theta=100)-> fit
> 
> # note how the new cov. parameters are specified just like in Krig
> predict.se(fit,xg)-> look
> predict.se( fit, xg, cov.function=exp.cov, theta=2.0, sigma2=1)-> look2
> 
> set.panel( 2,1)
plot window will lay out plots in a 2 by 1 matrix
> image.plot( as.surface( xg, look))
> points( fit$x)
> image.plot( as.surface( xg, look2))
> set.panel( 1,1)
plot window will lay out plots in a 1 by 1 matrix
> 
> 
> 
> cleanEx(); ..nameEx <- "predict.se"
> 
> ### * predict.se
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.se
> ### Title: Standard errors of predictions
> ### Aliases: predict.se
> ### Keywords: spatial
> 
> ### ** Examples
> 
> fit<-Tps(ozone$x,ozone$y)
> predict.se(fit) # std errors of predictions
 [1] 1.518289 1.628278 1.475313 1.661505 1.482796 2.033112 1.498287 1.515260
 [9] 1.776488 1.932977 2.314889 1.693362 2.694220 1.993171 2.337021 2.565067
[17] 1.804249 1.562608 2.550111 2.082594
> xg<- expand.grid(seq(-15,15,,40),seq(-20,20,,40))
> out<- predict.se(fit,xg)
> image.plot( matrix( out, 40))
> 
> 
> 
> cleanEx(); ..nameEx <- "predict.surface"
> 
> ### * predict.surface
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.surface
> ### Title: Evaluates a fitted function as a surface object
> ### Aliases: predict.surface
> ### Keywords: spatial
> 
> ### ** Examples
> 
> fit<- Tps( BD[,1:4], BD$lnya) # fit surface to data
> 
> # evaluate on a grid over two
> # variables, holding the other two fixed
> # default surface and contour plot
> # make grid list
> grid.list<- list( KCl="x", MgCl2=mean(BD[,2]),KPO4="y",dNTP=mean(BD[,4]))
> 
> out.p<- predict.surface(fit,grid.list, extrap=TRUE)
> 
> surface(out.p)
plot window will lay out plots in a 2 by 1 matrix
> 
> set.panel(1,1)
plot window will lay out plots in a 1 by 1 matrix
> 
> 
> 
> cleanEx(); ..nameEx <- "predict.surface.se"
> 
> ### * predict.surface.se
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.surface.se
> ### Title: Standard errors of predictions
> ### Aliases: predict.surface.se
> ### Keywords: spatial
> 
> ### ** Examples
> 
> fit<-Tps(ozone$x,ozone$y) # tps fit
> out<- predict.surface.se( fit)
> 
> surface( out)
plot window will lay out plots in a 2 by 1 matrix
> 
> # or ...
> 
> image.plot( out)
> 
> 
> 
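## A sketch (not run here) of viewing the estimate and its uncertainty together;
## it assumes contour() accepts the $x, $y, $z surface lists used above:
#  est<- predict.surface( fit)             # the fitted surface itself
#  contour( est)                           # estimate
#  contour( out, add=TRUE, col=2, lty=2)   # standard-error contours on top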
> cleanEx(); ..nameEx <- "print.Krig"
> 
> ### * print.Krig
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: print.Krig
> ### Title: Print kriging fit results.
> ### Aliases: print.Krig
> ### Keywords: spatial
> 
> ### ** Examples
> 
> fit<- Krig(ozone$x,ozone$y,exp.cov, theta=100)
> print(fit) # print the summary
Call: Krig(x = ozone$x, Y = ozone$y, cov.function = exp.cov, theta = 100)
 Number of Observations:                        20
 Degree of polynomial null space ( base model): 1
 Number of parameters in the null space         3
 Model degrees of freedom:                      4.8
 Residual degrees of freedom:                   15.2
 GCV estimate for sigma:                        4.085
 MLE for sigma:                                 4.105
 lambda                                         0.96
 rho                                            17.54
 sigma^2                                        16.85
> fit # this will work too
Call: Krig(x = ozone$x, Y = ozone$y, cov.function = exp.cov, theta = 100)
 Number of Observations:                        20
 Degree of polynomial null space ( base model): 1
 Number of parameters in the null space         3
 Model degrees of freedom:                      4.8
 Residual degrees of freedom:                   15.2
 GCV estimate for sigma:                        4.085
 MLE for sigma:                                 4.105
 lambda                                         0.96
 rho                                            17.54
 sigma^2                                        16.85
> 
> 
> 
> cleanEx(); ..nameEx <- "qsreg"
> 
> ### * qsreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: qsreg
> ### Title: Quantile or Robust spline regression
> ### Aliases: qsreg
> ### Keywords: smooth
> 
> ### ** Examples
> 
> 
> # fit a CV quantile spline
> fit50<- qsreg(rat.diet$t,rat.diet$con)
> # (default is .5 so this is an estimate of the conditional median)
> # control group of rats.
> plot( fit50)
plot window will lay out plots in a 2 by 2 matrix
> predict( fit50)
 [1] 20.49998 20.40356 20.25621 19.84628 19.89900 20.14532 20.99036 21.50002
 [9] 22.79981 24.83986 25.14912 25.28184 24.40011 24.39903 24.83234 26.21044
[17] 26.64899 27.37894 27.55002 27.45862 27.25645 27.60002 27.74726 27.84999
[25] 27.72075 27.80000 27.98235 27.60001 27.44900 27.19902 27.79998 28.19900
[33] 27.99998 27.42413 27.89900 28.69814 28.57225 28.59999 27.50000
> # predicted values at data points
> xg<- seq(0,110,,50)
> plot( fit50$x, fit50$y)
> lines( xg, predict( fit50, xg))
> 
> # A robust fit to rat diet data
> #
> SC<- .5* median(abs((rat.diet$con- median(rat.diet$con))))
> fit.robust<- qsreg(rat.diet$t,rat.diet$con, sc= SC)
> plot( fit.robust)
plot window will lay out plots in a 2 by 2 matrix
> 
> # The global GCV function suggests little smoothing so
> # try the local
> # minimum with the largest lambda instead of this default value.
> # one should consider redoing the three quantile fits in this
> # example after looking at the cv functions and choosing a good value for
> # lambda
> # for example
> lam<- fit50$cv.grid[,1]
> tr<- fit50$cv.grid[,2]
> # lambda close to df=6
> lambda.good<- max(lam[tr>=6])
> fit50.subjective<-qsreg(rat.diet$t,rat.diet$con, lam= lambda.good)
> fit10<-qsreg(rat.diet$t,rat.diet$con, alpha=.1, nstep.cv=200)
> fit90<-qsreg(rat.diet$t,rat.diet$con, alpha=.9, nstep.cv=200)
> # spline fits at 50 equally spaced points
> sm<- cbind(
+ 
+ predict( fit10, xg),
+ predict( fit50.subjective, xg),predict( fit50, xg),
+ predict( fit90, xg))
> 
> # and now the data ...
> plot( fit50$x, fit50$y)
> # and now the quantile splines at the 10%, 50% and 90% levels
> #
> matlines( xg, sm, col=c( 3,3,2,3), lty=1) # the splines
> 
> 
> 
> 
> cleanEx(); ..nameEx <- "rdist"
> 
> ### * rdist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: rdist
> ### Title: Euclidean distance matrix
> ### Aliases: rdist
> ### Keywords: spatial
> 
> ### ** Examples
> 
> out<- rdist( ozone$x)
> # out is a 20X20 matrix.
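## A quick cross-check sketch (not part of the original run): since rdist
## returns plain Euclidean distances, base R should reproduce the same matrix,
#  all.equal( out, as.matrix( dist( ozone$x)), check.attributes=FALSE)
## assuming ozone$x is an ordinary numeric matrix of coordinates.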
> out2<- rdist( ozone$x[1:5,], ozone$x[11:20,])
> # out2 is a 5X10 matrix
> 
> 
> 
> cleanEx(); ..nameEx <- "rdist.earth"
> 
> ### * rdist.earth
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: rdist.earth
> ### Title: Great circle distance matrix
> ### Aliases: rdist.earth
> ### Keywords: spatial
> 
> ### ** Examples
> 
> out<- rdist.earth ( ozone$lon.lat)
> # out is a 20X20 distance matrix
> 
> 
> 
> cleanEx(); ..nameEx <- "set.panel"
> 
> ### * set.panel
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: set.panel
> ### Title: Specify a panel of plots
> ### Aliases: set.panel
> ### Keywords: hplot
> 
> ### ** Examples
> 
> set.panel(5,2) # divide screen to hold 10 plots where there are 5 rows
plot window will lay out plots in a 5 by 2 matrix
> # and 2 columns
> plot( 1:10)
> plot( 2:8)
> 
> set.panel() # reset screen to one plot per screen
plot window will lay out plots in a 1 by 1 matrix
> 
> 
> 
> cleanEx(); ..nameEx <- "sim.rf"
> 
> ### * sim.rf
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sim.rf
> ### Title: Simulates a random field
> ### Aliases: sim.rf
> ### Keywords: spatial
> 
> ### ** Examples
> 
> # Simulate a Gaussian random field with an exponential covariance function,
> # range parameter (theta) = 0.5; the domain is [0,5]X[0,5], evaluating the
> # field at a 100X100 grid.
> grid<- list( x= seq( 0,5,,100), y= seq(0,5,,100))
> obj<-exp.image.cov( grid=grid, theta=.5, setup=TRUE)
> look<- sim.rf( obj)
> # Now simulate another ...
> look2<- sim.rf( obj)
> # take a look
> set.panel(2,1)
plot window will lay out plots in a 2 by 1 matrix
> image.plot( grid$x, grid$y, look)
> title("simulated gaussian field")
> image.plot( grid$x, grid$y, look2)
> title("another (independent) realization ...")
> 
> 
> 
> cleanEx(); ..nameEx <- "smooth.2d"
> 
> ### * smooth.2d
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: smooth.2d
> ### Title: Kernel smoother for irregular 2-d data
> ### Aliases: smooth.2d
> ### Keywords: smooth
> 
> ### ** Examples
> 
> # Normal kernel smooth of the precip data with a bandwidth of .25 (degrees)
> #
> look<- smooth.2d( RMprecip$y, x=RMprecip$x, theta=.25)
> 
> # finer resolution used in computing the smooth
> look3<-smooth.2d( RMprecip$y, x=RMprecip$x, theta=.25, nrow=256,
+ ncol=256,Nwidth=32,
+ Mwidth=32)
> # if the width arguments were omitted the padding would create a
> # 512X512 matrix with the data filled in the upper 256X256 part.
> # with a bandwidth of .25 degrees the normal kernel is essentially zero
> # beyond 32 grid points from its center ( about 6 standard deviations)
> #
> # take a look:
> 
> #set.panel(2,1)
> #image( look3, zlim=c(-8,12))
> #points( RMprecip$x, pch=".")
> #image( look, zlim =c(-8,12))
> #points( RMprecip$x, pch=".")
> 
> # same bandwidth of .25 but with an exponential kernel
> look2<- smooth.2d( RMprecip$y, x=RMprecip$x, cov.function=exp.cov,theta=.25)
> #
> 
> 
> 
> 
> 
> cleanEx(); ..nameEx <- "splint"
> 
> ### * splint
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: splint
> ### Title: Cubic spline interpolation
> ### Aliases: splint
> ### Keywords: smooth
> 
> ### ** Examples
> 
> x<- seq( 0, 120,,200)
> 
> splint(rat.diet$t, rat.diet$trt,x )-> y
> 
> plot( rat.diet$t, rat.diet$trt)
> lines( x,y)
> # ( this is weird!)
> 
> 
> 
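## For comparison, a sketch (not from the run above): base R's interpolating
## spline should trace a very similar curve to splint,
#  y.base<- spline( rat.diet$t, rat.diet$trt, xout=x)$y
#  lines( x, y.base, lty=2, col=2)
## any differences would come from the boundary conditions each routine uses.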
> cleanEx(); ..nameEx <- "sreg"
> 
> ### * sreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sreg
> ### Title: Smoothing spline regression
> ### Aliases: sreg
> ### Keywords: smooth
> 
> ### ** Examples
> 
> # fit a GCV spline to
> # control group of rats.
> fit<- sreg(rat.diet$t,rat.diet$con)
> summary( fit)
CALL: sreg(x = rat.diet$t, y = rat.diet$con)
 Number of Observations:              39
 Number of unique points:             39
 Eff. degrees of freedom for spline:  7.5
 Residual degrees of freedom:         31.5
 GCV est. sigma                       1.581
 lambda                               10.52

RESIDUAL SUMMARY:
     min    1st Q   median    3rd Q      max
-4.53600 -0.46090 -0.06991  0.61540  3.55600

DETAILS ON SMOOTHING PARAMETER:
 Method used: GCV   Cost: 1
 lambda    trA    GCV  GCV.one  GCV.model   shat
 10.523  7.535  3.098    3.098         NA  1.581

 Summary of estimates for lambda
        lambda   trA   GCV  shat
GCV      10.52 7.535 3.098 1.581
GCV.one  10.52 7.535 3.098 1.581
> 
> plot(fit) # diagnostic plots of fit
> predict( fit) # predicted values at data points
 [1] 19.84076 19.86350 19.91390 20.10987 20.21062 20.49639 21.43474 21.75698
 [9] 22.48105 23.99870 24.34322 24.94385 25.81605 25.99780 26.34421 26.95215
[17] 27.08354 27.31055 27.58143 27.61681 27.66504 27.76675 27.79382 27.84563
[25] 27.93351 27.95070 27.96794 27.90770 27.88420 27.78456 27.78605 27.82273
[33] 27.85514 27.90918 27.96891 28.04041 28.05735 28.02786 27.82139
> 
> xg<- seq(0,110,,50)
> sm<-predict( fit, xg) # spline fit at 50 equally spaced points
> der.sm<- predict( fit, xg, deriv=1) # derivative of spline fit
> set.panel( 2,1)
plot window will lay out plots in a 2 by 1 matrix
> plot( fit$x, fit$y) # the data
> lines( xg, sm) # the spline
> plot( xg,der.sm, type="l") # plot of estimated derivative
> set.panel() # reset panel to 1 plot
plot window will lay out plots in a 1 by 1 matrix
> 
> # the same fit using the thin plate spline numerical algorithms
> # (NOTE: sreg is more efficient for 1-d problems)
> fit.tps<-Tps( rat.diet$t,rat.diet$con)
> summary( fit.tps)
CALL: Tps(x = rat.diet$t, Y = rat.diet$con, cov.function = "Thin plate spline radial basis functions (rad.cov) ")
 Number of Observations:                        39
 Number of unique points:                       39
 Degree of polynomial null space ( base model): 1
 Number of parameters in the null space         2
 Effective degrees of freedom:                  7.5
 Residual degrees of freedom:                   31.5
 MLE sigma                                      1.525
 GCV est. sigma                                 1.582
 MLE rho                                        6434
 Scale used for covariance (rho)                6434
 Scale used for nugget (sigma^2)                2.325
 lambda (sigma2/rho)                            0.0003613

RESIDUAL SUMMARY:
     min    1st Q   median    3rd Q      max
-4.54300 -0.45750 -0.06962  0.61640  3.56200

COVARIANCE MODEL: rad.cov

DETAILS ON SMOOTHING PARAMETER:
 Method used: GCV   Cost: 1
    lambda        trA        GCV    GCV.one  GCV.model       shat
 0.0003613  7.5045334  3.0977731  3.0977731         NA  1.5816723

 Summary of all estimates found for lambda
                       lambda   trA   GCV  shat
GCV                 3.613e-04 7.505 3.098 1.582
GCV.one             3.613e-04 7.505 3.098 1.582
-Log Profile (REML) 9.764e+01 2.001 5.729 2.331
> 
> # finding approximate standard errors at observations
> 
> SE<- fit$shat.GCV*sqrt(fit$diagA)
> 
> # compare to predict.se( fit.tps); differences are due to
> # slightly different lambda values and using shat.MLE instead of shat.GCV
> #
> 
> # 95% pointwise intervals
> Zvalue<- qnorm(.975)
> upper<- fit$fitted.values + Zvalue* SE
> lower<- fit$fitted.values - Zvalue* SE
> #
> # conservative, simultaneous Bonferroni bounds
> #
> ZBvalue<- qnorm(1- .025/fit$N)
> upperB<- fit$fitted.values + ZBvalue* SE
> lowerB<- fit$fitted.values - ZBvalue* SE
> #
> # take a look
> 
> plot( fit$x, fit$y)
> lines( fit$predicted, lwd=2)
> matlines( fit$x,
+ cbind( lower, upper, lowerB, upperB), type="l", col=c( 2,2,4,4), lty=1)
> title( "95 pct pointwise and simultaneous intervals")
> # or try the more visually honest:
> plot( fit$x, fit$y)
> lines( fit$predicted, lwd=2)
> segments( fit$x, lowerB, fit$x, upperB, col=4)
> segments( fit$x, lower, fit$x, upper, col=2, lwd=2)
> title( "95 pct pointwise and simultaneous intervals")
> 
> 
> 
> 
> 
> # replicated data
> # this is a simulated case. Find lambda by matching the rmse to .2
> # and use this estimate of lambda
> sreg( test.data2$x, test.data2$y, rmse=.2, method="RMSE")-> fit
results of matching with pure error sigma
> 
> set.panel( 1,1)
plot window will lay out plots in a 1 by 1 matrix
> 
> 
> 
> cleanEx(); ..nameEx <- "stats"
> 
> ### * stats
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: stats
> ### Title: Calculate summary statistics
> ### Aliases: stats
> ### Keywords: univar
> 
> ### ** Examples
> 
> # Statistics for 8 normal random samples:
> zork<- matrix( rnorm(200), ncol=8)
> stats(zork)
                     [,1]        [,2]       [,3]        [,4]       [,5]
N              25.0000000 25.00000000 25.0000000 25.00000000 25.0000000
mean            0.1686652  0.03223135  0.1654091  0.06924384  0.1047730
Std.Dev.        0.9501080  0.70628065  1.1051952  0.83071749  0.7834642
min            -2.2146999 -1.47075238 -1.8049586 -1.52356680 -0.9109216
Q1             -0.3053884 -0.39428995 -0.7099464 -0.54252003 -0.4616447
median          0.3898432 -0.05931340  0.1532533  0.07434132 -0.1795565
Q3              0.7821363  0.55666320  0.6107264  0.59394619  0.4941883
max             1.5952808  1.35867955  2.4016178  1.58683345  1.7672873
missing values  0.0000000  0.00000000  0.0000000  0.00000000  0.0000000
                      [,6]        [,7]        [,8]
N              25.00000000 25.00000000 25.00000000
mean           -0.40974386  0.08186525  0.07187332
Std.Dev.        0.94926150  1.04846008  0.98889447
min            -1.91435943 -1.48746031 -1.46725003
Q1             -1.11592011 -0.61924305 -0.76608200
median         -0.46353040 -0.07715294 -0.03472603
Q3              0.01739562  0.45699881  0.83037317
max             2.08716655  2.30797840  2.07524501
missing values  0.00000000  0.00000000  0.00000000
> 
> zork<- rnorm( 200)
> id<- sample( 1:8, 200, replace=TRUE)
> stats( zork, by=id) 7 1 8 6 3 N 20.0000000 31.0000000 30.00000000 25.00000000 27.00000000 mean -0.0870513 0.2785115 -0.08431315 -0.08174414 -0.08016774 Std.Dev.
0.9223204 0.9243022 1.09643708 0.91234158 1.30152812 min -1.1864586 -1.8697888 -2.88892067 -1.36329126 -2.59232767 Q1 -0.8699710 -0.1324568 -0.86771313 -0.45413691 -0.46497113 median -0.2668835 0.2441649 0.03369048 -0.25567071 0.17048947 Q3 0.4847286 0.9530747 0.77929008 0.37996269 0.77178164 max 1.8874745 1.8031419 1.58658843 2.49766159 1.97133739 missing values 0.0000000 0.0000000 0.00000000 0.00000000 0.00000000 4 5 2 N 24.00000000 16.000000000 27.0000000 mean -0.14069755 0.118826273 0.3499169 Std.Dev. 1.00722745 0.964964832 0.8439739 min -2.26488936 -2.129360648 -1.1676623 Q1 -0.64123101 -0.352820471 -0.3492914 median -0.02606136 -0.001606084 0.3747244 Q3 0.63153533 0.835102364 0.7510879 max 1.31400217 1.577891795 2.6491669 missing values 0.00000000 0.000000000 0.0000000 > > > > cleanEx(); ..nameEx <- "stats.bin" > > ### * stats.bin > > flush(stderr()); flush(stdout()) > > ### Name: stats.bin > ### Title: Bins data and finds some summary statistics. > ### Aliases: stats.bin > ### Keywords: univar > > ### ** Examples > > u<- rnorm( 2000) > v<- rnorm( 2000) > x<- u > y<- .7*u + sqrt(1-.7**2)*v > > look<- stats.bin( x,y) > look$stats["Std.Dev.",] 1 2 3 4 5 6 7 8 1.1823807 0.7113252 0.7385569 0.7648203 0.7855104 0.6986479 0.7997418 1.1382870 > > data( ozone2) > # make up a variogram day 16 of Midwest daily ozone ... > look<- vgram( ozone2$lon.lat, c(ozone2$y[16,]), lon.lat=TRUE) > > # break points > brk<- seq( 0, 250,,40) > > out<-stats.bin( look$d, look$vgram, breaks=brk) > # plot bin means, and some quantiles Q1, median, Q3 > matplot( out$centers, t(out$stats[ c("mean", "median","Q1", "Q3"),]), + type="l",lty=c(1,2,2,2), col=c(3,4,3,4), ylab="ozone PPB") > > > > cleanEx(); ..nameEx <- "summary.Krig" > > ### * summary.Krig > > flush(stderr()); flush(stdout()) > > ### Name: summary.Krig > ### Title: Summary for Krig spatial process estimate > ### Aliases: summary.Krig > ### Keywords: spatial > > ### ** Examples > > fit<- Krig(ozone$x, ozone$y, exp.cov, theta=100) > summary(fit) # summary of fit CALL: Krig(x = ozone$x, Y = ozone$y, cov.function = exp.cov, theta = 100) Number of Observations: 20 Number of unique points: 20 Degree of polynomial null space ( base model): 1 Number of parameters in the null space 3 Effective degrees of freedom: 4.8 Residual degrees of freedom: 15.2 MLE sigma 4.105 GCV est. 
sigma 4.085 MLE rho 17.54 Scale used for covariance (rho) 17.54 Scale used for nugget (sigma^2) 16.85 lambda (sigma2/rho) 0.9607 RESIDUAL SUMMARY: min 1st Q median 3rd Q max -6.7240 -1.6970 -0.5666 1.7920 7.5840 COVARIANCE MODEL: exp.cov DETAILS ON SMOOTHING PARAMETER: Method used: GCV Cost: 1 lambda trA GCV GCV.one GCV.model shat 0.9607 4.8497 22.0240 22.0240 NA 4.0845 Summary of all estimates found for lambda lambda trA GCV shat GCV 0.9607 4.850 22.02 4.085 GCV.one 0.9607 4.850 22.02 4.085 -Log Profile (REML) 2227.3210 3.001 23.06 4.428 > > > > cleanEx(); ..nameEx <- "surface.Krig" > > ### * surface.Krig > > flush(stderr()); flush(stdout()) > > ### Name: surface.Krig > ### Title: Plots a surface and contours > ### Aliases: surface.Krig > ### Keywords: spatial > > ### ** Examples > > fit<- Krig(ozone$x,ozone$y, theta=30) # krig fit > > #Image plot of surface with nice, smooth contours and shading > > surface(fit, type="C", nx=128, ny=128) > > > > > cleanEx(); ..nameEx <- "tim.colors" > > ### * tim.colors > > flush(stderr()); flush(stdout()) > > ### Name: tim.colors > ### Title: Tim's useful color table > ### Aliases: tim.colors > ### Keywords: aplot > > ### ** Examples > > > tim.colors(10) [1] "#00008F" "#0000FF" "#0070FF" "#00DFFF" "#50FFAF" "#BFFF40" "#FFCF00" [8] "#FF6000" "#EF0000" "#800000" > # returns an array of 10 strings in hex format > #e.g. (red, green, blue) values of (16,255, 239) > # translates to "#10FFEF" . > > > > image( outer( 1:20,1:20,"+"), col=tim.colors( 75)) > > > > cleanEx(); ..nameEx <- "transformx" > > ### * transformx > > flush(stderr()); flush(stdout()) > > ### Name: transformx > ### Title: Linear transformation > ### Aliases: transformx > ### Keywords: manip > > ### ** Examples > > # > newx<-transformx( ozone$x, scale.type="range") > > > > cleanEx(); ..nameEx <- "vgram" > > ### * vgram > > flush(stderr()); flush(stdout()) > > ### Name: vgram > ### Title: Finds a traditional or robust variogram for spatial data. > ### Aliases: vgram > ### Keywords: spatial > > ### ** Examples > > # > # compute variogram for the midwest ozone field day 16 > # (BTW this looks a bit strange!) > # > data( ozone2) > good<- !is.na(ozone2$y[16,]) > x<- ozone2$lon.lat[good,] > y<- ozone2$y[16,good] > > look<-vgram( x,y, N=15, lon.lat=TRUE) # locations are in lon/lat so use right > #distance > # take a look: > #plot( look$d, look$vgram) > #lines(look$centers, look$stats["mean",], col=4) > > brk<- seq( 0, 250,,25) > > ## or some boxplot bin summaries > > bplot.xy( look$d, sqrt(look$vgram), breaks=brk,ylab="sqrt(VG)") > lines(look$centers, look$stats["mean",], col=4) > > > > > cleanEx(); ..nameEx <- "vgram.matrix" > > ### * vgram.matrix > > flush(stderr()); flush(stdout()) > > ### Name: vgram.matrix > ### Title: Computes a variogram from an image > ### Aliases: vgram.matrix > ### Keywords: spatial > > ### ** Examples > > # variogram for Lennon image. > data(lennon) > out<-vgram.matrix( lennon) > > plot( out$d, out$vgram, xlab="separation distance", ylab="variogram") > # image plot of vgram values by direction. > > # look at different directions > out<-vgram.matrix( lennon, R=10, collapse=FALSE) #this takes a bit of time > > set.panel(2,1) plot window will lay out plots in a 2 by 1 matrix > plot( out$d, out$vgram) > plot(out$d, out$vgram.robust) > > #image plot of variogram values for different directions. > set.panel(1,1) plot window will lay out plots in a 1 by 1 matrix > plot.vgram.matrix( out) > # John Lennon appears remarkably isotropic! 
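## A follow-up sketch (not run in this session): one crude anisotropy check is
## to repeat the calculation on the transposed image, which swaps the two axes,
#  out.t<- vgram.matrix( t(lennon), R=10, collapse=FALSE)
#  plot( out$d, out$vgram)
#  points( out.t$d, out.t$vgram, col=2)
## if the image really is close to isotropic the two point clouds should overlap.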
> > > > > cleanEx(); ..nameEx <- "world" > > ### * world > > flush(stderr()); flush(stdout()) > > ### Name: world > ### Title: Plot of the world > ### Aliases: world > ### Keywords: hplot > > ### ** Examples > > # Draw map in device color number 2 ( usually red?) > > world( col=2) > # add the US > US( add=TRUE) > > ## Western Europe (*which* big island is missing ?) > ## with a (light color!) coordinate grid: > > world(xlim=c(-10,18),ylim=c(36,60), xaxt = "s", yaxt = "s") > grid() > > > > > cleanEx(); ..nameEx <- "xline" > > ### * xline > > flush(stderr()); flush(stdout()) > > ### Name: xline > ### Title: Draw a vertical line > ### Aliases: xline > ### Keywords: aplot > > ### ** Examples > > > plot( 1:10) > xline( 6.5, col=2) > > world( col=3) > yline( seq( -80,80,10),col=4, lty=2) > xline( seq( -180,180,10),col=4,lty=2) > yline( 0, lwd=2, col=4) > > > > cleanEx(); ..nameEx <- "yline" > > ### * yline > > flush(stderr()); flush(stdout()) > > ### Name: yline > ### Title: Draw horizontal lines > ### Aliases: yline > ### Keywords: aplot > > ### ** Examples > > world( col=3) > yline( seq( -80,80,10),col=4, lty=2) > xline( seq( -180,180,10),col=4,lty=2) > yline( 0, lwd=2, col=4) > > > > ### *