# Package-name marker; normally set automatically when the package is installed.
.packageName <- "mlica"
"CheckStability" <-
function(a.best.l, corr.th=0.7){
  
  nIruns <- length(a.best.l) ;
  if( nIruns < 2 ){
    print("Argument must be list of at least two a.best objects");
    stop;
  }
  for( i in 2:nIruns){
    if( a.best.l[[1]]$ncp != a.best.l[[i]]$ncp ){
    print("Stopping: the objects in list must have same number of ICA modes");
    stop;
    }
  }
 ncp <- a.best.l[[1]]$ncp ;
 X <- a.best.l[[1]]$X;
  
 nConn.v <- vector(length=nIruns*ncp); # number of connections
 Adj.a <- array(rep(0,nIruns*ncp*nIruns*ncp),dim=c(nIruns,ncp,nIruns,ncp));
 v <- 1;

 for( j1 in 1:nIruns){
  setdiff(1:nIruns,j1) -> selrun ;
  for( j2 in selrun ){

    for( i1 in 1:ncp){

      cor.v <- as.vector(abs(cor(a.best.l[[j1]]$S[,i1],a.best.l[[j2]]$S)));
      which.max(cor.v) -> i2;
      Adj.a[j1,i1,j2,i2] <- 1 ;

    }
  }

  for( i in 1:ncp){ # compute number of connections/links
    nConn <- 0;
    for( j2 in selrun){
      if( sum(Adj.a[j1,i,j2,]) > 0 ){
        nConn <- nConn + 1;
      }
    }
    nConn.v[v] <- nConn ;
    v <- v + 1;
  }
 } # j1-loop

 selmodes.idx <- 1:(nIruns*ncp);
 consS.lv <- list();
 StabScore.l <- list();
 c <- 1;
 while( c <= ncp ){
 which.max(nConn.v) -> max.idx ;
 as.integer(max.idx/(ncp+1))+1 -> j1.max ;
 max.idx - (j1.max-1)*ncp -> i1.max ;
 selrun <- setdiff(1:nIruns,j1.max);
 consS <- a.best.l[[j1.max]]$S[,i1.max];
 corr.idx <- vector();
 for( j2 in selrun ){
   which(Adj.a[j1.max,i1.max,j2,]==1) -> i2;
   if ( length(i2) > 0 ){
    corr.idx <- c(corr.idx, (j2-1)*ncp+i2);
    tmp <- cor(a.best.l[[j1.max]]$S[,i1.max],a.best.l[[j2]]$S[,i2]);
    consS <- consS + sign(tmp)*a.best.l[[j2]]$S[,i2];
   }
 }
 selmodes.idx <- setdiff(selmodes.idx,c(max.idx,corr.idx));
 consS.lv[[c]] <- consS/(nConn.v[max.idx]+1) ;
 StabScore.l[[c]] <- (nConn.v[max.idx]+1)/nIruns ;
 nConn.v[c(max.idx,corr.idx)] <- -1;
 c <- c + 1;
 }

 consS.m <- matrix(nrow=nrow(a.best.l[[1]]$S),ncol=ncp );
 StabScore.v <- vector();
 for( c in 1:ncol(consS.m)){
  consS.m[,c] <- consS.lv[[c]]/sqrt(var(consS.lv[[c]]));
  StabScore.v[c] <- StabScore.l[[c]];
 }

 STS <- t(consS.m) %*% consS.m;
 solve(STS) -> STSinv ;
 STX <- t(consS.m) %*% X ;
 consA.m <- STSinv %*% STX ;
 
 return(list(stabM=StabScore.v, consS=consS.m, consA=consA.m));

} # END OF FUNCTION

"PriorNormPCA" <-
function(X){ # START FUNCTION
 ndim <- ncol(X);
 ntp  <- nrow(X);
 # Center column means to zero
 for( s in 1:ndim ){
  X[,s] <- X[,s] - mean( X[,s] );
 }
 print("Performing SVD");
 # SVD
 svd.o <- svd(X,LINPACK=TRUE);
 Dx <- diag(svd.o$d*svd.o$d)/ntp;
 Ex <- svd.o$v ;
 barplot(Dx,main="Singular values");
 return(list(X=X,Dx=Dx,Ex=Ex));
} # END OF FUNCTION
"SortModes" <-
function(a.best,c.val=0.25){
  
ncp <- ncol(a.best$S);
Ng <- nrow(a.best$S);

#SORTING CRITERION

# A) Computation of relative data power. Store values in vector of size H=ncp. Could use different criterion here.
# need squared entries
 Ssq <- a.best$S * a.best$S ; 
 Asq <- a.best$A * a.best$A ;
 Xsq <- a.best$X * a.best$X ;
 rdp <- rep(0, times=ncp);
 for ( k in 1:ncp ) {
  rdp[k]<- sum(Ssq[,k])*sum(Asq[k,])/sum(Xsq) ;
 }
 rdp.s <- sort(rdp, na.last=NA,decreasing=TRUE, index.return=TRUE);

# B) sorting with mixture of contrast and data variance (Liebermeister)
 JG <- rep(0, times=ncp);
 JA <- rep(0, times=ncp);
 # generate values from std. normal distribution
 nu <- rnorm(10000,0,1);
 G0 <- mean(log(cosh(nu)));
 
 for ( k in 1:ncp ){
   # compute contrast for mode using logcosh
   G1 <- mean(log(cosh(a.best$S[,k])));
   JG[k] <- abs(G1-G0);
   JA[k] <- sum(Asq[k,]);
 }   
   sumJG <- sum(JG) ; sumJA <- sum(JA) ;
   J <- JG*(c.val/sumJG)+ JA*(1-c.val)/sumJA ;
   J.s <- sort(J, na.last=NA,decreasing=TRUE, index.return=TRUE);

   return(list(a.best=a.best,rdp=rdp.s,lbm=J.s));

} # END OF FUNCTION
"mlica" <-
function(prNCP, nruns=10, tol=0.0001, maxit=300, fail.th=5, learn.mu=1){

print("Entering mlica");
print("Performing preliminary run");
# Perform one run to learn ncp and initialise logL of compared
a <- mlicaMAIN(prNCP,tol=0.0001,maxit=10,mu=learn.mu);
ncp <- dim(a$S)[2];
max.logL <- a$LL;
a.best <- a;
print("Finished preliminary run");

# Performs runs 
# only use converged runs and find the one of maximum likelihood
print("Starting runs");
run.n <- 0 ;
fail.count <- 0;
v.logL <- vector();
v.NC <- vector();

while ( run.n < nruns ){

 a <- mlicaMAIN(prNCP,tol=0.0001,maxit=maxit,mu=learn.mu); 
 v.logL <- c(v.logL,a$LL);
 v.NC <- c(v.NC,a$NC);
 
 if ( a$NC == 0 ){ # if converged
  fail.count <- 0;
  run.n <- run.n + 1;

  if ( a$LL > max.logL ){
    a.best <- a;
  }
  
 }

 else { # did not converge
  fail.count <- fail.count + 1;
 }

 if( fail.count >= fail.th ){
   print("Stopping: Five consecutive runs failed to converge!");
   print("Consider either increasing the threshold for pca eigenvalues to perform ICA on a smaller subspace or increasing maxit");
   stop;
 }
 
} # matches run loop
print("End of runs");

return(a.best);

} # END OF FUNCTION
"mlicaMAIN" <-
function(prNCP, tol=0.0001, maxit=300, mu=1){# default values
  print("Entered MLica");
  #######################################################
  X <- prNCP$X;
  x <-  prNCP$x ;
  pEx <- prNCP$pEx ;
  pCorr <- prNCP$pCorr ;
  ntp <- dim(X)[1]; # number of "time points"
  ndim <- dim(X)[2]; # number of dimensions.
  ncp <- ncol(x); # number of components
  ######################################################
  # Estimated Source matrix
  Sest <- matrix(nrow=ntp,ncol=ncp);  
  # Choose initial random separating matrix B(=Ainv):
  B.old <- matrix(runif(ncp*ncp,0,1),nrow=ncp,ncol=ncp);
  B.o <- B.old ; # this is unwhitened B separating matrix needed to check convergence of algorithm
  # variables needed
  icount <- 0;  
  not.conv <- c(1,2); # arbitrary non-zero vector to give non-zero length
  y <- matrix(nrow=ntp,ncol=ncp);
  tmp <- matrix(nrow=ncp,ncol=ncp);
  beta <- vector(length=ncp);
  alpha <- vector(length=ncp);
  
  # STARTING ITERATIONS
  while ( (length(not.conv) > 0) && ( icount < maxit)  ){ # iteration loop : update B
   print(c("Entering iteration loop ",icount));
   # whiten y=B.old x first
   Cy <- B.old %*% t(B.old);
   svds <- eigen(Cy,symmetric=TRUE) ;
   D <- diag(svds$values);
   E <- svds$vectors;
   Dinv <- solve(D);
   # whitening matrix:
   V <- E %*% sqrt(Dinv) %*% t(E);

   # project B.old onto whitening set
   B.old <- V %*% B.old ;
  
   for ( g in 1:ntp){
    y[g,] <- B.old %*% x[g,]; # y is white
   }

   for ( c in 1:ncp){

     beta[c] <- 2*sum( y[,c]*tanh(y[,c]) )/ntp ;
     alpha[c] <- -1/(beta[c] -2 + 2* sum( tanh(y[,c])*tanh(y[,c]) )/ntp ) ;

     for ( c2 in 1:ncp){
       tmp[c,c2] <- -2*sum(tanh(y[,c])*y[,c2])/ntp ;
     }
       
   }
   print("Checkpt1");
   # Update separating matrix
   tmp <- diag(beta) + tmp ;
   B <- B.old + mu*diag(alpha) %*% tmp %*% B.old ;

   # Check convergence
   Dev <- abs(B - B.o);
   AvDev <- sum(Dev)/(ncp*ncp);
   print(c("AvDev=",AvDev));
         
   not.conv <- vector();
   not.conv <- as.vector( Dev[ Dev > tol ] );

   B.old <- B ;
   B.o <- B;
   icount <- icount + 1;

   for( g in 1:ntp ){
    Sest[g,] <- B %*% x[g,] ;
   }
   logL <- -2*sum( log( cosh(Sest) ) ) + ntp*log(abs(det(B)));

   print("iterated logL");
   print(logL);
   
 } # matches iteration loop  


  # find estimated source components
  # whiten y=Bx first (actually we only do PCA)(i.e variance is not scaled)
   Cy <- B %*% t(B);
   svds <- eigen(Cy,symmetric=TRUE) ;
   D <- diag(svds$values);
   E <- svds$vectors;
   Dinv <- solve(D);
   # whitening matrix (variance scaling here)
   V <- E %*% sqrt(Dinv) %*% t(E);

   # project B.old onto whitening set
   B <- V %*% B ;

  
  for( g in 1:ntp ){
    Sest[g,] <- B %*% x[g,] ;
  }
  
  Aest <- t(pEx %*% sqrt(pCorr) %*% t(B)) ; # this is now estimate of original mixing matrix, Ns x ncp matrix
  if ( length(not.conv) > 0 ){
    NotConv <- 1 ;
  }
  else { NotConv <- 0 ;}

  # logL computation
  logL <- -2*sum( log( cosh(Sest) ) ) + ntp*log(abs(det(B)));


  return(list(A=Aest,B=B,S=Sest,X=X,ncp=dim(Sest)[2],NC=NotConv,LL=logL));

} # end of function
"proposeNCP" <-
function(prPCA, thresh=0.1){ # START FUNCTION

  X <- prPCA$X ;
  eigenvals.v <- diag(prPCA$Dx);
  Ex <- prPCA$Ex ;
  ntp <- nrow(X);
  ndim <- ncol(X);

  print("About to find ncp");
  # project onto relevant eigendirections
  p.cpts <- eigenvals.v[eigenvals.v > thresh ];
  ncp <- length(p.cpts);
  pCorr <- diag( eigenvals.v[1:ncp] );
  pEx <- Ex[,1:ncp]; # this is Nsamples x ncp projection matrix
  
  x <- matrix(nrow=ntp,ncol=ncp); # is white
  for ( g in 1:ntp){
   for ( c in 1:ncp ){
     x[g,c] <- sum(X[g,]*Ex[,c])/sqrt(diag(pCorr)[c]) ; # projection onto c'th component
   }
  }

  return(list(X=X,x=x,pEx=pEx,pCorr=pCorr,ncp=ncp));

} # END FUNCTION
