The goal of SIHR is to provide inference procedures in the high-dimensional setting for (1) linear functionals (LF) (Cai, Cai, and Guo 2019) and quadratic functionals (QF) (Guo, Renaux, et al. 2019) in linear regression, (2) linear functionals in logistic regression (Guo, Rakshit, et al. 2019), (3) individual treatment effects (ITE) in linear and logistic regression, and (4) single regression coefficients in binary outcome regression (Cai, Guo, and Ma, n.d.).
The basic examples below show how to solve these common high-dimensional inference problems:
library(SIHR)
Inference for linear functional in high-dimensional linear regression model
library(MASS)
n = 100
p = 400
A1gen <- function(rho, p){
  A1 = matrix(0, p, p)
  for(i in 1:p){
    for(j in 1:p){
      A1[i, j] <- rho^(abs(i - j))
    }
  }
  A1
}
mu <- rep(0, p)
mu[1:5] <- c(1:5)/5
rho = 0.5
Cov <- (A1gen(rho, p))/2
beta <- rep(0, p)
beta[1:10] <- c(1:10)/5
X <- MASS::mvrnorm(n, mu, Cov)
y = X %*% beta + rnorm(n)
loading <- MASS::mvrnorm(1, rep(0, p), Cov)
Est = SIHR::LF(X = X, y = y, loading = loading)
#> [1] "step is 3"
### Point estimator
Est$prop.est
#>          [,1]
#> [1,] -2.54952
### Standard error
Est$se
#> [1] 2.493051
### Confidence interval
Est$CI
#> [1] -7.43581  2.33677
### test whether the linear functional is below zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 0
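Because the data above are simulated, the target linear functional is known and can be compared with the output. A minimal sketch (the names truth and covered are ours; it assumes the objects from the chunk above are still in scope and that Est$CI is the two-sided interval shown):
# True linear functional under this simulation: loading %*% beta.
truth <- sum(loading * beta)
truth
# Does the reported confidence interval cover the truth?
covered <- (Est$CI[1] <= truth) & (truth <= Est$CI[2])
covered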
Individualised Treatment Effect in high-dimensional linear regression model
n1 = 100
p = 400
n2 = 100
A1gen <- function(rho, p){
  A1 = matrix(0, p, p)
  for(i in 1:p){
    for(j in 1:p){
      A1[i, j] <- rho^(abs(i - j))
    }
  }
  A1
}
mu <- rep(0, p)
rho = 0.5
Cov <- (A1gen(rho, p))/2
beta1 <- rep(0, p)
beta1[1:10] <- c(1:10)/5
beta2 <- rep(0, p)
beta2[1:5] <- c(1:5)/10
X1 <- MASS::mvrnorm(n1, mu, Cov)
X2 <- MASS::mvrnorm(n2, mu, Cov)
y1 = X1 %*% beta1 + rnorm(n1)
y2 = X2 %*% beta2 + rnorm(n2)
loading <- MASS::mvrnorm(1, rep(0, p), Cov)
Est <- SIHR::ITE(X1 = X1, y1 = y1, X2 = X2, y2 = y2, loading = loading)
#> [1] "step is 3"
#> [1] "step is 3"
### Point estimator
Est$prop.est
#>           [,1]
#> [1,] -1.192825
### Standard error
Est$se
#> [1] 1.915035
### Confidence interval
Est$CI
#> [1] -4.946225  2.560575
### test whether the ITE is below zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 0
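The same sanity check applies here: under this design the estimand of SIHR::ITE is the contrast of the two regression vectors at the common loading. A sketch (truth.ITE is our own name):
# True individualised treatment effect: loading %*% (beta1 - beta2).
truth.ITE <- sum(loading * (beta1 - beta2))
truth.ITE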
Inference for linear functional in high-dimensional logistic regression model
library(MASS)
A1gen <- function(rho, p){
  A1 = matrix(0, p, p)
  for(i in 1:p){
    for(j in 1:p){
      A1[i, j] <- rho^(abs(i - j))
    }
  }
  A1
}
n = 100
p = 400
mu <- rep(0, p)
rho = 0.5
Cov <- (A1gen(rho, p))/2
beta <- rep(0, p)
beta[1:10] <- 0.5*c(1:10)/10
X <- MASS::mvrnorm(n, mu, Cov)
exp_val <- X %*% beta
prob <- exp(exp_val)/(1 + exp(exp_val))
y <- rbinom(n, 1, prob)
loading <- MASS::mvrnorm(1, mu, Cov)
Est = SIHR::LF_logistic(X = X, y = y, loading = loading, weight = rep(1, n), trans = TRUE)
#> [1] "step is 3"
### trans = TRUE implies that the target quantity is the case probability
### Point estimator
Est$prop.est
#> [1] 0.2503934
### Standard error
Est$se
#> [1] 0.5638263
### Confidence interval
Est$CI
#> [1] 0.0009256397 0.9917648061
### test whether the case probability is below 0.5 or not (1 indicates that it is above 0.5)
Est$decision
#> [1] 0
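Since trans = TRUE reports on the probability scale, the simulated truth is the logistic transform of the linear functional. A sketch using the objects defined above (lin and truth.prob are our own names):
# True case probability at the sampled loading: expit(loading %*% beta).
lin <- sum(loading * beta)
truth.prob <- exp(lin)/(1 + exp(lin))
truth.prob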
Individualised Treatment Effect in high-dimensional logistic model
A1gen <- function(rho, p){
  A1 = matrix(0, p, p)
  for(i in 1:p){
    for(j in 1:p){
      A1[i, j] <- rho^(abs(i - j))
    }
  }
  A1
}
n1 = 100
n2 = 100
p = 400
mu <- rep(0, p)
rho = 0.5
Cov <- (A1gen(rho, p))/2
beta1 <- rep(0, p)
beta1[1:10] <- c(1:10)/5
beta2 <- rep(0, p)
beta2[1:5] <- c(1:5)/10
X1 <- MASS::mvrnorm(n1, mu, Cov)
X2 <- MASS::mvrnorm(n2, mu, Cov)
exp_val1 <- X1 %*% beta1
exp_val2 <- X2 %*% beta2
prob1 <- exp(exp_val1)/(1 + exp(exp_val1))
prob2 <- exp(exp_val2)/(1 + exp(exp_val2))
y1 <- rbinom(n1, 1, prob1)
y2 <- rbinom(n2, 1, prob2)
loading <- MASS::mvrnorm(1, mu, Cov)
Est <- SIHR::ITE_Logistic(X1 = X1, y1 = y1, X2 = X2, y2 = y2, loading = loading, weight = NULL, trans = FALSE)
#> [1] "step is 4"
#> [1] "step is 3"
### trans = FALSE implies that the target quantity is the difference between two linear combinations of the regression coefficients
### Point estimator
Est$prop.est
#> [1] -3.827802
### Standard error
Est$se
#> [1] 37.2344
### Confidence interval
Est$CI
#> [1] -76.80589  69.15029
### test whether the first case probability is smaller than the second case probability or not (1 indicates that the first case probability is larger than the second)
Est$decision
#> [1] 0
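With trans = FALSE the estimand stays on the logit scale, so the simulated truth is again a difference of linear combinations (sketch; truth.diff is our own name):
# True difference of the two linear combinations at the common loading.
truth.diff <- sum(loading * (beta1 - beta2))
truth.diff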
Inference for single regression coefficient in high-dimensional binary GLM (probit model)
sp = 20
n = 500
p = 800

sig1 = toeplitz(seq(0.6, 0, length.out = p/10))
Sig = Matrix::bdiag(rep(list(sig1), 10)) + diag(rep(0.4, p))
X = MASS::mvrnorm(n, mu = rep(0, p), Sigma = Sig)
b = rep(0, p)
b[1:sp] = rep(c(0.4, -0.4), sp/2)

## Inference for single regression coefficient in high-dimensional binary probit model
f = function(x){
  pnorm(x)
}
prob = f(X %*% b)
y = array(dim = 1)
for(i in 1:n){
  y[i] = rbinom(1, 1, prob[i])
}
Est = SIHR::GLM_binary(X = X, y = y, index = 1, model = "probit", intercept = FALSE)
#> [1] "step is 3"
### Point estimator
Est$prop.est
#> [1] 0.5456477
### Standard error
Est$se
#> [1] 0.1193006
### Confidence interval
Est$CI
#> [1] 0.3118229 0.7794725
### test whether the first regression coefficient is equal to zero or not (1 indicates that it is significantly different from zero)
Est$decision
#> [1] 1
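Here the first coefficient is fixed by the design, so the interval can be checked directly (a sketch, using the b defined above):
# b[1:sp] = rep(c(0.4,-0.4), sp/2), so the first coefficient is 0.4.
b[1]
# Check coverage against the reported interval.
(Est$CI[1] <= b[1]) & (b[1] <= Est$CI[2])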
Inference for single regression coefficient in high-dimensional binary GLM (inverse t1 model)
sp = 10
n = 800
p = 400

sig1 = toeplitz(seq(0.6, 0, length.out = p/10))
Sig = Matrix::bdiag(rep(list(sig1), 10)) + diag(rep(0.4, p))
X = MASS::mvrnorm(n, mu = rep(0, p), Sigma = Sig)
b = rep(0, p)
b[1:sp] = rep(c(0.4, -0.4), sp/2)
f = function(x){
  pt(x, 1)
}
prob = f(X %*% b)
y = array(dim = 1)
for(i in 1:n){
  y[i] = rbinom(1, 1, prob[i])
}
Est = SIHR::GLM_binary(X = X, y = y, index = 2, model = "inverse t1", lambda = 0.1*sqrt(log(p)/n))
#> [1] "step is 4"
### Point estimator
Est$prop.est
#> [1] -0.4599343
### Standard error
Est$se
#> [1] 0.1083813
### Confidence interval
Est$CI
#> [1] -0.6723578 -0.2475107
### test whether the second regression coefficient is equal to zero or not (1 indicates that it is significantly different from zero)
Est$decision
#> [1] 0
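The analogous check for the second coefficient (sketch):
# The design sets b[2] = -0.4; compare with the interval above.
b[2]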
Inference for quadratic functional in high-dimensional linear model
library(MASS)
A1gen <- function(rho, p){
  A1 = matrix(0, p, p)
  for(i in 1:p){
    for(j in 1:p){
      A1[i, j] <- rho^(abs(i - j))
    }
  }
  A1
}
rho = 0.6
Cov <- (A1gen(rho, 400))
mu <- rep(0, 400)
mu[1:5] <- c(1:5)/5
beta <- rep(0, 400)
beta[25:50] <- 0.08
X <- MASS::mvrnorm(100, mu, Cov)
y <- X %*% beta + rnorm(100)
test.set <- c(30:100)

## Inference for Quadratic Functional with Population Covariance Matrix in middle
Est = SIHR::QF(X = X, y = y, G = test.set)
#> [1] "step is 5"
### Point estimator
Est$prop.est
#>           [,1]
#> [1,] 0.4856643
### Standard error
Est$se
#> [1] 0.1339019
### Confidence interval
Est$CI
#>           [,1]      [,2]
#> [1,] 0.2232215 0.7481071
### test whether the quadratic form is equal to zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 1
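Under this simulation the target quadratic form can also be evaluated from the truth. A sketch, assuming (as the comment above indicates) that Cov.weight = TRUE targets the quadratic form weighted by the population covariance restricted to the test set:
# True quadratic functional beta_G' Sigma_GG beta_G over G = test.set.
truth.QF <- as.numeric(t(beta[test.set]) %*% Cov[test.set, test.set] %*% beta[test.set])
truth.QF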
## Inference for Quadratic Functional with known matrix A in middle
Est = SIHR::QF(X = X, y = y, G = test.set, Cov.weight = FALSE, A = diag(1:length(test.set), length(test.set)))
#> [1] "step is 3"
### Point estimator
Est$prop.est
#>          [,1]
#> [1,] 7.818209
### Standard error
Est$se
#> [1] 1.874088
### Confidence interval
Est$CI
#>          [,1]     [,2]
#> [1,] 4.145065 11.49135
### test whether the quadratic form is equal to zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 1
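The same kind of check works with the user-supplied weighting matrix, assuming the estimand is beta_G' A beta_G (a sketch; A is rebuilt to match the call above and truth.QA is our own name):
# Plug-in value of beta_G' A beta_G from the simulated truth, with A = diag(1:|G|).
A <- diag(1:length(test.set), length(test.set))
truth.QA <- as.numeric(t(beta[test.set]) %*% A %*% beta[test.set])
truth.QA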
## Inference for squared norm of the regression vector
Est = SIHR::QF(X = X, y = y, G = test.set, Cov.weight = FALSE, A = diag(length(test.set)))
#> [1] "step is 3"
### Point estimator
Est$prop.est
#>           [,1]
#> [1,] 0.2305753
### Standard error
Est$se
#> [1] 0.1078509
### Confidence interval
Est$CI
#>           [,1]      [,2]
#> [1,] 0.0191914 0.4419591
### test whether the quadratic form is equal to zero or not (1 indicates that it is above zero)
Est$decision
#> [1] 1
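For the squared norm the truth is immediate (sketch):
# True squared norm of beta over the test set: 21 nonzero entries of 0.08,
# i.e. 21 * 0.08^2 = 0.1344 for this design.
sum(beta[test.set]^2)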
Finding projection direction in high dimensional linear regression
n = 100
p = 400
X = matrix(sample(-2:2, n*p, replace = TRUE), nrow = n, ncol = p)
resol = 1.5
step = 3

## Finding Projection Direction using fixed tuning parameter
Direction.est <- SIHR::Direction_fixedtuning(X, loading = c(1, rep(0, (p-1))), mu = sqrt(2.01*log(p)/n)*resol^{-(step-1)})
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#>  [1]  1.027419e+00  1.137467e-21 -3.249525e-21 -5.040831e-21 -4.798542e-21
#> [6] 3.652147e-21 -1.541715e-21 3.920373e-03 1.604445e-21 4.330363e-21
#> [11] 5.887975e-21 3.153287e-21 -7.296738e-22 -7.101951e-22 -1.363728e-21
#> [16] -5.406470e-21 1.762665e-21 -1.155885e-20 4.894473e-21 -4.830918e-21
## Finding Projection Direction using best step size
Direction.est <- SIHR::Direction_searchtuning(X, loading = c(1, rep(0, (p-1))))
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#>  [1]  1.027419e+00  1.100801e-21 -3.254499e-21 -4.997330e-21 -4.751100e-21
#> [6] 3.650647e-21 -1.485238e-21 3.920373e-03 1.548722e-21 4.339941e-21
#> [11] 5.863320e-21 3.217438e-21 -6.788858e-22 -6.577332e-22 -1.311394e-21
#> [16] -5.395191e-21 1.769135e-21 -1.150573e-20 4.907870e-21 -4.843679e-21
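The projection direction is typically an ingredient in a one-step bias correction rather than an end in itself. A hedged sketch of that standard use in linear regression follows; the Lasso fit and the debiasing formula are illustrative, not part of the SIHR output, and we assume Direction.est$proj has length p as the printout above suggests:
# Illustrative one-step debiasing of a Lasso estimate of beta[1]
# (names beta.true, beta.hat, u, est.debiased are ours, not SIHR's).
beta.true = c(1, rep(0, p-1))
y = X %*% beta.true + rnorm(n)
fit = glmnet::cv.glmnet(X, y, intercept = FALSE)
beta.hat = as.vector(coef(fit, s = "lambda.min"))[-1]
u = Direction.est$proj
est.debiased = beta.hat[1] + sum(u * (t(X) %*% (y - X %*% beta.hat)))/n
est.debiased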
Finding projection direction in high dimensional logistic regression
n = 50
p = 400
X = matrix(sample(-2:2, n*p, replace = TRUE), nrow = n, ncol = p)
y = rbinom(n, 1, 0.5)
col.norm <- 1/sqrt((1/n)*diag(t(X) %*% X) + 0.0001)
Xnor <- X %*% diag(col.norm)
fit = glmnet::cv.glmnet(Xnor, y, alpha = 1, family = "binomial")
htheta <- as.vector(coef(fit, s = "lambda.min"))
support <- (abs(htheta) > 0.001)
Xb <- cbind(rep(1, n), Xnor)
Xc <- cbind(rep(1, n), X)
col.norm <- c(1, col.norm)
pp <- (p+1)
xnew = c(1, rep(0, (p-1)))
loading = rep(0, pp)
loading[1] = 1
loading[-1] = xnew
htheta <- htheta*col.norm
htheta <- as.vector(htheta)
f_prime <- exp(Xc %*% htheta)/(1 + exp(Xc %*% htheta))^2
step <- 2

## Finding Projection Direction using fixed tuning parameter
## (resol = 1.5 carries over from the linear regression example above)
Direction.est <- SIHR::Direction_fixedtuning(X, loading = c(1, rep(0, (p-1))), mu = sqrt(2.01*log(p)/n)*resol^{-(step-1)}, model = "logistic", weight = 1/f_prime, deriv.vec = f_prime)
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#>  [1]  3.234706e-01  5.235360e-23 -1.156472e-23  4.456602e-23 -4.399074e-23
#> [6] -7.384066e-24 1.794255e-24 1.385838e-23 -2.126070e-25 2.911442e-23
#> [11] -8.377458e-24 -2.552474e-23 3.018341e-23 -1.800046e-23 2.677253e-23
#> [16] -5.286996e-23 1.208285e-23 -1.330125e-22 8.063287e-24 1.428302e-24
## Finding Projection Direction using best step size
Direction.est <- SIHR::Direction_searchtuning(Xc, loading, model = "logistic", weight = 1/f_prime, deriv.vec = f_prime)
### First 20 entries of the projection vector
Direction.est$proj[1:20]
#>  [1]  3.701162e-01  2.686621e-01 -1.862471e-22  1.715356e-23  1.006911e-22
#> [6] -9.708787e-24 1.974327e-22 2.745049e-22 -7.526144e-23 1.130423e-22
#> [11] 1.101680e-22 -1.391776e-22 -3.206116e-22 -2.677714e-23 -3.981109e-22
#> [16] -1.707152e-22 -1.623773e-22 3.197569e-22 -4.557997e-22 -1.446905e-22
Cai, Tianxi, T. Tony Cai, and Zijian Guo. 2019. “Optimal Statistical Inference for Individualized Treatment Effects in High-Dimensional Models.” Journal of the Royal Statistical Society: Series B. https://arxiv.org/pdf/1904.12891.pdf.
Cai, T. Tony, Zijian Guo, and Rong Ma. n.d. “Statistical Inference for High-Dimensional Generalized Linear Models with Binary Outcomes.” Journal of the American Statistical Association.
Guo, Zijian, Prabrisha Rakshit, Daniel S. Herman, and Jinbo Chen. 2019. “Inference for Case Probability in High-Dimensional Logistic Regression.” Preprint. https://arxiv.org/abs/2012.07133.
Guo, Zijian, Claude Renaux, Peter Buhlmann, and T. Tony Cai. 2019. “Group Inference in High Dimensions with Applications to Hierarchical Testing.” Preprint. https://arxiv.org/pdf/1909.01503.pdf.