---
title: "Stat542: Ridge and Lasso"
date: "Fall 2022"
output:
  html_notebook:
    theme: readable
    toc: TRUE
    toc_float: TRUE
---


`glmnet`: a library for both the lasso (`alpha = 1`) and ridge regression (`alpha = 0`). More glmnet examples are available at 
https://web.stanford.edu/~hastie/glmnet/glmnet_beta.html

```{r}
# install.packages("glmnet")
library(glmnet)
```

## Data

### Load Data

The **Prostate data** can be found [[here](https://hastie.su.domains/ElemStatLearn/data.html)]. The data come from a study that examined the correlation between the level of prostate-specific antigen (PSA) and a number of clinical measures in men who were about to receive a radical prostatectomy.

- `lcavol`:  log cancer volume
- `lweight`: log prostate weight
- `age`: in years
- `lbph`: log of the amount of benign prostatic hyperplasia
- `svi`: seminal vesicle invasion
- `lcp`: log of capsular penetration
- `gleason`: Gleason score
- `pgg45`: percent of Gleason score 4 or 5
- `lpsa`: log of prostate-specific antigen (the response)

The first 8 columns are predictors; column 9 is the outcome/response. We do not use column 10, which indicates which 67 observations were used as the "training set" and which 30 as the test set, as described on page 48 of the ESL book. For convenience, we rename the response column with the generic name `Y`.

```{r}
myData = read.table(file = "https://hastie.su.domains/ElemStatLearn/datasets/prostate.data", header = TRUE)
myData = myData[, -10]
names(myData)[9] = 'Y'
names(myData)
```

```{r}
n = dim(myData)[1]         # sample size
p = dim(myData)[2] - 1     # number of non-intercept predictors
X = as.matrix(myData[, names(myData) != "Y"])  # glmnet requires a matrix of predictors
Y = myData$Y                                   # and a numeric response vector
```

### Train/Test Split

Split the data into two parts: 80% for training and 20% for testing. Throughout, we use the training data to select variables and estimate coefficients, and we report the average MSE on the test data.

```{r}
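# (optional) fix the random seed so the split, and hence all results below,
# are reproducible; any fixed value works
set.seed(542)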
ntest = round(n * 0.2)
ntrain = n - ntest
test.id = sample(1:n, ntest)
Ytest = Y[test.id]
```

## Full Model

Fit an ordinary linear regression model using all variables.

```{r}
full.model = lm(Y ~ ., data = myData[-test.id, ])
Ytest.pred = predict(full.model, newdata = myData[test.id, ])
mean((Ytest - Ytest.pred)^2)  # average MSE on the test set
```

## Ridge 

Fit a set of ridge regression models using `glmnet` with the default $\lambda$ sequence. 

```{r}
myridge = glmnet(X[-test.id, ], Y[-test.id], alpha = 0)
plot(myridge, label = TRUE, xvar = "lambda")
```

### Output
Check the output from `glmnet` for ridge regression.

```{r}
summary(myridge)
```

Retrieve the $\lambda$ sequence; by default `glmnet` generates a decreasing sequence of up to 100 values.
```{r}
length(myridge$lambda)
```

Retrieve coefficients using two different approaches. 
```{r}
dim(myridge$beta)       # coefficients for the non-intercept predictors
length(myridge$a0)      # intercepts, one per lambda value


# coef() returns the full coefficient matrix, intercept included
dim(coef(myridge))
```

The two coefficient matrices should be identical:
```{r}
sum((coef(myridge) - rbind(myridge$a0, myridge$beta))^2)
```

**Note**: Ridge regression coefficients may change signs along the path.

```{r}
round(myridge$beta["age", ], digits = 4)
```

How are the intercepts computed?
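
Because `glmnet` fits on centered variables, the intercept at each $\lambda$ is $\hat{\beta}_0(\lambda) = \bar{y} - \bar{x}^\top \hat{\beta}(\lambda)$, where $\bar{y}$ and $\bar{x}$ are the training means of the response and the predictors. The next chunk verifies this for a single $\lambda$.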

```{r}
k = 2
my.mean = apply(X[-test.id, ], 2, mean)  # 8x1 mean vector of the training X
mean(Y[-test.id]) - sum(my.mean * myridge$beta[, k])
myridge$a0[k]  # intercept for lambda = myridge$lambda[k]
```

Check whether the intercept formula holds for all $\lambda$ values:
```{r}
sum((mean(Y[-test.id]) - my.mean %*% myridge$beta - myridge$a0)^2)  # should be essentially zero
```

### CV & Path Plot

By default, `cv.glmnet` performs 10-fold cross-validation. The CV results are stored in

- `cv.out$cvm`: the mean CV error at each $\lambda$
- `cv.out$cvsd`: the standard error of the corresponding `cvm`


```{r}
cv.out = cv.glmnet(X[-test.id, ], Y[-test.id], alpha = 0) 
plot(cv.out)
```

The CV error continues to drop as $\lambda$ gets smaller, so we supply our own $\lambda$ sequence, equally spaced on the log scale over $[-6, 2]$.

```{r}
lam.seq = exp(seq(-6, 2, length=100))
cv.out = cv.glmnet(X[-test.id,], Y[-test.id], alpha=0, lambda=lam.seq)  
plot(cv.out)
```

Two choices for lambda

- `lambda.min`: the $\lambda$ value achieving the minimum `cvm` (the left dashed vertical line)
- `lambda.1se`: the largest $\lambda$ value (i.e., the strongest regularization and the smallest effective df) whose `cvm` is within one standard error of the `cvm` at `lambda.min` (the right dashed vertical line)

**Note**: by definition `lambda.1se` (right dashed vertical line) is always bigger than or equal to `lambda.min` (left dashed vertical line). 
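
In symbols, $\lambda_{\text{1se}} = \max\{\lambda : \text{cvm}(\lambda) < \text{cvm}(\lambda_{\min}) + \text{cvsd}(\lambda_{\min})\}$, which is exactly what the last line of the chunk below computes.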


Check how `lambda.min` and `lambda.1se` are computed.

```{r}
names(cv.out)

# Find lambda.min
cv.out$lambda.min
cv.out$lambda[which.min(cv.out$cvm)]

# Find lambda.1se
cv.out$lambda.1se
tmp.id = which.min(cv.out$cvm)
max(cv.out$lambda[cv.out$cvm < cv.out$cvm[tmp.id] + cv.out$cvsd[tmp.id]])
```

### Prediction

Next we apply the two ridge regression fits, one at `lambda.min` and one at `lambda.1se`, to the held-out test set and report the corresponding mean squared prediction errors.

```{r}
myridge = glmnet(X[-test.id, ], Y[-test.id], alpha = 0, lambda = lam.seq)  # refit on the same lambda sequence used for CV
Ytest.pred = predict(myridge, s = cv.out$lambda.1se, newx = X[test.id, ])
mean((Ytest.pred - Y[test.id])^2)
Ytest.pred = predict(myridge, s = cv.out$lambda.min, newx = X[test.id, ])
mean((Ytest.pred - Y[test.id])^2)
```
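
As a side note, the refit is not strictly required: `predict` also works directly on the `cv.glmnet` object, where `s` may be given as the string `"lambda.min"` or `"lambda.1se"`. A minimal sketch:

```{r}
# predict straight from the CV object; equivalent to the refit above
Ytest.pred = predict(cv.out, newx = X[test.id, ], s = "lambda.min")
mean((Ytest.pred - Y[test.id])^2)
```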


## Lasso


### Output
Check the output from `glmnet` for Lasso. 

```{r}
mylasso = glmnet(X[-test.id,], Y[-test.id], alpha = 1)
summary(mylasso)
```


### CV & Path Plot

```{r}
par(mfrow = c(1, 2))
plot(mylasso, label = TRUE, xvar = "norm")    # paths against the L1 norm of the coefficients
plot(mylasso, label = TRUE, xvar = "lambda")  # paths against log(lambda)
par(mfrow = c(1, 1))
```

```{r}
mylasso$df  # number of nonzero (non-intercept) coefficients at each lambda
cv.out = cv.glmnet(X[-test.id, ], Y[-test.id], alpha = 1)
plot(cv.out)
```

Try our own lambda sequences.
```{r}
# lam.seq =  exp(seq(-4, 2, length=100))
# lam.seq =  exp(seq(-6, -1, length=100))
# cv.out = cv.glmnet(X[-test.id, ], Y[-test.id], alpha = 1, lambda = lam.seq)
# plot(cv.out)
```

Check how `lambda.min` and `lambda.1se` are computed.
```{r}
cv.out$lambda.min
tmp.id = which.min(cv.out$cvm)
cv.out$lambda[tmp.id]

cv.out$lambda.1se
max(cv.out$lambda[cv.out$cvm < cv.out$cvm[tmp.id] + cv.out$cvsd[tmp.id]])
```

### Coefficients

How do we retrieve the lasso coefficients?

```{r}
mylasso.coef.min = predict(mylasso, s=cv.out$lambda.min, type="coefficients")
mylasso.coef.1se = predict(mylasso, s=cv.out$lambda.1se, type="coefficients")
cbind(mylasso.coef.min, mylasso.coef.1se)

# number of variables selected (including the intercept)
sum(mylasso.coef.1se != 0)
# names of selected non-intercept variables
row.names(mylasso.coef.1se)[which(mylasso.coef.1se != 0)[-1]]
```
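
Equivalently, `coef()` on a `glmnet` fit accepts the same `s` argument and returns the same sparse coefficient matrix:

```{r}
# coef(fit, s = ...) is equivalent to predict(fit, s = ..., type = "coefficients")
coef(mylasso, s = cv.out$lambda.1se)
```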

### Prediction

Apply the lasso fits at `lambda.min` and `lambda.1se` to predict on the test data.

```{r}
mylasso = glmnet(X[-test.id, ], Y[-test.id], alpha = 1)
Ytest.pred = predict(mylasso, s = cv.out$lambda.min, newx = X[test.id,])
mean((Ytest.pred - Y[test.id])^2)

Ytest.pred = predict(mylasso, s = cv.out$lambda.1se, newx = X[test.id,])
mean((Ytest.pred - Y[test.id])^2)
```

### Refit to Reduce Bias

We refit an ordinary linear regression model using the variables selected by `lambda.1se` and then use it for prediction.

```{r}
mylasso.coef.1se = predict(mylasso, s = cv.out$lambda.1se, type = "coefficients")
var.sel = row.names(mylasso.coef.1se)[which(mylasso.coef.1se != 0)[-1]]
var.sel
tmp.X = X[, colnames(X) %in% var.sel]
mylasso.refit = coef(lm(Y[-test.id] ~ tmp.X[-test.id, ]))  # OLS on the selected variables
Ytest.pred = mylasso.refit[1] + tmp.X[test.id, ] %*% mylasso.refit[-1]
mean((Ytest.pred - Y[test.id])^2)
```
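
The same refit can also be written with the formula interface of `lm` on a data frame restricted to the selected variables; a minimal sketch using `var.sel` from above:

```{r}
# keep only the selected predictors plus the response
refit.data = myData[, c(var.sel, "Y")]
refit.lm = lm(Y ~ ., data = refit.data[-test.id, ])
Ytest.pred = predict(refit.lm, newdata = refit.data[test.id, ])
mean((Ytest.pred - Y[test.id])^2)
```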



