Use the RStudio AMI from http://www.louisaslett.com/RStudio_AMI/ and launch a p2.xlarge instance. Then set up the GPU build of TensorFlow for Keras:
# install.packages("keras")
library("keras")
install_keras(tensorflow = "gpu")
library(keras)
library(dplyr)
library(ggplot2)
library(pROC)
This section follows the Keras for R text-classification tutorial: https://keras.rstudio.com/articles/tutorial_basic_text_classification.html
imdb <- dataset_imdb(num_words = 10000)
c(train_data, train_labels) %<-% imdb$train
c(test_data, test_labels) %<-% imdb$test
Check the Keras tutorial page (linked above) to understand how the data are stored.
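A few quick checks (a small sketch; output not shown) make the structure clear: each review is a vector of integer word indices and each label is 0 (negative) or 1 (positive).
length(train_data)          # 25000 reviews in the training set
length(train_data[[1]])     # number of word indices in the first review
head(train_labels)          # 0 = negative, 1 = positive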
library(purrr) # for the map function
word_index <- dataset_imdb_word_index()
word_index_df <- data.frame(
  word = names(word_index),
  idx = unlist(word_index, use.names = FALSE),
  stringsAsFactors = FALSE
)
# The first indices are reserved for special tokens, so shift every word index by 3
word_index_df <- word_index_df %>% mutate(idx = idx + 3)
word_index_df <- word_index_df %>%
  add_row(word = "<PAD>", idx = 0) %>%
  add_row(word = "<START>", idx = 1) %>%
  add_row(word = "<UNK>", idx = 2) %>%
  add_row(word = "<UNUSED>", idx = 3)
word_index_df <- word_index_df %>% arrange(idx)
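A quick look at the table (output omitted) confirms that the four special tokens now occupy indices 0 through 3 and the real words start at index 4:
head(word_index_df)   # the four special tokens, then the most frequent words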
decode_review <- function(text) {
  paste(map(text, function(number) word_index_df %>%
              filter(idx == number) %>%
              select(word) %>%
              pull()),
        collapse = " ")
}
decode_review(train_data[[1]])
## [1] "<START> this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert <UNK> is an amazing actor and now the same being director <UNK> father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for <UNK> and would recommend it to everyone to watch and the fly fishing was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also <UNK> to the two little boy's that played the <UNK> of norman and paul they were just brilliant children are often left out of the <UNK> list i think because the stars that play them all grown up are such a big profile for the whole film but these children are amazing and should be praised for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was shared with us all"
Each review needs to be converted into a numerical vector. We consider two approaches:
Approach I: One-hot encode each review into a sparse binary vector of length 10,000, with a 1 in every position whose word index appears in the review.
Approach II: Keep the first 256 words of each review, i.e., convert each review to a vector of length 256 whose entries are integer word indices between 1 and 10,000. If a review has fewer than 256 words, fill the remaining entries with the <PAD> index (0).
# Approach I: one-hot encode each review into a length-10,000 binary vector
vectorize_sequences <- function(sequences, dimension = 10000) {
  results <- matrix(0, nrow = length(sequences), ncol = dimension)
  for (i in 1:length(sequences))
    results[i, sequences[[i]]] <- 1   # set a 1 for every word index present in review i
  results
}
x_train <- vectorize_sequences(train_data)
x_test <- vectorize_sequences(test_data)
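As a sanity check on the one-hot encoding (a small sketch): the number of 1s in a row should equal the number of distinct word indices in the corresponding review.
sum(x_train[1, ]) == length(unique(train_data[[1]]))   # should be TRUE
dim(x_train)                                            # 25000 x 10000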
model1 <- keras_model_sequential() %>%
  layer_dense(units = 16, activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 16, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")
summary(model1)
## ___________________________________________________________________________
## Layer (type) Output Shape Param #
## ===========================================================================
## dense_1 (Dense) (None, 16) 160016
## ___________________________________________________________________________
## dense_2 (Dense) (None, 16) 272
## ___________________________________________________________________________
## dense_3 (Dense) (None, 1) 17
## ===========================================================================
## Total params: 160,305
## Trainable params: 160,305
## Non-trainable params: 0
## ___________________________________________________________________________
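The parameter counts in the summary can be reproduced by hand: each dense layer has (inputs x units) weights plus one bias per unit.
10000 * 16 + 16   # dense_1: 160016
16 * 16 + 16      # dense_2: 272
16 * 1 + 1        # dense_3: 17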
model1 %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)
history <- model1 %>%
  fit(x_train, train_labels, epochs = 20, batch_size = 128)
plot(history)
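The call above trains on all 25,000 reviews, so the plotted history only shows training loss and accuracy. To monitor overfitting, one could hold out part of the training data, for example (a sketch; the 20% split is an arbitrary choice):
history <- model1 %>%
  fit(x_train, train_labels,
      epochs = 20, batch_size = 128,
      validation_split = 0.2)
plot(history)   # now also shows validation loss and accuracy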
predictions <- model1 %>% predict(x_test)
table(test_labels, predictions>0.5)
##
## test_labels FALSE TRUE
## 0 10149 2351
## 1 1592 10908
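The overall test accuracy implied by this table, (10149 + 10908) / 25000 ≈ 0.84, can also be computed directly:
mean((predictions > 0.5) == test_labels)   # ~0.84 for the run above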
roc(test_labels, as.vector(predictions))
##
## Call:
## roc.default(response = test_labels, predictor = as.vector(predictions))
##
## Data: as.vector(predictions) in 12500 controls (test_labels 0) < 12500 cases (test_labels 1).
## Area under the curve: 0.9054
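The roc object returned by pROC can also be plotted to inspect the whole curve (a one-line sketch):
plot(roc(test_labels, as.vector(predictions)))   # ROC curve for model 1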
train_data_x <- pad_sequences(
  train_data,
  value = word_index_df %>% filter(word == "<PAD>") %>% select(idx) %>% pull(),
  padding = "post",
  maxlen = 256
)
test_data_x <- pad_sequences(
  test_data,
  value = word_index_df %>% filter(word == "<PAD>") %>% select(idx) %>% pull(),
  padding = "post",
  maxlen = 256
)
Check the training and test data sets.
dim(train_data_x)
## [1] 25000 256
dim(test_data_x)
## [1] 25000 256
train_data_x[1, ]
## [1] 1 14 22 16 43 530 973 1622 1385 65 458 4468 66 3941
## [15] 4 173 36 256 5 25 100 43 838 112 50 670 2 9
## [29] 35 480 284 5 150 4 172 112 167 2 336 385 39 4
## [43] 172 4536 1111 17 546 38 13 447 4 192 50 16 6 147
## [57] 2025 19 14 22 4 1920 4613 469 4 22 71 87 12 16
## [71] 43 530 38 76 15 13 1247 4 22 17 515 17 12 16
## [85] 626 18 2 5 62 386 12 8 316 8 106 5 4 2223
## [99] 5244 16 480 66 3785 33 4 130 12 16 38 619 5 25
## [113] 124 51 36 135 48 25 1415 33 6 22 12 215 28 77
## [127] 52 5 14 407 16 82 2 8 4 107 117 5952 15 256
## [141] 4 2 7 3766 5 723 36 71 43 530 476 26 400 317
## [155] 46 7 4 2 1029 13 104 88 4 381 15 297 98 32
## [169] 2071 56 26 141 6 194 7486 18 4 226 22 21 134 476
## [183] 26 480 5 144 30 5535 18 51 36 28 224 92 25 104
## [197] 4 226 65 16 38 1334 88 12 16 283 5 16 4472 113
## [211] 103 32 15 16 5345 19 178 32 0 0 0 0 0 0
## [225] 0 0 0 0 0 0 0 0 0 0 0 0 0 0
## [239] 0 0 0 0 0 0 0 0 0 0 0 0 0 0
## [253] 0 0 0 0
train_data[[1]]
## [1] 1 14 22 16 43 530 973 1622 1385 65 458 4468 66 3941
## [15] 4 173 36 256 5 25 100 43 838 112 50 670 2 9
## [29] 35 480 284 5 150 4 172 112 167 2 336 385 39 4
## [43] 172 4536 1111 17 546 38 13 447 4 192 50 16 6 147
## [57] 2025 19 14 22 4 1920 4613 469 4 22 71 87 12 16
## [71] 43 530 38 76 15 13 1247 4 22 17 515 17 12 16
## [85] 626 18 2 5 62 386 12 8 316 8 106 5 4 2223
## [99] 5244 16 480 66 3785 33 4 130 12 16 38 619 5 25
## [113] 124 51 36 135 48 25 1415 33 6 22 12 215 28 77
## [127] 52 5 14 407 16 82 2 8 4 107 117 5952 15 256
## [141] 4 2 7 3766 5 723 36 71 43 530 476 26 400 317
## [155] 46 7 4 2 1029 13 104 88 4 381 15 297 98 32
## [169] 2071 56 26 141 6 194 7486 18 4 226 22 21 134 476
## [183] 26 480 5 144 30 5535 18 51 36 28 224 92 25 104
## [197] 4 226 65 16 38 1334 88 12 16 283 5 16 4472 113
## [211] 103 32 15 16 5345 19 178 32
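Comparing the two printouts above, the padded row agrees with the original review and is filled with the <PAD> index (0) afterwards; this can be checked programmatically (a small sketch):
n1 <- length(train_data[[1]])                    # length of the first review (218 here)
all(train_data_x[1, 1:n1] == train_data[[1]])    # TRUE: first n1 entries unchanged
all(train_data_x[1, (n1 + 1):256] == 0)          # TRUE: remainder padded with 0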
Our input data are first fed into an embedding layer, which maps each word index in the vocabulary (vocab_size = 10000) to a vector of length 16 (output_dim = 16); this lookup table accounts for 10,000 x 16 = 160,000 parameters. Each review, padded to 256 words, is then represented by a 256-by-16 matrix. Applying layer_global_average_pooling_1d() takes the column means of that matrix, so each review is reduced to a single vector of length 16, which is finally fed into a fully connected layer with 16 hidden units.
vocab_size <- 10000
model2 <- keras_model_sequential() %>%
  layer_embedding(input_dim = vocab_size, output_dim = 16) %>%
  layer_global_average_pooling_1d() %>%
  layer_dense(units = 16, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")
summary(model2)
## ___________________________________________________________________________
## Layer (type) Output Shape Param #
## ===========================================================================
## embedding_1 (Embedding) (None, None, 16) 160000
## ___________________________________________________________________________
## global_average_pooling1d_1 (Glob (None, 16) 0
## ___________________________________________________________________________
## dense_4 (Dense) (None, 16) 272
## ___________________________________________________________________________
## dense_5 (Dense) (None, 1) 17
## ===========================================================================
## Total params: 160,289
## Trainable params: 160,289
## Non-trainable params: 0
## ___________________________________________________________________________
model2 %>% compile(
  optimizer = 'adam',
  loss = 'binary_crossentropy',
  metrics = list('accuracy')
)
history <- model2 %>%
  fit(train_data_x, train_labels,
      epochs = 40, batch_size = 128, verbose = 1)
plot(history)
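As with model 1, this run uses no validation data for its 40 epochs. One option (a sketch, not part of the run reported below) is to combine a validation split with early stopping on the validation loss:
history <- model2 %>%
  fit(train_data_x, train_labels,
      epochs = 40, batch_size = 128, verbose = 1,
      validation_split = 0.2,
      callbacks = list(callback_early_stopping(monitor = "val_loss", patience = 3)))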
predictions <- model2 %>% predict(test_data_x)
table(test_labels, predictions>0.5)
##
## test_labels FALSE TRUE
## 0 10688 1812
## 1 2149 10351
roc(test_labels, as.vector(predictions))
##
## Call:
## roc.default(response = test_labels, predictor = as.vector(predictions))
##
## Data: as.vector(predictions) in 12500 controls (test_labels 0) < 12500 cases (test_labels 1).
## Area under the curve: 0.9173
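For the runs shown above, the embedding-based model2 achieves a slightly higher AUC than the one-hot model1. A direct side-by-side comparison (a small sketch, assuming both fitted models are still in memory):
roc1 <- roc(test_labels, as.vector(model1 %>% predict(x_test)))
roc2 <- roc(test_labels, as.vector(model2 %>% predict(test_data_x)))
c(model1 = auc(roc1), model2 = auc(roc2))   # ~0.905 vs ~0.917 for the runs above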