Re-posted from: https://tensorflowjulia.blogspot.com/2018/09/intro-to-sparse-data-and-embeddings.html
This is the final exercise of Google’s Machine Learning Crash Course. We use the ACL 2011 IMDB dataset to train a Neural Network in predicting wether a movie review is favourable or not, based on the words used in the review text.
There are two notable differences from the original exercise:
- We do not build a proper input pipeline for the data. This creates a lot of computational overhead – in principle, we need to preprocess the whole dataset before we start training the network. In practise, this if often not feasible. It would be interesting to see how such a pipeline can be implemented for TensorFlow.jl. The Julia package MLLabelUtils.jl might come handy for this task.
- When visualizing the embedding layer, our Neural Network builds effectively a 1D-representation of keywords to describe if a movie has a favorable review or not. In the Python version, a real 2D embedding is obtained (see the pictures). The reasons for this difference are unknown.
Julia embedding – effectively a 1D line
Python embedding
The Jupyter notebook can be downloaded here.
In [0]:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
In [1]:
using Plots
using Distributions
gr()
using DataFrames
using TensorFlow
import CSV
import StatsBase
using PyCall
@pyimport sklearn.metrics as sklm
using Images
using Colors
using HDF5
sess=Session(Graph())
Out[1]:
In [2]:
c = h5open("train_data.h5", "r") do file
global train_labels=read(file, "output_labels")
global train_features=read(file, "output_features")
end
c = h5open("test_data.h5", "r") do file
global test_labels=read(file, "output_labels")
global test_features=read(file, "output_features")
end
train_labels=train_labels'
test_labels=test_labels';
In [4]:
test_labels[301,:]
Out[4]:
In [5]:
test_features[301]
Out[5]:
In [3]:
function create_batches(features, targets, steps, batch_size=5, num_epochs=0)
"""Create batches.
Args:
features: Input features.
targets: Target column.
steps: Number of steps.
batch_size: Batch size.
num_epochs: Number of epochs, 0 will let TF automatically calculate the correct number
Returns:
An extended set of feature and target columns from which batches can be extracted.
"""
if(num_epochs==0)
num_epochs=ceil(batch_size*steps/size(features,1))
end
features_batches=copy(features)
target_batches=copy(targets)
for i=1:num_epochs
select=shuffle(1:size(features,1))
if i==1
features_batches=(features[select,:])
target_batches=(targets[select,:])
else
features_batches=vcat(features_batches, features[select,:])
target_batches=vcat(target_batches, targets[select,:])
end
end
return features_batches, target_batches
end
Out[3]:
In [4]:
function construct_feature_columns(input_features):
"""Construct the TensorFlow Feature Columns.
Args:
input_features: The numerical input features to use.
Returns:
A set of feature columns
"""
out=convert(Array, input_features[:,:])
return convert.(Float64,out)
end
Out[4]:
In [5]:
function next_batch(features_batches, targets_batches, batch_size, iter)
"""Next batch.
Args:
features_batches: Features batches from create_batches.
targets_batches: Target batches from create_batches.
batch_size: Batch size.
iter: Number of the current iteration
Returns:
A batch of features and targets.
"""
select=mod((iter-1)*batch_size+1, size(features_batches,1)):mod(iter*batch_size, size(features_batches,1));
ds=features_batches[select,:];
target=targets_batches[select,:];
return ds, target
end
Out[5]:
In [6]:
function my_input_fn(features_batches, targets_batches, iter, batch_size=5, shuffle_flag=1):
"""Prepares a batch of features and labels for model training.
Args:
features_batches: Features batches from create_batches.
targets_batches: Target batches from create_batches.
iter: Number of the current iteration
batch_size: Batch size.
shuffle_flag: Determines wether data is shuffled before being returned
Returns:
Tuple of (features, labels) for next data batch
"""
# Construct a dataset, and configure batching/repeating.
ds, target = next_batch(features_batches, targets_batches, batch_size, iter)
# Shuffle the data, if specified.
if shuffle_flag==1
select=shuffle(1:size(ds, 1));
ds = ds[select,:]
target = target[select, :]
end
# Return the next batch of data.
return ds, target
end
Out[6]:
In [7]:
# 50 informative terms that compose our model vocabulary
informative_terms = ["bad", "great", "best", "worst", "fun", "beautiful",
"excellent", "poor", "boring", "awful", "terrible",
"definitely", "perfect", "liked", "worse", "waste",
"entertaining", "loved", "unfortunately", "amazing",
"enjoyed", "favorite", "horrible", "brilliant", "highly",
"simple", "annoying", "today", "hilarious", "enjoyable",
"dull", "fantastic", "poorly", "fails", "disappointing",
"disappointment", "not", "him", "her", "good", "time",
"?", ".", "!", "movie", "film", "action", "comedy",
"drama", "family"]
Out[7]:
In [8]:
# function for creating categorial colum from vocabulary list in one hot encoding
function create_data_columns(data, informative_terms)
onehotmat=zeros(length(data), length(informative_terms))
for i=1:length(data)
string=data[i]
for j=1:length(informative_terms)
if contains(string, informative_terms[j])
onehotmat[i,j]=1
end
end
end
return onehotmat
end
Out[8]:
In [9]:
train_feature_mat=create_data_columns(train_features, informative_terms)
test_features_mat=create_data_columns(test_features, informative_terms);
In [10]:
function train_linear_classifier_model(learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets)
"""Trains a linear classifier model.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples, etc: The input data.
Returns:
weight: The weights of the linear model.
bias: The bias of the linear model.
validation_probabilities: Probabilities for the validation examples.
p1: Plot of loss function for the different periods
"""
periods = 10
steps_per_period = steps / periods
# Create feature columns.
feature_columns = placeholder(Float32)
target_columns = placeholder(Float32)
eps=1E-8
# these two variables need to be initialized as 0, otherwise method gives problems
m=Variable(zeros(size(training_examples,2),1).+0.0)
b=Variable(0.0)
ytemp=nn.sigmoid(feature_columns*m + b)
y= clip_by_value(ytemp, 0.0, 1.0)
loss = -reduce_mean(log(y+eps).*target_columns + log(1-y-eps).*(1-target_columns))
features_batches, targets_batches = create_batches(training_examples, training_targets, steps, batch_size)
# Advanced Adam optimizer decent with gradient clipping
my_optimizer=(train.AdamOptimizer(learning_rate))
gvs = train.compute_gradients(my_optimizer, loss)
capped_gvs = [(clip_by_norm(grad, 5.0), var) for (grad, var) in gvs]
my_optimizer = train.apply_gradients(my_optimizer,capped_gvs)
run(sess, global_variables_initializer()) #this needs to be run after constructing the optimizer!
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
println("Training model...")
println("LogLoss (on training data):")
training_log_losses = []
validation_log_losses=[]
for period in 1:periods
# Train the model, starting from the prior state.
for i=1:steps_per_period
features, labels = my_input_fn(features_batches, targets_batches, convert(Int,(period-1)*steps_per_period+i), batch_size)
run(sess, my_optimizer, Dict(feature_columns=>construct_feature_columns(features), target_columns=>construct_feature_columns(labels)))
end
# Take a break and compute predictions.
training_probabilities = run(sess, y, Dict(feature_columns=> construct_feature_columns(training_examples)));
validation_probabilities = run(sess, y, Dict(feature_columns=> construct_feature_columns(validation_examples)));
# Compute loss.
training_log_loss=run(sess,loss,Dict(feature_columns=> construct_feature_columns(training_examples), target_columns=>construct_feature_columns(training_targets)))
validation_log_loss =run(sess,loss,Dict(feature_columns=> construct_feature_columns(validation_examples), target_columns=>construct_feature_columns(validation_targets)))
# Occasionally print the current loss.
println(" period ", period, ": ", training_log_loss)
weight = run(sess,m)
bias = run(sess,b)
loss_val=run(sess,loss,Dict(feature_columns=> construct_feature_columns(training_examples), target_columns=>construct_feature_columns(training_targets)))
# Add the loss metrics from this period to our list.
push!(training_log_losses, training_log_loss)
push!(validation_log_losses, validation_log_loss)
end
weight = run(sess,m)
bias = run(sess,b)
println("Model training finished.")
# Output a graph of loss metrics over periods.
p1=plot(training_log_losses, label="training", title="LogLoss vs. Periods", ylabel="LogLoss", xlabel="Periods")
p1=plot!(validation_log_losses, label="validation")
println("Final LogLoss (on training data): ", training_log_losses[end])
# calculate additional ouputs
validation_probabilities = run(sess, y, Dict(feature_columns=> construct_feature_columns(validation_examples)));
return weight, bias, validation_probabilities, p1
end
Out[10]:
In [14]:
weight, bias, validation_probabilities, p1 = train_linear_classifier_model(
0.0005, #learning rate
1000, #steps
50, #batch_size
train_feature_mat,
train_labels,
test_features_mat,
test_labels)
Out[14]:
In [15]:
plot(p1)
Out[15]:
In [13]:
# Function for converting probabilities to 0/1 decision
function castto01(probabilities)
out=copy(probabilities)
for i=1:length(probabilities)
if(probabilities[i]<0.5)
out[i]=0
else
out[i]=1
end
end
return out
end
Out[13]:
In [17]:
evaluation_metrics=DataFrame()
false_positive_rate, true_positive_rate, thresholds = sklm.roc_curve(
vec(construct_feature_columns(test_labels)), vec(validation_probabilities))
evaluation_metrics[:auc]=sklm.roc_auc_score(construct_feature_columns(test_labels), vec(validation_probabilities))
validation_predictions=castto01(validation_probabilities);
evaluation_metrics[:accuracy]=accuracy = sklm.accuracy_score(test_labels, validation_predictions)
p2=plot(false_positive_rate, true_positive_rate, label="our model")
p2=plot!([0, 1], [0, 1], label="random classifier");
In [18]:
println("AUC on the validation set: ", evaluation_metrics[:auc])
println("Accuracy on the validation set: ", evaluation_metrics[:accuracy])
In [19]:
plot(p2)
Out[19]:
In [11]:
function train_nn_classification_model(learning_rate,
steps,
batch_size,
hidden_units,
is_embedding,
keep_probability,
training_examples,
training_targets,
validation_examples,
validation_targets)
"""Trains a neural network classification model.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A vector describing the layout of the neural network.
is_embedding: 'true' or 'false' depending on if the first layer of the NN is an embedding layer.
keep_probability: A `float`, the probability of keeping a node active during one training step.
Returns:
p1: Plot of the loss function for the different periods.
y: The final layer of the TensorFlow network.
final_probabilities: Final predicted probabilities on the validation examples.
weight_export: The weights of the first layer of the NN
feature_columns: TensorFlow feature columns.
target_columns: TensorFlow target columns.
"""
periods = 10
steps_per_period = steps / periods
# Create feature columns.
feature_columns = placeholder(Float32, shape=[-1, size(training_examples,2)])
target_columns = placeholder(Float32, shape=[-1, size(training_targets,2)])
# Network parameters
push!(hidden_units,size(training_targets,2)) #create an output node that fits to the size of the targets
activation_functions = Vector{Function}(size(hidden_units,1))
activation_functions[1:end-1]=z->nn.dropout(nn.relu(z), keep_probability)
activation_functions[end] = nn.sigmoid #Last function should be idenity as we need the logits
# create network
flag=0
weight_export=Variable([1])
Zs = [feature_columns]
for (ii,(hlsize, actfun)) in enumerate(zip(hidden_units, activation_functions))
Wii = get_variable("W_$ii"*randstring(4), [get_shape(Zs[end], 2), hlsize], Float32)
bii = get_variable("b_$ii"*randstring(4), [hlsize], Float32)
if((is_embedding==true) & (flag==0))
Zii=Zs[end]*Wii
else
Zii = actfun(Zs[end]*Wii + bii)
end
push!(Zs, Zii)
if(flag==0)
weight_export=Wii
flag=1
end
end
y=Zs[end]
eps=1e-8
cross_entropy = -reduce_mean(log(y+eps).*target_columns + log(1-y+eps).*(1-target_columns))
features_batches, targets_batches = create_batches(training_examples, training_targets, steps, batch_size)
# Standard Adam Optimizer
my_optimizer=train.minimize(train.AdamOptimizer(learning_rate), cross_entropy)
run(sess, global_variables_initializer())
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
println("Training model...")
println("LogLoss error (on validation data):")
training_log_losses = []
validation_log_losses = []
for period in 1:periods
# Train the model, starting from the prior state.
for i=1:steps_per_period
features, labels = my_input_fn(features_batches, targets_batches, convert(Int,(period-1)*steps_per_period+i), batch_size)
run(sess, my_optimizer, Dict(feature_columns=>construct_feature_columns(features), target_columns=>construct_feature_columns(labels)))
end
# Take a break and compute log loss.
training_log_loss = run(sess, cross_entropy, Dict(feature_columns=> construct_feature_columns(training_examples), target_columns=>construct_feature_columns(training_targets)));
validation_log_loss = run(sess, cross_entropy, Dict(feature_columns=> construct_feature_columns(validation_examples), target_columns=>construct_feature_columns(validation_targets)));
# Occasionally print the current loss.
println(" period ", period, ": ", training_log_loss)
# Add the loss metrics from this period to our list.
push!(training_log_losses, training_log_loss)
push!(validation_log_losses, validation_log_loss)
end
println("Model training finished.")
# Calculate final predictions (not probabilities, as above).
final_probabilities = run(sess, y, Dict(feature_columns=> validation_examples, target_columns=>validation_targets))
final_predictions=0.0.*copy(final_probabilities)
final_predictions=castto01(final_probabilities)
accuracy = sklm.accuracy_score(validation_targets, final_predictions)
println("Final accuracy (on validation data): ", accuracy)
# Output a graph of loss metrics over periods.
p1=plot(training_log_losses, label="training", title="LogLoss vs. Periods", ylabel="LogLoss", xlabel="Periods")
p1=plot!(validation_log_losses, label="validation")
return p1, y, final_probabilities, weight_export, feature_columns, target_columns
end
Out[11]:
In [21]:
sess=Session(Graph())
p1, y, final_probabilities, weight_export, feature_columns, target_columns = train_nn_classification_model(
0.003, #learning rate
1000, #steps
50, #batch_size
[20, 20], #hidden_units
false, #is_embedding
1.0, # keep probability
train_feature_mat,
train_labels,
test_features_mat,
test_labels)
Out[21]:
In [22]:
plot(p1)
Out[22]:
In [23]:
evaluation_metrics=DataFrame()
false_positive_rate, true_positive_rate, thresholds = sklm.roc_curve(
vec(construct_feature_columns(test_labels)), vec(final_probabilities))
evaluation_metrics[:auc]=sklm.roc_auc_score(construct_feature_columns(test_labels), vec(final_probabilities))
validation_predictions=castto01(final_probabilities);
evaluation_metrics[:accuracy]=accuracy = sklm.accuracy_score(test_labels, validation_predictions)
p2=plot(false_positive_rate, true_positive_rate, label="our model")
p2=plot!([0, 1], [0, 1], label="random classifier");
println("AUC on the validation set: ", evaluation_metrics[:auc])
println("Accuracy on the validation set: ", evaluation_metrics[:accuracy])
In [24]:
plot(p2)
Out[24]:
In [14]:
sess=Session(Graph())
p1, y, final_probabilities, weight_export, feature_columns, target_columns = train_nn_classification_model(
0.003, #learning rate
1000, #steps
50, #batch_size
[2, 20, 20], #hidden_units
true,
1.0, # keep probability
train_feature_mat,
train_labels,
test_features_mat,
test_labels)
Out[14]:
In [15]:
plot(p1)
Out[15]:
In [16]:
evaluation_metrics=DataFrame()
false_positive_rate, true_positive_rate, thresholds = sklm.roc_curve(
vec(construct_feature_columns(test_labels)), vec(final_probabilities))
evaluation_metrics[:auc]=sklm.roc_auc_score(construct_feature_columns(test_labels), vec(final_probabilities))
validation_predictions=castto01(final_probabilities);
evaluation_metrics[:accuracy]=accuracy = sklm.accuracy_score(test_labels, validation_predictions)
p2=plot(false_positive_rate, true_positive_rate, label="our model")
p2=plot!([0, 1], [0, 1], label="random classifier");
println("AUC on the validation set: ", evaluation_metrics[:auc])
println("Accuracy on the validation set: ", evaluation_metrics[:accuracy])
In [17]:
plot(p2)
Out[17]:
In [18]:
xy_coord=run(sess, weight_export, Dict(feature_columns=> test_features_mat, target_columns=>test_labels))
p3=plot(title="Embedding Space", xlims=(minimum(xy_coord[:,1])-0.3, maximum(xy_coord[:,1])+0.3), ylims=(minimum(xy_coord[:,2])-0.1, maximum(xy_coord[:,2]) +0.3) )
for term_index=1:length(informative_terms)
p3=annotate!(xy_coord[term_index,1], xy_coord[term_index,1], informative_terms[term_index] )
end
plot(p3)
Out[18]:
In [30]:
vocabulary=Array{String}(0)
open("terms.txt") do file
for ln in eachline(file)
push!(vocabulary, ln)
end
end
In [31]:
vocabulary
Out[31]:
In [48]:
using JLD
train_features_full=load("IMDB_fullmatrix_datacolumns.jld", "train_features_full")
test_features_full=load("IMDB_fullmatrix_datacolumns.jld", "test_features_full")
Out[48]:
In [49]:
sess=Session(Graph())
p1, y, final_probabilities, weight_export, feature_columns, target_columns = train_nn_classification_model(
# TWEAK THESE VALUES TO SEE HOW MUCH YOU CAN IMPROVE THE RMSE
0.003, #learning rate
1000, #steps
50, #batch_size
[2, 20, 20], #hidden_units
true,
1.0, # keep probability
train_features_full,
train_labels,
test_features_full,
test_labels)
Out[49]:
In [4]:
#plot(p1)
In [51]:
evaluation_metrics=DataFrame()
false_positive_rate, true_positive_rate, thresholds = sklm.roc_curve(
vec(construct_feature_columns(test_labels)), vec(final_probabilities))
evaluation_metrics[:auc]=sklm.roc_auc_score(construct_feature_columns(test_labels), vec(final_probabilities))
validation_predictions=castto01(final_probabilities);
evaluation_metrics[:accuracy]=accuracy = sklm.accuracy_score(test_labels, validation_predictions)
p2=plot(false_positive_rate, true_positive_rate, label="our model")
p2=plot!([0, 1], [0, 1], label="random classifier");
println("AUC on the validation set: ", evaluation_metrics[:auc])
println("Accuracy on the validation set: ", evaluation_metrics[:accuracy])
In [3]:
#plot(p2)
In [54]:
train_features_sparse=sparse(train_features_full)
test_features_sparse=sparse(test_features_full)
Out[54]:
In [56]:
# For saving the data
#save("IMDB_sparsematrix_datacolumns.jld", "train_features_sparse", train_features_sparse, "test_features_sparse", test_features_sparse)
In [55]:
sess=Session(Graph())
p1, y, final_probabilities, weight_export, feature_columns, target_columns = train_nn_classification_model(
0.003, #learning rate
1000, #steps
50, #batch_size
[2, 20, 20], #hidden_units
true,
1.0, # keep probability
train_features_sparse,
train_labels,
test_features_sparse,
test_labels)
Out[55]:
In [2]:
#plot(p1)
In [57]:
evaluation_metrics=DataFrame()
false_positive_rate, true_positive_rate, thresholds = sklm.roc_curve(
vec(construct_feature_columns(test_labels)), vec(final_probabilities))
evaluation_metrics[:auc]=sklm.roc_auc_score(construct_feature_columns(test_labels), vec(final_probabilities))
validation_predictions=castto01(final_probabilities);
evaluation_metrics[:accuracy]=accuracy = sklm.accuracy_score(test_labels, validation_predictions)
p2=plot(false_positive_rate, true_positive_rate, label="our model")
p2=plot!([0, 1], [0, 1], label="random classifier");
println("AUC on the validation set: ", evaluation_metrics[:auc])
println("Accuracy on the validation set: ", evaluation_metrics[:accuracy])
In [1]:
#plot(p2)