Re-posted from: https://tensorflowjulia.blogspot.com/2018/08/feature-crosses.html
The next part of the Machine Learning Crash Course deals with constructing bucketized features and feature crosses. The Jupyter notebook can be downloaded here.
We use quantiles to put the feature data into different categories. This is done in the functions get_quantile_based_boundaries and construct_bucketized_column. To construct one-hot feature columns, we use construct_bucketized_onehot_column. The drawback of these methods is that they loop over the whole dataset element by element for the conversion, which is computationally expensive. Leave a comment if you have an idea for a better method!
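For illustration, here is a minimal sketch (my own, not code from the notebook) of the quantile idea on a single column: StatsBase.nquantile computes the bucket boundaries, and searchsortedlast then assigns each value to a bucket without looping over the boundaries. The function name bucketize_column_sketch is hypothetical.

import StatsBase
function bucketize_column_sketch(values::Vector{Float64}, num_buckets::Int)
    # nquantile returns num_buckets+1 boundary values, from the minimum to the maximum.
    boundaries = StatsBase.nquantile(values, num_buckets)
    inner = boundaries[2:end-1]  # keep only the interior boundaries
    # searchsortedlast counts how many interior boundaries lie at or below each value,
    # which gives a bucket index in 0:num_buckets-1.
    return [searchsortedlast(inner, v) for v in values]
end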
Another difference from the original programming exercise is the use of the Adam Optimizer instead of the FTRL Optimizer. To my knowledge, TensorFlow.jl exposes only the following optimizers:
- Gradient Descent
- Momentum Optimizer
- Adam Optimizer
The Python API, by contrast, offers:
- Gradient Descent
- Momentum Optimizer
- Adagrad Optimizer
- Adadelta Optimizer
- Adam Optimizer
- Ftrl Optimizer
- RMSProp Optimizer
Some information on those can be found here. For a more technical discussion with lots of background information, have a look at this excellent blog post. If you know how to get other optimizers to work in Julia, I would be very interested.
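Switching between these in the training code below only means constructing a different optimizer object; the gradient-clipping setup stays the same. A minimal sketch, reusing the same calls as the train_model function further down (the loss tensor is assumed to be defined as it is there, and the MomentumOptimizer signature is my assumption based on the Python API):

# Pick one of the optimizers exposed by TensorFlow.jl's train module.
my_optimizer = train.AdamOptimizer(0.003)                # the choice used throughout this post
# my_optimizer = train.GradientDescentOptimizer(0.003)   # plain gradient descent
# my_optimizer = train.MomentumOptimizer(0.003, 0.9)     # momentum of 0.9 (signature assumed)
# The gradient-clipping training step is then built the same way for any of them:
gvs = train.compute_gradients(my_optimizer, loss)
capped_gvs = [(clip_by_norm(grad, 5.), var) for (grad, var) in gvs]
train_step = train.apply_gradients(my_optimizer, capped_gvs)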
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Plots
gr()
using DataFrames
using TensorFlow
import CSV
import StatsBase
sess=Session()
california_housing_dataframe = CSV.read("california_housing_train.csv", delim=",");
california_housing_dataframe = california_housing_dataframe[shuffle(1:size(california_housing_dataframe, 1)),:];
function preprocess_features(california_housing_dataframe)
"""Prepares input features from California housing data set.
Args:
california_housing_dataframe: A DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the features to be used for the model, including
synthetic features.
"""
selected_features = california_housing_dataframe[
[:latitude,
:longitude,
:housing_median_age,
#:total_rooms,
#:total_bedrooms,
#:population,
:households,
:median_income]]
processed_features = selected_features
# Create a synthetic feature.
processed_features[:rooms_per_person] = (
california_housing_dataframe[:total_rooms] ./
california_housing_dataframe[:population])
return processed_features
end
function preprocess_targets(california_housing_dataframe)
"""Prepares target features (i.e., labels) from California housing data set.
Args:
california_housing_dataframe: A DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the target feature.
"""
output_targets = DataFrame()
# Scale the target to be in units of thousands of dollars.
output_targets[:median_house_value] = (
california_housing_dataframe[:median_house_value] ./ 1000.0)
return output_targets
end
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(head(california_housing_dataframe,12000))
training_targets = preprocess_targets(head(california_housing_dataframe,12000))
# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(tail(california_housing_dataframe,5000))
validation_targets = preprocess_targets(tail(california_housing_dataframe,5000))
# Double-check that we've done the right thing.
println("Training examples summary:")
describe(training_examples)
println("Validation examples summary:")
describe(validation_examples)
println("Training targets summary:")
describe(training_targets)
println("Validation targets summary:")
describe(validation_targets)
function construct_feature_columns(input_features)
"""Construct the TensorFlow Feature Columns.
Args:
input_features: DataFrame of the numerical input features to use.
Returns:
A Float64 matrix of the feature values.
"""
out=convert(Array, input_features[:,:])
return convert.(Float64,out)
end
function create_batches(features, targets, steps, batch_size=5, num_epochs=0)
"""Create batches.
Args:
features: Input features.
targets: Target column.
steps: Number of steps.
batch_size: Batch size.
num_epochs: Number of epochs; 0 automatically calculates the number needed for the given steps and batch size.
Returns:
An extended set of feature and target columns from which batches can be extracted.
"""
if(num_epochs==0)
num_epochs=ceil(batch_size*steps/size(features,1))
end
names_features=names(features);
names_targets=names(targets);
features_batches=copy(features)
target_batches=copy(targets)
for i=1:num_epochs
select=shuffle(1:size(features,1))
if i==1
features_batches=(features[select,:])
target_batches=(targets[select,:])
else
append!(features_batches, features[select,:])
append!(target_batches, targets[select,:])
end
end
return features_batches, target_batches
end
function next_batch(features_batches, targets_batches, batch_size, iter)
"""Next batch.
Args:
features_batches: Features batches from create_batches.
targets_batches: Target batches from create_batches.
batch_size: Batch size.
iter: Number of the current iteration
Returns:
A tuple (ds, target) containing the next batch of features and targets.
"""
select=mod((iter-1)*batch_size+1, size(features_batches,1)):mod(iter*batch_size, size(features_batches,1));
ds=features_batches[select,:];
target=targets_batches[select,:];
return ds, target
end
function my_input_fn(features_batches, targets_batches, iter, batch_size=5, shuffle_flag=1)
"""Prepares the next batch of data for training.
Args:
features_batches: Features batches from create_batches.
targets_batches: Target batches from create_batches.
iter: Number of the current iteration.
batch_size: Size of batches to be passed to the model.
shuffle_flag: 1 or 0. Whether to shuffle the data within the batch.
Returns:
Tuple of (features, labels) for next data batch
"""
# Construct a dataset, and configure batching/repeating.
#ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds, target = next_batch(features_batches, targets_batches, batch_size, iter)
# Shuffle the data, if specified.
if shuffle_flag==1
select=shuffle(1:size(ds, 1));
ds = ds[select,:]
target = target[select, :]
end
# Return the next batch of data.
# features, labels = ds.make_one_shot_iterator().get_next()
return ds, target
end
function train_model(learning_rate,
steps,
batch_size,
feature_column_function::Function,
training_examples,
training_targets,
validation_examples,
validation_targets)
"""Trains a linear regression model of one feature.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
feature_column_function: Function for transforming the feature columns.
training_examples: DataFrame containing the training features.
training_targets: DataFrame containing the training targets.
validation_examples: DataFrame containing the validation features.
validation_targets: DataFrame containing the validation targets.
Returns:
weight: The weights of the model.
bias: Bias of the model.
p1: Graph containing the loss function values for the different iterations.
"""
periods = 10
steps_per_period = steps / periods
# Create feature columns.
feature_columns = placeholder(Float32)
target_columns = placeholder(Float32)
# Create a linear regressor object.
m=Variable(zeros(size(feature_column_function(training_examples),2),1))
b=Variable(0.0)
y=(feature_columns*m) .+ b
loss=reduce_sum((target_columns - y).^2)
features_batches, targets_batches = create_batches(training_examples, training_targets, steps, batch_size)
# Set up Adam optimizer
my_optimizer=(train.AdamOptimizer(learning_rate))
gvs = train.compute_gradients(my_optimizer, loss)
capped_gvs = [(clip_by_norm(grad, 5.), var) for (grad, var) in gvs]
my_optimizer = train.apply_gradients(my_optimizer,capped_gvs)
run(sess, global_variables_initializer())
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
println("Training model...")
println("RMSE (on training data):")
training_rmse = []
validation_rmse=[]
for period in 1:periods
# Train the model, starting from the prior state.
for i=1:steps_per_period
features, labels = my_input_fn(features_batches, targets_batches, convert(Int,(period-1)*steps_per_period+i), batch_size)
run(sess, my_optimizer, Dict(feature_columns=>feature_column_function(features), target_columns=>construct_feature_columns(labels)))
end
# Take a break and compute predictions.
training_predictions = run(sess, y, Dict(feature_columns=> feature_column_function(training_examples)));
validation_predictions = run(sess, y, Dict(feature_columns=> feature_column_function(validation_examples)));
# Compute loss.
training_mean_squared_error = mean((training_predictions- construct_feature_columns(training_targets)).^2)
training_root_mean_squared_error = sqrt(training_mean_squared_error)
validation_mean_squared_error = mean((validation_predictions- construct_feature_columns(validation_targets)).^2)
validation_root_mean_squared_error = sqrt(validation_mean_squared_error)
# Occasionally print the current loss.
println(" period ", period, ": ", training_root_mean_squared_error)
# Add the loss metrics from this period to our list.
push!(training_rmse, training_root_mean_squared_error)
push!(validation_rmse, validation_root_mean_squared_error)
end
weight = run(sess,m)
bias = run(sess,b)
println("Model training finished.")
# Output a graph of loss metrics over periods.
p1=plot(training_rmse, label="training", title="Root Mean Squared Error vs. Periods", ylabel="RMSE", xlabel="Periods")
p1=plot!(validation_rmse, label="validation")
println("Final RMSE (on training data): ", training_rmse[end])
println("Final Weight (on training data): ", weight)
println("Final Bias (on training data): ", bias)
return weight, bias, p1 #, calibration_data
end
weight, bias, p1 = train_model(
# TWEAK THESE VALUES TO SEE HOW MUCH YOU CAN IMPROVE THE RMSE
0.003, #learning rate
500, #steps
5, #batch_size
construct_feature_columns, # feature column function
training_examples,
training_targets,
validation_examples,
validation_targets)
plot(p1)
function get_quantile_based_boundaries(feature_values, num_buckets)
# construct_feature_columns returns an Array{Float64,2} (an n x 1 matrix), so [:] flattens it
# to the vector that StatsBase.nquantile expects here.
# nquantile(v, n) returns the n+1 quantile boundaries of v, from its minimum to its maximum.
quantiles = StatsBase.nquantile(construct_feature_columns(feature_values)[:], num_buckets)
return quantiles
end
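# construct_bucketized_column maps every value to its bucket index: for each feature column,
# it counts how many of that column's quantile boundaries lie at or below the value.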
function construct_bucketized_column(input_features, boundaries)
data_out=zeros(size(input_features))
for i=1:size(input_features,2)
curr_feature=input_features[:,i]
curr_boundary=boundaries[i]
for k=1:length(curr_boundary)
for j=1:size(input_features,1)
if(curr_feature[j] >= curr_boundary[k] )
data_out[j,i]+=1
end
end
end
end
return data_out
end
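# construct_bucketized_onehot_column one-hot encodes the bucket membership: every feature column
# is expanded into one output column per bucket, with a 1 in the bucket the value falls into.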
function construct_bucketized_onehot_column(input_features, boundaries)
length_out=0
for i=1:length(boundaries)
length_out+=length(boundaries[i])-1
end
data_out=zeros(size(input_features,1), length_out)
curr_index=1;
for i=1:size(input_features,2)
curr_feature=input_features[:,i]
curr_boundary=boundaries[i]
for k=1:length(curr_boundary)-1
for j=1:size(input_features,1)
if((curr_feature[j] >= curr_boundary[k]) && (curr_feature[j] < curr_boundary[k+1] ))
data_out[j,curr_index]+=1
end
end
curr_index+=1;
end
end
return data_out
end
# Divide households into 7 buckets.
households = california_housing_dataframe[:households]
bucketized_households = construct_bucketized_onehot_column(
households, [get_quantile_based_boundaries(
california_housing_dataframe[:households], 7)])
# Divide longitude into 10 buckets.
longitude = california_housing_dataframe[:longitude]
bucketized_longitude = construct_bucketized_onehot_column(
longitude, [get_quantile_based_boundaries(
california_housing_dataframe[:longitude], 10)])
quantiles_latitude=get_quantile_based_boundaries(
training_examples[:latitude], 10)
quantiles_longitude=get_quantile_based_boundaries(
training_examples[:longitude], 10)
quantiles_housing_median_age=get_quantile_based_boundaries(
training_examples[:housing_median_age], 7)
quantiles_households=get_quantile_based_boundaries(
training_examples[:households], 7)
quantiles_median_income=get_quantile_based_boundaries(
training_examples[:median_income], 7)
quantiles_rooms_per_person=get_quantile_based_boundaries(
training_examples[:rooms_per_person], 7)
quantiles_vec=[quantiles_latitude,
quantiles_longitude,
quantiles_housing_median_age,
quantiles_households,
quantiles_median_income,
quantiles_rooms_per_person
]
weight, bias, p1 = train_model(
# TWEAK THESE VALUES TO SEE HOW MUCH YOU CAN IMPROVE THE RMSE
0.03, #learning rate
2000, #steps
100, #batch_size
x -> construct_bucketized_onehot_column(x, quantiles_vec), # feature column function
training_examples,
training_targets,
validation_examples,
validation_targets)
plot(p1)
function construct_latXlong_onehot_column(input_features, boundaries)
#latitude and longitude are the first two columns - treat them separately
#initialization - calculate total length of feature_vec
length_out=0
# lat and long
length_lat=length(boundaries[1])-1
length_long= length(boundaries[2])-1
length_out+=length_lat*length_long
# all other features
for i=3:length(boundaries)
length_out+=length(boundaries[i])-1
end
data_out=zeros(size(input_features,1), length_out)
# all other features
curr_index=length_lat*length_long+1;
for i=3:size(input_features,2)
curr_feature=input_features[:,i]
curr_boundary=boundaries[i]
#println(curr_boundary)
for k=1:length(curr_boundary)-1
for j=1:size(input_features,1)
if((curr_feature[j] >= curr_boundary[k]) && (curr_feature[j] < curr_boundary[k+1] ))
data_out[j,curr_index]+=1
end
end
curr_index+=1;
end
end
# lat and long
data_temp=zeros(size(input_features,1), length_lat+length_long)
curr_index=1
for i=1:2
curr_feature=input_features[:,i]
curr_boundary=boundaries[i]
#println(curr_boundary)
for k=1:length(curr_boundary)-1
for j=1:size(input_features,1)
if((curr_feature[j] >= curr_boundary[k]) && (curr_feature[j] < curr_boundary[k+1] ))
data_temp[j,curr_index]+=1
end
end
curr_index+=1;
end
end
vec_temp=1
for j=1:size(input_features,1)
vec1=data_temp[j,1:length_lat]
vec2=data_temp[j, length_lat+1:length_lat+length_long]
vec_temp=vec1*vec2'
data_out[j, 1:length_lat*length_long]= (vec_temp)[:]
end
return data_out
end
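For clarity: the latitude-longitude cross above is formed by the outer product of the two one-hot bucket vectors, which yields a single one-hot encoding over all (latitude bucket, longitude bucket) pairs. A tiny worked example with made-up bucket assignments (hypothetical values, not taken from the dataset):

lat_onehot  = [0., 1., 0.]              # value falls into latitude bucket 2 of 3
long_onehot = [0., 0., 1., 0.]          # value falls into longitude bucket 3 of 4
cross = (lat_onehot * long_onehot')[:]  # 12-element vector, flattened column-major
# Exactly one entry equals 1, at index (3-1)*3 + 2 = 8, so the crossed feature
# uniquely identifies the (latitude bucket 2, longitude bucket 3) combination.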
weight, bias, p1 = train_model(
# TWEAK THESE VALUES TO SEE HOW MUCH YOU CAN IMPROVE THE RMSE
0.03, #learning rate
2000, #steps
100, #batch_size
x -> construct_latXlong_onehot_column(x, quantiles_vec), # feature column function
training_examples,
training_targets,
validation_examples,
validation_targets)
plot(p1)