trainable=False)(errors)  # calculate weighted error by layer
errors_by_time = Flatten()(errors_by_time)  # will be (batch_size, nt)
final_errors = Dense(1,
                     weights=[time_loss_weights,
                              np.zeros(1)],
                     trainable=False)(errors_by_time)  # weight errors by time
# Create model
model = Model(inputs=inputs, outputs=final_errors)
# Optionally load pretrained weights
#model.load_weights('/home/daniel/AnacondaProjects/prednet-master_2/model_data_keras2/prednet_kitti_weights-extrapfinetuned.hdf5')
# Configure the model
model.compile(loss='mean_absolute_error', optimizer='adam')
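
# A minimal numpy sketch (illustrative values only, assumed shapes) of what the
# two frozen Dense layers above compute: a weighted sum of the per-layer errors,
# followed by a weighted sum over time. 'errors_demo', 'layer_w' and 'time_w'
# are stand-ins, not names from the original script.
import numpy as np
errors_demo = np.random.rand(2, 10, 4)                 # (batch_size, nt, nb_layers)
layer_w = np.array([1., 0., 0., 0.])                   # per-layer loss weights
time_w = np.concatenate([[0.], np.full(9, 1. / 9)])    # per-timestep loss weights
weighted_by_layer = (errors_demo * layer_w).sum(axis=-1)      # (batch_size, nt)
loss_per_sample = (weighted_by_layer * time_w).sum(axis=-1)   # (batch_size,)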

train_generator = SequenceGenerator(train_file,
                                    train_sources,
                                    nt,
                                    batch_size=batch_size,
                                    shuffle=True)
val_generator = SequenceGenerator(val_file,
                                  val_sources,
                                  nt,
                                  batch_size=batch_size,
                                  N_seq=N_seq_val)

lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001  # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
callbacks = [LearningRateScheduler(lr_schedule)]
if save_model:
    if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
    callbacks.append(
        ModelCheckpoint(filepath=weights_file,
                        monitor='val_loss',
data_format = (layer_config['data_format'] if 'data_format' in layer_config
               else layer_config['dim_ordering'])
prednet = PredNet(weights=orig_model.layers[1].get_weights(), **layer_config)

input_shape = list(orig_model.layers[0].batch_input_shape[1:])
input_shape[0] = nt

inputs = Input(input_shape)
predictions = prednet(inputs)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss=extrap_loss, optimizer='adam')
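
# Hedged sketch of the kind of custom 'extrap_loss' compiled above (its actual
# definition is not shown in this snippet): mean absolute error restricted to
# every frame after the first, as in PredNet extrapolation fine-tuning.
# Assumes the Keras backend has been imported as K.
def extrap_loss_sketch(y_true, y_hat):
    y_true = y_true[:, 1:]
    y_hat = y_hat[:, 1:]
    return 0.5 * K.mean(K.abs(y_true - y_hat), axis=-1)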

train_generator = SequenceGenerator(train_file,
                                    train_sources,
                                    nt,
                                    batch_size=batch_size,
                                    shuffle=True,
                                    output_mode='prediction')
val_generator = SequenceGenerator(val_file,
                                  val_sources,
                                  nt,
                                  batch_size=batch_size,
                                  N_seq=N_seq_val,
                                  output_mode='prediction')

lr_schedule = lambda epoch: 0.0001 if epoch < 75 else 0.00001  # start with lr of 0.0001 and then drop to 0.00001 after 75 epochs
callbacks = [LearningRateScheduler(lr_schedule)]
if save_model:
    if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
    callbacks.append(
        ModelCheckpoint(filepath=weights_file,
Example #3
layer_config = train_model.layers[1].get_config()
layer_config['output_mode'] = 'prediction'
data_format = (layer_config['data_format'] if 'data_format' in layer_config
               else layer_config['dim_ordering'])
test_pred_rgcLSTM = Pred_rgcLSTM(weights=train_model.layers[1].get_weights(),
                                 **layer_config)
input_shape = list(train_model.layers[0].batch_input_shape[1:])
input_shape[0] = nt
inputs = Input(shape=tuple(input_shape))
predictions = test_pred_rgcLSTM(inputs)
test_model = Model(inputs=inputs, outputs=predictions)

test_generator = SequenceGenerator(test_file,
                                   test_sources,
                                   nt,
                                   sequence_start_mode='unique',
                                   data_format=data_format)
X_test = test_generator.create_all()
X_hat = test_model.predict(X_test, batch_size)
if data_format == 'channels_first':
    X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
    X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
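    # The transposes above reorder frames from channels-first to channels-last,
    # i.e. (samples, nt, C, H, W) -> (samples, nt, H, W, C), so the metrics
    # below can treat the last axis as color channels.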

# Compare MSE of Pred_rgcLSTM predictions vs. using the last frame.  Write results to prediction_scores.txt
mse_model = np.mean(
    (X_test[:, 1:] -
     X_hat[:, 1:])**2)  # look at all timesteps except the first
mae_model = np.mean(np.abs(X_test[:, 1:] - X_hat[:, 1:]))
mse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:])**2)
ssim = evaluu.compare_ssim(X_test[:, 1:],
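
# Hedged sketch of a per-frame SSIM average of the kind being computed above
# ('evaluu' is not shown in this snippet; scikit-image's compare_ssim is used
# here as an assumed stand-in, averaged over all frames after the first).
from skimage.measure import compare_ssim

def mean_ssim_sketch(X_true, X_pred):
    scores = []
    for s in range(X_true.shape[0]):
        for t in range(1, X_true.shape[1]):
            scores.append(compare_ssim(X_true[s, t], X_pred[s, t],
                                       multichannel=True))
    return np.mean(scores)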
Example #4
# Create testing model (to output predictions)
layer_config = train_model.layers[1].get_config()
layer_config['output_mode'] = 'prediction'
dim_ordering = layer_config['dim_ordering']
test_prednet = PredNet(weights=train_model.layers[1].get_weights(),
                       **layer_config)
input_shape = list(train_model.layers[0].batch_input_shape[1:])
input_shape[0] = nt
inputs = Input(shape=tuple(input_shape))
predictions = test_prednet(inputs)
test_model = Model(input=inputs, output=predictions)

test_generator = SequenceGenerator(test_file,
                                   test_sources,
                                   nt,
                                   sequence_start_mode='unique',
                                   dim_ordering=dim_ordering)
X_test = test_generator.create_all()
X_hat = test_model.predict(X_test, batch_size)
if dim_ordering == 'th':
    X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
    X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))

# Compare MSE of PredNet predictions vs. using last frame.  Write results to prediction_scores.txt
mse_model = np.mean(
    (X_test[:, 1:] -
     X_hat[:, 1:])**2)  # look at all timesteps except the first
mse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:])**2)
if not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)
f = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')
Example #5
def main():
    client = storage.Client()
    bucket = client.bucket('meniscus_cloud_data_1000')

    val_recordings = [('clouds', 'validation')]
    test_recordings = [('clouds', 'testing')]
    categories = 'clouds'

    desired_im_sz = (1000, 1000)

    #### Training data #######

    training_dataset = []
    for blob in bucket.list_blobs(prefix='kitti_data/clouds/training'):
        training_dataset.append(blob.name)

    training_images = []
    for i in range(len(training_dataset)):
        name = 'kitti_data/clouds/training/' + training_dataset[i]
        for blob in bucket.list_blobs(prefix=name):
            #training_images += name + '/' + blob.name
            training_images.append(blob.name)

    X = np.zeros((len(training_images), ) + desired_im_sz + (3, ), np.uint8)

    for i, filename in enumerate(training_images):
        blob = bucket.blob(filename)
        local_name = filename.replace('/', '_')
        with open(local_name, "wb") as file_obj:
            blob.download_to_file(file_obj)
        X[i] = scipy.misc.imread(local_name)
    train_sources = training_images
    train_file = X

    #### Validation data #######
    validation_dataset = []
    for blob in bucket.list_blobs(prefix='kitti_data/clouds/validation'):
        validation_dataset.append(blob.name)

    validation_images = []
    for i in range(len(validation_dataset)):
        name = 'kitti_data/clouds/validation/' + validation_dataset[i]
        for blob in bucket.list_blobs(prefix=name):
            #validation_images += name + '/' + blob.name
            validation_images.append(blob.name)

    X = np.zeros((len(validation_images), ) + desired_im_sz + (3, ), np.uint8)

    for i, filename in enumerate(validation_images):
        blob = bucket.blob(filename)
        local_name = filename.replace('/', '_')
        with open(local_name, "wb") as file_obj:
            blob.download_to_file(file_obj)
        X[i] = scipy.misc.imread(local_name)

    val_sources = validation_images
    val_file = X

    save_model = True  # if weights will be saved
    weights_file = WEIGHTS_DIR + 'prednet_kitti_weights.hdf5'  # where weights will be saved
    json_file = WEIGHTS_DIR + 'prednet_kitti_model.json'
    saved_models = WEIGHTS_DIR + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'

    #Training parameters
    nb_epoch = 50
    samples_per_epoch = 28

    batch_size = 8
    N_seq_val = 4  # number of sequences to use for validation
    # number of timesteps used for sequences in training
    nt = 4

    lr = 0.001  # learning rate
    up_lr = 0.0001  # learning rate is updated to this new value
    up_lr_ep = 40  # epoch at which the learning rate is updated

    # Model parameters
    #n_channels, im_height, im_width = (3, 128, 160)
    n_channels, im_height, im_width = (3, 1000, 1000)
    input_shape = (
        n_channels, im_height,
        im_width) if K.image_data_format() == 'channels_first' else (
            im_height, im_width, n_channels)
    stack_sizes = (n_channels, 48, 96, 192)
    # LSTM stack sizes
    R_stack_sizes = stack_sizes
    # convolutional filter sizes for the target (A) units
    A_filt_sizes = (3, 3, 3)
    # convolutional filter sizes for the prediction (Ahat) units
    Ahat_filt_sizes = (3, 3, 3, 3)
    # recurrent convolution filter sizes
    R_filt_sizes = (3, 3, 3, 3)
    # weighting for each layer in final loss; "L_0" model:  [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
    layer_loss_weights = np.array([1., 0., 0., 0.])
    layer_loss_weights = np.expand_dims(layer_loss_weights, 1)

    # equally weight all timesteps except the first
    time_loss_weights = 1. / (nt - 1) * np.ones((nt, 1))
    time_loss_weights[0] = 0
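    # e.g. for nt = 4: time_loss_weights = [[0.], [1/3], [1/3], [1/3]], so the
    # first frame (which cannot be predicted from context) contributes no loss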

    #-----------------------------------------Architecture--------------------------------------------------#
    prednet = PredNet(stack_sizes,
                      R_stack_sizes,
                      A_filt_sizes,
                      Ahat_filt_sizes,
                      R_filt_sizes,
                      output_mode='error',
                      return_sequences=True)

    #-----------------------------------------Layers--------------------------------------------------------#
    # initialize the input tensor
    inputs = Input(shape=(nt, ) + input_shape)
    # errors will be (batch_size, nt, nb_layers)
    errors = prednet(inputs)
    # calculate weighted error by layer; this Dense layer is frozen and simply
    # applies the fixed layer_loss_weights
    errors_by_time = TimeDistributed(
        Dense(1, trainable=False),
        weights=[layer_loss_weights, np.zeros(1)],
        trainable=False)(errors)
    # will be (batch_size, nt)
    errors_by_time = Flatten()(errors_by_time)
    # weight errors by time; again a frozen, fully connected Dense layer
    # applying the fixed time_loss_weights
    final_errors = Dense(1,
                         weights=[time_loss_weights,
                                  np.zeros(1)],
                         trainable=False)(errors_by_time)
    #-------------------------------------------------------------------------------------------------------#

    #----------------------------------Create model---------------------------------------------------------#
    model = Model(inputs=inputs, outputs=final_errors)
    #model = load_model(weights_file, custom_objects={'prednet': PredNet})
    model.compile(loss='mean_absolute_error', optimizer='adam')
    #model.load_weights(weights_file)
    #-------------------------------------------------------------------------------------------------------#

    #-------------------------------------Data Preprocessing------------------------------------------------#
    train_generator = SequenceGenerator(train_file,
                                        train_sources,
                                        nt,
                                        batch_size=batch_size,
                                        shuffle=True)
    val_generator = SequenceGenerator(val_file,
                                      val_sources,
                                      nt,
                                      batch_size=batch_size,
                                      N_seq=N_seq_val)
    #-------------------------------------------------------------------------------------------------------#

    #-------------------------------------Callback functions for training--------------------------------------#
    # start with learning rate `lr` and drop to `up_lr` after `up_lr_ep` epochs
    lr_schedule = lambda epoch: lr if epoch < up_lr_ep else up_lr
    callbacks = [LearningRateScheduler(lr_schedule)]

    # save checkpoints of the best model
    # if save_model:
    #     if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
    #     callbacks.append(ModelCheckpoint(filepath=weights_file, monitor='val_loss', save_best_only=True))
    callbacks.append(
        ModelCheckpoint(filepath=weights_file,
                        monitor='val_loss',
                        save_best_only=True))

    # TensorBoard for visualization
    tb = TensorBoard(log_dir=GRAPH_DIR,
                     batch_size=batch_size,
                     histogram_freq=2,
                     write_graph=True,
                     write_images=True)
    tb.set_model(model)
    callbacks.append(tb)

    checkPoints = ModelCheckpoint(saved_models,
                                  monitor='val_loss',
                                  verbose=0,
                                  save_best_only=True,
                                  save_weights_only=False,
                                  mode='auto',
                                  period=1)

    callbacks.append(checkPoints)

    #earlyStops = EarlyStopping(monitor='val_loss', min_delta=0,
    #					patience=0, verbose=0, mode='auto')

    #callbacks.append(earlyStops)
    #--------------------------------------------------------------------------------------------------------------#

    #-------------------------------------Training-----------------------------------------------------------------#
    history = model.fit_generator(
        train_generator,
        samples_per_epoch / batch_size,
        nb_epoch,
        callbacks=callbacks,
        validation_data=val_generator,
        validation_steps=N_seq_val / batch_size)

    # save the model architecture as JSON
    # if save_model:
    #     json_string = model.to_json()
    #     with open(json_file, "w") as f:
    #         f.write(json_string)
    json_string = model.to_json()
    with file_io.FileIO(json_file, mode='w') as output_f:
        output_f.write(json_string)
Example #6
print("%\n%\n%\n%\n%\n%\n%\n%")
print("\n======== Confirming PredNet class ========\n")
print("prednet_static:", PredNet_static)
print("\nprednet_dynamic:", PredNet_dynamic)
print("%\n%\n%\n%\n%\n%\n%\n%")

# Replicate the model on G GPUs
parallel_model = multi_gpu_model(model, gpus=G)
parallel_model.compile(loss=weighted_loss,
                       optimizer="adam",
                       metrics=[full_loss, dynamic_loss])

train_generator = SequenceGenerator(train_path,
                                    train_source_path,
                                    num_tsteps,
                                    batch_size=batch_size,
                                    shuffle=True,
                                    output_mode="prediction")
val_generator = SequenceGenerator(val_path,
                                  val_source_path,
                                  num_tsteps,
                                  batch_size=batch_size,
                                  N_seq=num_seq_val,
                                  output_mode="prediction")

print("Shapes: ", train_generator.X.shape, val_generator.X.shape)
print("train generator", np.amax(train_generator.X),
      np.amin(train_generator.X))

lr_schedule = lambda epoch: 0.00001 if epoch < 75 else 0.0001  # start with lr of 0.00001 and then raise to 0.0001 after 75 epochs
f.close()
train_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
train_model.load_weights(weights_file)

# Create testing model (to output predictions)
layer_config = train_model.layers[1].get_config()
layer_config['output_mode'] = 'prediction'
dim_ordering = layer_config['dim_ordering']
test_prednet = PredNet(weights=train_model.layers[1].get_weights(), **layer_config)
input_shape = list(train_model.layers[0].batch_input_shape[1:])
input_shape[0] = nt
inputs = Input(shape=tuple(input_shape))
predictions = test_prednet(inputs)
test_model = Model(input=inputs, output=predictions)

test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', dim_ordering=dim_ordering)
X_test = test_generator.create_all()
X_hat = test_model.predict(X_test, batch_size)
if dim_ordering == 'th':
    X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
    X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))

# Compare MSE of PredNet predictions vs. using last frame.  Write results to prediction_scores.txt
mse_model = np.mean( (X_test[:, 1:] - X_hat[:, 1:])**2 )  # look at all timesteps except the first
mse_prev = np.mean( (X_test[:, :-1] - X_test[:, 1:])**2 )
if not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)
f = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')
f.write("Model MSE: %f\n" % mse_model)
f.write("Previous Frame MSE: %f" % mse_prev)
f.close()
Example #8
layer_config = train_model.layers[1].get_config()
layer_config['output_mode'] = 'prediction'
data_format = (layer_config['data_format'] if 'data_format' in layer_config
               else layer_config['dim_ordering'])
test_prednet = PredNet(weights=train_model.layers[1].get_weights(),
                       **layer_config)
input_shape = list(train_model.layers[0].batch_input_shape[1:])
input_shape[0] = nt_1stp
inputs = Input(shape=tuple(input_shape))
predictions = test_prednet(inputs)
test_model = Model(inputs=inputs, outputs=predictions)

test_generator = SequenceGenerator(test_file,
                                   test_sources,
                                   nt,
                                   sequence_start_mode='unique',
                                   data_format=data_format,
                                   batch_size=batch_size)


def predict_multistep(nt, nt_1stp, X_test, test_model):
    '''
    Multi-step prediction using PredNet model

    '''
    X_hat = np.zeros(X_test.shape, dtype=np.float32)
    X_tmp = X_test.copy()
    ntpred = nt - nt_1stp
    for n in range(ntpred):
        n1 = n
        n2 = nt_1stp + n
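
# The loop above is truncated in this snippet. A hedged sketch of one way such
# a multi-step predictor could work: slide an nt_1stp-frame window forward and
# feed each newly predicted frame back in as input for the next step. Assumes
# test_model returns sequences shaped (batch, nt_1stp, H, W, C).
def predict_multistep_sketch(nt, nt_1stp, X_test, test_model, batch_size=1):
    X_hat = np.zeros(X_test.shape, dtype=np.float32)
    X_tmp = X_test.copy()
    for n in range(nt - nt_1stp):
        window = X_tmp[:, n:n + nt_1stp]               # current input window
        pred = test_model.predict(window, batch_size)  # one-step predictions
        X_hat[:, nt_1stp + n] = pred[:, -1]            # keep the newest frame
        X_tmp[:, nt_1stp + n] = pred[:, -1]            # feed it back in
    return X_hat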
Example #9
batch_size = 1
nt = 24
nt_1stp = 12  #time dimension for one-step prediction

# scale factor for converting [0-1] range data to [0-201.0] mm/h
scale_factor = 201.0

test_file = os.path.join(DATA_DIR, 'jma_2hr_128_train_2015-2016_data.hkl')
test_sources = os.path.join(DATA_DIR,
                            'jma_2hr_128_train_2015-2016_sources.hkl')

# Prep data
test_generator = SequenceGenerator(test_file,
                                   test_sources,
                                   nt,
                                   sequence_start_mode='unique',
                                   data_format='channels_last',
                                   batch_size=batch_size)

# First load all the data, then select by index
X_test = test_generator.create_all()
X_test = X_test


def print_range_by_value(X_test, value, fwrite):

    f = open(RESULTS_SAVE_DIR + fwrite, 'w')

    for i in range(X_test.shape[0]):
        print('index:', i, ', max value:', np.max(X_test[i, :, :, :, :]))
        Xtmp = np.max(X_test[i, :, :, :, :], axis=(1, 2, 3))
# Assign a weight to the error at each timestep
final_errors = Dense(1,
                     weights=[time_loss_weights,
                              np.zeros(1)],
                     trainable=False)(errors_by_time)  # weight errors by time
# Build the model: input is a (time, H, W, C) sequence, output is final_errors; optimizer is Adam, loss is mean absolute error
model = Model(inputs=inputs, outputs=final_errors)
model.compile(loss='mean_absolute_error', optimizer='adam')
# Resume training from previously saved weights, if available
if os.path.exists(weights_file):
    model.load_weights(weights_file)

# Generators for the training and validation data
train_generator = SequenceGenerator(train_file,
                                    train_sources,
                                    nt,
                                    batch_size=batch_size,
                                    shuffle=True,
                                    sequence_start_mode='unique')
val_generator = SequenceGenerator(val_file,
                                  val_sources,
                                  nt,
                                  batch_size=batch_size,
                                  N_seq=N_seq_val,
                                  sequence_start_mode='unique')

# Start with a learning rate of 0.001 and drop to 0.0001 after 75 epochs
lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001
callbacks = [LearningRateScheduler(lr_schedule)]
# If the model is to be saved, add the checkpoint callback
if save_model:
    if not os.path.exists(WEIGHTS_DIR):
Example #11
def main(verbose=False):

    save_model = True  # if weights will be saved
    weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5')  # where weights will be saved
    json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')

    # Data files
    train_file = os.path.join(DATA_DIR, 'X_train.hkl')
    train_sources = os.path.join(DATA_DIR, 'sources_train.hkl')
    val_file = os.path.join(DATA_DIR, 'X_val.hkl')
    val_sources = os.path.join(DATA_DIR, 'sources_val.hkl')

    # Training parameters
    nb_epoch = 75
    batch_size = 4
    samples_per_epoch = 250
    N_seq_val = 100  # number of sequences to use for validation

    # Model parameters
    n_channels, im_height, im_width = (3, 128, 160)
    input_shape = (n_channels, im_height, im_width) if K.image_data_format() == 'channels_first' else (im_height, im_width, n_channels)
    stack_sizes = (n_channels, 48, 96, 192)
    R_stack_sizes = stack_sizes
    A_filt_sizes = (3, 3, 3)
    Ahat_filt_sizes = (3, 3, 3, 3)
    R_filt_sizes = (3, 3, 3, 3)
    layer_loss_weights = np.array([1., 0., 0., 0.])  # weighting for each layer in final loss; "L_0" model:  [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
    layer_loss_weights = np.expand_dims(layer_loss_weights, 1)
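    # layer_loss_weights now has shape (4, 1): [[1.], [0.], [0.], [0.]], i.e.
    # only the lowest (pixel-level) error layer contributes ("L_0" model)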
    nt = 10  # number of timesteps used for sequences in training
    time_loss_weights = 1./ (nt - 1) * np.ones((nt,1))  # equally weight all timesteps except the first
    time_loss_weights[0] = 0


    prednet = PredNet(stack_sizes, R_stack_sizes,
                    A_filt_sizes, Ahat_filt_sizes, R_filt_sizes,
                    output_mode='error', return_sequences=True)

    inputs = Input(shape=(nt,) + input_shape)

    # The output will have shape (batch_size, nt, nb_layers). The outputs correspond
    # to the errors at each time step and layer.
    errors = prednet(inputs)

    # This merely computes a weighted sum of the errors layer by layer throughout time.
    # The output has shape (batch_size, nt, 1).
    errors_by_time = TimeDistributed(
        Dense(1, trainable=False), 
        weights=[layer_loss_weights, np.zeros(1)], 
        trainable=False)(errors) 

    # Will have shape (batch_size, nt)
    errors_by_time = Flatten()(errors_by_time)

    # The output of this final layer is the weighted sum over time of the weighted
    # sums of the errors layer-by-layer, which is the final L_train function from
    # the original paper.
    final_errors = Dense(
        1, 
        weights=[time_loss_weights, np.zeros(1)], 
        trainable=False)(errors_by_time)

    model = Model(inputs=inputs, outputs=final_errors)
    model.compile(loss='mean_absolute_error', optimizer='adam')

    if verbose:
        model.summary()

        # dense_2 = model.layers[-1]
        # from pprint import pprint
        # pprint(dir(dense_2))

        # print("_initial_weights: ", dense_2._initial_weights)
        # print("_losses: ", dense_2._losses)
        # print("_per_input_losses: ", dense_2._per_input_losses)
        # print("_per_input_updates: ", dense_2._per_input_updates)
        # print("_trainable_weights: ", dense_2._trainable_weights)
        # print("_updates: ", dense_2._updates)
        # print("bias:", dense_2.bias)
        # print("count_params: ", dense_2.count_params())
        # print("config: ", dense_2.get_config())
        # print("weights: ", dense_2.get_weights())
        # print("losses: ", dense_2.losses)
        # print("kernel: ", dense_2.kernel.name)
        # print("trainable_weights: ", dense_2.trainable_weights)

        # print("This is going to be interesting...")

        # model._make_train_function()
        # training_function = model.train_function

        # print("Training Function: ", training_function)
        # pprint(dir(training_function))

        # print("_callable_fn: ", training_function._callable_fn)
        # print("_feed_arrays: ", training_function._feed_arrays)
        # print("_feed_symbols: ", training_function._feed_symbols)
        # print("feed_dict: ", training_function.feed_dict)
        # print("fetches: ", training_function.fetches)
        # print("inputs: ", training_function.inputs)
        # print("outputs: ", training_function.outputs)
        # print("name: ", training_function.name)
        # print("session_kwargs: ", training_function.session_kwargs)
        # print("updates_op: ", training_function.updates_op)

    train_generator = SequenceGenerator(train_file, train_sources, nt, batch_size=batch_size, shuffle=True)
    val_generator = SequenceGenerator(val_file, val_sources, nt, batch_size=batch_size, N_seq=N_seq_val)

    # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
    lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001
    callbacks = [LearningRateScheduler(lr_schedule)]
    if save_model:
        if not os.path.exists(WEIGHTS_DIR): 
            os.mkdir(WEIGHTS_DIR)
        callbacks.append(ModelCheckpoint(filepath=weights_file, monitor='val_loss', save_best_only=True))

    history = model.fit_generator(train_generator, samples_per_epoch / batch_size, nb_epoch, callbacks=callbacks,
                    validation_data=val_generator, validation_steps=N_seq_val / batch_size)

    if save_model:
        json_string = model.to_json()
        with open(json_file, "w") as f:
            f.write(json_string)
Example #12
def execute_test():
    print "Preparing to execute the test..."
    # Load trained model
    f = open(json_file, 'r')
    json_string = f.read()
    f.close()
    train_model = model_from_json(json_string,
                                  custom_objects={'PredNet': PredNet})
    train_model.load_weights(weights_file)

    # Create testing model (to output predictions)
    layer_config = train_model.layers[1].get_config()
    layer_config['output_mode'] = 'prediction'  #'prediction'
    layer_config['extrap_start_time'] = extrap
    data_format = (layer_config['data_format'] if 'data_format' in layer_config
                   else layer_config['dim_ordering'])
    test_prednet = PredNet(weights=train_model.layers[1].get_weights(),
                           **layer_config)
    input_shape = list(train_model.layers[0].batch_input_shape[1:])
    input_shape[0] = nt
    inputs = Input(shape=tuple(input_shape))
    predictions = test_prednet(inputs)
    test_model = Model(inputs=inputs, outputs=predictions)

    test_generator = SequenceGenerator(test_file,
                                       test_sources,
                                       nt,
                                       sequence_start_mode='unique',
                                       data_format=data_format)  # orig: unique
    X_test = test_generator.create_all()
    X_hat = test_model.predict(X_test, batch_size)
    if data_format == 'channels_first':
        X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
        X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))

    # Compare MSE of PredNet predictions vs. using last frame.  Write results to prediction_scores.txt
    mse_model = np.mean(
        (X_test[:, 1:] -
         X_hat[:, 1:])**2)  # look at all timesteps except the first
    mse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:])**2)
    if not os.path.exists(RESULTS_DIR): os.mkdir(RESULTS_DIR)
    f = open(os.path.join(RESULTS_DIR, 'prediction_scores.txt'), 'w')
    f.write("Model MSE: %f\n" % mse_model)
    f.write("Previous Frame MSE: %f" % mse_prev)
    f.close()

    # Plot some predictions
    aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]
    plt.figure(figsize=(nt, 2 * aspect_ratio))
    gs = gridspec.GridSpec(2, nt)
    gs.update(wspace=0., hspace=0.)
    plot_save_dir = os.path.join(RESULTS_DIR, 'prediction_plots/')
    if not os.path.exists(plot_save_dir): os.mkdir(plot_save_dir)

    # Output the sequence of all the predicted images
    for test in range(numtests):
        testdir = os.path.join("single/", testdir_name)
        testdir = os.path.join(plot_save_dir, testdir)
        if not os.path.exists(testdir): os.makedirs(testdir)
        print "///////// NT: " + str(nt)
        for t in range(nt):
            imsave(testdir + "/pred-%02d.jpg" % (t, ), X_hat[test, t])
            imsave(testdir + "/orig-%02d.jpg" % (t, ), X_test[test, t])
    print "Test data saved in " + testdir
model.summary()
print("%\n%\n%\n%\n%\n%\n%\n%")
print("\n======== Confirming PredNet class ========\n")
print("prednet_static:", PredNet_static)
print("\nprednet_dynamic:", PredNet_dynamic)
print("%\n%\n%\n%\n%\n%\n%\n%")

# Replicate the model on G GPUs
parallel_model = multi_gpu_model(model, gpus=G)
parallel_model.compile(loss=weighted_loss,
                       optimizer="adam",
                       metrics=[err_loss_static, err_loss_dynamic])

train_generator = SequenceGenerator(train_path,
                                    train_source_path,
                                    num_tsteps,
                                    batch_size=batch_size,
                                    shuffle=True)
val_generator = SequenceGenerator(val_path,
                                  val_source_path,
                                  num_tsteps,
                                  batch_size=batch_size,
                                  N_seq=num_seq_val)

print("Shapes: ", train_generator.X.shape, val_generator.X.shape)
print("train generator", np.amax(train_generator.X),
      np.amin(train_generator.X))

lr_schedule = lambda epoch: 0.0001 if epoch < 75 else 0.0001  # both branches are 0.0001, i.e. a constant learning rate
lr_callback = LearningRateScheduler(lr_schedule)
Example #14
inputs = Input(shape=(nt, ) + input_shape)
errors = prednet(inputs)  # errors will be (batch_size, nt, nb_layers)
errors_by_time = TimeDistributed(
    Dense(1, trainable=False),
    weights=[layer_loss_weights, np.zeros(1)],
    trainable=False)(errors)  # calculate weighted error by layer
errors_by_time = Flatten()(errors_by_time)  # will be (batch_size, nt)
final_errors = Dense(1,
                     weights=[time_loss_weights,
                              np.zeros(1)],
                     trainable=False)(errors_by_time)  # weight errors by time
model = Model(inputs=inputs, outputs=final_errors)
model.compile(loss='mean_absolute_error', optimizer='adam')

train_generator = SequenceGenerator(train_file,
                                    nt,
                                    batch_size=batch_size,
                                    shuffle=True)
val_generator = SequenceGenerator(val_file,
                                  nt,
                                  batch_size=batch_size,
                                  N_seq=N_seq_val)

lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001  # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
callbacks = [LearningRateScheduler(lr_schedule)]

history = model.fit_generator(train_generator,
                              samples_per_epoch / batch_size,
                              nb_epoch,
                              callbacks=callbacks,
                              validation_data=val_generator,
                              validation_steps=N_seq_val / batch_size)
Example #15
def run_training(subdir, hyperparams):
    '''
    Run the training of PredNet with the given dataset and choice of
    hyperparameters, log the training metrics and save the trained model
    for running inference.
    '''
    save_model = True  # if weights will be saved
    if not os.path.exists(os.path.join(WEIGHTS_DIR, subdir)):
        os.mkdir(os.path.join(WEIGHTS_DIR, subdir))

    weights_file = os.path.join(
        WEIGHTS_DIR, subdir,
        'prednet_ucsd_weights.hdf5')  # where weights will be saved
    json_file = os.path.join(WEIGHTS_DIR, subdir, 'prednet_ucsd_model.json')

    # Data files
    train_file = os.path.join(DATA_DIR, subdir, 'X_Train.hkl')
    train_sources = os.path.join(DATA_DIR, subdir, 'sources_Train.hkl')
    val_file = os.path.join(DATA_DIR, subdir, 'X_Val.hkl')
    val_sources = os.path.join(DATA_DIR, subdir, 'sources_Val.hkl')

    if not os.path.exists(LOG_DIR):
        os.mkdir(LOG_DIR)

    if not os.path.exists(os.path.join(LOG_DIR, subdir)):
        os.mkdir(os.path.join(LOG_DIR, subdir))

    now = datetime.now
    folder_now = now().strftime("%Y_%m_%d-%H%M")

    if not os.path.exists(os.path.join(LOG_DIR, subdir, folder_now)):
        os.mkdir(os.path.join(LOG_DIR, subdir, folder_now))

    training_log = os.path.join(LOG_DIR, subdir, folder_now, 'log.csv')
    model_weights = os.path.join(LOG_DIR, subdir, folder_now, 'weights.h5')
    hyperparam = os.path.join(LOG_DIR, subdir, folder_now, 'hyperparam.json')
    # Training parameters
    nb_epoch = hyperparams['nb_epoch']
    batch_size = hyperparams['batch_size']
    samples_per_epoch = hyperparams['samples_per_epoch']
    N_seq_val = hyperparams[
        'N_seq_val']  # number of sequences to use for validation
    old_learning_rate = hyperparams['old_learning_rate']
    new_learning_rate = hyperparams['new_learning_rate']
    epoch_learning_rate_number = hyperparams['epoch_learning_rate_number']

    # Model parameters
    n_channels, im_height, im_width = (1, 128, 160)
    input_shape = (
        n_channels, im_height,
        im_width) if K.image_data_format() == 'channels_first' else (
            im_height, im_width, n_channels)
    sz1 = hyperparams['sz1']
    sz2 = hyperparams['sz2']
    sz3 = hyperparams['sz3']
    stack_sizes = (n_channels, sz1, sz2, sz3)
    R_stack_sizes = stack_sizes
    fz = hyperparams['fz']
    A_filt_sizes = (fz, fz, 1)
    Ahat_filt_sizes = (fz, fz, fz, 1)
    R_filt_sizes = (fz, fz, fz, 1)
    layer_loss_weights = np.array(
        [1., 0., 0., 0.]
    )  # weighting for each layer in final loss; "L_0" model:  [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
    layer_loss_weights = np.expand_dims(layer_loss_weights, 1)
    nt = 10  #5  # number of timesteps used for sequences in training
    time_loss_weights = 1. / (nt - 1) * np.ones(
        (nt, 1))  # equally weight all timesteps except the first
    time_loss_weights[0] = 0

    hyperparam_dict = {
        'epoch': nb_epoch,
        'batch_size': batch_size,
        'samples_per_epoch': samples_per_epoch,
        'N_seq_val': N_seq_val,
        'stack_sz': sz1,
        'stack_sz2': sz2,
        'stack_sz3': sz3,
        'A_filt_sz': fz,
        'old_learning_rate': old_learning_rate,
        'new_learning_rate': new_learning_rate,
        'epoch_learning_rate': epoch_learning_rate_number
    }
    with open(hyperparam, 'w') as f:
        json.dump(hyperparam_dict, f)

    prednet = PredNet(stack_sizes,
                      R_stack_sizes,
                      A_filt_sizes,
                      Ahat_filt_sizes,
                      R_filt_sizes,
                      output_mode='error',
                      return_sequences=True)

    inputs = Input(shape=(nt, ) + input_shape)
    errors = prednet(inputs)  # errors will be (batch_size, nt, nb_layers)
    errors = Dropout(0.2)(errors)
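    # (the Dropout above randomly zeroes part of the per-layer error signal
    # during training, acting as a light regularizer before the loss weighting)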
    errors_by_time = TimeDistributed(
        Dense(1, trainable=False),
        weights=[layer_loss_weights, np.zeros(1)],
        trainable=False)(errors)  # calculate weighted error by layer
    errors_by_time = Flatten()(errors_by_time)  # will be (batch_size, nt)
    final_errors = Dense(1,
                         weights=[time_loss_weights,
                                  np.zeros(1)],
                         trainable=False)(
                             errors_by_time)  # weight errors by time
    model = Model(inputs=inputs, outputs=final_errors)
    model.compile(loss='mean_absolute_error', optimizer='adam')

    train_generator = SequenceGenerator(train_file,
                                        train_sources,
                                        nt,
                                        batch_size=batch_size,
                                        shuffle=True)
    val_generator = SequenceGenerator(val_file,
                                      val_sources,
                                      nt,
                                      batch_size=batch_size,
                                      N_seq=N_seq_val)

    lr_schedule = lambda epoch: old_learning_rate if epoch < epoch_learning_rate_number else new_learning_rate  # drop from old_learning_rate to new_learning_rate after epoch_learning_rate_number epochs
    callbacks = [LearningRateScheduler(lr_schedule)]
    if save_model:
        if not os.path.exists(WEIGHTS_DIR):
            os.mkdir(WEIGHTS_DIR)
        callbacks.append(
            ModelCheckpoint(filepath=weights_file,
                            monitor='val_loss',
                            save_best_only=True))
        callbacks.append(CSVLogger(training_log))

    history = model.fit_generator(train_generator,
                                  samples_per_epoch / batch_size,
                                  nb_epoch,
                                  callbacks=callbacks,
                                  validation_data=val_generator,
                                  validation_steps=N_seq_val / batch_size)
    model.save(model_weights)

    if save_model:
        json_string = model.to_json()
        with open(json_file, "w") as f:
            f.write(json_string)
def run_evaluation(subdir_model, subdir_test, n_plot=20, batch_size=10, nt=10):
    '''
    Run the evaluation of the trained network over the selected test dataset,
    calculate metrics such as MSE, SD and PSNR, and generate and save the
    results.
    '''
    weights_file = os.path.join(WEIGHTS_DIR, subdir_model,
                                'prednet_ucsd_weights.hdf5')
    json_file = os.path.join(WEIGHTS_DIR, subdir_model,
                             'prednet_ucsd_model.json')
    test_file = os.path.join(DATA_DIR, subdir_test, 'X_Test.hkl')
    test_sources = os.path.join(DATA_DIR, subdir_test, 'sources_Test.hkl')

    # Load trained model
    f = open(json_file, 'r')
    json_string = f.read()
    f.close()
    train_model = model_from_json(json_string,
                                  custom_objects={'PredNet': PredNet})
    train_model.load_weights(weights_file)

    # Create testing model (to output predictions)
    layer_config = train_model.layers[1].get_config()
    layer_config['output_mode'] = 'prediction'
    data_format = (layer_config['data_format'] if 'data_format' in layer_config
                   else layer_config['dim_ordering'])
    test_prednet = PredNet(weights=train_model.layers[1].get_weights(),
                           **layer_config)
    input_shape = list(train_model.layers[0].batch_input_shape[1:])
    input_shape[0] = nt
    inputs = Input(shape=tuple(input_shape))
    predictions = test_prednet(inputs)
    test_model = Model(inputs=inputs, outputs=predictions)

    test_generator = SequenceGenerator(test_file,
                                       test_sources,
                                       nt,
                                       sequence_start_mode='unique',
                                       data_format=data_format)
    X_test = test_generator.create_all()
    X_hat = test_model.predict(X_test, batch_size)
    if data_format == 'channels_first':
        X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
        X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))

    X_hat = np.squeeze(X_hat, axis=-1)
    X_test = np.squeeze(X_test, axis=-1)
    #
    Xhat_filename = 'Xhat.npy'
    Xtest_filename = 'Xtest.npy'

    mse_videos_filename = 'mse_videos.json'
    mse_frame_filename = 'mse_frame.json'
    mse_prev_frame_filename = 'mse_prev_frame.json'
    mse_err_prev_frame_filename = 'mse_err_prev_frame.json'

    overall_mse_filename = 'predictions.txt'

    mse_videos_sd_filename = 'mse_videos_sd.json'
    mse_frame_sd_filename = 'mse_frame_sd.json'
    mse_prev_frame_sd_filename = 'mse_prev_frame_sd.json'
    mse_err_prev_frame_sd_filename = 'mse_err_prev_frame_sd.json'

    psnr_frame_filename = 'psnr_frame.json'
    psnr_prev_frame_filename = 'psnr_prev_frame.json'

    pred_save_dir = 'prediction_plots'
    err_save_dir = 'error_plots'
    err_prev_save_dir = 'prev_frame_plots'
    err_model_prev_save_dir = 'model_prev_frame_plots'
    sd_save_dir = 'sd_plots'
    sd_prev_save_dir = 'sd_prev_frame_plots'
    psnr_save_dir = 'psnr_plots'

    now = datetime.now
    folder_now = now().strftime("%Y_%m_%d-%H%M")

    if not os.path.exists(RESULTS_SAVE_DIR):
        os.mkdir(RESULTS_SAVE_DIR)

    if not os.path.exists(os.path.join(RESULTS_SAVE_DIR, subdir_test)):
        os.mkdir(os.path.join(RESULTS_SAVE_DIR, subdir_test))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now)):
        os.mkdir(os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         pred_save_dir)):
        os.mkdir(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         pred_save_dir))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         err_save_dir)):
        os.mkdir(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         err_save_dir))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         err_prev_save_dir)):
        os.mkdir(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         err_prev_save_dir))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         err_model_prev_save_dir)):
        os.mkdir(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         err_model_prev_save_dir))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         psnr_save_dir)):
        os.mkdir(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         psnr_save_dir))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         sd_save_dir)):
        os.mkdir(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         sd_save_dir))

    if not os.path.exists(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         sd_prev_save_dir)):
        os.mkdir(
            os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                         sd_prev_save_dir))

    Xhat_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                             Xhat_filename)
    Xtest_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                              Xtest_filename)

    mse_videos_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                   mse_videos_filename)
    mse_frame_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                  mse_frame_filename)
    mse_prev_frame_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                       folder_now, mse_prev_frame_filename)
    mse_err_prev_frame_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                           folder_now,
                                           mse_err_prev_frame_filename)

    mse_videos_sd_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                      folder_now, mse_videos_sd_filename)
    mse_frame_sd_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                     mse_frame_sd_filename)
    mse_prev_frame_sd_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                          folder_now,
                                          mse_prev_frame_sd_filename)
    mse_err_prev_frame_sd_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                              folder_now,
                                              mse_err_prev_frame_sd_filename)

    psnr_frame_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                   psnr_frame_filename)
    psnr_prev_frame_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                        folder_now, psnr_prev_frame_filename)

    overall_mse_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                    overall_mse_filename)
    pred_save_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                  pred_save_dir)
    err_save_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                 err_save_dir)
    err_prev_save_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                      folder_now, err_prev_save_dir)
    err_model_prev_save_path = os.path.join(RESULTS_SAVE_DIR, subdir_test,
                                            folder_now,
                                            err_model_prev_save_dir)

    sd_save_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                sd_save_dir)
    sd_prev_save_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                     sd_prev_save_dir)
    psnr_save_path = os.path.join(RESULTS_SAVE_DIR, subdir_test, folder_now,
                                  psnr_save_dir)

    if subdir_test == 'total':
        im_list, source_list = get_test_splits(['UCSDped1', 'UCSDped2'])
    else:
        im_list, source_list = get_test_splits([subdir_test])

    if subdir_test == 'total':
        im_list.sort(
            key=lambda x: os.path.basename(os.path.dirname(x)) + '_' + os.path.
            basename(os.path.dirname(os.path.dirname(os.path.dirname(x)))))
    else:
        im_list.sort()
    source_list.sort()

    curr_location = 0
    possible_starts = defaultdict(list)
    while curr_location < len(im_list) - nt + 1:
        if source_list[curr_location] == source_list[curr_location + nt - 1]:
            possible_starts[source_list[curr_location]].append(curr_location)
            curr_location += nt
        else:
            curr_location += 1
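    # possible_starts maps each source video to the start indices of its
    # non-overlapping nt-frame clips, so the metrics below can be grouped per video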

    mse_videos = dict()
    mse_model_frame = defaultdict(list)
    mse_prev_frame = defaultdict(list)
    mse_err_prev_frame = defaultdict(list)

    mse_videos_sd = dict()
    mse_model_frame_sd = defaultdict(list)
    mse_prev_frame_sd = defaultdict(list)
    mse_err_prev_frame_sd = defaultdict(list)

    psnr_model_frame = defaultdict(list)
    psnr_prev_frame = defaultdict(list)

    i = 0
    for k, v in sorted(possible_starts.items()):
        n_mini_clips = len(v)
        mse_model_video = np.mean((X_test[i:i + n_mini_clips, 1:] -
                                   X_hat[i:i + n_mini_clips, 1:])**2).item()
        mse_prev_video = np.mean((X_test[i:i + n_mini_clips, :-1] -
                                  X_test[i:i + n_mini_clips, 1:])**2).item()
        mse_err_prev_video = np.mean(
            (X_hat[i:i + n_mini_clips, 1:-1] -
             X_hat[i:i + n_mini_clips, 2:])**2).item()

        mse_model_video_sd = np.std((X_test[i:i + n_mini_clips, 1:] -
                                     X_hat[i:i + n_mini_clips, 1:])).item()
        mse_prev_video_sd = np.std((X_test[i:i + n_mini_clips, :-1] -
                                    X_test[i:i + n_mini_clips, 1:])).item()
        mse_err_prev_video_sd = np.std((X_hat[i:i + n_mini_clips, 1:-1] -
                                        X_hat[i:i + n_mini_clips, 2:])).item()

        for j in range(n_mini_clips):
            for z in range(1, nt):
                mse_model_frame[k].append(
                    np.mean(
                        (X_test[i + j, z, :] - X_hat[i + j, z, :])**2).item())
                mse_prev_frame[k].append(
                    np.mean((X_test[i + j, z - 1, :] -
                             X_test[i + j, z, :])**2).item())

                mse_model_frame_sd[k].append(
                    np.std((X_test[i + j, z, :] - X_hat[i + j, z, :])).item())
                mse_prev_frame_sd[k].append(
                    np.std((X_test[i + j, z - 1, :] -
                            X_test[i + j, z, :])).item())

                psnr_model_frame[k].append(
                    psnr(np.mean(
                        (X_test[i + j, z, :] - X_hat[i + j, z, :])**2)))
                psnr_prev_frame[k].append(
                    psnr(
                        np.mean((X_test[i + j, z - 1, :] -
                                 X_test[i + j, z, :])**2)))

                if z > 1:
                    mse_err_prev_frame[k].append(
                        np.mean((X_hat[i + j, z - 1, :] -
                                 X_test[i + j, z, :])**2).item())
                    mse_err_prev_frame_sd[k].append(
                        np.std((X_hat[i + j, z - 1, :] -
                                X_test[i + j, z, :])).item())

        mse_videos[k] = (mse_model_video, mse_prev_video, mse_err_prev_video)
        mse_videos_sd[k] = (mse_model_video_sd, mse_prev_video_sd,
                            mse_err_prev_video_sd)
        i += n_mini_clips

    mse_model = np.mean(
        (X_test[:, 1:] -
         X_hat[:, 1:])**2)  # look at all timesteps except the first
    mse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:])**2)

    mse_model_sd = np.std(
        (X_test[:, 1:] -
         X_hat[:, 1:]))  # look at all timesteps except the first
    mse_prev_sd = np.std((X_test[:, :-1] - X_test[:, 1:]))

    with open(mse_videos_path, 'w') as fp:
        json.dump(mse_videos, fp, sort_keys=True, indent=4)

    with open(mse_frame_path, 'w') as fp:
        json.dump(mse_model_frame, fp, sort_keys=True, indent=4)

    with open(mse_prev_frame_path, 'w') as fp:
        json.dump(mse_prev_frame, fp, sort_keys=True, indent=4)

    with open(mse_err_prev_frame_path, 'w') as fp:
        json.dump(mse_err_prev_frame, fp, sort_keys=True, indent=4)

    with open(mse_videos_sd_path, 'w') as fp:
        json.dump(mse_videos_sd, fp, sort_keys=True, indent=4)

    with open(mse_frame_sd_path, 'w') as fp:
        json.dump(mse_model_frame_sd, fp, sort_keys=True, indent=4)

    with open(mse_prev_frame_sd_path, 'w') as fp:
        json.dump(mse_prev_frame_sd, fp, sort_keys=True, indent=4)

    with open(mse_err_prev_frame_sd_path, 'w') as fp:
        json.dump(mse_err_prev_frame_sd, fp, sort_keys=True, indent=4)

    with open(psnr_frame_path, 'w') as fp:
        json.dump(psnr_model_frame, fp, sort_keys=True, indent=4)

    with open(psnr_prev_frame_path, 'w') as fp:
        json.dump(psnr_prev_frame, fp, sort_keys=True, indent=4)

    #np.save(Xhat_path, X_hat)
    #np.save(Xtest_path, X_test)

    # Compare MSE of PredNet predictions vs. using last frame.  Write results to prediction_scores.txt
    f = open(overall_mse_path, 'w')
    f.write("Model MSE: %f\n" % mse_model)
    f.write("Previous Frame MSE: %f\n" % mse_prev)
    f.write("Model SDE: %f\n" % mse_model_sd)
    f.write("Previous Frame SDE: %f\n" % mse_prev_sd)
    f.close()

    compare_results(pred_save_path, X_test, X_hat, nt, n_plot)
    make_error_plot(mse_model_frame, err_save_path)
    make_error_plot(mse_prev_frame, err_prev_save_path)
    make_error_plot(mse_err_prev_frame, err_model_prev_save_path)
    make_error_plot(mse_model_frame_sd, sd_save_path, 2)
    make_error_plot(mse_prev_frame_sd, sd_prev_save_path, 2)
    make_error_plot(psnr_model_frame, psnr_save_path)
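
# Hedged sketch of the psnr() helper assumed above: it is called with an MSE
# value, so PSNR = 10 * log10(MAX^2 / MSE), with MAX taken as 1.0 on the
# assumption that frames are scaled to [0, 1].
def psnr_sketch(mse, max_val=1.0):
    return 10.0 * np.log10((max_val ** 2) / mse)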