Example #1
def _get_next_timestetp_data(input_data):
    # the one-step shift below assumes samples are stored in temporal order
    assert not cfg.shuffle_data
    input_data_next = np.zeros_like(input_data)
    time_span = input_data.shape[1]
    temp = input_data[:-1, 1:, :]
    # integer division: the original `time_span / 2` yields a float index
    # under Python 3
    temp2 = input_data[1:, time_span // 2, :][:, np.newaxis, :]
    input_data_next[:-1, :] = np.concatenate((temp, temp2), axis=1)
    return input_data_next
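
# A minimal sanity sketch (not from the source): with toy data of shape
# (num_samples, time_span, features), sample i's "next" sequence is its own
# timesteps 1: followed by timestep time_span // 2 of sample i + 1, and the
# last sample is left as zeros. Assumes cfg.shuffle_data is False.
_demo = np.arange(3 * 4 * 2, dtype=float).reshape(3, 4, 2)
_demo_next = _get_next_timestetp_data(_demo)
assert (_demo_next[0, :-1] == _demo[0, 1:]).all()
assert (_demo_next[0, -1] == _demo[1, 4 // 2]).all()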


encoder_input_data_next = _get_next_timestetp_data(
    encoder_input_data[:, :, 0, :, :])
# data_sanity_check(encoder_input_data,decoder_target_data,decoder_input_data)

## ensure the sample count is divisible by the batch size
sample_ind = rand_sample_ind(total_num_samples, num_testing_sample, batch_size)
encoder_input_data = rand_sample(encoder_input_data, sample_ind)
decoder_input_data = rand_sample(decoder_input_data, sample_ind)
decoder_target_data = rand_sample(decoder_target_data, sample_ind)
others_pst_input_data = rand_sample(others_pst_input_data, sample_ind)
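
# For reference, a minimal sketch (hypothetical, not the project's actual
# helpers) of what rand_sample_ind / rand_sample are assumed to do: draw a
# random subset of row indices whose count is a multiple of batch_size,
# then gather those rows (kwargs such as validation_ratio are omitted).


def _rand_sample_ind_sketch(total_num, num_testing, batch_size):
    num_kept = (total_num - num_testing) // batch_size * batch_size
    return np.random.permutation(total_num - num_testing)[:num_kept]


def _rand_sample_sketch(data, sample_ind):
    return data[sample_ind]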

### ====================Training====================
# model = load_model('convLSTM_endec_11_256tanh_epoch12-1.2859.h5')
# model = load_model('convLSTM_wholespan_targetrecons_trj_decodernotanh_epoch10-0.1658.h5')
# tag = 'convLSTM_wholespan_targetrecons_trj_decodernotanh_epoch'
# tag = 'concat_cnn_kernel_1_5_convlstmlatent16_epoch'
tag = 'concat_cnn1_5_16_meanvar_epoch'

model_checkpoint = ModelCheckpoint(tag + '{epoch:02d}-{val_loss:.4f}.h5',
                                   monitor='val_loss',
                                   save_best_only=True)
Example #2
    else:
        others_input_data = _video_db_future_oth.reshape(
            (_video_db_future_oth.shape[0], _video_db_future_oth.shape[1],
             num_user - 1, -1))
    # others_input_data = util.get_gt_target_xyz_oth(_video_db_future_oth)

if cfg.sample_and_refeed or cfg.stateful_across_batch:
    # if using the generate fake batch layer, the dataset size has to
    # be divisible by the batch size
    sample_ind = util.rand_sample_ind(encoder_input_data.shape[0], 0,
                                      batch_size, validation_ratio=0.1)
    if not cfg.shuffle_data:
        sample_ind = sorted(sample_ind)
    encoder_input_data = util.rand_sample(encoder_input_data, sample_ind)
    decoder_input_data = util.rand_sample(decoder_input_data, sample_ind)
    decoder_target_data = util.rand_sample(decoder_target_data, sample_ind)
    others_input_data = util.rand_sample(others_input_data, sample_ind)
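    # quick check (an assumption consistent with the comment above): the
    # kept sample count should now be a multiple of batch_size
    assert encoder_input_data.shape[0] % batch_size == 0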
    # sanity check
    ind = np.random.randint(encoder_input_data.shape[0])
    assert (encoder_input_data[ind, -1, :].sum()
            == decoder_input_data[ind, 0, :].sum())

### ====================Training====================
# tag='AMEonLSTMs_pair_similarity_sep26'
# tag='AMEonLSTMs_tar_also_hidden_pair_similarity_sep26' ##also use hidden states for h_tar, but h_i dim 64, h_t 32
# tag='AMEonLSTMs_tar_also_hidden_pair_similarity_noembedding_sep27'#others use 32 dim LSTM
# tag='AMEonLSTMs_tar_hidden_pair_sim_NLL_sep27'
# tag='AMEonLSTMs_tar_hidden_pair_sim_NLL_MSE_sep28'

def _get_next_timestetp_data(input_data):
    input_data_next = np.zeros_like(input_data)
    time_span = input_data.shape[1]
    temp = input_data[:-1, 1:, :]
    # integer division: `time_span / 2` yields a float index under Python 3
    temp2 = input_data[1:, time_span // 2, :][:, np.newaxis, :]
    input_data_next[:-1, :] = np.concatenate((temp, temp2), axis=1)
    return input_data_next


encoder_input_data_next = _get_next_timestetp_data(encoder_input_data)
decoder_target_data_oth = _get_next_timestetp_data_span(others_pst_input_data)

data_sanity_check(encoder_input_data, decoder_target_data, decoder_input_data)
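
# For reference, a hypothetical sketch of the kind of check
# data_sanity_check is assumed to perform (its real definition lives
# elsewhere in the project): under teacher forcing the decoder input is the
# encoder input shifted by one step, so the last encoder timestep should
# match the first decoder timestep.
def _data_sanity_check_sketch(enc_in, dec_target, dec_in):
    ind = np.random.randint(enc_in.shape[0])
    assert enc_in[ind, -1, :].sum() == dec_in[ind, 0, :].sum()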

## ensure the sample count is divisible by the batch size
sample_ind = util.rand_sample_ind(total_num_samples, num_testing_sample,
                                  batch_size)
encoder_input_data = util.rand_sample(encoder_input_data, sample_ind)
decoder_input_data = util.rand_sample(decoder_input_data, sample_ind)
decoder_target_data = util.rand_sample(decoder_target_data, sample_ind)
others_pst_input_data = util.rand_sample(others_pst_input_data, sample_ind)
encoder_input_data_next = util.rand_sample(encoder_input_data_next, sample_ind)
decoder_target_data_oth = util.rand_sample(decoder_target_data_oth, sample_ind)

### ====================Training====================
# model = load_model('convLSTM_endec_11_256tanh_epoch12-1.2859.h5')
# model = load_model('convLSTM_wholespan_targetrecons_trj_decodernotanh_epoch10-0.1658.h5')
# tag = 'convLSTM_wholespan_targetrecons_trj_decodernotanh_epoch'
# tag = '3_3layerconvLSTM_wholespan_latent32_8_pred_err_concat_input_epoch'
# tag = '3_3layerconvLSTM_wholespan_concat_input_meanvar_epoch'
# tag = 'convLSTM_wholespan_fclstm_meanvarinput_TFor_epoch'
# tag = '2recons1predloss_raw_epoch'
tag = 'fc_seq2seq_attention+convlstm_others_aug18'
Example #4
if cfg.input_mean_var:
    input_data = util.get_gt_target_xyz(_video_db_future_input)
else:
    input_data = _video_db_future_input
input_data = input_data[:, 0, :][:, np.newaxis, :]  # 1-step input
target_data = util.get_gt_target_xyz(_video_db_future)


# if using the generate fake batch layer, the dataset size has to
# be divisible by the batch size
validation_ratio = 0.1
if cfg.sample_and_refeed or cfg.stateful_across_batch:
    sample_ind = util.rand_sample_ind(input_data.shape[0], 0, batch_size,
                                      validation_ratio=validation_ratio)
    if not cfg.shuffle_data:
        sample_ind = sorted(sample_ind)
    input_data = util.rand_sample(input_data, sample_ind)
    target_data = util.rand_sample(target_data, sample_ind)

### ====================Training====================
tag = 'single_LSTM_keras_10steploss_sep25'
model_checkpoint = ModelCheckpoint(tag + '{epoch:02d}-{val_loss:.4f}.h5',
                                   monitor='val_loss',
                                   save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=3, min_lr=1e-6)
stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
                         verbose=0, mode='auto')
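
# Note (an assumption consistent with the divisibility comments above):
# with sample_and_refeed / stateful_across_batch the batch composition must
# stay fixed across epochs, hence shuffle=False in the fit call below, and
# both the training and validation portions should be batch-aligned.
if cfg.sample_and_refeed or cfg.stateful_across_batch:
    assert input_data.shape[0] % batch_size == 0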
model.fit(input_data, target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=validation_ratio,
          shuffle=False,
          initial_epoch=0,
          callbacks=[model_checkpoint, reduce_lr, stopping])
        0, 1, 3, 4, 2)
    decoder_input_data = one_hot_future_input[:-num_testing_sample, 0, :][
        :, np.newaxis, :, :].transpose(0, 1, 3, 4, 2)
    decoder_target_data = one_hot_future[:-num_testing_sample, :, :].transpose(
        0, 1, 3, 4, 2)

if cfg.sample_and_refeed or cfg.stateful_across_batch:
    # if using the generate fake batch layer, the dataset size has to
    # be divisible by the batch size
    sample_ind = rand_sample_ind(total_num_samples, num_testing_sample,
                                 batch_size)
    if not cfg.shuffle_data:
        sample_ind = sorted(sample_ind)
    encoder_input_data = rand_sample(encoder_input_data, sample_ind)
    decoder_input_data = rand_sample(decoder_input_data, sample_ind)
    decoder_target_data = rand_sample(decoder_target_data, sample_ind)
    # sanity check
    ind = np.random.randint(encoder_input_data.shape[0])
    assert (encoder_input_data[ind, -1, :].sum()
            == decoder_input_data[ind, 0, :].sum())

### ====================Training====================
# tag = 'weightedce_onehot_mat_3layertanh_stateful_noshuffle_raw_512-1024july10'
# tag = 'convLSTMtar_seqseq_THU_traintest_split_NLL_Aug7'
# tag = 'convLSTMtar_seqseq_shanghai_traintest_split_Aug9'
# tag = 'convLSTMtar_seqseq_shanghai_traintest_split_predmeanvar_Aug9'
# tag = 'convLSTMtar_seqseq_shanghai_traintest_split_meanvarmeanvar_Aug10'
# tag = 'convLSTMtar_seqseq_dilation_predmeanvar_Aug21'##NOT finished!!!