# encode the raw test log into the feature representation used during training
dt_test = dataset_manager.encode_data_with_label_all_data(test)

# compute normalization divisors either from the training set only
# or from the full event log
if normalize_over == "train":
    dataset_manager.calculate_divisors(dt_train)
elif normalize_over == "all":
    dt_all = dataset_manager.extract_timestamp_features(data)
    dt_all = dataset_manager.extract_duration_features(dt_all)
    dataset_manager.calculate_divisors(dt_all)
else:
    # fail fast: silently continuing would leave the divisors unset
    raise ValueError("unknown normalization mode: %s" % normalize_over)

dt_test = dataset_manager.normalize_data(dt_test)  # scale test features by the divisors computed above
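
# A minimal sketch of what divisor-based normalization typically looks like
# in pipelines of this kind: divide each numeric feature by the maximum
# observed on the reference data (train or all, per normalize_over) so values
# land in [0, 1]. Illustrative only: the real logic lives in dataset_manager,
# and the helper and its arguments below are assumptions, not this repo's API.
def _sketch_normalize(df, divisors):
    """Scale each column listed in `divisors` by its reference maximum."""
    out = df.copy()
    for col, div in divisors.items():
        if div:  # guard against zero divisors
            out[col] = out[col] / div
    return out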

print("Done: %s" % (time.time() - start))

max_len = dataset_manager.get_max_case_length(dt_train)  # longest case (in events) in the training set
# one-hot-encoded activity columns share the "act" prefix
activity_cols = [col for col in dt_train.columns if col.startswith("act")]
n_activities = len(activity_cols)
data_dim = dt_train.shape[1] - 3  # all columns except the three non-feature ones (presumably case id, timestamp, label)
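
# The "act"-prefixed columns counted above are assumed to be a one-hot
# encoding of the activity label, e.g. as pd.get_dummies would produce.
# Sketch under that assumption; encode_data_with_label_all_data is the
# actual encoder here, and the "Activity" column name is hypothetical.
import pandas as pd

def _sketch_one_hot_activities(df, activity_col="Activity"):
    # prefix="act" yields columns act_A, act_B, ... which the
    # startswith("act") filter above would pick up
    return pd.get_dummies(df, columns=[activity_col], prefix="act")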

# rebuild the model with the same parameters it was trained with, then load the trained weights
print('Building model...')
start = time.time()
main_input = Input(shape=(max_len, data_dim), name='main_input')
# build a 2-layer LSTM with one shared layer (Keras 1.x API)
l1 = LSTM(lstmsize,
          input_shape=(max_len, data_dim),  # redundant: the shape already comes from main_input
          consume_less='gpu',
          init='glorot_uniform',
          return_sequences=True,
          dropout_W=dropout)(main_input)  # the shared layer
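
# Note: consume_less, init and dropout_W are Keras 1.x argument names
# (implementation mode, kernel initializer, input dropout). For reference,
# a sketch of the equivalent layer under Keras 2, an assumption about the
# modern API rather than part of this repo:
#
#   l1 = LSTM(lstmsize,
#             kernel_initializer='glorot_uniform',
#             return_sequences=True,
#             dropout=dropout)(main_input)
#
# (consume_less was superseded by the `implementation` argument and later
# dropped; input_shape is unnecessary when the layer is applied to an
# Input tensor.)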