all_test_x,
    pd.read_csv('../predict_location/pl_L1_ET_vA1_submission.csv').values[:,
                                                                          3:]),
                            axis=1)
# Bundle the raw features, targets and sequence ids for the model code below.
dataset = dict(
    train_x=all_train_x,
    train_y=all_train_y,
    train_seq=train_seq,
    test_x=all_test_x,
)

# Add past data
# Rebuild the dataset with temporal context: each row gains copies of its
# t-1, t-2 (past) and t+1 (future) neighbours within the same sequence.
# The trailing 0 is presumably the fill value for rows with no neighbour —
# TODO confirm against fd.get_past_data / fd.get_future_data.
train_with_context = np.c_[
    all_train_x,
    fd.get_past_data(all_train_x, train_seq, 1, 0),
    fd.get_past_data(all_train_x, train_seq, 2, 0),
    fd.get_future_data(all_train_x, train_seq, 1, 0),
]
test_with_context = np.c_[
    all_test_x,
    fd.get_past_data(all_test_x, rows[:, 0], 1, 0),
    fd.get_past_data(all_test_x, rows[:, 0], 2, 0),
    fd.get_future_data(all_test_x, rows[:, 0], 1, 0),
]
dataset = dict(
    train_x=train_with_context,
    train_y=all_train_y,
    train_seq=train_seq,
    test_x=test_with_context,
)


# Define prediction function
Example #2
0
# Load test-side features from prior submissions, the clean training
# sequence ids for folds 1-10, and the test row index for the 'ds_pir_v0'
# data source.  NOTE(review): tmp (the second load_test return) is unused
# here — presumably the raw test features, superseded by load_submissions.
all_test_x = fd.load_submissions(files_txt)
train_seq = fd.get_clean_sequences([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
rows, tmp = fd.load_test(['ds_pir_v0'])

# Pack features, targets and sequence ids into the dataset mapping.
dataset = dict(
    train_x=all_train_x,
    train_y=all_train_y,
    train_seq=train_seq,
    test_x=all_test_x,
)

# Add past data
# Rebuild the dataset with temporal context (t-1, t-2, t+1 neighbours);
# -9999 is presumably the "no neighbour" fill marker — TODO confirm.
# NOTE(review): the training calls pass an extra trailing argument (200)
# that the test calls omit — verify this asymmetry is intentional and not
# a copy-paste slip.
train_aug = np.c_[
    all_train_x,
    fd.get_past_data(all_train_x, train_seq, 1, -9999, 200),
    fd.get_past_data(all_train_x, train_seq, 2, -9999, 200),
    fd.get_future_data(all_train_x, train_seq, 1, -9999, 200),
]
test_aug = np.c_[
    all_test_x,
    fd.get_past_data(all_test_x, rows[:, 0], 1, -9999),
    fd.get_past_data(all_test_x, rows[:, 0], 2, -9999),
    fd.get_future_data(all_test_x, rows[:, 0], 1, -9999),
]
dataset = dict(
    train_x=train_aug,
    train_y=all_train_y,
    train_seq=train_seq,
    test_x=test_aug,
)


# Define prediction function
# Load the training sequences and the held-out test rows for this source.
all_train_x, all_train_y, train_seq = fd.load_sequences(sequence_train, data_source)
rows, all_test_x = fd.load_test(data_source)

# One-shot preprocessing of train and test together.  Keys suggest: drop
# rows with NaN targets, impute missing values with 0, cast to float32 and
# scale features — confirm against fd.whole_preprocess.
preprocess_cfg = {
    'remove_nan_targets': True,
    'missing': 0,
    'float32': True,
    'scale': True,
}
all_train_x, all_train_y, train_seq, rows, all_test_x = fd.whole_preprocess(
    all_train_x, all_train_y, train_seq, rows, all_test_x,
    params=preprocess_cfg)

# Stack the level-1 model's out-of-fold / submission predictions onto the
# feature matrices as extra columns.  (Columns 0-2 of the submission file
# are skipped — presumably row-id metadata; TODO confirm.)
all_train_x = np.concatenate(
    (all_train_x,
     pd.read_csv('../predict_location/pl_L1_ET_vA1_valid.csv').values),
    axis=1)
all_test_x = np.concatenate(
    (all_test_x,
     pd.read_csv('../predict_location/pl_L1_ET_vA1_submission.csv').values[:, 3:]),
    axis=1)
dataset = dict(
    train_x=all_train_x,
    train_y=all_train_y,
    train_seq=train_seq,
    test_x=all_test_x,
)

# Add past data
# Rebuild the dataset with lag/lead context columns: t-1, t-2 and t+1
# neighbours within each sequence, with 0 presumably used as the fill
# value where no neighbour exists — TODO confirm against fd helpers.
augmented_train = np.c_[
    all_train_x,
    fd.get_past_data(all_train_x, train_seq, 1, 0),
    fd.get_past_data(all_train_x, train_seq, 2, 0),
    fd.get_future_data(all_train_x, train_seq, 1, 0),
]
augmented_test = np.c_[
    all_test_x,
    fd.get_past_data(all_test_x, rows[:, 0], 1, 0),
    fd.get_past_data(all_test_x, rows[:, 0], 2, 0),
    fd.get_future_data(all_test_x, rows[:, 0], 1, 0),
]
dataset = dict(
    train_x=augmented_train,
    train_y=all_train_y,
    train_seq=train_seq,
    test_x=augmented_test,
)
                           
# Define prediction function
def predict_model(train_x, train_y, test_x, test_y=None, class_weights=None, random_state=0):
    """Train a bagged neural-network classifier on (train_x, train_y).

    NOTE(review): this definition continues beyond the visible excerpt;
    class_weights and random_state are unused in the portion shown here.
    """
    # Build and fit the network (the original comment said "KNN", but
    # fp.PMC_NeuralNetwork_T1 is a neural net: two layers [1000, 100] —
    # the second value of each pair is presumably a dropout/keep rate,
    # confirm against fp — trained with categorical cross-entropy.
    params = {'layers': [[1000, 0.90], [100, 0.60]], 'loss': 'categorical_crossentropy'}
    model = fp.PMC_NeuralNetwork_T1(train_x.shape[1], train_y.shape[1], params, bags=1) 
    model.fit(train_x, train_y, test_x, test_y, batch_size=64, nb_epoch=20)