コード例 #1
0
def trainModel(modelName):
    """Train a tree-regression model on all stored maps and persist it.

    Args:
        modelName: Name under which the trained model is saved in the DB.

    Returns:
        Flask JSON response confirming the model was trained and saved.
    """
    data = mi.getAllMaps()
    data = mDriver.ready_data(data)
    x_train, x_test, y_train, y_test = mDriver.split_data(data)
    # tree_regression also returns a score, but it is not reported back to
    # the caller — only the fitted model is kept.
    model, _score = mDriver.tree_regression(x_train, x_test, y_train, y_test)
    # saveModel's return value (save details) was previously bound to an
    # unused local; only the side effect (persisting the model) matters here.
    mi.saveModel(model, modelName)
    return jsonify({"msg": "Model Trained and Saved!"})
コード例 #2
0
def runPrediction(modelName):
    """Load a stored model and stream a prediction chart as a PNG image.

    Args:
        modelName: Name of the model as stored in the DB.

    Returns:
        Flask file response serving the generated plot (image/png).
    """
    theModel = mi.getModel(modelName)  # Same name as stored in db
    data = mi.getAllMaps()
    data = mDriver.ready_data(data)
    # Only the held-out test split is needed for charting; the training
    # split is deliberately discarded.
    _, x_test, _, y_test = mDriver.split_data(data)
    byte_image = mDriver.charts(theModel, x_test, y_test)
    # NOTE(review): `attachment_filename` was renamed `download_name` in
    # Flask 2.0 — confirm the pinned Flask version before upgrading.
    return send_file(byte_image,
                     attachment_filename='plot.png',
                     mimetype='image/png')
コード例 #3
0
def test_split_data():
    """split_data must reproduce the fixed (seeded) 4/2 train/val partition."""
    features = np.arange(12).reshape(6, 2)  # rows [0,1], [2,3], ..., [10,11]
    targets = np.array([0, 0, 0, 1, 1, 1])

    train_X, val_X, train_y, val_y = split_data(features, targets)

    expected_train_X = np.array([[2, 3], [10, 11], [0, 1], [8, 9]])
    expected_val_X = np.array([[6, 7], [4, 5]])
    expected_train_y = np.array([0, 1, 0, 1])
    expected_val_y = np.array([1, 0])

    assert np.array_equal(train_X, expected_train_X)
    assert np.array_equal(val_X, expected_val_X)
    assert np.array_equal(train_y, expected_train_y)
    assert np.array_equal(val_y, expected_val_y)
コード例 #4
0
File: main.py  Project: martindansc/LolPredict
def roc_curve_model(cmodel, selected_features):
    """Compute ROC curve points and AUC for `cmodel` on held-out match data.

    Args:
        cmodel: Fitted classifier exposing `predict_proba`.
        selected_features: Key into the module-level `features` mapping,
            naming the feature subset to select.

    Returns:
        Tuple (fpr, tpr, roc_auc) from sklearn's roc_curve / auc.
    """
    data = transforms.get_players_match_data(0, 550)

    # NOTE(review): the result of select_features is rebound to the local
    # `selected_features` but never read afterwards — either select_features
    # mutates `data` in place, or this assignment is dead code; verify.
    selected_features = model.select_features(data, features[selected_features])
    (data, target) = model.convert_to_np(data)
    # Only the test split is evaluated; the training split is discarded.
    _, test_data, _, test_target = model.split_data(data, target)

    # Column 1 of predict_proba: probability of the positive class.
    scores = model.get_column(cmodel.predict_proba(test_data), 1)
    
    (fpr, tpr, _) = roc_curve(test_target, scores)
    roc_auc = auc(fpr, tpr)

    return (fpr, tpr, roc_auc)
コード例 #5
0
def min_samples_leaf(original_images, original_labels):
    """Train a decision tree with min_samples_leaf=10 and save its errors.

    Preprocesses the raw images and labels, splits them into train/val/test
    sets, fits the classifier, runs a validation pass, then evaluates on the
    test set via the shared harness and persists the misclassified examples.
    """
    run_name = "decision_tree_min_samples_leaf"

    images = preprocess_images(original_images)
    labels = preprocess_labels(original_labels)

    train_split, val_split, test_split = split_data(images, labels)
    training_images, training_labels = train_split
    val_images, val_labels = val_split
    testing_images, testing_labels = test_split

    cl = tree.DecisionTreeClassifier(min_samples_leaf=10)
    clf = cl.fit(training_images, training_labels)

    # Validation pass; the predictions and score are computed but not
    # captured, matching the original behaviour.
    clf.predict(val_images)
    clf.score(val_images, val_labels)

    errors = test_model(cl, testing_images, testing_labels, run_name)
    save_misclassified(errors, run_name)
コード例 #6
0
def baseline_dt(original_images, original_labels):
    """Train a default-parameter decision tree baseline and save its errors.

    Preprocesses the raw images and labels, splits them into train/val/test
    sets, fits the classifier, runs a validation pass, then evaluates on the
    test set via the shared harness and persists the misclassified examples.
    """
    run_name = "decision_tree_baseline"

    images = preprocess_images(original_images)
    labels = preprocess_labels(original_labels)

    train_split, val_split, test_split = split_data(images, labels)
    training_images, training_labels = train_split
    val_images, val_labels = val_split
    testing_images, testing_labels = test_split

    cl = tree.DecisionTreeClassifier()
    clf = cl.fit(training_images, training_labels)

    # Validation pass; the predictions and score are computed but not
    # captured, matching the original behaviour.
    clf.predict(val_images)
    clf.score(val_images, val_labels)

    errors = test_model(cl, testing_images, testing_labels, run_name)
    save_misclassified(errors, run_name)
コード例 #7
0
def features_dt(original_images, original_labels):
    """Train a decision tree on an augmented feature set and save its errors.

    The feature matrix concatenates the flattened pixels with a 14x14 chunk
    average, the overall image mean, and per-row / per-column means, then the
    usual train/val/test split, fit, validation pass, and test harness run.
    """
    run_name = "decision_tree_features"

    compress = preprocess_images(average_chunk(original_images, 14, 14))
    all_avg = average_all(original_images)
    avg_row = average_row(original_images)
    avg_col = average_column(original_images)

    images = preprocess_images(original_images)
    labels = preprocess_labels(original_labels)
    features = np.concatenate(
        (images, compress, all_avg.T, avg_row, avg_col), axis=1)

    train_split, val_split, test_split = split_data(features, labels)
    training_images, training_labels = train_split
    val_images, val_labels = val_split
    testing_images, testing_labels = test_split

    cl = tree.DecisionTreeClassifier(min_samples_leaf=10)
    clf = cl.fit(training_images, training_labels)

    # Validation pass; the predictions and score are computed but not
    # captured, matching the original behaviour.
    clf.predict(val_images)
    clf.score(val_images, val_labels)

    errors = test_model(cl, testing_images, testing_labels, run_name)
    save_misclassified(errors, run_name)
コード例 #8
0
# Alternate dataset paths, kept for switching configurations:
# IMG_LR_DIR_3X = '../DataSet/IMG_LR_X3'

# TRAIN_IDS = '../DataSet/train.txt'
# TEST_IDS = '../DataSet/test5.txt'
# VAL_IDS = '../DataSet/val.txt'

# REMINDER:
# When you choose a downscale factor, make sure to use the appropriate
# directory for low-resolution images in load_LR_img:
# DOWNSCALE = 2 -> use the IMG_LR_DIR_2X folder
# DOWNSCALE = 3 -> use the IMG_LR_DIR_3X folder
DOWNSCALE = 2
VISUALIZE = False

# Load test data: only the test ids are used here; train/val ids discarded.
_, test_ids, _ = split_data(TRAIN_IDS, TEST_IDS, VAL_IDS)

# build network, agnostic to input size ('dim': None)
params = {
    'dim': None,
    'batch_size': 1,  # one image at a time, since sizes vary
    'n_channels': 1,  # single (luminance) channel
    'downscale': DOWNSCALE,
    'shuffle': False
}
model = predict_model(params, None)

# Candidate weight files to load into the model (pick one):
modelname_mehdi_Y = 'mehdi_Y.2800-0.00084.hdf5'  # arch 2
modelname_mehdi_Y_div2k = 'mehdi_Y.40-0.00075.hdf5'  # arch 2
modelname_mehdi_Y_small = 'mehdi_small_Y.2800-0.00115.hdf5'  # arch 1
コード例 #9
0
Returns:
    list of plotly fig data
"""
num_epochs = 100
lookback = 20  # NOTE(review): defined but split_data below uses a literal 20
data = modelPack.get_stock_history('ETH-USD')
data  # no-op bare expression — notebook residue (displayed the frame)

price = data[['Close']]
scaler = MinMaxScaler(feature_range=(-1, 1))
vals = scaler.fit_transform(price['Close'].values.reshape(-1, 1))
price2 = pd.DataFrame()
price2['Close'] = vals.reshape(-1)

# NOTE(review): the scaled frame `price2` is built above but the UNSCALED
# `price` is what gets passed to split_data — confirm whether scaling was
# meant to apply to the training data.
x_train, y_train, x_test, y_test = modelPack.split_data(price, 20)

model = modelPack.LSTM()
criterion = torch.nn.MSELoss(reduction='mean')
optimiser = torch.optim.Adam(model.parameters(), lr=0.01)

hist = np.zeros(num_epochs)  # per-epoch training loss
start_time = time.time()
lstm = []
# Training loop (body continues beyond this excerpt).
for t in range(num_epochs):
    y_train_pred = model(x_train)
    loss = criterion(y_train_pred, y_train)
    #print("Epoch ", t, "MSE: ", loss.item())
    hist[t] = loss.item()
    optimiser.zero_grad()
    loss.backward()