Example #1
from sklearn.metrics import classification_report

def main():
    X_tr, X_te, y_tr, y_te = read_dataset()
    model = build_model()
    model.fit(X_tr, y_tr)
    print("Train score", model.score(X_tr, y_tr))
    print("Test score ", model.score(X_te, y_te))
    print("Train set")
    print(classification_report(y_tr, model.predict(X_tr)))
    print("Test set")
    print(classification_report(y_te, model.predict(X_te)))
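Example #1 calls `read_dataset` and `build_model` helpers that the listing does not include. A minimal hypothetical sketch of what they might look like, assuming scikit-learn (the dataset and model choice are placeholders):

# Hypothetical helpers for Example #1 (not from the source): any
# classifier with fit/score/predict and any train/test split will do.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

def read_dataset():
    X, y = load_iris(return_X_y=True)
    # returns X_tr, X_te, y_tr, y_te in the order main() unpacks them
    return train_test_split(X, y, test_size=0.25, random_state=0)

def build_model():
    # a scaled logistic regression stands in for the unknown model
    return make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))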
Example #2
def lsq_pos_l1_penalty(matrix, rhs, cost_multiplier, weights_0):
    """
    min 2-norm (matrix*w - rhs)^2 + 1-norm(cost_multiplier*(w - w0))
    s.t. e'w = 1
           w >= 0
    """
    # define model
    model = mModel.build_model('lsqSparse')

    # introduce n non-negative weight variables
    weights = mModel.weights(model, "weights", n=matrix.shape[1], lb=0.0)

    # e'*w = 1
    mBound.equal(model, Expr.sum(weights), 1.0)

    # sum of squared residuals
    v = mMath.l2_norm_squared(model, "2-norm(res)**", __residual(matrix, rhs, weights))
    # debug output
    print(matrix.shape[1])
    print(weights_0)
    print(weights)
    # \Gamma*(w - w0), p is an expression
    p = mMath.mat_vec_prod(cost_multiplier, Expr.sub(weights, weights_0))
    t = mMath.l1_norm(model, 'abs(weights)', p)

    # Minimise v + t
    mModel.minimise(model, __sum_weighted(1.0, v, 1.0, t))
    return np.array(weights.level())
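A hedged usage sketch for `lsq_pos_l1_penalty` on synthetic data, assuming the `mModel`/`mMath`/`mBound` wrapper modules and the private `__residual`/`__sum_weighted` helpers used above are importable:

import numpy as np

np.random.seed(0)
matrix = np.random.randn(50, 5)       # 50 observations, 5 assets
w0 = np.full(5, 0.2)                  # current holdings
rhs = matrix @ w0                     # target to track
gamma = 0.01 * np.eye(5)              # per-asset trading-cost matrix

w = lsq_pos_l1_penalty(matrix, rhs, cost_multiplier=gamma, weights_0=w0)
print(w, w.sum())                     # long-only weights summing to 1.0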
Example #3
def test_main_model(data):
    X_tr, X_te = data
    model = build_model()
    model.fit(X_tr)
    predictions = model.predict(X_te)
    revenue_in_2_weeks = predictions.groupby("company_name").sum()
    print(revenue_in_2_weeks)
Example #4
def main():
    model = build_model()
    X_tr, X_te = read_dataset(n_days=14)
    model.fit(X_tr)
    predictions = model.predict(X_te)
    revenue_in_2_weeks = predictions.groupby("company_name").sum()
    print(revenue_in_2_weeks)
    revenue_in_2_weeks.to_csv('predictions.csv', index=False)
Example #5
def main(data_path, week):
    data = prepare_dataset(data_path)
    (X_tr, y_tr), (X_te, y_te) = train_last_week_split(data,
                                                       week=week,
                                                       full_training=False)
    clf = build_model()
    clf.fit(X_tr, y_tr)

    # copy to avoid pandas SettingWithCopyWarning when adding a column
    submission = X_te.loc[:, ["i", "j"]].copy()
    submission["prediction"] = clf.predict_proba(X_te)[:, 1]
    submission.to_csv("prediction.csv", index=False)
Example #6
def markowitz(exp_ret, covariance_mat, aversion):
    # define model
    model = mModel.build_model("mean_var")

    # set of n weights (unconstrained)
    weights = mModel.weights(model, "weights", n=len(exp_ret))

    mBound.equal(model, Expr.sum(weights), 1.0)

    # variance induced by covariance matrix
    var = mMath.variance(model, "var", weights, covariance_mat)

    mModel.maximise(model=model, expr=Expr.sub(Expr.dot(exp_ret, weights), Expr.mul(aversion, var)))
    return np.array(weights.level())
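A hedged usage sketch for `markowitz` with a synthetic three-asset universe:

import numpy as np

exp_ret = np.array([0.05, 0.07, 0.02])
covariance_mat = np.array([[0.10, 0.02, 0.00],
                           [0.02, 0.08, 0.01],
                           [0.00, 0.01, 0.05]])

w = markowitz(exp_ret, covariance_mat, aversion=2.0)
print(w, w.sum())  # fully invested (weights sum to 1), shorts allowed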
Example #7
def markowitz_riskobjective(exp_ret, covariance_mat, bound):
    # define model
    model = mModel.build_model("mean_var")

    # set of n weights (unconstrained)
    weights = mModel.weights(model, "weights", n=len(exp_ret))

    # standard deviation induced by covariance matrix
    stdev = mMath.stdev(model, "std", weights, covariance_mat)

    # impose a bound on this standard deviation
    mBound.upper(model, stdev, bound)

    mModel.maximise(model=model, expr=Expr.dot(exp_ret, weights))
    return np.array(weights.level())
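Reusing the inputs from the sketch above, the risk-objective variant caps the portfolio standard deviation instead of penalising variance; note there is no budget constraint here, so the weights need not sum to 1:

w = markowitz_riskobjective(exp_ret, covariance_mat, bound=0.25)
print(w)  # expected return maximised subject to stdev <= 0.25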
Example #8
def lasso(matrix, rhs, lamb):
    """
    min 2-norm (matrix*w - rhs)^2 + lamb * 1-norm(w)
    """
    # define model
    model = mModel.build_model('lasso')

    # introduce variables and constraints
    weights = mModel.weights(model, "weights", matrix.shape[1])
    v = mMath.l2_norm_squared(model, "2-norm(res)**", __residual(matrix, rhs, weights))
    t = mMath.l1_norm(model, "1-norm(w)", weights)

    # Minimise 1.0*v + lambda * t
    mModel.minimise(model=model, expr=__sum_weighted(c1=1.0, expr1=v, c2=lamb, expr2=t))

    return np.array(weights.level())
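A hedged sketch sweeping the lasso's regularisation parameter on synthetic data to show the usual sparsity path (larger `lamb`, fewer nonzero weights):

import numpy as np

np.random.seed(0)
A = np.random.randn(100, 20)
b = A[:, :3] @ np.array([1.0, -0.5, 0.25])  # only 3 informative columns

for lamb in (0.01, 0.1, 1.0, 10.0):
    w = lasso(A, b, lamb)
    print(lamb, int(np.sum(np.abs(w) > 1e-6)))  # count of nonzero weights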
Example #9
def lsq_ls(matrix, rhs):
    """
    min 2-norm (matrix*w - rhs)^2
    s.t. e'w = 1
    """
    # define model
    model = mModel.build_model('lsqPos')

    # introduce n weight variables (no sign constraint in this variant)
    weights = mModel.weights(model, "weights", n=matrix.shape[1])

    # e'*w = 1
    mBound.equal(model, Expr.sum(weights), 1.0)

    v = mMath.l2_norm(model, "2-norm(res)", expr=__residual(matrix, rhs, weights))

    # minimization of the residual
    mModel.minimise(model=model, expr=v)

    return np.array(weights.level())
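A hedged usage sketch for `lsq_ls`; the only hard constraint is the budget e'w = 1:

import numpy as np

w = lsq_ls(matrix=np.random.randn(250, 10), rhs=np.random.randn(250))
assert abs(w.sum() - 1.0) < 1e-6  # budget constraint holds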
Example #10
def main():

    path = "/home/alisher/Documents/data/brats/MICCAI_BraTS_2019_Data_Training/HGG/BraTS19_2013_10_1"
    input_shape = (4, 80, 96, 64)
    seqs = ["flair", "t1", "t2", "t1ce", "seg"]
    data_paths = [{}]
    for name in os.listdir(path):
        for seq in seqs:
            if seq in name:
                data_paths[0][seq] = f"{path}/{name}"
                break

    output_channels = 3

    data = np.empty((len(data_paths[:4]), ) + input_shape, dtype=np.float32)
    labels = np.empty((len(data_paths[:4]), output_channels) + input_shape[1:],
                      dtype=np.uint8)

    for i, imgs in enumerate(data_paths):
        try:
            # only the four imaging modalities form the 4-channel input
            # volume; 'seg' is the ground-truth segmentation for the labels
            data[i] = np.array(
                [preprocess(nib.load(imgs[m]), input_shape[1:])
                 for m in seqs[:-1]],
                dtype=np.float32)
            # no extra batch axis: labels[i] expects (output_channels, ...)
            labels[i] = preprocess_label(read_img(imgs['seg']),
                                         input_shape[1:])
        except Exception as e:
            print(
                f'Something went wrong with {imgs["t1"]}, skipping...\n Exception:\n{str(e)}'
            )
            continue
    config, _ = get_config(os.path.abspath('config.json'))

    if os.path.exists(config.model_path):
        print("Pretrained model is loaded")
        model = load_model(config.model_path)
    else:
        print("New model was initialized")
        model = build_model(input_shape=input_shape,
                            output_channels=config.label_num)

    model.fit(data, [labels, data], batch_size=1, epochs=1)
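Example #10 depends on a `preprocess_label` helper that is not shown. A hypothetical sketch, assuming the usual BraTS convention of mapping segmentation labels 1, 2 and 4 onto three binary channels (the resize step is elided):

import numpy as np

# Hypothetical; the real helper is not in the source.
def preprocess_label(img, out_shape=None):
    ncr = img == 1   # necrotic / non-enhancing tumour core
    ed = img == 2    # peritumoral edema
    et = img == 4    # enhancing tumour
    # resize each mask to out_shape here if needed
    return np.stack([ncr, ed, et], axis=0).astype(np.uint8)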
Example #11
    dev_images1 = dev_images1.astype('float32') / 255

    # general categorical variables
    # note the added "s" in train_labels, distinguishing it from the
    # earlier numpy array train_label_np
    train_labels = to_categorical(train_label_np)
    dev_labels = to_categorical(dev_label_np)

    train_labels1 = to_categorical(train_label_np1)
    dev_labels1 = to_categorical(dev_label_np1)

    train_labels_merged = merge_labels(train_labels, train_labels1)
    dev_labels_merged = merge_labels(dev_labels, dev_labels1)

    sample_size = train_labels_merged.shape[0]  # number of training samples

    for i in range(sample_size):
        print(train_labels_merged[i, :], ",", train_label_np[i], ",",
              train_label_np1[i])

    params = {}
    params['height'] = height
    params['width'] = width
    params['channel'] = channel

    model = build_model(True, params)
    history = train_model(model, train_labels_merged, train_images,
                          dev_labels_merged, dev_images)

    # print the graph of learning history for diagnostic purpose.
    print_plot_keras_metrics(history)
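`merge_labels` is not shown in this snippet. The diagnostic print above lists one merged row per sample next to the two original integer labels, which is consistent with simply concatenating the two one-hot encodings; a hypothetical sketch:

import numpy as np

# Hypothetical; the real helper is not in the source.
def merge_labels(labels_a, labels_b):
    # (n, k1) and (n, k2) one-hot matrices -> (n, k1 + k2)
    return np.concatenate([labels_a, labels_b], axis=1)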
Example #12
def initial_fit(num_timesteps, num_targets, train_percent=.93, num_tweets=300):
    print("started init fit")
    dir_path = os.path.dirname(os.path.abspath(__file__))

    #clear contents of log files
    open(os.path.join(dir_path, 'logs/context_prices.txt'), 'w').close()
    open(os.path.join(dir_path, 'logs/actuals.txt'), 'w').close()
    open(os.path.join(dir_path, 'logs/predictions.txt'), 'w').close()
    open(os.path.join(dir_path, 'logs/history.txt'), 'w').close()
    open(os.path.join(dir_path, 'logs/proxy_log.txt'), 'w').close()

    data = get_historical(num_tweets, from_date="")

    X_train, y_train, X_test, y_test, ref = load_data(
        data,
        num_timesteps,
        num_targets=num_targets,
        train_percent=train_percent
    )  #TODO: make higher percentage of training when this goes into "prod"

    # store recent data so that we can get a live prediction
    recent_reference = []
    recent_data = data[-num_timesteps:, 1:]
    recent_data = normalize_timestep(recent_data, recent_reference)

    print("    X_train", X_train.shape)
    print("    y_train", y_train.shape)
    print("    X_test", X_test.shape)
    print("    y_test", y_test.shape)

    model = build_model([9, num_timesteps, num_targets])
    #train the model
    print("TRAINING")
    model.fit(X_train,
              y_train,
              batch_size=512,
              epochs=600,
              validation_split=0.1,
              verbose=2)
    save_model(model)

    trainScore = model.evaluate(X_train, y_train, verbose=0)
    print('Train Score: %.2f MSE (%.2f RMSE) (%.2f)' %
          (trainScore[0], math.sqrt(trainScore[0]), trainScore[1]))

    testScore = model.evaluate(X_test, y_test, verbose=0)
    print('Test Score: %.2f MSE (%.2f RMSE) (%.2f)' %
          (testScore[0], math.sqrt(testScore[0]), testScore[1]))

    #make predictions
    print("PREDICTING")
    p = model.predict(X_test)

    recent_data = [
        recent_data
    ]  # One-sample predictions need list wrapper. Argument must be 3d.
    recent_data = np.asarray(recent_data)
    future = model.predict(recent_data)

    # document results in file
    print("WRITING TO LOG")
    file = open(os.path.join(dir_path, "logs/log_initial.txt"), "w")
    for i in range(0, len(X_train)):
        for s in range(0, num_timesteps):
            file.write(str(X_train[i][s]) + "\n")
        file.write("Target: " + str(y_train[i]) + "\n")
        file.write("\n")

    for i in range(0, len(X_test)):
        for s in range(0, num_timesteps):
            file.write(str(X_test[i][s]) + "\n")
        file.write("Target: " + str(y_test[i]) + "\n")
        file.write("Prediction: " + str(p[i]) + "\n")
        file.write("\n")
    file.close()

    # de-normalize
    print("DENORMALIZING")
    for i in range(0, len(p)):
        # index the reference list with the same split fraction used by
        # load_data (train_percent) rather than a hard-coded 0.9
        p[i] = (p[i] + 1) * ref[round(train_percent * len(ref)) + i]
        y_test[i] = (y_test[i] + 1) * ref[round(train_percent * len(ref)) + i]

    future[0] = (future[0] + 1) * recent_reference[0]
    recent_data[0] = (recent_data[0] + 1) * recent_reference[0]

    file = open(os.path.join(dir_path, "logs/predictions.txt"), "a")
    file.write(str(future[0][0]) + "\n")
    file.close()

    # plot historical predictions
    print("PLOTTING")
    for i in range(0, len(p)):
        if i % (num_targets * 2) == 0:
            plot_index = i  #for filling plot indexes
            plot_indexes = []
            plot_values = p[i]
            for j in range(0, num_targets):
                plot_indexes.append(plot_index)
                plot_index += 1
            plt.plot(plot_indexes, plot_values, color="red")

    # plot historical actual
    plt.plot(y_test[:, 0], color='blue',
             label='Actual')  # actual price history

    # plot recent prices
    plot_indexes = [len(y_test) - 1]
    plot_values = [y_test[-1, 0]]
    plot_index = None
    for i in range(0, len(recent_data[0])):
        plot_values.append(recent_data[0][i][0])
        plot_index = len(y_test) + i
        plot_indexes.append(len(y_test) + i)
    plt.plot(plot_indexes, plot_values, color='blue')

    # plot future predictions
    plot_indexes = [plot_index]
    plot_values = [recent_data[0][-1][0]]
    for i in range(0, len(future[0])):
        plot_index += 1
        plot_values.append(future[0][i])
        plot_indexes.append(plot_index)
    plt.plot(plot_indexes, plot_values, color="red", label="Prediction")

    #show/save plot
    print("SENDING EMAILS")
    plt.legend(loc="upper left")
    plt.title("ETH Price Predictions")
    plt.xlabel("Hours")
    plt.ylabel("Price ($)")
    filename = str(arrow.utcnow().format("YYYY-MM-DD"))
    plt.savefig(os.path.join(dir_path, "graphs/" + filename))
    #plt.show()
    plt.close()
    send_email()

    return
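`normalize_timestep` is not shown, but the de-normalisation above, x = (x_norm + 1) * ref, pins down its likely shape: each window is expressed as percent change relative to a reference price that gets appended to the reference list. A hypothetical sketch:

# Hypothetical; inferred from the de-normalisation in initial_fit.
def normalize_timestep(window, references):
    ref = window[0][0]          # reference price: first value of the window
    references.append(ref)
    return window / ref - 1.0   # so that (x_norm + 1) * ref recovers x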
Example #13
    print(xx, "\n")
    # print(xxx)
    y = sigmoid(float(y.rstrip()))
    x_train.append(xx)

    # x_train.append(x)
    y_train.append(y)



x_train = np.array(x_train)
y_train = np.array(y_train)

x_train = x_train.reshape(248, 1, -1)
#print("feat:" ,a,b,c , "lab:" , y , "\n")

model = build_model(input_shape=x_train.shape)
model.train(x_train,y_train,epochs=500)
x_predict = '2017-5-6'
x_predict = x_predict.split('-')
predict = []
for x in x_predict:
    predict.append(sigmoid(int(x)))
predict = np.array(predict)
predict = predict.reshape(1, 3, 1)
print(predict.shape)
res = model.predict(x_train)  # note: the prepared `predict` input is unused here
plt.scatter(range(248), res, c='r')      # model output in red
plt.scatter(range(248), y_train, c='g')  # training targets in green
plt.show()
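The `sigmoid` helper is not shown; a standard logistic-function sketch consistent with its use on scalar floats above:

import numpy as np

# Hedged sketch; the source does not show its definition.
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))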
Example #14
def test_model_pipeline(data):
    X, y = xy(data)
    model = build_model()
    model.fit(X, y)
Example #15
    model_dir = args.output_path + '/models'
    log_dir = '../logs'
    tb_dir = args.output_path + '/logs'

    _clear_logs(log_dir, tb_dir)
    x_train, x_test, y_train, y_test = _load_data(args.input_path)
    x_train, y_train = training_xform(x_train, y_train)

    # these are functions, not attributes; assigning to them does nothing
    tf.config.threading.set_inter_op_parallelism_threads(0)
    tf.config.threading.set_intra_op_parallelism_threads(0)

    save_prefix = model_dir + '/'
    checkpoint_path = os.path.join(save_prefix, 'cp')

    hp = HypParams()
    model = build_model(hp)

    early_stopping_cb = K.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=0.00001,
                                                  restore_best_weights=True,
                                                  verbose=1,
                                                  patience=7)
    checkpoint_cb = K.callbacks.ModelCheckpoint(checkpoint_path,
                                                save_best_only=True,
                                                verbose=1,
                                                monitor='val_loss')
    lr_scheduler_cb = K.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    min_delta=0.0001,
                                                    factor=0.5,
                                                    patience=2,
                                                    verbose=1)
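The snippet is truncated before training begins. A hedged sketch of how these callbacks would typically be wired into the fit call (the epoch count and the use of the raw `x_test`/`y_test` as validation data are assumptions):

    # assumed continuation of the truncated snippet
    history = model.fit(x_train, y_train,
                        validation_data=(x_test, y_test),
                        epochs=100,
                        callbacks=[early_stopping_cb,
                                   checkpoint_cb,
                                   lr_scheduler_cb])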
Example #16
def test_handles_model():
    build_model()
Example #17
def main():

    for_test = False

    # preprocessing step
    logger = Logger(path=os.path.abspath('logs/'), name="training_process")
    config, _ = get_config(os.path.abspath('config.json'))
    # data_helper = DataHelper(config, logger)
    # missing_dict, history = data_helper.check_missing()
    # s = pd.Series(missing_dict, name = "Modalities")
    # s.to_csv(os.path.abspath("missing_two.csv"))

    # preprocessing step
    # for (index, id) in enumerate(history.keys()):
    #     if (len(history[id]) > 1):
    #         logger.info(f"{id} - {history[id]}")

    input_shape = (CHANNELS_NUM, ) + tuple(config.image_shape)

    # print(mri_image.get_data())

    data_generator = DataGenerator(config)
    main_path = config.main_path

    hdf_path = f"{main_path}/data/{config.data_set}"
    data_file = tables.open_file(hdf_path, 'r')

    train_indices, validation_indices = data_generator.validation_split(
        f"{main_path}/data/train_keys.csv",
        f"{main_path}/data/validation_kes.csv")
    train_indices = list(train_indices)
    validation_indices = list(validation_indices)

    # train_generator, validation_generator, train_steps, validation_steps = trainer.get_generators(data_file, train_indices,validaiton_indices, config.patch_shape, config.patch_start_offset, config.batch_size)
    # logger.info("Generators are created")

    params = {
        'n_labels': config.label_num,
        'labels': config.labels,
        'dim': tuple(config.image_shape),
        'batch_size': 1,
        'shuffle': True
    }

    logger.info("Generators are creating")
    train_generator = DataGeneratorNew(train_indices, data_file, **params)
    validation_generator = DataGeneratorNew(validation_indices, data_file,
                                            **params)
    logger.info("Generators are ready")

    logger.info("Model is loading")

    if os.path.exists(config.model_path) and for_test:
        is_compile = not for_test
        model = load_model(
            config.model_path, compile=is_compile
        )  #custom_objects={'loss_gt_':loss_gt(), 'loss_VAE_':loss_VAE, 'dice_coefficient':dice_coefficient})
        logger.info("Pretrained model is loaded")
    else:
        logger.info("New model was initialized")
        model = build_model(input_shape=input_shape,
                            output_channels=config.label_num)

    logger.info("Training model is started")

    if not for_test:
        model.fit_generator(
            generator=train_generator,
            epochs=config.epochs,
            validation_data=validation_generator,
            callbacks=get_callbacks(
                config.model_path,
                initial_learning_rate=config.initial_learning_rate,
                learning_rate_drop=config.learning_rate_drop))
    else:

        # some validation index
        validation_case = validation_indices[0]

        (y, replication) = model.predict(
            data_file.root.data[validation_case][np.newaxis])

        replication_nii = nib.Nifti1Image(replication[0][0], affine=np.eye(4))
        label_nii = nib.Nifti1Image(y[0][0], affine=np.eye(4))
        original_nii = nib.Nifti1Image(
            data_file.root.data[validation_case][0], affine=np.eye(4))

        nib.save(label_nii, '/home/alisher/Desktop/label.nii.gz')
        nib.save(original_nii, '/home/alisher/Desktop/original.nii.gz')
        nib.save(replication_nii, '/home/alisher/Desktop/replication.nii.gz')

    data_file.close()
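`get_callbacks` is not shown in the source. A hypothetical sketch consistent with the arguments passed above (a checkpoint path plus an initial learning rate and a drop factor for a simple step-decay schedule):

from tensorflow import keras

# Hypothetical; the real helper is not in the source.
def get_callbacks(model_path, initial_learning_rate, learning_rate_drop):
    def schedule(epoch):
        # step decay: start at initial_learning_rate, shrink by the drop factor
        return initial_learning_rate * (learning_rate_drop ** epoch)
    return [
        keras.callbacks.ModelCheckpoint(model_path, save_best_only=True),
        keras.callbacks.LearningRateScheduler(schedule, verbose=1),
    ]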