Example #1
def predict():
    string = 'test'
    hist_pred_n = string + "hist_pred.jpeg"

    # Loading from .pkl files
    pkl_hnd = store(app.config['static_path'], app.root_path)
    clf = pkl_hnd.load('model')
    n_labels = pkl_hnd.load('n_labels')
    enc = pkl_hnd.load('enc')

    # Feature extraction
    data = utils.file_parser_test(
        os.path.join(app.config['upload_path'], "test.txt"))
    features = utils.feature_extractor(data['text'], 5000)

    # Preprocessing features
    data_x = utils.preprocess_features(features, 2500)

    # Predicting
    pr = predict_model(data_x)
    pred_enc = pr.predict_model(clf)

    # Decoding the encoded prediction
    pred = utils.label_encoder(pred_enc, True, enc)
    # Saving predicted value and data into .csv file
    pkl_hnd.save_pred(data_x, pred)

    # Plotting histogram of prediction
    pkl_hnd.plot_hist(pred, hist_pred_n)

    return render_template(
        "predict_result.html",
        img_hist_pred=url_for(app.config['static_path'], filename=hist_pred_n),
    )
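The store helper used above (and again in Example #10) is not shown in these snippets. Below is a minimal sketch of its pickle side, assuming it simply wraps Python's pickle module; the method names come from the calls above, the implementation is purely illustrative:

import os
import pickle

class store:
    """Illustrative stand-in for the store helper used in Examples #1 and #10."""

    def __init__(self, static_path, root_path):
        # Assumed layout: pickle files live under <root_path>/<static_path>/
        self.dir = os.path.join(root_path, static_path)

    def dump(self, obj, name):
        with open(os.path.join(self.dir, name + '.pkl'), 'wb') as f:
            pickle.dump(obj, f)

    def load(self, name):
        with open(os.path.join(self.dir, name + '.pkl'), 'rb') as f:
            return pickle.load(f)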
Example #2
def feature_extract(image_path):
    with tf.Graph().as_default():
        image = tf.placeholder(tf.float32,
                               [None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNEL])
        logits, features = predict_model(image, is_training=False)
        variables_to_use = slim.get_variables_to_restore()
        variables_restorer = tf.train.Saver(variables_to_use)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            variables_restorer.restore(sess, BEST_MODEL_PATH)
            img = read_one_image(image_path, args.batch)
            score, feature = sess.run([logits, features],
                                      feed_dict={image: img})
            # Optionally persist the extracted features and scores to disk
            if args.batch == "True" and args.save_results == "True":
                np.save("tmp_data/image_features.npy", feature)
                np.save("tmp_data/image_scores.npy", score)
            return feature, score
Example #3
def predict(input_path, output_path, resources_path):
    """
    This is the skeleton of the prediction function.
    The predict function will build your model, load the weights from the checkpoint and write a new file (output_path)
    with your predictions in the BIES format.
    
    The resources folder should contain everything you need to make the predictions. It is the "resources" folder in your submission.
    
    N.B. DO NOT HARD CODE PATHS IN HERE. Use resources_path instead, otherwise we will not be able to run the code.

    :param input_path: the path of the input file to predict.
    :param output_path: the path of the output file (where you save your predictions)
    :param resources_path: the path of the resources folder containing your model and stuff you might need.
    :return: None
    """
    print("Loading......")
    predict_model(input_path, output_path, resources_path)
    print("Done!")
Example #4
 def post(self):
     args = api_args.parse_args()
     predict = predict_model(args['item_weight'], args['item_fat_content'],
                             args['item_visibility'], args['item_type'],
                             args['item_mrp'], args['years_established'],
                             args['outlet_size'],
                             args['outlet_location_type'],
                             args['outlet_type'])
     return {'predict': round(float(predict), 2)}, 201
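The api_args object is defined outside this snippet. A minimal sketch, assuming it is a flask_restful reqparse.RequestParser set up for the fields read above (the field types are guesses):

from flask_restful import reqparse

# Hypothetical parser matching the fields used in post(); the real
# definition is not part of the snippet.
api_args = reqparse.RequestParser()
for name in ('item_weight', 'item_visibility', 'item_mrp', 'years_established'):
    api_args.add_argument(name, type=float, required=True)
for name in ('item_fat_content', 'item_type', 'outlet_size',
             'outlet_location_type', 'outlet_type'):
    api_args.add_argument(name, type=str, required=True)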
Example #5
def evaluate_currency(model, steps, data, s_d):

    """
    Calculates the likely revenue per stock, accompanied by a failure rate and standard deviation.
    :param model: The RNN
    :param history:
    :param steps:
    :param data:
    :return:
    """

    cur_mult = predict_model(model, data, steps)
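Example #11 below folds the per-step multipliers returned by predict_model into a cumulative revenue figure; a hedged sketch of that same pattern applied to cur_mult (illustrative only, not part of the original function):

# Illustrative only, mirroring the revenue loop in Example #11.
multiply = [1, 1]
for step_mult in cur_mult:
    multiply[0] *= step_mult[0][0]
    multiply[1] *= step_mult[0][1]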
Example #6
 def post(self):
     args = api_args.parse_args()
     predict = predict_model(
         args['crim'],
         args['zn'],
         args['indus'],
         args['chas'],
         args['nox'],
         args['rm'],
         args['age'],
         args['dis'],
         args['rad'],
         args['tax'],
         args['ptratio'],
         args['black'],
         args['lstat'],
     )
     return {'predict': round(float(predict), 2)}, 201
Example #7
def validate_model():
    data = prepare_data()
    #build graph
    with tf.Graph().as_default():
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            'resnet_v1_50', is_training=False)
        processed_image, score = load_data(data['val_image_names'],
                                           data['val_image_scores'], 1,
                                           image_preprocessing_fn, 128, False)
        score = tf.reshape(score, [-1, 1])

        logits, _ = predict_model(processed_image, is_training=False)
        variables_to_use = slim.get_variables_to_restore()
        variables_restorer = tf.train.Saver(variables_to_use)
        # Loss
        with tf.name_scope('loss'):
            # RMSE: root of the mean squared error between predictions and scores
            loss = tf.sqrt(tf.reduce_mean(tf.square(logits - score)))

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            variables_restorer.restore(sess, SAVE_MODEL_PATH)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            sum_ls = 0.0
            steps = 0
            try:
                while not coord.should_stop():
                    ls = sess.run(loss)
                    sum_ls += ls
                    steps += 1

            except tf.errors.OutOfRangeError:
                print("Validating: mean loss %f" % (sum_ls / steps))
            finally:
                coord.request_stop()
            coord.join(threads)
    return sum_ls / steps
Example #8
def train_model():
    data = prepare_data()
    #build graph
    with tf.Graph().as_default():
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            args.model_name, is_training=True)
        processed_image, score = load_data(data['train_image_names'],
                                           data['train_image_scores'],
                                           args.epoch_num,
                                           image_preprocessing_fn,
                                           args.batch_size, True)
        score = tf.reshape(score, [-1, 1])
        print(score.shape)
        logits, _ = predict_model(processed_image, is_training=True)
        print(logits.shape)
        variables_to_restore = slim.get_variables_to_restore(
            exclude=['resnet_v1_50/logits'])
        variables_restorer = tf.train.Saver(variables_to_restore)

        # Loss
        with tf.name_scope('ls'):
            # RMSE: root of the mean squared error between predictions and scores
            loss = tf.sqrt(tf.reduce_mean(tf.square(logits - score)))
            tf.summary.scalar('loss', loss)

        current_epoch = tf.Variable(0, trainable=False)
        decay_step = EPOCHS_PER_LR_DECAY * len(
            data['train_image_names']) // args.batch_size
        learning_rate = tf.train.exponential_decay(args.lr,
                                                   current_epoch,
                                                   decay_step,
                                                   LR_DECAY_FACTORY,
                                                   staircase=True)

        opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
        #opt = tf.train.AdamOptimizer(learning_rate)
        optimizer = slim.learning.create_train_op(loss,
                                                  opt,
                                                  global_step=current_epoch)

        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            summary_writer = tf.summary.FileWriter(TRAIN_LOG_DIR, sess.graph)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            variables_restorer.restore(sess, RES_v1_50_MODEL_PATH)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            sum_ls = 0.0
            batch_num = len(data['train_image_scores']) // args.batch_size
            val_step = 0
            best_val_ls = 100.0
            try:
                while not coord.should_stop():
                    _, ls, step, summary = sess.run(
                        [optimizer, loss, current_epoch, summary_op])
                    sum_ls += ls

                    if step % 50 == 0:
                        print("Epoch %d, loss %f" % (step // batch_num + 1, ls))
                        summary_writer.add_summary(summary, step)
                    if step % batch_num == 0 and step != 0:
                        print("Epoch %d, mean loss %f" %
                              (step // batch_num + 1, sum_ls / batch_num))
                        sum_ls = 0.0
                        saver.save(sess, SAVE_MODEL_PATH)
                        val_ls = validate_model()
                        if val_ls < best_val_ls:
                            best_val_ls = val_ls
                            saver.save(sess, BEST_MODEL_PATH)
                        print('best val loss %f' % (best_val_ls))
            except tf.errors.OutOfRangeError:
                saver.save(sess, SAVE_MODEL_PATH)
            finally:
                coord.request_stop()
            coord.join(threads)
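The schedule built with tf.train.exponential_decay above lowers the learning rate by a factor of LR_DECAY_FACTORY once every decay_step steps (staircase=True). A standalone sketch of the same arithmetic, with placeholder constants rather than values from the source:

# Illustrative only: reproduces exponential decay with staircase=True.
def staircase_decay(base_lr, global_step, decay_step, decay_factor):
    return base_lr * decay_factor ** (global_step // decay_step)

# With base_lr=0.01, decay_factor=0.1, decay_step=1000:
# steps 0-999 give 0.01, steps 1000-1999 give 0.001, and so on.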
Example #9
# DOWNSCALE = 3 ->  Use IMG_LR_DIR_2X folder in load_LR_img
DOWNSCALE = 2
VISUALIZE = False

# Load test data:
_, test_ids, _ = split_data(TRAIN_IDS, TEST_IDS, VAL_IDS)

# build network, agnostic to input size
params = {
    'dim': None,
    'batch_size': 1,
    'n_channels': 1,
    'downscale': DOWNSCALE,
    'shuffle': False
}
model = predict_model(params, None)

# load model with a weight file:
modelname_mehdi_Y = 'mehdi_Y.2800-0.00084.hdf5'  # arch 2
modelname_mehdi_Y_div2k = 'mehdi_Y.40-0.00075.hdf5'  # arch 2
modelname_mehdi_Y_small = 'mehdi_small_Y.2800-0.00115.hdf5'  # arch 1

model.load_weights(osp.join('weights', modelname_mehdi_Y_div2k))

# Evaluate the PSNR for each image of the test set
# Set VISUALIZE = True to register the images in 'results' directory
PSNR_bicubic = []
PSNR_pred = []
for id in test_ids:
    try:
        imgHR = load_HR_img(id, folder=IMG_HR_DIR, ext='png')
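The loop above compares bicubic upscaling against the network's prediction via PSNR. For reference, here is a minimal PSNR helper; this is an illustrative sketch assuming images scaled to [0, 1], not the project's own implementation:

import numpy as np

def psnr(img_ref, img_test, max_val=1.0):
    """Peak signal-to-noise ratio in dB (illustrative helper only)."""
    mse = np.mean((np.asarray(img_ref, dtype=np.float64) -
                   np.asarray(img_test, dtype=np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10(max_val ** 2 / mse)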
Example #10
def train():

    clf = request.form['train']
    if allowed_classifier(clf):
        string = 'train'
        hist_n = string + "hist.jpeg"
        cnmt_n = string + "cnmt.jpeg"
        pkl_hnd = store(app.config['static_path'], app.root_path)

        # Feature extraction
        data = utils.file_parser(
            os.path.join(app.config['upload_path'], "data.txt"))
        features = utils.feature_extractor(data['text'], 5000).todense()
        sh = data.shape

        # Preprocessing features and labels
        data_x = utils.preprocess_features(features, 2500)
        data_y, enc = utils.label_encoder(data['label'], False, None)
        pkl_hnd.dump(enc, 'enc')  # storing encoder

        # Splitting data into training set and validation set
        train_x, train_y, valid_x, valid_y = utils.train_valid(
            data_x, data_y, 0.2)

        #Balancing data with SMOTE
        text, label = utils.balance_data(train_x, train_y)

        # Selecting model and tuning hyperparameters
        tr = model(clf, text[:sh[0], :], label[:sh[0]], valid_x, valid_y)
        comb_mod = tr.model_selection()

        # Fitting model and predicting
        mod = tr.build_model(comb_mod)
        pkl_hnd.dump(mod, 'model')  # storing the model
        pr = predict_model(valid_x)
        pred = pr.predict_model(mod)

        #Training Statistics
        st = stats(pred, valid_y)
        acc, f1 = st.train_stats()

        #Plotting histogram and confusion matrix
        pkl_hnd.plot_hist(data['label'], hist_n)
        n_labels = np.unique(np.asarray(data['label']))
        pkl_hnd.dump(n_labels, 'n_labels')  # storing labels
        cnf_matrix = st.cnf_mtx()
        pkl_hnd.plot_confusion_matrix(
            cnf_matrix,
            n_labels,
            cnmt_n,
            normalize=True,
            title='Confusion matrix',
            cmap=plt.cm.Blues,
        )

        return render_template("train_result.html",
                               accuracy=acc,
                               img_hist=url_for(app.config['static_path'],
                                                filename=hist_n),
                               img_cfmt=url_for(app.config['static_path'],
                                                filename=cnmt_n),
                               f1=f1)
    else:
        flash('Please enter a valid classifier')
        return redirect(url_for('index'))
Example #11
def model_currency(k=10):
    """
    Creates a neural network to model a currency.
    :param k: The degree of cross-validation to be performed.
    :return:
    """

    coin_dict, data = cr.read_csv()

    data = cr.split_data_coins(coin_dict, data)
    coin = select_currency(data)
    data = data[coin]

    model_weights = []
    model_errors = []

    split_data = cr.split_data(data, k)
    split_data = [[[float(e[2]), float(e[5]), float(e[3]), float(e[4])] for e in s] for s in split_data]

    print("Modeling neural networks with k-fold cross-validation")

    for i in range(k):
        model = m.create_model(4, [8, 8, 2])

        raw_data = split_data[:i] + split_data[i+1:]
        training_data = np.array([s[:-1] for s in raw_data])
        m.train_model(model, training_data, np.array([to_expected(s) for s in raw_data]))
        error = m.test_model(model, np.array([split_data[i][:-1]]), np.array([to_expected(split_data[i])]))
        model_weights.append(np.array(m.get_weights(model)))
        model_errors.append(error[0])

    sum_error = sum(1/e for e in model_errors)

    for idx, error in enumerate(model_errors):

        proportion = (1/error)/sum_error
        model_weights[idx] = proportion * model_weights[idx]

    true_weights = sum(model_weights)
    true_model = m.create_model(4, [8, 8, 2])
    m.set_weights(true_model, true_weights)

    while True:
        print("For how long would you like to invest?")
        steps = input("Choice:   ")
        try:
            steps = int(steps)
            assert steps > 0
            break
        except (ValueError, AssertionError):
            print("That was not a valid amount of time.")

    revenue = m.predict_model(true_model, np.array([[split_data[-1][-1]]]), steps)
    error = m.test_model(true_model, np.array([s[:-1] for s in split_data]), np.array([to_expected(s) for s in split_data]))
    multiply = [1, 1]
    for r in revenue:
        multiply[0] *= r[0][0]
        multiply[1] *= r[0][1]
    print("Expected revenue: {}  with error percentage at: {}%".format(multiply, error[0]*100))

    return revenue, error
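The k per-fold models are blended by scaling each fold's weights with the inverse of its test error, so better folds contribute more. A small worked example of that proportion arithmetic (the error values are made up):

# Worked example of the inverse-error weighting used above.
model_errors = [0.5, 0.25]
sum_error = sum(1 / e for e in model_errors)                 # 2 + 4 = 6
proportions = [(1 / e) / sum_error for e in model_errors]    # [1/3, 2/3]
# The fold with half the error contributes twice as much weight to the blend.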