Example 1
def load(ckpt_name, epoch):

    ckpt_name = "resnet-sgd-lr0.1-momentum0.9-wdecay0.0005-run0-resetFalse"
    path = get_probe_path(ckpt_name, epoch)
    probe = torch.load(path, map_location=torch.device(device))

    # minimal stand-in for the argparse namespace that main.build_dataset and main.build_model expect
    class Args(object):
        def __init__(self):
            self.batchsize = 128
            self.model = ckpt_name[:ckpt_name.index("-")]

    args = Args()

    train_loader, test_loader = main.build_dataset(args)

    try:
        net = main.build_model(args, device, ckpt=probe)
    except Exception:
        # patch for checkpoints whose state-dict keys carry a leading "module." prefix
        # (e.g. saved from nn.DataParallel or a different pytorch version)
        for key in list(probe['net'].keys()):
            if key.startswith("module."):
                probe['net'][key[len("module."):]] = probe['net'].pop(key)
        net = main.build_model(args, device, ckpt=probe)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(
        net.parameters(), lr=0.1)  # lr placeholder, overwritten by state dict
    optimizer.load_state_dict(probe['optimizer'])

    return train_loader, test_loader, net, criterion, optimizer
Example 2
def test_model_build_output_dimension():
    input_dim = (3, )
    output_dim = 2
    mock_env = Mock(observation_space=Mock(shape=input_dim),
                    action_space=Mock(n=output_dim))
    model = main.build_model(mock_env)
    assert model.output_shape == (None, output_dim)
Example 3
def train_model():
    train = pd.read_csv('./data/train.csv')
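    # separate the target column before cleaning and one-hot encoding the features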
    y = train.pop('Survived')
    train = main.clean_data(train)
    train = main.one_hot_encode(train)
    model = main.build_model()
    model.fit(train, y)
    return model
Example 4
def load_checkpoint(filepath):
    checkpoint = torch.load(filepath)
    model = main.build_model(checkpoint['arch'], checkpoint['hidden_size'])

    criterion = checkpoint['criterion']  # restored but not returned by this helper
    optimizer = checkpoint['optimizer']
    model.load_state_dict(checkpoint['state.dict'])

    return model, checkpoint['model.class_to_idx']
Example 5
def _example_run(args):
    """Command to run example for ECT-584. First runs the extract on the raw emails to produce a raw dataset.
    This dataset is then cleaned and the training data is staged to produce a pre_processed dataset test dataset
    with predicted event values appended. This is then processed to create a test and training arff dataset.
    The training dataset is used to create a classification model using weka's J48 implementation of the C4.5 
    classification algorithm. Once the model has been created the test dataset has events predicated and these 
    predicated values are appended to the test dataset and saved to an excel file. Then this file is loaded and
    compared against the validation dataset to access the accuracy of the model.
    """
    #calls main.py run_extract with relative file paths
    #run_extract(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'raw')),
    #            os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')), 
    #            'extract', 'text', False)
    #calls main.py run_clean with relative file paths
    run_clean(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
              os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
              'extract', 'text')
    #calls main.py stage_train with relative file paths
    stage_train(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
                'extract_cleaned',
                'text',
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'train')),
                't_data',
                'excel',
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')))
    #calls main.py run_process with relative file paths
    run_process(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process')), 
                'pre_processed', 
                'text')
    #calls main.py build_model with relative file paths
    build_model(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'train.arff')), 
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'model.model')))        
    #calls main.py classify with relative file paths
    classify(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'test')), 
             os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'model.model')), 
             os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'results', 'output.xls')), 
             'event')
    #calls main.py validate with relative file paths
    validate(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'results')), 
             'output')
Example 6
def predict():
    data = pd.read_csv('/media/aayush/Other/Udacity Data Real/CH2_002/output/filtered_only_center.csv')
    model_name = ['nvidia', 'darknet53', ''][1]
    checkpoint_path = '/media/aayush/Other/Udacity Data Real/CH2_002/output/checkpoints_real/'+model_name+'weights.47-0.00'
    n_train_samples = 22706
    image_paths = data['img'].values
    angles = data['angle'].values
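    # min-max normalize the steering angles to the range [-1, 1]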
    angles = 2 * ((angles - np.min(angles)) / (np.max(angles) - np.min(angles))) - 1
    speeds = data['speed'].values
    X_test = image_paths[n_train_samples:]
    y_test = angles[n_train_samples:]
    num_batches = 66
    batch_size = 86

    img_height = 105
    img_width = 240
    model = main.build_model(batch_size, img_height, img_width)
    model.load_weights(checkpoint_path)
    dframe_list = []

    for batch_no in range(num_batches):

        #preprocessing
        images = [cv2.imread(path) for path in X_test[batch_no*batch_size: (batch_no+1)*batch_size]]
        images = [utils.crop(image, 200, None) for image in images]
        images = [utils.resize(image, img_width, img_height) for image in images]
        images = [cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) for image in images]

        images = np.stack(images, 0)
        gt_angles = y_test[batch_no*batch_size: (batch_no+1)*batch_size]
        speed_batch = speeds[batch_no*batch_size: (batch_no+1)*batch_size]
        pred_angles = model.predict(images, batch_size=batch_size)
        pred_angles = np.squeeze(pred_angles)
        df = pd.DataFrame({'img': X_test[batch_no*batch_size: (batch_no+1)*batch_size], 'gt': gt_angles, 'preda': pred_angles, 'speed': speed_batch})
        dframe_list.append(df)

    dframes = pd.concat(dframe_list, ignore_index=True)

    dframes.to_csv('/media/aayush/Other/Udacity Data Real/CH2_002/output/out_angles.csv')
Example 7
 def test_reinforce_correct(self):
     records = [
         {
             'state':[0,0,0,0],
             'new_state':[0,0,0,0],
             'reward': 0,
             'action': 1,
             'done': False,
         },
     ]*5 + [
         {
             'state':[0,0,0,0],
             'new_state':[0,0,0,1],
             'reward': 1,
             'action': 0,
             'done': False,
         },
     ]* 5
     model = main.build_model(env)
     main.train_model( model, records, env, batch_size=64)
     prediction = main.predict(model,[0,0,0,0])
     assert np.argmax(prediction) == 0, prediction
Example 8
 def test_predict_future_reward(self):
     """When predicting future rewards, we want to see the network give correct directions"""
     good_sequence = [
         ([0,0,0,0],1,[0,0,0,1]),
         ([0,0,0,1],0,[1,0,1,0]),
         ([1,0,1,0],1,[1,1,1,1]),
     ]
     bad_sequence = [
         ([0,0,0,0],0,[1,0,0,1]),
         ([1,0,0,1],1,[0,0,1,0]),
         ([0,0,1,0],1,[0,1,1,1]),
     ]
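     # expand a (state, action, new_state) sequence into replay records that all share the final reward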
     def expand(r, final_reward):
         results = []
         for i,(state,action,new_state) in enumerate(r):
             record = {
                 'state': np.array(state,'f'),
                 'new_state': np.array(new_state,'f'),
                 'action': action,
                 'done': i == len(r) - 1,  # only the final transition in the sequence is terminal
                 'reward': final_reward
             }
             results.append(record)
         assert results[-1]['reward'] == final_reward
         return results 
     records = expand(good_sequence,1.0) + expand(bad_sequence,-1.0)
     print(records)
     records = records * 256
     model = main.build_model(env)
     main.train_model( model, records, env, batch_size=8)
     for (state,action,new_state) in good_sequence:
         prediction = main.predict(model,state)
         assert np.argmax(prediction) == action, (state,action,prediction)
     
     for (state,action,new_state) in bad_sequence:
         prediction = main.predict(model,state)
         assert np.argmax(prediction) != action, (state,action,prediction)
Example 9

def send_control(steering_angle, throttle):
    sio.emit("steer",
             data={
                 'steering_angle': steering_angle.__str__(),
                 'throttle': throttle.__str__()
             },
             skip_sid=True)


if __name__ == '__main__':

    checkpoint_path = '/media/aayush/Other/beta_simulator_linux/checkpoints/weights.11-0.09'
    out_img_path = '/media/aayush/Other/beta_simulator_linux/output_images/'
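    # build the driving model and load the trained checkpoint weights before serving steering commands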
    model = main.build_model(1, 66, 200)
    model.load_weights(checkpoint_path)

    # if args.image_folder != '':
    #     print("Creating image folder at {}".format(args.image_folder))
    #     if not os.path.exists(args.image_folder):
    #         os.makedirs(args.image_folder)
    #     else:
    #         shutil.rmtree(args.image_folder)
    #         os.makedirs(args.image_folder)
    #     print("RECORDING THIS RUN ...")
    # else:
    # print("NOT RECORDING THIS RUN ...")

    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
Example 10
    "/Users/liliangong/workspace_lil/mars_develop/Mathematical-Knowledge-Entity-Recognition"
)
from main import get_named_entities, build_model
import pandas as pd
import numpy as np
from googletrans import Translator
from flask import Flask, request, jsonify
import json
from flask_cors import CORS, cross_origin

app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'

t = Translator()
ckpt_file, _model, saver, sess = build_model()

baike_df = pd.read_csv(
    '/Users/liliangong/workspace_lil/mars_develop/agg_data/baike_w_eng_v2.csv')
baike_df = baike_df[baike_df['eng_def'].notna()]  # keep only rows that have an English definition


def get_text():
    data = request.get_data()
    return json.loads(data.decode("utf-8"))


def get_eng_entities(text):
    lan = t.detect(text).lang  # detect() returns a Detected object; compare its language code
    if lan != 'zh-cn':
        text = t.translate(text, dest='zh-cn').text
Example 11
import torch.nn.functional as F
from torch import nn, optim
from torchvision import datasets, transforms, models
import sub
import main

#define input

args = main.process_inputs()

# load / transform data

dataloader, class_to_idx = sub.load_data(args.data_dir)

#build model

model = main.build_model(args.arch, args.hidden_units)

# set device = cpu/gpu

device = sub.set_device(args.gpu)

# train/validate model

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.classifier.parameters(), lr=args.learnrate)
main.train_model(model, dataloader, args.epochs, args.learnrate, optimizer,
                 criterion, device)

# return final statistics (after printing during process)
# save

Example 12
    data_dir = results.data_dir
    save_dir = results.save_dir
    droppercentage = results.dropout
    arch = results.arch
    LR = results.learning_rate
    hidden_units = results.hidden_units
    epochs = results.epochs
    gpu = results.gpu

    train_set, valid_set, test_set, train_loader, valid_loader, test_loader = load_data(
        data_dir)

    pretrained_model = results.arch
    pretrained_model = getattr(models, pretrained_model)(pretrained=True)

    input_layer = pretrained_model.classifier[0].in_features

    model, hls = build_model(arch, gpu, input_layer, droppercentage)

    model, criterion, optimizer = train_model(model, train_set, valid_set,
                                              train_loader, valid_loader,
                                              epochs, LR, gpu)

    accuracy = accuracy_test(model, test_loader)

    print("Accuracy of your trained model: {:.2f}%".format(accuracy * 100))

    save_checkpoint(model, hls, epochs, droppercentage, train_set, optimizer,
                    criterion, save_dir)
    print('Model has been saved!')
Example 13
def main():
    model_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
    model = '../../models/{}.npz'.format(model_name)
    dev_datasets = [
        '../../data/word_sequence/premise_snli_1.0_dev.txt',
        '../../data/word_sequence/hypothesis_snli_1.0_dev.txt',
        '../../data/word_sequence/label_snli_1.0_dev.txt'
    ]
    test_datasets = [
        '../../data/word_sequence/premise_snli_1.0_test.txt',
        '../../data/word_sequence/hypothesis_snli_1.0_test.txt',
        '../../data/word_sequence/label_snli_1.0_test.txt'
    ]
    dictionary = '../../data/word_sequence/vocab_cased.pkl'

    # load model model_options
    with open('%s.pkl' % model, 'rb') as f:
        options = pkl.load(f)

    print options
    # load dictionary and invert
    with open(dictionary, 'rb') as f:
        word_dict = pkl.load(f)
    word_idict = dict()
    for kk, vv in word_dict.iteritems():
        word_idict[vv] = kk

    dev = TextIterator(dev_datasets[0],
                       dev_datasets[1],
                       dev_datasets[2],
                       dictionary,
                       n_words=options['n_words'],
                       batch_size=options['valid_batch_size'],
                       shuffle=False)

    test = TextIterator(test_datasets[0],
                        test_datasets[1],
                        test_datasets[2],
                        dictionary,
                        n_words=options['n_words'],
                        batch_size=options['valid_batch_size'],
                        shuffle=False)

    # allocate model parameters
    params = init_params(options, word_dict)

    # load model parameters and set theano shared variables
    params = load_params(model, params)
    tparams = init_tparams(params)

    trng, use_noise, \
        x1, x1_mask, char_x1, char_x1_mask, x2, x2_mask, char_x2, char_x2_mask, y, \
        opt_ret, \
        cost, \
        f_pred, f_prods = \
        build_model(tparams, options)

    use_noise.set_value(0.)
    dev_acc = pred_acc(f_pred, prepare_data, options, dev, word_idict)
    test_acc = pred_acc(f_pred, prepare_data, options, test, word_idict)

    print 'dev accuracy', dev_acc
    print 'test accuracy', test_acc

    predict_labels_dev = pred_label(f_prods, prepare_data, options, dev,
                                    word_idict)
    predict_labels_test = pred_label(f_prods, prepare_data, options, test,
                                     word_idict)

    with open('predict_gold_samples_dev.txt', 'w') as fw:
        with open(dev_datasets[0], 'r') as f1:
            with open(dev_datasets[1], 'r') as f2:
                with open(dev_datasets[-1], 'r') as f3:
                    for a, b, c, d in zip(predict_labels_dev, f3, f1, f2):
                        fw.write(
                            str(a) + '\t' + b.rstrip() + '\t' + c.rstrip() +
                            '\t' + d.rstrip() + '\n')

    with open('predict_gold_samples_test.txt', 'w') as fw:
        with open(test_datasets[0], 'r') as f1:
            with open(test_datasets[1], 'r') as f2:
                with open(test_datasets[-1], 'r') as f3:
                    for a, b, c, d in zip(predict_labels_test, f3, f1, f2):
                        fw.write(
                            str(a) + '\t' + b.rstrip() + '\t' + c.rstrip() +
                            '\t' + d.rstrip() + '\n')

    print 'Done'
Example 14
File: gen.py Project: smith6036/nli
def main():
    model_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
    model = "{}.npz".format(model_name)
    datasets = [
        "../../data/word_sequence/premise_snli_1.0_train.txt",
        "../../data/word_sequence/hypothesis_snli_1.0_train.txt",
        "../../data/word_sequence/label_snli_1.0_train.txt",
    ]

    valid_datasets = [
        "../../data/word_sequence/premise_snli_1.0_dev.txt",
        "../../data/word_sequence/hypothesis_snli_1.0_dev.txt",
        "../../data/word_sequence/label_snli_1.0_dev.txt",
    ]

    test_datasets = [
        "../../data/word_sequence/premise_snli_1.0_test.txt",
        "../../data/word_sequence/hypothesis_snli_1.0_test.txt",
        "../../data/word_sequence/label_snli_1.0_test.txt",
    ]
    dictionary = "../../data/word_sequence/vocab_cased.pkl"

    # load model model_options
    with open("%s.pkl" % model, "rb") as f:
        options = pkl.load(f)

    print(options)
    # load dictionary and invert
    with open(dictionary, "rb") as f:
        word_dict = pkl.load(f)

    n_words = options["n_words"]
    valid_batch_size = options["valid_batch_size"]

    valid = TextIterator(
        valid_datasets[0],
        valid_datasets[1],
        valid_datasets[2],
        dictionary,
        n_words=n_words,
        batch_size=valid_batch_size,
        shuffle=False,
    )
    test = TextIterator(
        test_datasets[0],
        test_datasets[1],
        test_datasets[2],
        dictionary,
        n_words=n_words,
        batch_size=valid_batch_size,
        shuffle=False,
    )

    # allocate model parameters
    params = init_params(options, word_dict)

    # load model parameters and set theano shared variables
    params = load_params(model, params)
    tparams = init_tparams(params)

    trng, use_noise, x1, x1_mask, x2, x2_mask, y, opt_ret, cost, f_pred = build_model(
        tparams, options)

    use_noise.set_value(0.0)
    valid_acc = pred_acc(f_pred, prepare_data, options, valid)
    test_acc = pred_acc(f_pred, prepare_data, options, test)

    print("valid accuracy", valid_acc)
    print("test accuracy", test_acc)

    predict_labels_valid = pred_label(f_pred, prepare_data, valid)
    predict_labels_test = pred_label(f_pred, prepare_data, test)

    with open("predict_gold_samples_valid.txt", "w") as fw:
        with open(valid_datasets[0], "r") as f1:
            with open(valid_datasets[1], "r") as f2:
                with open(valid_datasets[-1], "r") as f3:
                    for a, b, c, d in zip(predict_labels_valid, f3, f1, f2):
                        fw.write(
                            str(a) + "\t" + b.rstrip() + "\t" + c.rstrip() +
                            "\t" + d.rstrip() + "\n")

    with open("predict_gold_samples_test.txt", "w") as fw:
        with open(test_datasets[0], "r") as f1:
            with open(test_datasets[1], "r") as f2:
                with open(test_datasets[-1], "r") as f3:
                    for a, b, c, d in zip(predict_labels_test, f3, f1, f2):
                        fw.write(
                            str(a) + "\t" + b.rstrip() + "\t" + c.rstrip() +
                            "\t" + d.rstrip() + "\n")

    print("Done")
Example 15
def main():
    model_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
    model = '../../models/{}.npz'.format(model_name)
    valid_datasets   = ['../../data/sequence_and_features/premise_snli_1.0_dev_token.txt', 
                        '../../data/sequence_and_features/hypothesis_snli_1.0_dev_token.txt',
                        '../../data/sequence_and_features/premise_snli_1.0_dev_lemma.txt', 
                        '../../data/sequence_and_features/hypothesis_snli_1.0_dev_lemma.txt',
                        '../../data/sequence_and_features/label_snli_1.0_dev.txt']
    test_datasets    = ['../../data/sequence_and_features/premise_snli_1.0_test_token.txt', 
                        '../../data/sequence_and_features/hypothesis_snli_1.0_test_token.txt',
                        '../../data/sequence_and_features/premise_snli_1.0_test_lemma.txt', 
                        '../../data/sequence_and_features/hypothesis_snli_1.0_test_lemma.txt',
                        '../../data/sequence_and_features/label_snli_1.0_test.txt']
    dictionary       = ['../../data/sequence_and_features/vocab_cased.pkl',
                        '../../data/sequence_and_features/vocab_cased_lemma.pkl']
    # load model model_options
    with open('%s.pkl' % model, 'rb') as f:
        options = pkl.load(f)

    print options
    # load dictionary and invert
    with open(dictionary[0], 'rb') as f:
        word_dict = pkl.load(f)

    print 'Loading knowledge base ...'
    kb_dicts = options['kb_dicts']
    with open(kb_dicts[0], 'rb') as f:
        kb_dict = pkl.load(f)

    n_words = options['n_words']
    valid_batch_size = options['valid_batch_size']

    valid = TextIterator(valid_datasets[0], valid_datasets[1], valid_datasets[2], valid_datasets[3], valid_datasets[4],
                         dictionary[0], dictionary[1],
                         n_words=n_words,
                         batch_size=valid_batch_size,
                         shuffle=False)
    test = TextIterator(test_datasets[0], test_datasets[1], test_datasets[2], test_datasets[3], test_datasets[4],
                         dictionary[0], dictionary[1],
                         n_words=n_words,
                         batch_size=valid_batch_size,
                         shuffle=False)

    # allocate model parameters
    params = init_params(options, word_dict)

    # load model parameters and set theano shared variables
    params = load_params(model, params)
    tparams = init_tparams(params)

    trng, use_noise, \
        x1, x1_mask, x1_kb, x2, x2_mask, x2_kb, kb_att, y, \
        opt_ret, \
        cost, \
        f_pred, \
        f_probs = \
        build_model(tparams, options)

    use_noise.set_value(0.)
    valid_acc = pred_acc(f_pred, prepare_data, options, valid, kb_dict)
    test_acc = pred_acc(f_pred, prepare_data, options, test, kb_dict)

    print 'valid accuracy', valid_acc
    print 'test accuracy', test_acc

    predict_labels_valid = pred_label(f_pred, prepare_data, options, valid, kb_dict)
    predict_labels_test = pred_label(f_pred, prepare_data, options, test, kb_dict)

    with open('predict_gold_samples_valid.txt', 'w') as fw:
        with open(valid_datasets[0], 'r') as f1:
            with open(valid_datasets[1], 'r') as f2:
                with open(valid_datasets[-1], 'r') as f3:
                    for a, b, c, d in zip(predict_labels_valid, f3, f1, f2):
                        fw.write(str(a) + '\t' + b.rstrip() + '\t' + c.rstrip() + '\t' + d.rstrip() + '\n')

    with open('predict_gold_samples_test.txt', 'w') as fw:
        with open(test_datasets[0], 'r') as f1:
            with open(test_datasets[1], 'r') as f2:
                with open(test_datasets[-1], 'r') as f3:
                    for a, b, c, d in zip(predict_labels_test, f3, f1, f2):
                        fw.write(str(a) + '\t' + b.rstrip() + '\t' + c.rstrip() + '\t' + d.rstrip() + '\n')

    print 'Done'
Example 16
def pre_process(m_image):
    # result = cv2.resize(src=m_image, dsize=None, fx=1, fy=1)
    m_image = m_image / divided_factor
    return m_image


def post_process(m_coordinates):
    return m_coordinates


if __name__ == '__main__':
    m_max = 0
    m_model_name = FLAGS.model_name

    m_model, _ = build_model()

    try:
        if FLAGS.model_name is None:
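            # no model name supplied: pick the file whose number at positions 8-9 of its name (presumably the epoch) is largest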
            for x in os.listdir(FLAGS.model_dir):
                if m_max < int(x[8:10]):
                    m_model_name = x
                    m_max = int(x[8:10])
        # m_model = tf.keras.models.load_model(filepath=os.path.join(FLAGS.model_dir, m_model_name))
        m_model.load_weights(
            filepath=os.path.join(FLAGS.model_dir, m_model_name))
    except tf.errors.NotFoundError:
        print('model file cannot be found')

    roi_save_file = open(os.path.join(FLAGS.roi_save_dir, "figCon_nn.txt"),
                         'w')
Example 17

def draw_path_on(img, speed_ms, angle_steers, color=(0, 0, 255)):
    path_x = np.arange(0., 50.1, 0.5)
    path_y, _ = calc_lookahead_offset(speed_ms, angle_steers, path_x)
    draw_path(img, path_x, path_y, color)


if __name__ == "__main__":
    data = pd.read_csv(
        '/media/aayush/Other/Udacity Data Real/CH2_002/output/out_angles.csv')
    img_height = 105
    img_width = 240
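    # index 1 selects the 'darknet53' backbone from the candidate list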
    model_name = ['nvidia', 'darknet53', ''][1]
    checkpoint_path = '/media/aayush/Other/Udacity Data Real/CH2_002/output/checkpoints_real/' + model_name + '/xxx'
    model = main.build_model(1, img_height, img_width, model_name)
    model.load_weights(checkpoint_path)

    img_paths = data['img']
    gt_angles = data['gt']
    pred_angles = data['preda']
    #pred_angles = 2 * ((pred_angles - np.min(pred_angles)) / (np.max(pred_angles) - np.min(pred_angles))) - 1
    speeds = data['speed']

    for i in range(len(img_paths)):
        image = cv2.imread(img_paths[i])
        image_prepro = utils.crop(image, 200, None)
        image_prepro = utils.resize(image_prepro, img_width, img_height)
        image_prepro = cv2.normalize(image_prepro,
                                     None,
                                     alpha=0,