Example #1
def main():
    tf.set_random_seed(10)
    with tf.Session() as sess:
        rnn_cell = tf.nn.rnn_cell.LSTMCell(10)

        # defining initial state
        initial_state = rnn_cell.zero_state(4, dtype=tf.float32)

        inputs = tf.Variable(tf.random_uniform(shape=(4, 30, 100)), name='input')
        inputs = tf.identity(inputs, "input_node")

        # 'state' is a tensor of shape [batch_size, cell_state_size]
        outputs, state = tf.nn.dynamic_rnn(rnn_cell, inputs, initial_state=initial_state, dtype=tf.float32)

        y1 = tf.identity(outputs, 'outputs')
        y2 = tf.identity(state, 'state')

        t1 = tf.ones([4, 30, 10])
        t2 = tf.ones([4, 10])

        loss = tf.reduce_sum((y1 - t1) * (y1 - t1)) + tf.reduce_sum((y2 - t2) * (y2 - t2))
        tf.identity(loss, name="lstm_loss")
        # tf.summary.FileWriter('/tmp/log', tf.get_default_graph())

        net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
        run_model(net_outputs, argv[1], None, argv[3] == 'True')
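For reference, once variables are initialized, the tensors named above are reachable by their TF 1.x '<name>:0' names. A minimal sketch (assumption: run inside the same Session block as the graph above):

    sess.run(tf.global_variables_initializer())
    graph = tf.get_default_graph()
    outputs_val, loss_val = sess.run([
        graph.get_tensor_by_name('outputs:0'),
        graph.get_tensor_by_name('lstm_loss:0'),
    ])
    print(outputs_val.shape)  # (4, 30, 10): [batch, time, hidden_size]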
Example #2
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python temporal_convolution.py
    """
    tf.set_random_seed(1024)
    input_width = 32
    input_channel = 3
    inputs = tf.Variable(tf.random_uniform((1, input_width, input_channel)),
                         name='input')
    inputs = tf.identity(inputs, "input_node")
    filter_width = 4
    output_channels = 6
    filters = tf.Variable(
        tf.random_uniform((filter_width, input_channel, output_channels)))
    conv_out = tf.nn.conv1d(inputs, filters, stride=1, padding="VALID")
    bias = tf.Variable(tf.zeros([output_channels]))

    output = tf.nn.tanh(tf.nn.bias_add(conv_out, bias), name="output")

    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split(','))
    run_model(net_outputs, argv[1], backward=(argv[3] == 'True'))
Example #3
File: decoder.py Project: ru003ar/BigDL
def main():

    tf.set_random_seed(1)
    n_steps = 2
    n_input = 10
    n_hidden = 10

    xs = tf.Variable(tf.random_uniform([4, n_steps, n_input]) + 10, name='input', dtype=tf.float32)
    xs = tf.identity(xs, name="input_node")
    x = tf.unstack(xs, n_steps, 1)

    cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
    init_state = cell.zero_state(4, tf.float32)

    outputs = []
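    # Decoder-style unroll (note): step 0 is seeded with the last input
    # step x[-1]; each later step feeds the previous output back in.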
    for i in range(n_steps):
        if i == 0:
            output, state = cell(x[-1], init_state)
        else:
            output, state = cell(output, state)
        outputs.append(output)

    final = tf.identity(outputs, name="output")

    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
    run_model(net_outputs, argv[1], 'rnn', argv[3] == 'True')
Example #4
def main():
    tf.set_random_seed(10)
    with tf.Session() as sess:
        inputs = tf.Variable(tf.random_uniform((20, 30, 32)), name='input')
        inputs = tf.identity(inputs, "input_node")

        input1, input2, input3, input4 = tf.split(inputs, 4, 0)
        # scatter and gather
        tensor_array = tf.TensorArray(tf.float32, 128)
        tensor_array = tensor_array.scatter([1, 2, 5, 4, 3], input1)
        tensor_array.gather([1, 2, 5, 4, 3], name='scatter_and_gather')

        # split and concat
        tensor_array = tf.TensorArray(tf.float32, 2)
        tensor_array = tensor_array.split(input2, [2, 3])
        tf.identity(tensor_array.concat(), name='split_and_concat')

        # write and read
        tensor_array = tf.TensorArray(tf.float32, 5)
        tensor_array = tensor_array.identity()
        tensor_array = tensor_array.write(1, input3)
        tf.cast(tensor_array.size(), tf.float32, name='size1')
        tensor_array.read(1, name='write_and_read')
        tf.cast(tensor_array.size(), tf.float32, name='size2')

        # unstack and stack
        tensor_array = tf.TensorArray(tf.float32, 5)
        tensor_array = tensor_array.unstack(input4)
        tf.identity(tensor_array.stack(), name='unstack_and_stack')

        net_outputs = map(
            lambda x: tf.get_default_graph().get_tensor_by_name(x),
            argv[2].split(','))
        run_model(net_outputs, argv[1], None, argv[3] == 'True')
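A note on the scatter/gather pair above: TensorArray.scatter(indices, value) writes value[i] into slot indices[i], so gathering with the same index list reads the rows back in the scattered order. A minimal sketch (assumption: shapes chosen to match input1 above):

    ta = tf.TensorArray(tf.float32, size=128)
    value = tf.random_uniform((5, 30, 32))  # same shape as input1
    ta = ta.scatter([1, 2, 5, 4, 3], value)
    roundtrip = ta.gather([1, 2, 5, 4, 3])  # evaluates equal to value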
Example #5
def main():
    """
    Run these commands to generate the pb file
    1. mkdir model
    2. python rnn_lstm.py
    """
    tf.set_random_seed(1)
    n_steps = 2
    n_input = 10
    n_hidden = 20
    n_output = 5
    # xs = tf.placeholder(tf.float32, [None, n_steps, n_input])
    xs = tf.Variable(tf.random_uniform([4, n_steps, n_input]) + 10,
                     name='input',
                     dtype=tf.float32)
    xs = tf.identity(xs, "input_node")
    weight = tf.Variable(tf.random_uniform([n_hidden, n_output]) + 10,
                         name="weight",
                         dtype=tf.float32)
    bias = tf.Variable(tf.random_uniform([n_output]) + 10,
                       name="bias",
                       dtype=tf.float32)

    x = tf.unstack(xs, n_steps, 1)

    cell = rnn.BasicLSTMCell(n_hidden)

    output, states = rnn.static_rnn(cell, x, dtype=tf.float32)

    final = tf.nn.bias_add(tf.matmul(output[-1], weight), bias, name='output')

    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split(','))
    run_model(net_outputs, argv[1], 'rnn', argv[3] == 'True')
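For context, rnn.static_rnn consumes a Python list with one [batch, n_input] tensor per time step, which is exactly what tf.unstack produces from the 3-D input. A minimal sketch of that shape contract:

    # Unstacking [4, 2, 10] along axis 1 yields n_steps tensors of [4, 10].
    steps = tf.unstack(tf.zeros([4, 2, 10]), num=2, axis=1)
    assert len(steps) == 2
    assert steps[0].shape.as_list() == [4, 10]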
Example #6
def main():

    inputs = tf.Variable(tf.reshape(tf.range(0.0, 16), [1, 1, 4, 4]), name='input')
    inputs = tf.identity(inputs, "input_node")
    output = tf.layers.batch_normalization(inputs, axis=1, training=True)

    named_output = tf.nn.relu(output, name="output")

    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
    run_model(net_outputs, argv[1], 'batchNorm', argv[3] == 'True')
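One hedged caveat: with training=True, tf.layers.batch_normalization only updates its moving statistics if the ops it places in tf.GraphKeys.UPDATE_OPS actually run. A minimal sketch of the standard TF 1.x pattern (the loss here is illustrative, not from the source):

    loss = tf.reduce_mean(tf.square(named_output))  # illustrative loss (assumption)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)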
Example #7
def main():

    inputs = tf.Variable(tf.reshape(tf.range(0.0, 4.0), [4, 1]), name='input')
    inputs = tf.identity(inputs, "input_node")

    output = tf.concat([inputs, inputs], axis=0)

    named_output = tf.nn.relu(output, name="output")

    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
    run_model(net_outputs, argv[1], 'two_edge', argv[3] == 'True')
Example #8
File: vgg19.py Project: vaquarkhan/BigDL
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python vgg19.py
    """
    height, width = 224, 224
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    net, end_points = vgg.vgg_19(inputs, is_training=False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split())
    run_model(net_outputs, argv[1])
Example #9
File: vgga.py Project: ru003ar/BigDL
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python vgga.py
    """
    height, width = 224, 224
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    inputs = tf.identity(inputs, "input_node")
    net, end_points = vgg.vgg_a(inputs, is_training=False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
    run_model(net_outputs, argv[1], 'vgg_a', argv[3] == 'True')
Example #10
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    height, width = 32, 32
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)),
                         name='input')
    inputs = tf.identity(inputs, "input_node")
    net, end_points = lenet.lenet(inputs)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split(','))
    run_model(net_outputs, argv[1], 'LeNet', argv[3] == 'True')
Example #11
def main():

    inputs = tf.Variable(tf.reshape(tf.range(0.0, 4.0), [4, 1]), name='input')
    inputs = tf.identity(inputs, "input_node")
    W1 = tf.Variable(tf.zeros([1, 10]) + 0.2)
    b1 = tf.Variable(tf.zeros([10]) + 0.1)
    out1 = tf.nn.bias_add(tf.matmul(inputs, W1), b1)

    W2 = tf.Variable(tf.zeros([1, 10]) + 0.2)
    b2 = tf.Variable(tf.zeros([10]) + 0.1)
    out2 = tf.nn.bias_add(tf.matmul(inputs, W2), b2)

    with tf.control_dependencies([inputs]):
        output = tf.add_n([out1, out2])

    named_output = tf.nn.relu(output, name="output")

    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
    run_model(net_outputs, argv[1], 'control_dep', argv[3] == 'True')
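The tf.control_dependencies([inputs]) block above only orders execution; it does not change any values. A minimal sketch of the ordering effect with a stateful op:

    counter = tf.Variable(0)
    bump = tf.assign_add(counter, 1)
    with tf.control_dependencies([bump]):
        read_after = counter.read_value()  # reads only after the increment runs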
Example #12
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    tf.set_random_seed(1)
    height, width = 299, 299
    inputs = tf.Variable(tf.random_uniform((2, height, width, 3)),
                         name='input')
    inputs = tf.identity(inputs, "input_node")
    net, end_points = inception_resnet_v2.inception_resnet_v2(
        inputs, is_training=False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split(','))
    run_model(net_outputs, argv[1], 'InceptionResnetV2', argv[3] == 'True')
Example #13
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    height, width = 224, 224
    batchSize = 1
    if len(argv) == 5:
        batchSize = int(argv[4])
    inputs = tf.Variable(tf.random_uniform((batchSize, 3, height, width)),
                         name='input')
    net, end_points = alexnet_v1(inputs,
                                 is_training=False,
                                 spatial_squeeze=False)

    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split(','))
    run_model(net_outputs, argv[1], 'alexnet', argv[3] == 'True')
Example #14
    def eval_model(self, Manager, text_file, dirname, train_load, val_load,
                   test_load, train_data, val_data, test_data, training):

        options = Manager.options
        train_args = Manager.train_args
        """Preparing for training"""
        best_val_loss = np.inf
        best_val_auc = 0

        if training == 'testing':
            train_args['NumEpochs'] = 1
        else:
            text_file = open(text_file, "w")

        for t in range(Manager.train_args['NumEpochs']):
            tini = time.time()

            train_acc, train_loss, train_auc, _, train_out, train_collect = run_model(
                t,
                Manager=Manager,
                data_load=train_load,
                train_data=train_data,
                phase=training,
                use_gpu=train_args['use_gpu'])

            if options['use_val']:
                val_acc, val_loss, val_auc, _, val_out, val_collect = run_model(
                    t,
                    Manager=Manager,
                    data_load=val_load,
                    train_data=val_data,
                    phase='val',
                    use_gpu=train_args['use_gpu'])
            else:
                val_acc = 0
                val_auc = 0
                val_loss = 0
                # Avoid a NameError in the return below when validation is disabled.
                val_out, val_collect = None, None

            test_acc, test_loss, test_auc, _, test_out, test_collect = run_model(
                t,
                Manager=Manager,
                data_load=test_load,
                train_data=test_data,
                phase='test',
                use_gpu=train_args['use_gpu'])

            if t % 10 == 0:
                print_cams(test_collect, t, 2)

            Manager.scheduler.step()
            """Save Model"""
            if (val_loss <= best_val_loss) and training == 'training':
                best_val_loss = val_loss
                if Manager.train_args['RunPar']:
                    torch.save(Manager.net.module.state_dict(),
                               os.path.join(dirname, 'PRE0'))
                else:
                    torch.save(Manager.net.state_dict(),
                               os.path.join(dirname, 'PRE0'))

                np.save(os.path.join(dirname, 'train_features'),
                        train_collect['features'])
                np.save(os.path.join(dirname, 'val_features'),
                        val_collect['features'])
                np.save(os.path.join(dirname, 'test_features'),
                        test_collect['features'])

                np.save(os.path.join(dirname, 'train_out'),
                        train_collect['out'])
                np.save(os.path.join(dirname, 'val_out'), val_collect['out'])
                np.save(os.path.join(dirname, 'test_out'), test_collect['out'])

                np.save(os.path.join(dirname, 'train_labels'),
                        train_collect['labels'])
                np.save(os.path.join(dirname, 'val_labels'),
                        val_collect['labels'])
                np.save(os.path.join(dirname, 'test_labels'),
                        test_collect['labels'])

            if (val_auc >= best_val_auc) and training == 'training':
                best_val_auc = val_auc
                if Manager.train_args['RunPar']:
                    torch.save(Manager.net.module.state_dict(),
                               os.path.join(dirname, 'PRE1'))
                else:
                    torch.save(Manager.net.state_dict(),
                               os.path.join(dirname, 'PRE1'))
            """Print Stats"""
            print(
                '{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.1f}'
                .format(t, train_acc, val_acc, test_acc, train_loss, val_loss,
                        test_loss, val_auc, test_auc,
                        time.time() - tini))

            if training == 'training':
                text_file.write(
                    '{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.1f} \n'
                    .format(t, train_acc, val_acc, test_acc, train_loss,
                            val_loss, test_loss, val_auc, test_auc,
                            time.time() - tini))
                text_file.flush()

        return train_out, val_out, test_out
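As a usage note (not in the source): the best-validation-loss weights saved as 'PRE0' can be reloaded later. A minimal sketch, assuming Manager.net is built with the same architecture (for the RunPar case, load into Manager.net.module instead):

    state_dict = torch.load(os.path.join(dirname, 'PRE0'))
    Manager.net.load_state_dict(state_dict)
    Manager.net.eval()  # switch to inference mode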
Example #15
import random

from fastFM.als import FMRegression
from joblib import Parallel, delayed
from sklearn.pipeline import make_union

from util import create_basic_feature_extractor, create_BoW_feature_extractor, run_model


def fm_predict(X_train, y_train, X_test):
    fm = FMRegression(init_stdev=0.0001, rank=128, l2_reg_w=20, l2_reg_V=400,
                      n_iter=7, random_state=random.randint(0, 1000))
    fm.fit(X_train, y_train)
    return fm.predict(X_test)


def run_fm(train, test, y_train, num_models):
    feature_extractor = make_union(create_basic_feature_extractor(), create_BoW_feature_extractor())
    X_train = feature_extractor.fit_transform(train)
    X_test = feature_extractor.transform(test)
    return Parallel(n_jobs=-1, max_nbytes=None)(
        delayed(fm_predict)(X_train, y_train, X_test) for _ in range(num_models)
    )


if __name__ == '__main__':
    run_model(model_runner=run_fm, model_name='FM')
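run_fm returns num_models prediction arrays from identically configured models with different random seeds. A hedged sketch of combining them by simple averaging (assumption: train, test, and y_train come from the surrounding pipeline, which is not shown here):

    import numpy as np

    preds = run_fm(train, test, y_train, num_models=4)
    final_pred = np.mean(preds, axis=0)  # ensemble average (assumption)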

Example #16

def rnn_predict(keras_input_train, y_train, keras_input_test):
    model = build_rnn_model(
        basic_input_dim=keras_input_train['basic'].shape[1],
        vocabulary_size_name=keras_input_train['name'].max() + 1,
        vocabulary_size_desc=keras_input_train['item_description'].max() + 1)
    model.fit(keras_input_train,
              y_train,
              batch_size=1024,
              epochs=3,
              verbose=True)
    pred = model.predict(keras_input_test, batch_size=1024).ravel()

    del model
    gc.collect()
    K.clear_session()
    return pred


def run_rnn(train, test, y_train, num_models):
    keras_input_train, keras_input_test = create_keras_input(train, test)
    return [
        rnn_predict(keras_input_train, y_train, keras_input_test)
        for _ in range(num_models)
    ]


if __name__ == '__main__':
    run_model(model_runner=run_rnn, model_name='RNN')
Example #17
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_union

from util import create_basic_feature_extractor, create_BoW_feature_extractor, run_model


def lr_predict(X_train, y_train, X_test):
    ridge = Ridge(alpha=15.0, solver='sag', max_iter=100)
    ridge.fit(X_train, y_train)
    return ridge.predict(X_test)


def run_lr(train, test, y_train, num_models):
    feature_extractor = make_union(create_basic_feature_extractor(),
                                   create_BoW_feature_extractor())
    X_train = feature_extractor.fit_transform(train)
    X_test = feature_extractor.transform(test)
    return [lr_predict(X_train, y_train, X_test) for _ in range(num_models)]


if __name__ == '__main__':
    run_model(model_runner=run_lr, model_name='LR')
Example #18
def build_mlp_model(input_dim):
    # Reconstructed opening (assumption): the top of this function was
    # truncated in the source snippet.
    model_input = keras.Input(shape=(input_dim,))
    x = model_input
    x = Dense(64, activation='relu')(x)
    output = Dense(1)(x)

    model = keras.Model(model_input, output)
    model.compile(loss='mean_squared_error',
                  optimizer=keras.optimizers.Adam(lr=3e-3))
    return model


def mlp_predict(X_train, y_train, X_test):
    model = build_mlp_model(input_dim=X_train.shape[1])
    model.fit(X_train, y_train, batch_size=2048, epochs=3, verbose=True)
    pred = model.predict(X_test, batch_size=2048).ravel()

    del model
    gc.collect()
    K.clear_session()
    return pred


def run_mlp(train, test, y_train, num_models):
    feature_extractor = make_union(create_basic_feature_extractor(),
                                   create_BoW_feature_extractor())
    X_train = feature_extractor.fit_transform(train)
    X_test = feature_extractor.transform(test)
    return [mlp_predict(X_train, y_train, X_test) for _ in range(num_models)]


if __name__ == '__main__':
    run_model(model_runner=run_mlp, model_name='MLP')
Example #19
def cnn_predict(keras_input_train, y_train, keras_input_test):
    model = build_cnn_model(
        basic_input_dim=keras_input_train['basic'].shape[1],
        vocabulary_size_name=keras_input_train['name'].max() + 1,
        vocabulary_size_desc=keras_input_train['item_description'].max() + 1,
    )
    model.fit(keras_input_train,
              y_train,
              batch_size=2048,
              epochs=3,
              verbose=True)
    pred = model.predict(keras_input_test, batch_size=2048).ravel()

    del model
    gc.collect()
    K.clear_session()
    return pred


def run_cnn(train, test, y_train, num_models):
    keras_input_train, keras_input_test = create_keras_input(train, test)
    return [
        cnn_predict(keras_input_train, y_train, keras_input_test)
        for _ in range(num_models)
    ]


if __name__ == '__main__':
    run_model(model_runner=run_cnn, model_name='CNN')
Example #20
print('\nThis is the model training part')
while True:
    print(
        'Please input the NUMBER of one of these model options; to stop running models, input #\n'
    )
    print('1. SVM')
    print('2. Decision Tree')
    print('3. Logistic Regression')
    print('4. Xgboost')
    print('5. Random Forest')
    print('6. CNN')

    model_number = input()
    if model_number != '#':
        if model_number == '1':
            run_model('SVM', review, X, y)

        elif model_number == '2':
            run_model('DT', review, X, y)

        elif model_number == '3':
            run_model('LR', review, X, y)

        elif model_number == '4':
            print('this takes several hours')
            run_model('Xgboost', review, X, y)

        elif model_number == '5':
            run_model('RandomForest', review, X, y)

        elif model_number == '6':
            run_model('CNN', review, X, y)  # completed from the menu pattern above (assumption)
    else:
        break  # '#' entered: stop running models (assumption)