Example #1
class TrainAndServe(LightningFlow):
    def __init__(self):
        super().__init__()
        self.train_model = TrainModel()
        self.model_server = MLServer(
            name="mnist-svm",
            implementation="mlserver_sklearn.SKLearnModel",
            workers=8,
        )
        self.performance_tester = Locust(num_users=100)

    def run(self):
        self.train_model.run()
        self.model_server.run(self.train_model.best_model_path)
        if self.model_server.alive():
            # The performance tester needs the model server to be up
            # and running before it can start, so the URL is added in the UI.
            self.performance_tester.run(self.model_server.url)

    def configure_layout(self):
        return [
            {
                "name": "Server",
                "content": self.model_server.url + "/docs"
            },
            {
                "name": "Server Testing",
                "content": self.performance_tester
            },
        ]
Example #2
    def __init__(self):
        super().__init__()
        self.train_model = TrainModel()
        self.model_server = MLServer(
            name="mnist-svm",
            implementation="mlserver_sklearn.SKLearnModel",
            workers=8,
        )
        self.performance_tester = Locust(num_users=100)
Example #3
def main():
    X = pd.read_csv('./data/Training_data.csv')

    # observe the data distribution
    print(X[X['target'] == 1].sample(5))
    print(X[X['target'] == 0].sample(5))
    print(X['target'].value_counts())

    # remove anomalous rows in the non-fraud domain
    X = X.drop(X.index[find_anomalies(X)])

    X['col14'] = pd.Series(X['col7'] * X['col6'], index=X.index)

    # X_fraud = X[X['target'] == 1].sample(1000)
    # X_non_fraud = X[X['target'] == 0].sample(1000)
    # X_shuffle = X_fraud.append(X_non_fraud)
    # X_shuffle = X_shuffle.reindex(np.random.permutation(X_shuffle.index))

    model = TrainModel(X, 0.0001)
    model.train()

    # prepare testing data
    X_pred = pd.read_csv('./data/Testing_data.csv')
    X_pred['col14'] = pd.Series(X_pred['col7'] * X_pred['col6'],
                                index=X_pred.index)

    model.predict(X_pred)
Example #4
def train(text: str, artist: str, model_to_train: nn.Module,
          model_config: ModelConfig):
    """
    Train a model and show a loss graph
    """
    tnp = TrainModel(
        text=text,
        model_name=artist,
        model=model_to_train,
        model_config=model_config,
        model_save_folder=MODEL_CACHE,
        run_in_streamlit=True,
    )

    losses, model, vocab_to_int, int_to_vocab = tnp.run_training()

    fig, ax = plt.subplots(1, 1, figsize=[16, 8])
    ax.plot(losses)
    ax.set_title("Loss Vs epoch")
    st.write(fig)

    return model, vocab_to_int, int_to_vocab
Example #5
    def optimization_loop(self, model):
        """Optimize the parameters based on suggestions."""
        for i in range(100):
            self.logger.info('Optimization Loop Count: %d', i)

            # assign suggestions to parameters and hyperparameters
            self.get_suggestions()

            # update model class
            self.model = model()
            self.update_parameters()
            self.model.build_model()

            # update training class
            train = TrainModel(self.model, n_epochs=200, batch_size=128)

            # run the training
            self.acc = train.train_model()
            train.reset_model()

            # report to SigOpt
            self.report_observation()
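
Example #5 follows a generic suggest/train/observe loop. Below is a minimal standalone sketch of that pattern, assuming a hypothetical optimizer object with suggest()/observe() methods and a hypothetical train_and_score helper (this is not the SigOpt API):

def optimization_loop(optimizer, build_model, n_trials=100):
    """Generic suggest/train/observe loop (hypothetical optimizer interface)."""
    best = None
    for i in range(n_trials):
        params = optimizer.suggest()      # ask the service for the next point
        model = build_model(**params)     # rebuild the model with those values
        acc = train_and_score(model)      # hypothetical: train, return accuracy
        optimizer.observe(params, acc)    # report the result back to the service
        if best is None or acc > best[0]:
            best = (acc, params)
    return best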
Example #6
def main():
    # Device
    SEED = 1
    cuda = torch.cuda.is_available()
    print("Cuda is available ?", cuda)
    torch.manual_seed(SEED)
    if cuda:
        torch.cuda.manual_seed(SEED)
    device = torch.device("cuda" if cuda else "cpu")

    # Create Train and Test Loader
    trainloader = Loader.getDataLoader(dataset_name, trainSet_dict,
                                       trainLoad_dict)
    testloader = Loader.getDataLoader(dataset_name, testSet_dict,
                                      testLoad_dict)
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    # Loss Function
    criterion = nn.NLLLoss()

    # Optimizer
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=LR,
                          momentum=MOMENTUM,
                          weight_decay=WEIGHT_DECAY)

    # Start training
    for epoch in range(EPOCHS):
        train_loss, train_acc = TrainModel.train(model, device, trainloader,
                                                 criterion, optimizer, epoch)
        train_losses.append(train_loss)
        train_accuracy.append(train_acc)
        test_loss, test_acc = TestModel.test(model, device, testloader,
                                             criterion)
        test_losses.append(test_loss)
        test_accuracy.append(test_acc)

    # Plot and Save Graph
    getPlottedGraph(EPOCHS,
                    train_losses,
                    train_accuracy,
                    test_losses,
                    test_accuracy,
                    name="cifar_10_plot",
                    PATH=IMAGE_PATH)

    # Save Models
    torch.save(model.state_dict(), MODEL_PATH + "model7.pth")
Example #7
def Tune():
    infos = []
    cfgs = []
    iteration = 10
    cfg_tmp = Cfg()
    training_set = TrainingSet(cfg_tmp.train.data_dir, cfg_tmp.augmentation)
    dataloader = DataLoader(training_set,
                            batch_size=cfg_tmp.train.batch_size,
                            shuffle=True)
    n_classes = len(training_set.mapping)
    for i in range(iteration):
        print('hyper param tuning, iteration {}/{}'.format(i + 1, iteration))
        cfg = Cfg()
        cfg.model.n_classes = n_classes
        cfg = _SetRandomCfg(cfg)
        info = TrainModel(cfg, dataloader)
        infos.append(info)
        cfgs.append(cfg)
    return infos, cfgs
Example #8
def main():
    # Device
    SEED = 1
    cuda = torch.cuda.is_available()
    print("Cuda is available ?", cuda)
    torch.manual_seed(SEED)
    if cuda:
        torch.cuda.manual_seed(SEED)
    device = torch.device("cuda" if cuda else "cpu")

    # Create Train and Test Loader
    trainloader = Loader.getDataLoader(dataset_name, trainSet_dict, trainLoad_dict)
    testloader = Loader.getDataLoader(dataset_name, testSet_dict, testLoad_dict)
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    # Loss Function
    criterion = nn.CrossEntropyLoss()

    # Optimizer
    model = ResNet18().to(device)
    optimizer = optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
    scheduler = StepLR(optimizer, step_size=13, gamma=0.1)

    # Start training
    for epoch in range(EPOCHS):
        train_loss, train_acc = TrainModel.train(model, device, trainloader, criterion, optimizer, epoch)
        # scheduler.step()
        train_losses.append(train_loss)
        train_accuracy.append(train_acc)
        test_loss, test_acc = TestModel.test(model, device, testloader, criterion)
        test_losses.append(test_loss)
        test_accuracy.append(test_acc)

    # Plot and Save Graph
    getPlottedGraph(EPOCHS, train_losses, train_accuracy, test_losses,
                    test_accuracy, name="cifar_10_plot_using_resnet18_v3",
                    PATH=IMAGE_PATH)

    # Save Models
    torch.save(model.state_dict(), MODEL_PATH + "model8_v3.pth")

    # Misclassified images
    ms.show_save_misclassified_images(model, device, testloader, classes,
                                      list(img_mean), list(img_std),
                                      "fig_cifar10_v1", IMAGE_PATH, 25)
Example #9
def main():
    # Create Train Loader and Test Loader
    trainloader = Loader.getDataLoader(dataset_name, trainSet_dict, trainLoad_dict)
    testloader = Loader.getDataLoader(dataset_name, testSet_dict, testLoad_dict)

    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    # Start training
    for epoch in range(EPOCHS):
        # Train
        train_loss, train_acc = TrainModel.train(model, device, trainloader, criterion, optimizer, epoch)
        scheduler.step()
        train_losses.append(train_loss)
        train_accuracy.append(train_acc)
        # Test
        test_loss, test_acc = TestModel.test(model, device, testloader, criterion)
        test_losses.append(test_loss)
        test_accuracy.append(test_acc)

        # Save model
        state = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
                 'optimizer': optimizer.state_dict()}
        torch.save(state, filename)


    # Plot and Save Graph
    getPlottedGraph(EPOCHS, train_losses, train_accuracy, test_losses, test_accuracy, name="S9_plot_final",
                        PATH=MODEL_PATH)
    # Show and Save correct classified images
    show_save_correctly_classified_images(model, testloader, device, IMAGE_PATH, name="correct_classified_imgs",
                                          max_correctly_classified_images_imgs=25, labels_list=classes)
    # Show and Save misclassified images
    show_save_misclassified_images(model, testloader, device, IMAGE_PATH, name="misclassified_imgs",
                                   max_misclassified_imgs=25, labels_list=classes)
    # Visualize Activation Map
    misclassified_imgs, correctly_classified_images = classify_images(model, testloader, device, 5)
    layers_list = ["layer1", "layer2", "layer3", "layer4"]
    display_gradcam = VisualizeCam(model, classes, layers_list)
    correct_pred_imgs = []
    for i in range(len(correctly_classified_images)):
        correct_pred_imgs.append(torch.as_tensor(correctly_classified_images[i]["img"]))
    display_gradcam(torch.stack(correct_pred_imgs), layers_list, PATH="./" + str("visualization"), metric="correct")
Example #10
    def tune_cnn_with_gridsearch():
        """Grid search to identify best hyperparameters for CNN model."""
        cnn_model_values = []
        n_epoch_list = [100, 200, 300, 400, 500]  # 5
        batch_size_list = [16, 32, 64, 128, 256]  # 5
        learning_rate_list = [.0001, .0005, .00001, .00005]  # 4
        dropout_rate_list = [0.2, 0.5, 0.7]  # 3

        try:
            for n_epoch in n_epoch_list:
                for batch_size in batch_size_list:
                    for learning_rate in learning_rate_list:
                        for dropout_rate in dropout_rate_list:
                            for num_filt_1 in [8, 16, 32]:  # CNN ONLY  # 3
                                for num_filt_2 in [10, 20, 30,
                                                   40]:  # CNN ONLY  # 4
                                    for num_fc_1 in [10, 20, 30,
                                                     40]:  # CNN ONLY  # 4
                                        CNN = TrainModel(
                                            CNNModel, n_epoch, batch_size,
                                            learning_rate, dropout_rate)
                                        CNN.model.num_filt_1 = num_filt_1
                                        CNN.model.num_filt_2 = num_filt_2
                                        CNN.model.num_fc_1 = num_fc_1
                                        CNN.model.build_model()
                                        CNN.calculate_helpers()
                                        acc = CNN.train_model()
                                        CNN.reset_model()

                                        results = [
                                            acc, n_epoch, batch_size,
                                            learning_rate, dropout_rate,
                                            num_filt_1, num_filt_2, num_fc_1
                                        ]
                                        cnn_model_values.append(results)
        except Exception as exc:
            logger.warning('grid search stopped early: %s', exc)
        finally:
            if cnn_model_values:
                best_cnn_run = max(cnn_model_values, key=lambda x: x[0])
                logger.info('Best CNN run: {}'.format(best_cnn_run))
                logger.info('All CNN runs: {}'.format(cnn_model_values))
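
The seven nested loops in Example #10 (and the similar ones in Example #11) can be flattened with itertools.product. Below is a minimal sketch, assuming a hypothetical run_one_cnn wrapper around TrainModel as used above:

import logging
from itertools import product

logger = logging.getLogger(__name__)

grid = {
    'n_epoch': [100, 200, 300, 400, 500],
    'batch_size': [16, 32, 64, 128, 256],
    'learning_rate': [1e-4, 5e-4, 1e-5, 5e-5],
    'dropout_rate': [0.2, 0.5, 0.7],
    'num_filt_1': [8, 16, 32],
    'num_filt_2': [10, 20, 30, 40],
    'num_fc_1': [10, 20, 30, 40],
}

results = []
for combo in product(*grid.values()):
    params = dict(zip(grid.keys(), combo))
    try:
        acc = run_one_cnn(**params)  # hypothetical wrapper around TrainModel
    except Exception as exc:
        logger.warning('run failed for %s: %s', params, exc)
        continue
    results.append((acc, params))

if results:
    best_acc, best_params = max(results, key=lambda r: r[0])
    logger.info('Best CNN run: %s with %s', best_acc, best_params)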
Example #11
    def tune_rnn_with_gridsearch():
        """Grid search to identify best hyperparameters for RNN."""
        rnn_model_values = []
        n_epoch_list = [200, 400, 600, 800, 1000]  # 5
        batch_size_list = [16, 32, 64, 128, 256]  # 5
        learning_rate_list = [.001, .005, .0001, .0005]  # 4
        dropout_rate_list = [0.2, 0.5, 0.7]  # 3

        for n_epoch in n_epoch_list:
            for batch_size in batch_size_list:
                for learning_rate in learning_rate_list:
                    for dropout_rate in dropout_rate_list:
                        for n_hidden in [8, 16, 32]:  # RNN ONLY
                            for num_fc_1 in [10, 20, 30, 40]:  # RNN ONLY
                                for n_layers in [1, 2, 3]:  # RNN ONLY
                                    RNN = TrainModel(RNNModel, n_epoch,
                                                     batch_size, learning_rate,
                                                     dropout_rate)
                                    RNN.model.n_hidden = n_hidden
                                    RNN.model.num_fc_1 = num_fc_1
                                    RNN.model.n_layers = n_layers

                                    RNN.model.build_model()
                                    RNN.calculate_helpers()
                                    acc = RNN.train_model()
                                    RNN.reset_model()

                                    rnn_model_values.append([
                                        acc, n_epoch, batch_size,
                                        learning_rate, dropout_rate, n_hidden,
                                        num_fc_1, n_layers
                                    ])

        best_rnn_run = max(rnn_model_values, key=lambda x: x[0])
        logger.info('Best RNN run: {}'.format(best_rnn_run))
        logger.info('All RNN runs: {}'.format(rnn_model_values))
Example #12
            for cnt in range(1 if args.test_model else args.run_time):
                model_index = args.dataset + '_{}_{}_{}_{}'.format(
                    'test' if args.test_model else args.index, args.test_name,
                    this_arg, cnt + 1)
                print('Model index: {}'.format(model_index))
                result_writer = ResultWriter(
                    "results/{}.txt".format(model_index))

                # dynamically set args.<test_name> to the value under test
                exec("%s = %d" % ('args.{}'.format(args.test_name), this_arg))

                if args.remove_old_files:
                    remove_oldfiles(model_index)

                result_writer.write(str(args))

                model_trainer = TrainModel(model_index, args)
                print("\nStrat training DSAN...\n")
                model_trainer.train()

                args.load_saved_data = True
                K.clear_session()

                if args.test_model:
                    remove_oldfiles(model_index)

    else:
        for cnt in range(1 if args.test_model else args.run_time):
            model_index = args.dataset + '_{}_{}'.format(
                'test' if args.test_model else args.index, cnt + 1)
            print('Model index: {}'.format(model_index))
            result_writer = ResultWriter("results/{}.txt".format(model_index))
Example #13
def main():

    # Hyper parameters
    EPOCHS = 2

    # For reproducibility
    SEED = 1
    # Check for CUDA?
    cuda = torch.cuda.is_available()
    print("Cuda is available ?", cuda)
    torch.manual_seed(SEED)
    if cuda:
        torch.cuda.manual_seed(SEED)

    train_dataset, test_dataset, train_loader, test_loader = DataLoaders.dataload()

    device = torch.device("cuda" if cuda else "cpu")

    # Summary
    # summary(model, input_size=(1, 28, 28))

    # Optimizer
    model1 = bn_model().to(device)
    optimizer1 = optim.SGD(model1.parameters(), lr=0.01, momentum=0.9)
    scheduler1 = StepLR(optimizer1, step_size=7, gamma=0.1)

    model2 = gbn_model().to(device)
    optimizer2 = optim.SGD(model2.parameters(), lr=0.01, momentum=0.9)
    scheduler2 = StepLR(optimizer2, step_size=7, gamma=0.1)

    for epoch in range(EPOCHS):
        # With L1
        l1_train_loss, l1_train_acc = TrainModel.train(model1,
                                                       device,
                                                       train_loader,
                                                       optimizer1,
                                                       epoch,
                                                       L1_regularization=reg,
                                                       m_type="L1")
        l1_train_losses.append(l1_train_loss)
        l1_train_accuracy.append(l1_train_acc)
        #scheduler1.step_size = 23
        scheduler1.step()
        l1_test_loss, l1_test_acc = TestModel.test(model1, device, test_loader)
        l1_test_losses.append(l1_test_loss)
        l1_test_accuracy.append(l1_test_acc)

        # With L2
        optimizer1.param_groups[0]['weight_decay'] = 0.0001
        l2_train_loss, l2_train_acc = TrainModel.train(model1,
                                                       device,
                                                       train_loader,
                                                       optimizer1,
                                                       epoch,
                                                       m_type="L2")
        l2_train_losses.append(l2_train_loss)
        l2_train_accuracy.append(l2_train_acc)
        #scheduler1.step_size = 3
        scheduler1.step()
        l2_test_loss, l2_test_acc = TestModel.test(model1, device, test_loader)
        l2_test_losses.append(l2_test_loss)
        l2_test_accuracy.append(l2_test_acc)

        # With L1 and L2
        optimizer1.param_groups[0]['weight_decay'] = 0.0001
        l1_l2_train_loss, l1_l2_train_acc = TrainModel.train(
            model1,
            device,
            train_loader,
            optimizer1,
            epoch,
            L1_regularization=reg,
            m_type="L1&L2")
        l1_l2_train_losses.append(l1_l2_train_loss)
        l1_l2_train_accuracy.append(l1_l2_train_acc)
        # scheduler1.step_size = 19
        scheduler1.step()
        l1_l2_test_loss, l1_l2_test_acc = TestModel.test(
            model1, device, test_loader)
        l1_l2_test_losses.append(l1_l2_test_loss)
        l1_l2_test_accuracy.append(l1_l2_test_acc)

        # With GBN
        gbn_train_loss, gbn_train_acc = TrainModel.train(model2,
                                                         device,
                                                         train_loader,
                                                         optimizer2,
                                                         epoch,
                                                         m_type="GBN")
        gbn_train_losses.append(gbn_train_loss)
        gbn_train_accuracy.append(gbn_train_acc)
        # scheduler2.step_size = 11
        scheduler2.step()
        gbn_test_loss, gbn_test_acc = TestModel.test(model2, device,
                                                     test_loader)
        gbn_test_losses.append(gbn_test_loss)
        gbn_test_accuracy.append(gbn_test_acc)

        # GBN With L2
        optimizer2.param_groups[0]['weight_decay'] = 0.0001
        gbn_l2_train_loss, gbn_l2_train_acc = TrainModel.train(model2,
                                                               device,
                                                               train_loader,
                                                               optimizer2,
                                                               epoch,
                                                               m_type="GBN&L2")
        gbn_l2_train_losses.append(gbn_l2_train_loss)
        gbn_l2_train_accuracy.append(gbn_l2_train_acc)
        # scheduler2.step_size = 6
        scheduler2.step()
        gbn_l2_test_loss, gbn_l2_test_acc = TestModel.test(
            model2, device, test_loader)
        gbn_l2_test_losses.append(gbn_l2_test_loss)
        gbn_l2_test_accuracy.append(gbn_l2_test_acc)

        # GBN With L1 and L2
        optimizer2.param_groups[0]['weight_decay'] = 0.0001
        gbn_l1_l2_train_loss, gbn_l1_l2_train_acc = TrainModel.train(
            model2,
            device,
            train_loader,
            optimizer2,
            epoch,
            L1_regularization=reg,
            m_type="GBN&L1&L2")
        gbn_l1_l2_train_losses.append(gbn_l1_l2_train_loss)
        gbn_l1_l2_train_accuracy.append(gbn_l1_l2_train_acc)
        # scheduler2.step_size = 21
        scheduler2.step()
        gbn_l1_l2_test_loss, gbn_l1_l2_test_acc = TestModel.test(
            model2, device, test_loader)
        gbn_l1_l2_test_losses.append(gbn_l1_l2_test_loss)
        gbn_l1_l2_test_accuracy.append(gbn_l1_l2_test_acc)

    # Save each model under its own file name
    torch.save(model1, MODEL_PATH + "model1.pth")
    torch.save(model2, MODEL_PATH + "model2.pth")

    # Plot and save graph of losses and accuracy
    getPlottedGraph(EPOCHS,
                    l1_train_losses,
                    l1_train_accuracy,
                    l1_test_losses,
                    l1_test_accuracy,
                    l2_train_losses,
                    l2_train_accuracy,
                    l2_test_losses,
                    l2_test_accuracy,
                    l1_l2_train_losses,
                    l1_l2_train_accuracy,
                    l1_l2_test_losses,
                    l1_l2_test_accuracy,
                    gbn_train_losses,
                    gbn_train_accuracy,
                    gbn_test_losses,
                    gbn_test_accuracy,
                    gbn_l2_train_losses,
                    gbn_l2_train_accuracy,
                    gbn_l2_test_losses,
                    gbn_l2_test_accuracy,
                    gbn_l1_l2_train_losses,
                    gbn_l1_l2_train_accuracy,
                    gbn_l1_l2_test_losses,
                    gbn_l1_l2_test_accuracy,
                    name="plot",
                    PATH=IMAGE_PATH)

    # Save misclassified images
    MI.show_save_misclassified_images(model2,
                                      test_loader,
                                      name="fig1",
                                      PATH=IMAGE_PATH,
                                      max_misclassified_imgs=25)
    MI.show_save_misclassified_images(model2,
                                      test_loader,
                                      name="fig2",
                                      PATH=IMAGE_PATH,
                                      max_misclassified_imgs=25)
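
Example #13 saves the whole nn.Module objects, which ties the checkpoint files to the exact class definitions. Below is a minimal sketch of the state_dict pattern the other examples already use (function names and paths here are illustrative):

import torch

def save_checkpoint(model, path):
    # state_dict() contains only the learned tensors, so the file
    # survives refactors of the surrounding class definition
    torch.save(model.state_dict(), path)

def load_checkpoint(model, path, device='cpu'):
    # load into an already-constructed model of the same architecture
    model.load_state_dict(torch.load(path, map_location=device))
    model.eval()  # disable dropout / batch-norm updates for evaluation
    return model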
Example #14
data.make_train_dev_test_random(num_train, num_dev)

# now we actually make the batches
batch_train, batch_train_label = make_format_batch(batch_size,
                                                   data.sequence_ix_train,
                                                   data.label,
                                                   data.sequence_feature)
batch_dev, batch_dev_label = make_format_batch(batch_size,
                                               data.sequence_ix_dev,
                                               data.label,
                                               data.sequence_feature)

# print ('\nsample of batch\n')
# print (batch_train_label[0])
# print (batch_dev_label[0])

# make model
# model = biLSTM_maxpool ( num_of_classes,dropout_rate,True,num_of_word,word_emb_dim,lstm_hidden_dim,batch_size,feature_dim,is_feature)
model = biLSTM_dynamic_maxpool(num_of_classes, dropout_rate, True, num_of_word,
                               word_emb_dim, lstm_hidden_dim, num_segment,
                               batch_size, feature_dim, is_feature)
# model.cuda()
# y = model.forward( batch_train[0]['seq_tensor'], batch_train[0]['seq_lengths'], batch_train[0]['seq_feature'] )
# print (F.softmax(y,dim=1) )

# train model
train_model = TrainModel(model, num_of_classes, num_epoch, save_path,
                         batch_size, True)
train_model.trainNepoch(batch_train, batch_train_label, batch_dev,
                        batch_dev_label)
Example #15
from train import TrainModel
from test import TestModel
from option import opt

if __name__ == "__main__":
    if not opt.test:
        print('--- Train Mode ---')
        Trainer = TrainModel()
        Trainer()
    else:
        print('--- Test Mode ---')
        Tester = TestModel()
        Tester()
Example #16
"""
Created on Wed Feb 7 2018
main file
@author: mengshu
"""
from train import TrainModel
import os
"""
DON'T CHANGE THE CUR_DIR
"""
cur_dir = os.getcwd()

train = TrainModel(cur_dir, suffix='.tif', size=512)
train.run()
Example #17
def train(dataset):
    train_model = TrainModel(dataset)
    train_model.populate_text_model()
Example #18
from train import TrainModel
from test import TestModel
from assess import Assessment

trainer = TrainModel('HAM-Train-Test/HAM-Train.txt', 0.001, 0.998)
tester = TestModel('HAM-Train-Test/HAM-Test.txt')
# trainer = TrainModel('HAM-Train-Test/mytrain.txt')
# tester = TestModel('HAM-Train-Test/mytest.txt')
assessor = Assessment()
assessor.calculateEstimationParameters(
    tester.testModel(trainer.calculateModel()))
Example #19
    action = input('0: to list the dataset categories.\n'
                   '1: to convert image to npy file.\n'
                   '2: to run the training.\n'
                   '3: to test the model.\n'
                   'action: ')
    if (action == '0'):
        print('INFO: Please provide the data path')
        path = input('path to data: ')
        list_categories(path)
    elif (action == '1'):
        print('INFO: Please provide the path to the images and the filename')
        path = input('path to the images: ')
        filename = input('the npy filename: ')
        image_to_npy(filename=filename, path=path, img_size=(64, 64))
    elif (action == '2'):
        print('INFO: Please provide the data path')
        data_path = input('data path: ')
        data = np.load(data_path, allow_pickle=True)
        images = np.array([i[0] for i in data])
        labels = np.array([i[1] for i in data])
        run_training = TrainModel(train_x=images, train_y=labels)
        run_training.train()
    elif (action == '3'):
        print('INFO: Please provide the image to classify and the model path!')
        image_path = input('image path: ')
        model_path = input('modelpath: ')
        run_classification = Test(image_path=image_path, graph_path=model_path)
        category = run_classification.classify()
        print(category)
    else:
        print('ERROR: Wrong choice of action!')
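
The interactive input() menu above can also be driven from the command line. Below is a minimal argparse sketch of the same dispatch, reusing the helpers the snippet already assumes (list_categories, image_to_npy, TrainModel, Test):

import argparse

import numpy as np

def main():
    parser = argparse.ArgumentParser(description='dataset / training utility')
    sub = parser.add_subparsers(dest='action', required=True)
    sub.add_parser('list').add_argument('path')
    conv = sub.add_parser('convert')
    conv.add_argument('path')
    conv.add_argument('filename')
    sub.add_parser('train').add_argument('data_path')
    test = sub.add_parser('test')
    test.add_argument('image_path')
    test.add_argument('model_path')
    args = parser.parse_args()

    if args.action == 'list':
        list_categories(args.path)
    elif args.action == 'convert':
        image_to_npy(filename=args.filename, path=args.path, img_size=(64, 64))
    elif args.action == 'train':
        data = np.load(args.data_path, allow_pickle=True)
        images = np.array([i[0] for i in data])
        labels = np.array([i[1] for i in data])
        TrainModel(train_x=images, train_y=labels).train()
    else:  # test
        run_classification = Test(image_path=args.image_path,
                                  graph_path=args.model_path)
        print(run_classification.classify())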
Example #20
    def run_wrapper_model(self):

        total_iteration = len(self.lstm_parameters_dict['maxlen'])\
                          * len(self.lstm_parameters_dict['batch_size'])\
                          * len(self.lstm_parameters_dict['lstm_hidden_layer'])\
                          * len(self.lstm_parameters_dict['dropout'])

        model_num = 1
        for maxlen in self.lstm_parameters_dict['maxlen']:
            for batch_size in self.lstm_parameters_dict['batch_size']:
                for dropout in self.lstm_parameters_dict['dropout']:
                    for lstm_hidden_layer in self.lstm_parameters_dict[
                            'lstm_hidden_layer']:

                        # run single lstm model with the following configuration
                        try:
                            lstm_parameters_dict = {
                                'max_features': self.lstm_parameters_dict['max_features'],
                                'maxlen': maxlen,
                                'batch_size': batch_size,
                                'embedding_size': self.lstm_parameters_dict['embedding_size'],
                                'lstm_hidden_layer': lstm_hidden_layer,  # TODO change to different values
                                'num_epoch': self.lstm_parameters_dict['num_epoch'],
                                'dropout': dropout,  # 0.2
                                'recurrent_dropout': self.lstm_parameters_dict['recurrent_dropout'],
                                'tensor_board_bool': self.lstm_parameters_dict['tensor_board_bool'],
                                'max_num_words': self.lstm_parameters_dict['max_num_words'],
                                'optimizer': self.lstm_parameters_dict['optimizer'],
                                'patience': self.lstm_parameters_dict['patience'],
                            }

                            logging.info('')
                            logging.info(
                                '**************************************************************'
                            )
                            logging.info('')
                            logging.info('start model number: ' +
                                         str(model_num) + '/' +
                                         str(total_iteration))
                            logging.info('lstm parameters: ' +
                                         str(lstm_parameters_dict))

                            train_obj = TrainModel(
                                self.input_data_file, self.vertical_type,
                                self.output_results_folder,
                                self.tensor_board_dir, lstm_parameters_dict,
                                df_configuration_dict,
                                multi_class_configuration_dict,
                                attention_configuration_dict,
                                self.cv_configuration, self.test_size,
                                self.embedding_pre_trained,
                                self.embedding_type, logging)

                            model_num += 1

                            logging.info('')
                            train_obj.load_clean_csv_results()  # load data set
                            train_obj.df_pre_processing()
                            train_obj.run_experiment()
                        except Exception as e:

                            logging.info(
                                'exception for maxlen: {}, batch_size: {}, '
                                'dropout: {}, lstm_hidden_layer: {}'.format(
                                    maxlen, batch_size, dropout,
                                    lstm_hidden_layer))
                            logging.info('error: {}'.format(e))
                            logging.info('continuing with next configuration')
                            logging.info('')

        return