Example #1
def train_eval_network(dataset_name, train_gen, validate_gen, test_x, test_y, seq_len, epochs, batch_size,
                       batch_epoch_ratio, initial_weights, size, cnn_arch, learning_rate,
                       optimizer, cnn_train_type, pre_weights, lstm_conf, len_train, len_valid, dropout, classes,
                       patience_es=15, patience_lr=5):
    """the function build, compine fit and evaluate a certain architechtures on a dataset"""
    set_random_seed(2)
    seed(1)
    print("Experiment Runnning with CNN:",str(cnn_arch))
    result = dict(dataset=dataset_name, cnn_train=cnn_train_type,
                  cnn=cnn_arch.__name__, lstm=lstm_conf[0].__name__, epochs=epochs,
                  learning_rate=learning_rate, batch_size=batch_size, dropout=dropout,
                  optimizer=optimizer[0].__name__, initial_weights=initial_weights, seq_len=seq_len)
    print("run experimnt " + str(result))
    model = BuildModel_basic.build(size=size, seq_len=seq_len, learning_rate=learning_rate,
                                   optimizer_class=optimizer, initial_weights=initial_weights,
                                   cnn_class=cnn_arch, pre_weights=pre_weights, lstm_conf=lstm_conf,
                                   cnn_train_type=cnn_train_type, dropout=dropout, classes=classes)

    # the network is trained on data generators and applies the callbacks when the validation loss is not improving:
    # 1. early stopping of training after n iterations
    # 2. reducing the learning rate after k iterations, where k < n
    test_history = TestCallback((test_x, test_y))
    history = model.fit_generator(
        steps_per_epoch=int(float(len_train) / float(batch_size * batch_epoch_ratio)),
        generator=train_gen,
        epochs=epochs,
        validation_data=validate_gen,
        validation_steps=int(float(len_valid) / float(batch_size)),
        callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.001, patience=patience_es),
                   ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=patience_lr, min_lr=1e-8, verbose=1),
                   test_history
                   ]
    )
    history_to_save = history.history
    history_to_save['test accuracy'] = test_history.test_acc
    history_to_save['test loss'] = test_history.test_loss

    model_name = ""
    for k, v in result.items():
        model_name = model_name + "_" + str(k) + "-" + str(v).replace(".", "d")
    model_path = os.path.join(res_path, model_name)
    pd.DataFrame(history_to_save).to_csv(model_path + "_train_results.csv")
    result['validation loss'] = min(history.history['val_loss'])
    result['validation accuracy'] = max(history.history['val_acc'])
    result['last validation loss'] = history.history['val_loss'][-1]
    result['last validation accuracy'] = history.history['val_acc'][-1]

    result['train accuracy'] = max(history.history['acc'])
    result['train loss'] = min(history.history['loss'])
    result['last train accuracy'] = history.history['acc'][-1]
    result['last train loss'] = history.history['loss'][-1]

    result['test accuracy'] = max(test_history.test_acc)
    result['test loss'] = min(test_history.test_loss)
    result['last test accuracy'] = test_history.test_acc[-1]
    result['last test loss'] = test_history.test_loss[-1]

    result['final lr'] = history.history['lr'][-1]
    result['total epochs'] = len(history.history['lr'])
    return result
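
Both examples rely on a TestCallback class that is not shown on this page; it is constructed with (test_x, test_y) and later read through its test_acc and test_loss lists. A minimal sketch of what such a callback might look like, assuming the standalone Keras Callback API and a model compiled with accuracy as a metric (only the class name and the two attribute names are taken from the examples; the rest is an assumption):

from keras.callbacks import Callback

class TestCallback(Callback):
    """Evaluates the model on a held-out test set at the end of every epoch."""

    def __init__(self, test_data):
        super(TestCallback, self).__init__()
        self.test_x, self.test_y = test_data
        self.test_loss = []  # read above as test_history.test_loss
        self.test_acc = []   # read above as test_history.test_acc

    def on_epoch_end(self, epoch, logs=None):
        # assumes the model was compiled with metrics=['accuracy'],
        # so evaluate() returns [loss, accuracy]
        loss, acc = self.model.evaluate(self.test_x, self.test_y, verbose=0)
        self.test_loss.append(loss)
        self.test_acc.append(acc)
        print('Testing loss: {:.4f}, acc: {:.4f}'.format(loss, acc))
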
Example #2
def evaluate(dataset_name,
             train_gen,
             validate_gen,
             test_x,
             test_y,
             seq_len,
             epochs,
             batch_size,
             batch_epoch_ratio,
             initial_weights,
             size,
             cnn_arch,
             learning_rate,
             optimizer,
             cnn_train_type,
             pre_weights,
             lstm_conf,
             len_train,
             len_valid,
             dropout,
             classes,
             patience_es=15,
             patience_lr=5):
    """the function build, compine fit and evaluate a certain architechtures on a dataset"""
    result = dict(dataset=dataset_name,
                  cnn_train=cnn_train_type,
                  cnn=cnn_arch.__name__,
                  lstm=lstm_conf[0].__name__,
                  epochs=epochs,
                  learning_rate=learning_rate,
                  batch_size=batch_size,
                  dropout=dropout,
                  optimizer=optimizer[0].__name__,
                  initial_weights=initial_weights,
                  seq_len=seq_len)
    # print("run experimnt " + str(result))
    model = BuildModel_basic.build(size=size,
                                   seq_len=seq_len,
                                   learning_rate=learning_rate,
                                   optimizer_class=optimizer,
                                   initial_weights=initial_weights,
                                   cnn_class=cnn_arch,
                                   pre_weights=pre_weights,
                                   lstm_conf=lstm_conf,
                                   cnn_train_type=cnn_train_type,
                                   dropout=dropout,
                                   classes=classes)
    res = model.evaluate_generator(validate_gen,
                                   steps=int(
                                       float(len_valid) / float(batch_size)),
                                   max_queue_size=10,
                                   workers=1,
                                   use_multiprocessing=False,
                                   verbose=0)
    print(model.metrics_names)
    print(res)
    def train_eval_network(dataset_name,
                           train_gen,
                           validate_gen,
                           test_x,
                           test_y,
                           seq_len,
                           epochs,
                           batch_size,
                           batch_epoch_ratio,
                           initial_weights,
                           size,
                           cnn_arch,
                           learning_rate,
                           optimizer,
                           cnn_train_type,
                           pre_weights,
                           lstm_conf,
                           len_train,
                           len_valid,
                           dropout,
                           classes,
                           patience_es=15,
                           patience_lr=5):
        """the function build, compine fit and evaluate a certain architechtures on a dataset"""
        global first_called
        global result
        global model
        global ave_acore
        if first_called:
            print("loading the model")
            #tensorflow.set_random_seed(2)
            tensorflow.random.set_random_seed(2)
            seed(1)
            result = dict(dataset=dataset_name,
                          cnn_train=cnn_train_type,
                          cnn=cnn_arch.__name__,
                          lstm=lstm_conf[0].__name__,
                          epochs=epochs,
                          learning_rate=learning_rate,
                          batch_size=batch_size,
                          dropout=dropout,
                          optimizer=optimizer[0].__name__,
                          initial_weights=initial_weights,
                          seq_len=seq_len)
            print("run experimnt " + str(result))
            model = BuildModel_basic.build(size=size,
                                           seq_len=seq_len,
                                           learning_rate=learning_rate,
                                           optimizer_class=optimizer,
                                           initial_weights=initial_weights,
                                           cnn_class=cnn_arch,
                                           pre_weights=pre_weights,
                                           lstm_conf=lstm_conf,
                                           cnn_train_type=cnn_train_type,
                                           dropout=dropout,
                                           classes=classes)

            # the network is trained on data generators and applies the callbacks when the validation loss is not improving:
            # 1. early stopping of training after n iterations
            # 2. reducing the learning rate after k iterations, where k < n
            test_history = TestCallback((test_x, test_y))
            #model_path = './model_16_.h5'
            #checkpoint = ModelCheckpoint(filepath=model_path,monitor='val_loss',mode='auto' ,save_best_only='True')

            #default_values = dict(epoch=10,\
            #                  learning_rate=0.0004,\
            #                  batch_size=16,\
            #                  optimizer=Adam,\
            #                  initial_weights=0,\
            #                  cnn_class=Xception,\
            #                  pre_weights='Xavier',\
            #                  lstm_conf=(LSTM,dict(units = 256)),\
            #                  cnn_train_type='static'
            #                  )
            #model.compile(optimizer=default_values["optimizer"], loss='categorical_crossentropy')

            h5model_path = './model_sc_2g_transfer.h5'
            model.load_weights(h5model_path)
            first_called = 0
        # print("predicting..")
        history = model.predict_generator(
            validate_gen, int(float(len_valid) / float(batch_size)))

        # print(history)
        ave_acore = np.sum(history) / len(history)
        print(ave_acore)
        res_scores.append(ave_acore)
        # print(model.metrics_names)
        return 1
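        # NOTE: everything below this return is unreachable; it is the original
        # metric-collection code carried over from the stand-alone train_eval_network above.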
        history_to_save = history.history
        history_to_save['test accuracy'] = test_history.test_acc
        history_to_save['test loss'] = test_history.test_loss

        model_name = ""
        for k, v in result.items():
            model_name = model_name + "_" + str(k) + "-" + str(v).replace(
                ".", "d")
        model_path = os.path.join(res_path, model_name)
        pd.DataFrame(history_to_save).to_csv(model_path + "_train_results.csv")
        result['validation loss'] = min(history.history['val_loss'])
        result['validation accuracy'] = max(history.history['val_acc'])
        result['last validation loss'] = history.history['val_loss'][-1]
        result['last validation accuracy'] = history.history['val_acc'][-1]

        result['train accuracy'] = max(history.history['acc'])
        result['train loss'] = min(history.history['loss'])
        result['last train accuracy'] = history.history['acc'][-1]
        result['last train loss'] = history.history['loss'][-1]

        result['test accuracy'] = max(test_history.test_acc)
        result['test loss'] = min(test_history.test_loss)
        result['last test accuracy'] = test_history.test_acc[-1]
        result['last test loss'] = test_history.test_loss[-1]

        result['final lr'] = history.history['lr'][-1]
        result['total epochs'] = len(history.history['lr'])
        return result
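
For context, here is a hypothetical invocation of train_eval_network, pieced together from the commented-out default_values block in example #2 (Adam, Xception, a 256-unit LSTM, batch size 16, learning rate 0.0004, 'Xavier' pre-weights, 'static' CNN training). The data generators, test arrays, set sizes and the remaining arguments are placeholders and are not taken from the original project; the (class, kwargs) shape of the optimizer argument is inferred from how optimizer[0] is indexed in the code:

from keras.layers import LSTM
from keras.optimizers import Adam
from keras.applications import Xception

# train_gen, validate_gen, test_x, test_y, len_train and len_valid are assumed to
# come from the project's data-loading pipeline and are not defined here.
run_result = train_eval_network(
    dataset_name='some_dataset',       # placeholder name
    train_gen=train_gen,               # generator yielding (frame sequences, labels)
    validate_gen=validate_gen,
    test_x=test_x, test_y=test_y,
    seq_len=10,                        # frames per clip (assumption)
    epochs=10,
    batch_size=16,
    batch_epoch_ratio=1,
    initial_weights=0,
    size=(224, 224),                   # input frame size (assumption)
    cnn_arch=Xception,
    learning_rate=0.0004,
    optimizer=(Adam, {}),              # (class, kwargs) pair; optimizer[0].__name__ is logged
    cnn_train_type='static',
    pre_weights='Xavier',
    lstm_conf=(LSTM, dict(units=256)),
    len_train=len_train, len_valid=len_valid,
    dropout=0.5,                       # assumption
    classes=2)                         # assumption
print(run_result)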