def train_model_mnist(single_model, is_verify=False):
    ############################################################################
    # FUNCTION DESCRIPTION: main function to train MNIST dataset
    ############################################################################
    file = MAIN_FILE

    global INDEX_MODEL
    is_complete_model = check_complete_model(single_model)

    if not is_complete_model:
        single_model = get_latest_model_list(single_model, file)
        model_name = single_model[0]
        cur_model_num = get_current_model_number(model_name)
        INDEX_MODEL = get_new_model_number(cur_model_num)

    temp_single_model = make_data_global(single_model)

    mnist, train_data, train_labels, eval_data, eval_labels = load_data_mnist()
    mnist_classifier = implement_cnn(is_verify)
    logging_hook = set_up_logging()
    train_the_model(mnist_classifier, train_data, train_labels, logging_hook)
    eval_results = evaluate_model(mnist_classifier, eval_data, eval_labels)

    print(eval_results)

    if not is_verify:
        save_trained_model_in_csv(file, temp_single_model, eval_results)

    print(temp_single_model)
    reset_global_data()
    INDEX_MODEL += 1

    return eval_results['accuracy']
def train(single_model):
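    ############################################################################
    # FUNCTION DESCRIPTION: main function to train MNIST dataset and save the
    # results to fixed_model_dict.csv
    ############################################################################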
    file = MAIN_FILE

    global model_index

    is_complete_model = check_complete_model(single_model)

    if not is_complete_model:
        single_model = get_latest_model_list(single_model, file)
        model_name = single_model[0]
        cur_model_num = get_current_model_number(model_name)
        model_index = get_new_model_number(cur_model_num)

    print("________________ single_model: ", single_model)
    temp_single_model = make_data_global(single_model)

    mnist, train_data, train_labels, eval_data, eval_labels = load_data_mnist()
    mnist_classifier = implement_cnn()
    logging_hook = set_up_logging()
    train_the_model(mnist_classifier, train_data, train_labels, logging_hook)
    eval_results = evaluate_model(mnist_classifier, eval_data, eval_labels)
    print(eval_results)

    file_name = "fixed_model_dict.csv"
    save_to_file(temp_single_model, file_name, eval_results)

    print(temp_single_model)
    reset_global_data()
    model_index += 1

    return eval_results['accuracy']
def train_model_cifar10(single_model, is_verify=False):
    ############################################################################
    # FUNCTION DESCRIPTION: main function to train CIFAR-10 dataset
    ############################################################################
    file = MAIN_FILE
    is_complete_model = check_complete_model(single_model)

    if not is_complete_model:
        single_model = get_latest_model_list(single_model, file)
        model_name = single_model[0]
        cur_model_num = get_current_model_number(model_name)
        model_index = get_new_model_number(cur_model_num)

    print("single_model: ",single_model)
    (x_train, y_train), (x_test, y_test) = load_data_cifar10()
    y_train, y_test = convert_class_vec2matrix(y_train,y_test)

    model = Sequential()
    tmp_single_model = get_topology_only(single_model)
    num_layer = count_model_layer(tmp_single_model)

    model = cnn_model_fn(model,num_layer,single_model)

    # initiate RMSprop optimizer
    opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    x_train, x_test = format_data(x_train, x_test)

    # Train with or without data augmentation, depending on the module-level
    # DATA_AUGMENTATION flag.
    if not DATA_AUGMENTATION:
        no_data_augmentation(model, x_train, x_test, y_train, y_test)
    else:
        data_augmentation(model, x_train, x_test, y_train, y_test)

    if not is_verify:
        save_model_keras_cifar10(model, single_model)

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    loss = scores[0]
    accuracy = scores[1]
    print("Model ", single_model)

    if not is_verify:
        save_trained_model_in_csv(file, single_model, scores)
    print('\n')
    clear_session()
    return accuracy
def train_model_svhn(single_model):
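    ############################################################################
    # FUNCTION DESCRIPTION: main function to train SVHN dataset
    ############################################################################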
    file = MAIN_FILE_SVHN
    is_complete_model = check_complete_model(single_model)

    if not is_complete_model:
        single_model = get_latest_model_list(single_model, file)

    print("single_model: ", single_model)
    # (x_train, y_train), (x_test, y_test) = load_data_cifar10()
    # y_train, y_test = convert_class_vec2matrix(y_train,y_test)
    x_train, y_train, x_test, y_test = load_data_svhn()

    model = Sequential()
    tmp_single_model = get_topology_only(single_model)
    num_layer = count_model_layer(tmp_single_model)

    model = cnn_model_fn(model, num_layer, single_model)

    # initiate RMSprop optimizer
    opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    # x_train, x_test = format_data(x_train,x_test)

    # Use the module-level DATA_AUGMENTATION flag; the lowercase name is the
    # augmentation function itself and is always truthy.
    if not DATA_AUGMENTATION:
        no_data_augmentation(model, x_train, x_test, y_train, y_test)
    else:
        data_augmentation(model, x_train, x_test, y_train, y_test)

    save_model_keras_svhn(model, single_model)

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    loss = scores[0]
    accuracy = scores[1]
    print("Model ", single_model[:-2])
    file_name = MAIN_FILE_SVHN

    save_trained_model_in_csv(file_name, single_model, scores)
    print('\n')
    clear_session()
    return accuracy