def train_model_cifar10(single_model, is_verify=False):
    ############################################################################
    # FUNCTION DESCRIPTION: main function to train a model on the CIFAR-10 dataset
    ############################################################################
    file = MAIN_FILE
    is_complete_model = check_complete_model(single_model)

    if not is_complete_model:
        single_model = get_latest_model_list(single_model, file)
        model_name = single_model[0]
        cur_model_num = get_current_model_number(model_name)
        model_index = get_new_model_number(cur_model_num)

    print("single_model: ", single_model)
    (x_train, y_train), (x_test, y_test) = load_data_cifar10()
    y_train, y_test = convert_class_vec2matrix(y_train, y_test)

    model = Sequential()
    tmp_single_model = get_topology_only(single_model)
    num_layer = count_model_layer(tmp_single_model)

    model = cnn_model_fn(model,num_layer,single_model)

    # initiate RMSprop optimizer
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                    optimizer=opt,
                    metrics=['accuracy'])

    x_train, x_test = format_data(x_train, x_test)

    if not DATA_AUGMENTATION:
        no_data_augmentation(model,x_train,x_test,y_train,y_test)
    else:
        data_augmentation(model,x_train,x_test,y_train,y_test)

    if not is_verify:
        save_model_keras_cifar10(model,single_model)

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    loss = scores[0]
    accuracy = scores[1]
    print("Model ", single_model)

    if not is_verify:
        save_trained_model_in_csv(file,single_model,scores)
    print('\n')
    clear_session()
    return accuracy
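
# Illustrative usage sketch (not part of the original code): the model name and
# topology tokens below are hypothetical examples of the c_*/m_*/'s' vocabulary
# consumed by cnn_model_fn / cnn_model_fn_2.
#     example_model = ['model_0', 'c_1', 'm_1', 'c_2', 's']
#     accuracy = train_model_cifar10(example_model, is_verify=True)
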
def train_model_svhn(single_model):
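    ############################################################################
    # FUNCTION DESCRIPTION: main function to train a model on the SVHN dataset
    ############################################################################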
    file = MAIN_FILE_SVHN
    is_complete_model = check_complete_model(single_model)

    if not is_complete_model:
        single_model = get_latest_model_list(single_model, file)

    print("single_model: ", single_model)
    x_train, y_train, x_test, y_test = load_data_svhn()

    model = Sequential()
    tmp_single_model = get_topology_only(single_model)
    num_layer = count_model_layer(tmp_single_model)

    model = cnn_model_fn(model, num_layer, single_model)

    # initiate RMSprop optimizer
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    # x_train, x_test = format_data(x_train,x_test)

    if not DATA_AUGMENTATION:
        no_data_augmentation(model, x_train, x_test, y_train, y_test)
    else:
        data_augmentation(model, x_train, x_test, y_train, y_test)

    save_model_keras_svhn(model, single_model)

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    loss = scores[0]
    accuracy = scores[1]
    print("Model ", single_model[:-2])

    save_trained_model_in_csv(file, single_model, scores)
    print('\n')
    clear_session()
    return accuracy
def cnn_model_fn_2(features, labels, mode):
    ############################################################################
    # FUNCTION DESCRIPTION: model_fn that attaches the topology layers listed in GLOBAL_DATA together into a single Estimator model
    ############################################################################

    tmp_single_model = get_topology_only(GLOBAL_DATA)
    num_layer = count_model_layer(tmp_single_model)
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])

    # Map each topology token in GLOBAL_DATA to its layer parameters and stack
    # the corresponding conv/pool layers; the 's' token marks the end of the topology.
    conv_params = {'c_1': c_1, 'c_2': c_2, 'c_3': c_3, 'c_4': c_4,
                   'c_5': c_5, 'c_6': c_6, 'c_7': c_7, 'c_8': c_8,
                   'c_9': c_9, 'c_10': c_10, 'c_11': c_11, 'c_12': c_12}
    pool_params = {'m_1': m_1, 'm_2': m_2, 'm_3': m_3}

    layer = input_layer
    for index in range(1, num_layer):
        token = GLOBAL_DATA[index]
        if token == 's':
            break
        if token in conv_params:
            layer = make_conv2d(layer, conv_params[token])
        elif token in pool_params:
            layer = make_pool2d(layer, pool_params[token])

    # Flatten the final feature map so it can feed the fully connected head.
    shape_array = layer.get_shape()
    pool2_flat = tf.reshape(
        layer, [-1, shape_array[1] * shape_array[2] * shape_array[3]])

    dense = tf.layers.dense(inputs=pool2_flat,
                            units=1024,
                            activation=tf.nn.relu)

    dropout = tf.layers.dropout(inputs=dense,
                                rate=0.5,
                                training=mode == tf.estimator.ModeKeys.TRAIN)

    logits = tf.layers.dense(inputs=dropout, units=10)

    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op)

    eval_metric_ops = {
        "accuracy":
        tf.metrics.accuracy(labels=labels, predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      eval_metric_ops=eval_metric_ops)
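
# Illustrative sketch (not part of the original code): how a model_fn such as
# cnn_model_fn_2 is typically wired into a TF 1.x Estimator. The function name,
# batch size, and step count below are hypothetical.
def run_estimator_sketch(train_images, train_labels):
    # Build an Estimator around the custom model_fn defined above.
    classifier = tf.estimator.Estimator(model_fn=cnn_model_fn_2)
    # numpy_input_fn feeds the "x" feature that cnn_model_fn_2 reshapes to 28x28x1;
    # labels are expected as integer class ids for sparse_softmax_cross_entropy.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_images},
        y=train_labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=1000)
    return classifier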