def MCC_CM_calculator(validation_labels_linear, predicted_labels_linear):
    """Return an "MCC=<value>" summary line and a ';'-separated confusion-matrix table.

    The table has a header of predicted classes (p_E/p_G/p_L) and one labeled
    row per real class (r_E/r_G/r_L).
    """
    mcc_value = round(multimcc(validation_labels_linear, predicted_labels_linear), 3)
    MCC_line = "MCC=" + str(mcc_value)

    CM = confusion_matrix(validation_labels_linear, predicted_labels_linear)

    # Build the table in a list buffer and join once at the end.
    row_names = ("r_E", "r_G", "r_L")
    pieces = [";p_E;p_G;p_L\n"]
    row_label = ""
    for row_idx in range(len(CM[0])):
        # Labels exist for the first three rows only; any extra row keeps the
        # last label, exactly as the original if/elif chain behaved.
        if row_idx < len(row_names):
            row_label = row_names[row_idx]
        pieces.append(row_label + ";")
        for cell in CM[0][row_idx]:
            pieces.append(str(cell) + ";")
        pieces.append("\n")

    return MCC_line, "".join(pieces)
def MCC_CM_calculator(validation_labels_linear, predicted_labels_linear):
    """Return an "MCC=<value>" line and a ';'-separated confusion-matrix table.

    NOTE(review): duplicate redefinition — identical to the MCC_CM_calculator
    defined just above; at import time this later definition shadows it.
    """
    #Return MCC and confusion matrix

    # Multiclass Matthews correlation coefficient over the integer label vectors.
    MCC = multimcc(validation_labels_linear, predicted_labels_linear)
    MCC = round(MCC, 3)
    MCC_line = "MCC=" + str(MCC)

    # confusion_matrix appears to return the matrix wrapped in a container
    # (note the CM[0] indexing below, and confusion_matrix(...)[0] elsewhere
    # in this file) — confirm against mcc_multiclass.
    CM = confusion_matrix(validation_labels_linear, predicted_labels_linear)

    # Header row: predicted classes (Early / Good / Late per the label strings
    # used elsewhere in this file).
    CM_lines = ";p_E;p_G;p_L\n"

    for i in range(len(CM[0])):
        # Row label = real class; for i >= 3 the previous label would be reused.
        if i == 0:
            l = "r_E"
        elif i == 1:
            l = "r_G"
        elif i == 2:
            l = "r_L"

        CM_lines += l + ";"
        for j in CM[0][i]:
            CM_lines += str(j) + ";"
        CM_lines += "\n"

    return MCC_line, CM_lines
# ---- Code example #3 (scraper separator; "コード例" = "code example") ----
# 0
def MCC_CM_calculator(validation_labels_linear, predicted_labels_linear):
    """Return an "MCC=<value>" line (newline-terminated) and the confusion
    matrix rendered as ';'-terminated cells, one text line per matrix row."""
    mcc_value = round(multimcc(validation_labels_linear, predicted_labels_linear), 3)
    MCC_line = "MCC=" + str(mcc_value) + "\n"

    CM = confusion_matrix(validation_labels_linear, predicted_labels_linear)

    # Accumulate fragments in a list and join once, instead of string +=.
    pieces = []
    for row in CM:
        for cell in row:
            pieces.append(str(cell))
            pieces.append(";")
        pieces.append("\n")

    return MCC_line, "".join(pieces)
# ---- Code example #4 (scraper separator; "コード例" = "code example") ----
# 0
def MCC_CM_calculator(validation_labels_linear, predicted_labels_linear):
    '''Return MCC and confusion matrix'''
    # NOTE(review): duplicate redefinition — identical to the version of
    # MCC_CM_calculator defined earlier in this file; the later one wins.

    #print(len(validation_labels_linear), validation_labels_linear.shape)
    #print(len(predicted_labels_linear), predicted_labels_linear.shape)
    # Multiclass Matthews correlation coefficient, rounded for the summary line.
    MCC = multimcc(validation_labels_linear, predicted_labels_linear)
    MCC = round(MCC, 3)
    MCC_line = "MCC=" + str(MCC) + "\n"

    # Unlike the 3-class variant above, this version iterates CM directly
    # (no CM[0] unwrapping) and emits no row/column headers.
    CM = confusion_matrix(validation_labels_linear, predicted_labels_linear)
    CM_lines = ""

    # One text line per matrix row; each cell followed by ';'.
    for i in range(len(CM)):
        for j in CM[i]:
            CM_lines += str(j) + ";"
        CM_lines += "\n"

    return MCC_line, CM_lines
# ---- Code example #5 (scraper separator; "コード例" = "code example") ----
# 0
    cls_prob = [str(el) for el in predicted_labels_train[i]]
    real_label = np.argmax(hard_train_labels[i])
    line = [hard_train_images[i], str(real_label), ";".join(cls_prob)]
    predicted_labels_linear.append(np.argmax(predicted_labels_train[i]))
    prediction_summary_train.write("\t".join(line) + "\n")
    prediction_summary_train.flush()

# Linearize the one-hot hard-training labels into integer class indices.
train_labels_linear = []

for lbl in hard_train_labels:
    train_labels_linear.append(np.argmax(lbl))

train_labels_linear = np.array(train_labels_linear)
predicted_labels_linear = np.array(predicted_labels_linear)

# Multiclass Matthews correlation coefficient between real and predicted classes;
# appended to the summary file before it is closed.
MCC = multimcc(train_labels_linear, predicted_labels_linear)
print("#MCC Val:", MCC)
prediction_summary_train.write("MCC: " + str(round(MCC, 3)))
prediction_summary_train.close()

if HARD_VALIDATION_MAP is not None:
    print "\n"
    print "#\tPerforming Predict on Validation Hard Labels"
    predicted_features_validation = model.predict(hard_validation)
    np.savetxt(
        OUTDIR + "V_L_So_" + F_TYPE + "_" + FC_MODEL +
        "_hardlabels_bottleneck_validation.txt", predicted_features_validation)

    predicted_labels_validation = top_model.predict(
        predicted_features_validation)
    prediction_summary_validation = open(
# ---- Code example #6 (scraper separator; "コード例" = "code example") ----
# 0
def model(train, train_labels, validation, validation_labels, GPU, NB_EPOCHS,
          VGG_WEIGHTS):
    """Hyperas objective: fine-tune a VGG16 classifier and score it by MCC.

    Returns the hyperopt result dict {'loss': -MCC, 'status': STATUS_OK,
    'model': ...} so that minimizing the loss maximizes the validation
    Matthews correlation coefficient.

    NOTE(review): this is a hyperas TEMPLATE function — the {{choice([...])}}
    markers are substituted by hyperas before the body is compiled, so this
    is not valid plain Python as written. The bare `print ...` statements
    are Python 2 syntax.
    """
    import os
    import h5py
    from hyperas.distributions import choice
    from mcc_multiclass import multimcc
    #import keras.backend.tensorflow_backend as K
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
    from keras.layers import Dropout, Flatten, Dense
    from mcc_multiclass import multimcc
    from hyperopt import STATUS_OK
    from keras.optimizers import SGD, RMSprop, Adam

    # path to the model weights files.

    weights_path = VGG_WEIGHTS

    img_width, img_height = 224, 224
    nb_epochs = NB_EPOCHS
    print("Entering GPU Model")
    #with K.tf.device('/gpu:' + str(GPU)):
    # Dummy context manager standing in for the disabled GPU/session scope.
    with open('FAKELOG', "w"):
        #K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
        #session = K.get_session()
        # build the VGG16 network
        # (five conv blocks, channels-first 3x224x224 input)
        model = Sequential()
        model.add(ZeroPadding2D((1, 1),
                                input_shape=(3, img_width, img_height)))

        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        # load the weights of the VGG16 networks
        # (trained on ImageNet, won the ILSVRC competition in 2014)
        # note: when there is a complete match between your model definition
        # and your weight savefile, you can simply call model.load_weights(filename)

        assert os.path.exists(
            weights_path
        ), 'Model weights not found (see "weights_path" variable in script).'
        # Old-style layer-by-layer HDF5 weight loading (pre-Keras-1 format).
        f = h5py.File(weights_path)
        for k in range(f.attrs['nb_layers']):
            if k >= len(model.layers):
                # we don't look at the last (fully-connected) layers in the savefile
                break
            g = f['layer_{}'.format(k)]
            weights = [
                g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])
            ]
            model.layers[k].set_weights(weights)
        f.close()
        print('Model loaded.')

        # build a classifier model to put on top of the convolutional model
        activation_function = 'relu'
        print "\n\t#Chosen Activation:", activation_function
        dense_size = 512
        print "\t#Chosen Dense Size:", dense_size
        dropout_rate = {{choice([0.0, 0.25, 0.5, 0.75])}}
        print "\t#Chosen Dropout Rate:", dropout_rate
        model.add(Flatten())
        model.add(Dense(dense_size, activation=activation_function))
        model.add(Dropout(dropout_rate))
        # Always-true condition: the double-FC head is hard-wired on.
        if 'two' == 'two':
            print "\t#Chosen FC Size: Double"
            model.add(Dense(dense_size, activation=activation_function))
            model.add(Dropout(dropout_rate))
        else:
            print "\t#Chosen FC Size: Single"
        final_classifier = 'softmax'
        print "\t#Chosen Final Classifier:", final_classifier
        model.add(Dense(3, activation=final_classifier))

        # note that it is necessary to start with a fully-trained
        # classifier, including the top classifier,
        # in order to successfully do fine-tuning
        # top_model.load_weights(top_model_weights_path)

        # set the first 25 layers (up to the last conv block)
        # to non-trainable (weights will not be updated)
        for layer in model.layers[:25]:
            layer.trainable = False

        # Hyperparameter search space: optimizer kind plus lr/momentum/nesterov.
        trial_model_optimizer_dict = {}
        #trial_model_optimizer_list = {{choice(['rmsprop', 'adam', 'sgd','adagrad','adadelta','adamax'])}}
        trial_model_optimizer_list = {{choice(['adam', 'sgd'])}}
        print "\t#Chosen Optimizer: ", trial_model_optimizer_list
        epsilon = 1e-08
        lr = {{choice([1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7])}}
        momentum = {{choice([0.7, 0.8, 0.9, 1.0])}}
        nesterov = {{choice([True, False])}}
        if trial_model_optimizer_list == 'adam':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adam(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adam'] = {'lr': lr, 'epsilon': epsilon}

        elif trial_model_optimizer_list == 'rmsprop':
            #epsilon={{choice([0,1e-04, 1e-05,1e-06,1e-07,1e-08, 1e-09, 1e-10])}}
            print "\t\t#Chosen Epsilon:", epsilon
            #lr = {{choice([0.1,0.5,0.01,0.05,0.001,0.005,0.0001,0.0005])}}

            print "\t\t#Chosen Learning Rate:", lr
            # rho = {{uniform(0.5, 1)}}
            #trial_model_optimizer = RMSprop(lr=lr, rho=rho, epsilon=epsilon)
            trial_model_optimizer = RMSprop(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['rmsprop'] = {
                'lr': lr,
                'epsilon': epsilon
            }

        elif trial_model_optimizer_list == 'sgd':

            print "\t\t#Chosen Nesterov:", nesterov
            #lr = {{choice([0.1,0.5,0.01,0.05,0.001,0.005,0.0001,0.0005])}}

            print "\t\t#Chosen Learning Rate:", lr

            print "\t\t#Chosen Momentum:", momentum
            # decay={{uniform(0, 0.5)}}
            #trial_model_optimizer = SGD(lr=lr, momentum=momentum, decay=decay, nesterov=nesterov)
            trial_model_optimizer = SGD(lr=lr,
                                        momentum=momentum,
                                        nesterov=nesterov)
            trial_model_optimizer_dict['sgd'] = {
                'lr': lr,
                'momentum': momentum,
                'nesterov': nesterov
            }
        # NOTE(review): Adagrad/Adamax/Adadelta are NOT imported above — the
        # three branches below would raise NameError if reached. They are
        # currently dead code because the choice list is only ['adam', 'sgd'].
        elif trial_model_optimizer_list == 'adagrad':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adagrad(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adagrad'] = {
                'lr': lr,
                'epsilon': epsilon
            }
        elif trial_model_optimizer_list == 'adamax':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adamax(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adamax'] = {
                'lr': lr,
                'epsilon': epsilon
            }
        elif trial_model_optimizer_list == 'adadelta':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adadelta(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adadelta'] = {
                'lr': lr,
                'epsilon': epsilon
            }
        # elif trial_model_optimizer_list == 'nadam':
        #     print "\t\t#Chosen Epsilon:", epsilon
        #     lr = 1e-4
        #     print "\t\t#Chosen Learning Rate:", lr
        #     # beta_1 = {{uniform(0.5, 1)}}
        #     # beta_2 = {{uniform(0.6, 1)}}
        #     #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
        #     trial_model_optimizer = Nadam(lr=lr,epsilon=epsilon )
        #     trial_model_optimizer_dict['nadam'] = {'lr': lr,
        #                                       'epsilon': epsilon}

        # Serialize the architecture before training so the returned 'model'
        # entry carries clean (untrained) weights-free JSON.
        saved_clean_model = model.to_json()

        # compile the model with a SGD/momentum optimizer
        # and a very slow learning rate.
        model.compile(loss='categorical_crossentropy',
                      optimizer=trial_model_optimizer,
                      metrics=['accuracy'])

        # fit the model
        batch_size = 128
        print "\t#Chosen batch size:", batch_size, "\n"
        model.fit(train,
                  train_labels,
                  nb_epoch=nb_epochs,
                  batch_size=batch_size)
        predicted_labels = model.predict(validation)
        predicted_labels_linear = []
        for i in range(len(predicted_labels)):
            cls_prob = predicted_labels[i]
            predicted_labels_linear.append(np.argmax(cls_prob))

        # One-hot validation labels -> integer classes 0/1/2.
        validation_labels_linear = []

        for lbl in validation_labels:
            if lbl[0] == 1:
                validation_labels_linear.append(0)
            if lbl[1] == 1:
                validation_labels_linear.append(1)
            if lbl[2] == 1:
                validation_labels_linear.append(2)

        validation_labels_linear = np.array(validation_labels_linear)
        predicted_labels_linear = np.array(predicted_labels_linear)

        MCC = multimcc(validation_labels_linear, predicted_labels_linear)
        print(MCC)
        output_model = {
            'model': saved_clean_model,
            'optimizer': trial_model_optimizer_dict,
            'batch_size': batch_size
        }
    #session.close()
    # hyperopt minimizes the loss, so negate MCC to maximize it.
    return {'loss': -MCC, 'status': STATUS_OK, 'model': output_model}
# ---- Code example #7 (scraper separator; "コード例" = "code example") ----
# 0
# Python 2 print statements — this block targets Python 2.
print "#Writing Prediction Output"
# Tab-separated summary file: one line per validation image.
prediction_summary_train = open(OUTDIR + "V_L_So_" + F_TYPE + "_" + FC_MODEL + "_validation_summary.txt", "w")
prediction_summary_train.write("\t".join(['FILENAME', 'REAL_LABEL', 'PREDICTED_LABELS']) + '\n')

predicted_labels_linear = []

for i in range(len(predicted_labels_train)):
    # Class probabilities for image i, serialized as ';'-joined strings.
    cls_prob = [str(el) for el in predicted_labels_train[i]]
    # Ground truth is only available when validation labels were supplied.
    if VALIDATION_LABELS is not None:
        real_label = np.argmax(validation_labels[i])
    else:
        real_label = 'NA'
    line = [validation_images[i], str(real_label), ";".join(cls_prob)]
    predicted_labels_linear.append(np.argmax(predicted_labels_train[i]))
    prediction_summary_train.write("\t".join(line) + "\n")
    prediction_summary_train.flush()

# MCC can only be computed against supplied ground-truth labels.
if VALIDATION_LABELS is not None:
    train_labels_linear = []

    for lbl in validation_labels:
        train_labels_linear.append(np.argmax(lbl))

    train_labels_linear = np.array(train_labels_linear)
    predicted_labels_linear = np.array(predicted_labels_linear)

    MCC = multimcc(train_labels_linear, predicted_labels_linear)
    print "#MCC Val:", MCC
    prediction_summary_train.write("MCC: " + str(round(MCC, 3)))
prediction_summary_train.close()
# ---- Code example #8 (scraper separator; "コード例" = "code example") ----
# 0
def main():
    """Run a VGG16 prediction experiment on the deepstreet validation set.

    Loads the validation images, rebuilds the VGG16-based 43-class classifier,
    restores trained weights, predicts labels, writes a per-image summary
    file, and reports the multiclass MCC plus a confusion-matrix image.
    """
    # Reference URLs for the published VGG16 weights (kept for documentation).
    WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'

    parser = myArgumentParser(description='Run a prediction experiment using pretrained VGG16, specified on the deepstreet DataSet.',
            fromfile_prefix_chars='@')
    parser.add_argument('--gpu', type=int, default=0, help='GPU Device (default: %(default)s)')
    parser.add_argument('--output_dir', type=str, default="./experiment_output/", help='Output directory')
    parser.add_argument('--input_dir', type=str, default="./", help='Input directory')
    parser.add_argument('--debug', type=bool, default=False, help='Debug mode')

    args = parser.parse_args()
    GPU = args.gpu
    OUTDIR = args.output_dir + "/"
    INDIR = args.input_dir + "/"
    DEBUG = args.debug

    if not os.path.exists(OUTDIR):
        os.makedirs(OUTDIR)

    if DEBUG:
        validation_data_dir = INDIR + "small_dataset/val/"
    else:
        #validation_data_dir = "dataset/val/"
        validation_data_dir = INDIR + "val/"

    # BUG FIX: validation_data_dir already starts with INDIR; the original
    # prepended INDIR a second time, so the .DS_Store cleanup never matched.
    ds_store_path = validation_data_dir + ".DS_Store"
    if os.path.exists(ds_store_path):
        os.remove(ds_store_path)

    # set dimensions of the images
    img_rows, img_cols = 224, 224

    if K.image_data_format() == 'channels_first':
        shape_ord = (3, img_rows, img_cols)
    else:  # channel_last
        shape_ord = (img_rows, img_cols, 3)

    vgg16_model = vgg16.VGG16(weights=None, include_top=False, input_tensor=Input(shape_ord))
    vgg16_model.summary()

    # add last fully-connected layers (43-way softmax head for deepstreet)
    x = Flatten(input_shape=vgg16_model.output.shape)(vgg16_model.output)
    x = Dense(4096, activation='relu', name='ft_fc1')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(43, activation='softmax')(x)

    model = Model(inputs=vgg16_model.input, outputs=predictions)

    # compile the model (only needed so predict/load_weights work on a built model)
    model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                loss='categorical_crossentropy', metrics=['accuracy'])

    # load validation images and create labels list
    # BUG FIX: the original removed non-.ppm entries from validation_filenames
    # while iterating over it, which skips the element following each removal;
    # filter into a new list up front instead.
    validation_filenames = sorted(
        name for name in os.listdir(validation_data_dir) if name.endswith(".ppm"))
    validation_images = []
    validation_labels = []

    for name in validation_filenames:
        validation_images.append(validation_data_dir + name)
        # The filename prefix before the first '_' encodes the class id (0-42).
        label_int = int(name.split("_")[0])
        labels_array = [0] * 43
        labels_array[label_int] = 1
        validation_labels.append(labels_array)

    print("Validation Filenames loaded.")

    validation = np.array(load_im2(validation_images, img_cols, img_rows))
    print("Validation images loaded.")

    model.load_weights("experiment_output/vgg16_deepstreet_training1.h5")

    predicted_labels = model.predict(validation)
    print("Labels predicted.")

    # write summary file
    prediction_summary = open(OUTDIR + "vgg16_deepstreet_t_prediction_summary_deepstreet_v.txt", "w")
    prediction_summary.write("\t".join(['FILENAME', 'REAL_LABEL', 'PREDICTED_LABELS']) + '\n')

    predicted_labels_linear = []
    validation_labels_linear = []

    # make linear labels list (index of the 1 in each one-hot row)
    for lbl in validation_labels:
        for class_idx, val in enumerate(lbl):
            if val == 1:
                validation_labels_linear.append(class_idx)

    for i in range(len(predicted_labels)):
        cls_prob = predicted_labels[i]  # class-probability vector for image i

        predicted_label_index = np.argmax(cls_prob)  # most probable class
        line = [validation_images[i], str(validation_labels_linear[i]), str(predicted_label_index), str(round(cls_prob[predicted_label_index], 3))]

        # "0:p0; 1:p1; ...; 42:p42" — the inner loop no longer reuses the
        # outer index variable `i` (it was shadowed in the original).
        s = ""
        for cls_idx in range(42):
            s += "{}:{}; ".format(cls_idx, round(cls_prob[cls_idx], 3))
        s += "42:{}".format(round(cls_prob[42], 3))

        line.append(s)

        predicted_labels_linear.append(np.argmax(cls_prob))
        prediction_summary.write(";".join(line) + "\n")
        prediction_summary.flush()

    validation_labels_linear = np.array(validation_labels_linear)
    predicted_labels_linear = np.array(predicted_labels_linear)

    # calculate MCC
    MCC = multimcc(validation_labels_linear, predicted_labels_linear)
    print(MCC)

    prediction_summary.write("MCC = {}".format(MCC))
    prediction_summary.flush()
    prediction_summary.close()

    # compute confusion matrix and save the image
    # (this project's confusion_matrix returns the matrix at index [0])
    conf_matrix = confusion_matrix(validation_labels_linear, predicted_labels_linear)[0]
    plt.matshow(conf_matrix)
    plt.colorbar()
    plt.savefig("confusion_matrix.png")

    end = timer()
    print("Total time: ", end - start)
# ---- Code example #9 (scraper separator; "コード例" = "code example") ----
# 0
        elif cl == 1 and j == 1:
            real_label = "Good"

        elif cl == 1 and j == 2:
            real_label = "Late"

    line = [validation_images[i], real_label,
            "Early:" + str(round(cls_prob[0], 3)) + ";Good:" + str(round(cls_prob[1], 3)) + ";Late:" + str(
                round(cls_prob[2], 3))]
    predicted_labels_linear.append(np.argmax(cls_prob))
    prediction_summary.write("\t".join(line) + "\n")
    prediction_summary.flush()

prediction_summary.close()

validation_labels_linear = []

# Linearize one-hot validation labels into integer classes 0/1/2
# (Early/Good/Late per the label strings used in the loop above).
# NOTE(review): independent `if`s (not elif) — a row with more than one 1
# would contribute multiple entries and misalign with predicted_labels_linear.
for lbl in validation_labels:
    if lbl[0] == 1:
        validation_labels_linear.append(0)
    if lbl[1] == 1:
        validation_labels_linear.append(1)
    if lbl[2] == 1:
        validation_labels_linear.append(2)

validation_labels_linear = np.array(validation_labels_linear)
predicted_labels_linear = np.array(predicted_labels_linear)

# Multiclass Matthews correlation coefficient on the linearized labels.
MCC = multimcc(validation_labels_linear, predicted_labels_linear)
print(MCC)
# ---- Code example #10 (scraper separator; "コード例" = "code example") ----
# 0
    "w")
prediction_summary_train.write(
    "\t".join(['FILENAME', 'REAL_LABEL', 'PREDICTED_LABELS']) + '\n')

predicted_labels_linear = []

for i in range(len(predicted_labels_train)):
    # Class probabilities for image i, serialized as ';'-joined strings.
    cls_prob = [str(el) for el in predicted_labels_train[i]]
    # Ground truth is only available when validation labels were supplied.
    if VALIDATION_LABELS is not None:
        real_label = np.argmax(validation_labels[i])
    else:
        real_label = 'NA'
    line = [validation_images[i], str(real_label), ";".join(cls_prob)]
    predicted_labels_linear.append(np.argmax(predicted_labels_train[i]))
    prediction_summary_train.write("\t".join(line) + "\n")
    prediction_summary_train.flush()

# MCC can only be computed against supplied ground-truth labels.
# (Python 2 print statement below — this block targets Python 2.)
if VALIDATION_LABELS is not None:
    train_labels_linear = []

    for lbl in validation_labels:
        train_labels_linear.append(np.argmax(lbl))

    train_labels_linear = np.array(train_labels_linear)
    predicted_labels_linear = np.array(predicted_labels_linear)

    MCC = multimcc(train_labels_linear, predicted_labels_linear)
    print "#MCC Val:", MCC
    prediction_summary_train.write("MCC: " + str(round(MCC, 3)))
prediction_summary_train.close()
# ---- Code example #11 (scraper separator; "コード例" = "code example") ----
# 0
def model(train, train_labels, validation, validation_labels, GPU, NB_EPOCHS, VGG_WEIGHTS):
    """Hyperas objective: fine-tune a VGG16 classifier and score it by MCC.

    NOTE(review): near-duplicate of the `model` function defined earlier in
    this file (same logic, different formatting). This is a hyperas TEMPLATE —
    the {{choice([...])}} markers are substituted by hyperas before the body
    is compiled, so this is not valid plain Python as written. The bare
    `print ...` statements are Python 2 syntax.
    """
    import os
    import h5py
    from hyperas.distributions import choice
    from mcc_multiclass import multimcc
    #import keras.backend.tensorflow_backend as K
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
    from keras.layers import Dropout, Flatten, Dense
    from mcc_multiclass import multimcc
    from hyperopt import STATUS_OK
    from keras.optimizers import SGD, RMSprop, Adam

    # path to the model weights files.

    weights_path = VGG_WEIGHTS

    img_width, img_height = 224, 224
    nb_epochs = NB_EPOCHS
    print ("Entering GPU Model")
    #with K.tf.device('/gpu:' + str(GPU)):
    # Dummy context manager standing in for the disabled GPU/session scope.
    with open('FAKELOG',"w"):
        #K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
        #session = K.get_session()
        # build the VGG16 network
        # (five conv blocks, channels-first 3x224x224 input)
        model = Sequential()
        model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        # load the weights of the VGG16 networks
        # (trained on ImageNet, won the ILSVRC competition in 2014)
        # note: when there is a complete match between your model definition
        # and your weight savefile, you can simply call model.load_weights(filename)

        assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
        # Old-style layer-by-layer HDF5 weight loading (pre-Keras-1 format).
        f = h5py.File(weights_path)
        for k in range(f.attrs['nb_layers']):
            if k >= len(model.layers):
                # we don't look at the last (fully-connected) layers in the savefile
                break
            g = f['layer_{}'.format(k)]
            weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
            model.layers[k].set_weights(weights)
        f.close()
        print('Model loaded.')

        # build a classifier model to put on top of the convolutional model
        activation_function = 'relu'
        print "\n\t#Chosen Activation:", activation_function
        dense_size = 512
        print "\t#Chosen Dense Size:", dense_size
        dropout_rate = {{choice([0.0,0.25,0.5,0.75])}}
        print "\t#Chosen Dropout Rate:", dropout_rate
        model.add(Flatten())
        model.add(Dense(dense_size, activation=activation_function))
        model.add(Dropout(dropout_rate))
        # Always-true condition: the double-FC head is hard-wired on.
        if 'two' == 'two':
            print "\t#Chosen FC Size: Double"
            model.add(Dense(dense_size, activation=activation_function))
            model.add(Dropout(dropout_rate))
        else:
            print "\t#Chosen FC Size: Single"
        final_classifier = 'softmax'
        print "\t#Chosen Final Classifier:", final_classifier
        model.add(Dense(3, activation=final_classifier))

        # note that it is necessary to start with a fully-trained
        # classifier, including the top classifier,
        # in order to successfully do fine-tuning
        # top_model.load_weights(top_model_weights_path)


        # set the first 25 layers (up to the last conv block)
        # to non-trainable (weights will not be updated)
        for layer in model.layers[:25]:
            layer.trainable = False

        # Hyperparameter search space: optimizer kind plus lr/momentum/nesterov.
        trial_model_optimizer_dict = {}
        #trial_model_optimizer_list = {{choice(['rmsprop', 'adam', 'sgd','adagrad','adadelta','adamax'])}}
        trial_model_optimizer_list = {{choice([ 'adam', 'sgd'])}}
        print "\t#Chosen Optimizer: ", trial_model_optimizer_list
        epsilon = 1e-08
        lr = {{choice([1e-1, 1e-2,1e-3,1e-4,1e-5,1e-6,1e-7])}}
        momentum={{choice([0.7,0.8,0.9,1.0])}}
        nesterov = {{choice([True,False])}}
        if trial_model_optimizer_list == 'adam':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adam(lr=lr,epsilon=epsilon )
            trial_model_optimizer_dict['adam'] = {'lr': lr,
                                              'epsilon': epsilon}

        elif trial_model_optimizer_list == 'rmsprop':
            #epsilon={{choice([0,1e-04, 1e-05,1e-06,1e-07,1e-08, 1e-09, 1e-10])}}
            print "\t\t#Chosen Epsilon:", epsilon
            #lr = {{choice([0.1,0.5,0.01,0.05,0.001,0.005,0.0001,0.0005])}}

            print "\t\t#Chosen Learning Rate:", lr
            # rho = {{uniform(0.5, 1)}}
            #trial_model_optimizer = RMSprop(lr=lr, rho=rho, epsilon=epsilon)
            trial_model_optimizer = RMSprop(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['rmsprop'] = {'lr': lr,
                                              'epsilon': epsilon}

        elif trial_model_optimizer_list == 'sgd':

            print "\t\t#Chosen Nesterov:", nesterov
            #lr = {{choice([0.1,0.5,0.01,0.05,0.001,0.005,0.0001,0.0005])}}

            print "\t\t#Chosen Learning Rate:", lr

            print "\t\t#Chosen Momentum:", momentum
            # decay={{uniform(0, 0.5)}}
            #trial_model_optimizer = SGD(lr=lr, momentum=momentum, decay=decay, nesterov=nesterov)
            trial_model_optimizer = SGD(lr=lr, momentum=momentum, nesterov=nesterov)
            trial_model_optimizer_dict['sgd'] = {'lr': lr,
                                              'momentum': momentum,
                                              'nesterov': nesterov}
        # NOTE(review): Adagrad/Adamax/Adadelta are NOT imported above — the
        # three branches below would raise NameError if reached. They are
        # currently dead code because the choice list is only ['adam', 'sgd'].
        elif trial_model_optimizer_list == 'adagrad':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adagrad(lr=lr,epsilon=epsilon )
            trial_model_optimizer_dict['adagrad'] = {'lr': lr,
                                              'epsilon': epsilon}
        elif trial_model_optimizer_list == 'adamax':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adamax(lr=lr,epsilon=epsilon )
            trial_model_optimizer_dict['adamax'] = {'lr': lr,
                                              'epsilon': epsilon}
        elif trial_model_optimizer_list == 'adadelta':
            print "\t\t#Chosen Epsilon:", epsilon

            print "\t\t#Chosen Learning Rate:", lr
            # beta_1 = {{uniform(0.5, 1)}}
            # beta_2 = {{uniform(0.6, 1)}}
            #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
            trial_model_optimizer = Adadelta(lr=lr,epsilon=epsilon )
            trial_model_optimizer_dict['adadelta'] = {'lr': lr,
                                              'epsilon': epsilon}
        # elif trial_model_optimizer_list == 'nadam':
        #     print "\t\t#Chosen Epsilon:", epsilon
        #     lr = 1e-4
        #     print "\t\t#Chosen Learning Rate:", lr
        #     # beta_1 = {{uniform(0.5, 1)}}
        #     # beta_2 = {{uniform(0.6, 1)}}
        #     #trial_model_optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,epsilon=epsilon )
        #     trial_model_optimizer = Nadam(lr=lr,epsilon=epsilon )
        #     trial_model_optimizer_dict['nadam'] = {'lr': lr,
        #                                       'epsilon': epsilon}




        # Serialize the architecture before training so the returned 'model'
        # entry carries clean architecture JSON.
        saved_clean_model = model.to_json()

        # compile the model with a SGD/momentum optimizer
        # and a very slow learning rate.
        model.compile(loss='categorical_crossentropy',
                      optimizer=trial_model_optimizer,
                      metrics=['accuracy'])

        # fit the model
        batch_size = 128
        print "\t#Chosen batch size:", batch_size,"\n"
        model.fit(train, train_labels, nb_epoch=nb_epochs, batch_size=batch_size)
        predicted_labels = model.predict(validation)
        predicted_labels_linear = []
        for i in range(len(predicted_labels)):
            cls_prob = predicted_labels[i]
            predicted_labels_linear.append(np.argmax(cls_prob))

        # One-hot validation labels -> integer classes 0/1/2.
        validation_labels_linear = []

        for lbl in validation_labels:
            if lbl[0] == 1:
                validation_labels_linear.append(0)
            if lbl[1] == 1:
                validation_labels_linear.append(1)
            if lbl[2] == 1:
                validation_labels_linear.append(2)

        validation_labels_linear = np.array(validation_labels_linear)
        predicted_labels_linear = np.array(predicted_labels_linear)

        MCC = multimcc(validation_labels_linear, predicted_labels_linear)
        print(MCC)
        output_model = {
            'model': saved_clean_model,
            'optimizer': trial_model_optimizer_dict,
            'batch_size': batch_size
        }
    #session.close()
    # hyperopt minimizes the loss, so negate MCC to maximize it.
    return {'loss': -MCC, 'status': STATUS_OK, 'model': output_model}