Example #1
 def evaluate_cnn(self, model, pixels, batch_size=64, merge=True):
     predictions = model.predict_generator(generator=self.tile_generator(
         pixels, batch_size=batch_size, merge=merge),
                                           steps=len(pixels) // batch_size,
                                           verbose=1)
     # Re-run the generator one sample at a time to collect the ground-truth labels
     eval_generator = self.tile_generator(pixels, batch_size=1)
     labels = np.empty(predictions.shape)
     count = 0
     while count < len(labels):
         image_b, label_b = next(eval_generator)
         labels[count] = label_b
         count += 1
     label_index = np.argmax(labels, axis=1)
     pred_index = np.argmax(predictions, axis=1)
     np.set_printoptions(precision=2)
     # Plot non-normalized confusion matrix
     util.plot_confusion_matrix(label_index,
                                pred_index,
                                classes=np.array(
                                    list(util.indexed_dictionary)),
                                class_dict=util.indexed_dictionary)
     # Plot normalized confusion matrix
     util.plot_confusion_matrix(label_index,
                                pred_index,
                                classes=np.array(
                                    list(util.indexed_dictionary)),
                                class_dict=util.indexed_dictionary,
                                normalize=True)
     accuracy = np.mean(label_index == pred_index)
     print("Accuracy is {}".format(accuracy))
Example #2
def predict():

    file_list_predict = get_data_paths('predict')
    line_parser = MbtiParser()
    predict_generator = BatchGenerator(file_list_predict,
                                       line_parser,
                                       batch_size=batch_size_predict,
                                       build_voc=False,
                                       max_sentence_length=max_sentence_length,
                                       voc_path=voc_path)

    cnn = CnnTextClassifier(**cnn_hyperparameters)

    pred_dic = cnn.predict(predict_generator)

    plt.figure()

    plot_confusion_matrix(pred_dic['ground_truth'],
                          pred_dic['predictions'],
                          list(range(num_classes)),
                          title='Confusion matrix, without normalization')

    plt.figure()

    plot_confusion_matrix(pred_dic['ground_truth'],
                          pred_dic['predictions'],
                          list(range(num_classes)),
                          title='Confusion matrix, with normalization',
                          normalize=True)
    plt.show()
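If the plotting helper had no normalize flag, the row-normalized matrix could be computed up front instead. A small sketch with toy labels; recent sklearn can also do this directly via confusion_matrix(..., normalize='true'), as Example #3 shows.

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = [0, 1, 1, 2, 2, 2]   # toy labels for illustration
y_pred = [0, 1, 2, 2, 2, 1]
cm = confusion_matrix(y_true, y_pred)                       # raw counts
cm_norm = cm.astype(float) / cm.sum(axis=1, keepdims=True)  # each row sums to 1
print(cm_norm)
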
Example #3
    def visualize_evaluation(self,
                             X,
                             Y,
                             labels,
                             title,
                             normalize=None,
                             save=True,
                             path=None):
        """
        

        Parameters
        ----------
        X : TYPE
            DESCRIPTION.
        Y : TYPE
            DESCRIPTION.
        labels : TYPE
            DESCRIPTION.
        normalize : {‘true’, ‘pred’, ‘all’}, optional
            Normalizes confusion matrix over the true (rows), predicted (columns) conditions or 
            all the population. If None, confusion matrix will not be normalized. The default 
            is None.
        save : TYPE, optional
            DESCRIPTION. The default is True.
        path : TYPE, optional
            DESCRIPTION. The default is None.

        Raises
        ------
        ValueError
            DESCRIPTION.

        Returns
        -------
        None.

        """
        if save and path is None:
            raise ValueError(
                'Invalid value given to `path`: `path` cannot be `None` when `save` is True'
            )
        # Evaluate and print a report of the classification
        Y_pred, acc, values = self.__get_acc(X, Y, labels)
        # Get and plot the raw and normalised confusion matrices
        cm_n = confusion_matrix(Y, Y_pred, normalize=normalize)
        cm = confusion_matrix(Y, Y_pred)
        if not path.endswith('.png'):
            path += '.png'
        path_n = path.split('.png')[0] + '_Normalised.png'
        plot_confusion_matrix(cm, values, path, title)
        plot_confusion_matrix(cm_n, values, path_n, title)
        return acc
Example #4
def evaluate_discriminator(X_test,
                           labels,
                           gans_path,
                           path_cm,
                           title,
                           normalize='all'):
    print(
        '\n___________________________________________________________________________'
    )
    print('Evaluating Discriminator...\n')
    # Load the trained Discriminator and Generator models
    if gans_path[-1] != '/':
        gans_path += '/'
    discriminator = load_model(gans_path + 'Discriminator.h5')
    generator = load_model(gans_path + 'Generator.h5')

    #Preparing data
    X_benignware = X_test[0].todense()
    X_malware = X_test[1].todense()

    #Getting predictions from Generator
    X_generated = generator.predict(X_malware)
    len_gen = X_generated.shape[0]

    #Preparing X_real, Y_real & Y_generated
    X_real = X_benignware
    len_real = X_real.shape[0]
    Y_real = np.zeros(len_real)
    Y_generated = np.ones(len_gen)

    #Making predictions on Discriminator
    Y_pred_real = discriminator.predict(X_real)
    Y_pred_gen = discriminator.predict(X_generated)

    Y_pred = np.concatenate((Y_pred_real, Y_pred_gen))
    Y_pred = np.where(Y_pred > 0.5, 1, 0)
    Y = np.array(np.concatenate((Y_real, Y_generated)))

    acc, values = get_acc(Y, Y_pred, labels)
    #Get and plot Confusion matrix
    cm_n = confusion_matrix(Y, Y_pred, normalize=normalize)
    cm = confusion_matrix(Y, Y_pred)
    if '.png' not in path_cm:
        path_cm += '.png'
    path_n = path_cm.split('.png')[0] + '_Normalised.png'
    plot_confusion_matrix(cm, values, path_cm, title)
    plot_confusion_matrix(cm_n, values, path_n, title)
    print('\nEvaluation of Discriminator is Completed!')
    print(
        '___________________________________________________________________________\n'
    )
    return acc
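get_acc (like self.__get_acc in Example #3) is project code; judging by the unpacking above, it returns an accuracy score together with the label names that are handed to the confusion-matrix plot. A hedged sketch of such a helper:

from sklearn.metrics import accuracy_score, classification_report

def get_acc(Y, Y_pred, labels):
    """Print a classification report; return accuracy and the display labels."""
    print(classification_report(Y, Y_pred, target_names=labels))
    return accuracy_score(Y, Y_pred), labels
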
Example #5
def predict():

    X_train, X_test, y_train, y_test = create_data_set()

    fd = FaceDetector()
    
    pred_dic = fd.predict(X_test, y_test)
    
    plt.figure()
    
    plot_confusion_matrix(pred_dic['ground_truth'],
                          pred_dic['predictions'],
                          classes=range(np.max(pred_dic['predictions']) + 1),
                          title='Confusion matrix, without normalization')

    plt.show()
    
    return pred_dic
Example #6
# Imports assumed by this standalone snippet; plot_loss_accuracy,
# plot_decision_boundary and plot_confusion_matrix come from the
# tutorial's helper module.
import pydot
from sklearn.datasets import make_classification
from keras.models import Sequential
from keras.layers import Dense

pydot.find_graphviz = lambda: True

x, y = make_classification(n_samples=100,
                           n_informative=2,
                           n_features=2,
                           n_redundant=0,
                           n_clusters_per_class=1,
                           random_state=7)

model = Sequential()
model.add(Dense(units=1, input_shape=(2, ), activation='sigmoid'))

#sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

model.compile(optimizer='sgd',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(x=x, y=y, verbose=0, epochs=200)

print(model.get_weights())
print(model.summary())
plot_loss_accuracy(history)
plot_decision_boundary(lambda x: model.predict(x), x, y)

y_pred = model.predict_classes(x, verbose=0)

plot_confusion_matrix(model, x, y)
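plot_decision_boundary above is another tutorial helper, not a Keras API. A sketch of a decision-boundary plotter under the assumptions this snippet makes (2-D inputs and a sigmoid model whose probability output can be thresholded at 0.5):

import numpy as np
import matplotlib.pyplot as plt

def plot_decision_boundary(predict_fn, X, y, steps=200):
    """Shade the regions where predict_fn outputs class 0 vs class 1."""
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, steps),
                         np.linspace(y_min, y_max, steps))
    grid = np.c_[xx.ravel(), yy.ravel()]
    # Threshold the model's probability output at 0.5 on the mesh grid
    zz = (np.asarray(predict_fn(grid)).ravel() > 0.5).reshape(xx.shape)
    plt.contourf(xx, yy, zz, alpha=0.3, cmap=plt.cm.coolwarm)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm, edgecolors='k')
    plt.show()
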
Example #7
import pandas as pd
import numpy as np
import pydot
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
pydot.find_graphviz = lambda: True

# `data` avoids shadowing the built-in `input`; .values replaces the
# DataFrame.as_matrix() method removed in pandas 1.0
data = pd.read_csv("sample1.csv")

X_train = data.iloc[:, 1:3].values
y_train = data["Output"].values

model1 = Sequential()
model1.add(Dense(units=1, input_shape=(2,), activation='sigmoid'))

#sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

model1.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
history1 = model1.fit(x=X_train, y=y_train, verbose=0, epochs=1)



print(model1.get_weights())
plot_loss_accuracy(history1)
plot_decision_boundary(lambda x: model1.predict(x), X_train, y_train)

y_pred = model1.predict_classes(X_train, verbose=0)

plot_confusion_matrix(model1, X_train, y_train)
Example #8
def test_casme(batch_size, spatial_epochs, temporal_epochs, train_id, dB,
               spatial_size, flag, tensorboard):
    ############## Path Preparation ######################
    root_db_path = "/media/ice/OS/Datasets/"
    workplace = root_db_path + dB + "/"
    inputDir = root_db_path + dB + "/" + dB + "/"
    ######################################################
    classes = 5
    if dB == 'CASME2_TIM':
        table = loading_casme_table(workplace + 'CASME2_label_Ver_2.xls')
        listOfIgnoredSamples, IgnoredSamples_index = ignore_casme_samples(
            inputDir)

        ############## Variables ###################
        r = w = spatial_size
        subjects = 2
        samples = 246
        n_exp = 5
        # VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
        listOfIgnoredSamples = []
        VidPerSubject = [2, 1]
        timesteps_TIM = 10
        data_dim = r * w
        pad_sequence = 10
        channel = 3
        ############################################

        os.remove(workplace + "Classification/CASME2_TIM_label.txt")

    elif dB == 'CASME2_Optical':
        table = loading_casme_table(workplace + 'CASME2_label_Ver_2.xls')
        listOfIgnoredSamples, IgnoredSamples_index, _ = ignore_casme_samples(
            inputDir)

        ############## Variables ###################
        r = w = spatial_size
        subjects = 26
        samples = 246
        n_exp = 5
        VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
        timesteps_TIM = 9
        data_dim = r * w
        pad_sequence = 9
        channel = 3
        ############################################

        # os.remove(workplace + "Classification/CASME2_TIM_label.txt")

    elif dB == 'CASME2_RGB':
        # print(inputDir)
        table = loading_casme_table(workplace +
                                    'CASME2_RGB/CASME2_label_Ver_2.xls')
        listOfIgnoredSamples, IgnoredSamples_index = ignore_casmergb_samples(
            inputDir)
        ############## Variables ###################
        r = w = spatial_size
        subjects = 26
        samples = 245  # not used, delete it later
        n_exp = 5
        VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
        timesteps_TIM = 10
        data_dim = r * w
        pad_sequence = 10
        channel = 3
        ############################################

    elif dB == 'SMIC_TIM10':
        table = loading_smic_table(root_db_path, dB)
        listOfIgnoredSamples = []
        IgnoredSamples_index = np.empty([0])

        ################# Variables #############################
        r = w = spatial_size
        subjects = 16
        samples = 164
        n_exp = 3
        VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
        timesteps_TIM = 10
        data_dim = r * w
        pad_sequence = 10
        channel = 1
        classes = 3
        #########################################################

    elif dB == 'SAMM_Optical':
        table, table_objective = loading_samm_table(root_db_path, dB)
        listOfIgnoredSamples = []
        IgnoredSamples_index = np.empty([0])

        ################# Variables #############################
        r = w = spatial_size
        subjects = 29
        samples = 159
        n_exp = 8
        VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
        timesteps_TIM = 9
        data_dim = r * w
        pad_sequence = 10
        channel = 3
        classes = 8
        #########################################################

    elif dB == 'SAMM_TIM10':
        table, table_objective = loading_samm_table(root_db_path, dB)
        listOfIgnoredSamples = []
        IgnoredSamples_index = np.empty([0])

        ################# Variables #############################
        r = w = spatial_size
        subjects = 29
        samples = 159
        n_exp = 8
        VidPerSubject = get_subfolders_num(inputDir, IgnoredSamples_index)
        timesteps_TIM = 10
        data_dim = r * w
        pad_sequence = 10
        channel = 3
        classes = 8
        #########################################################

    # print(VidPerSubject)

    ############## Flags ####################
    tensorboard_flag = tensorboard
    resizedFlag = 1
    train_spatial_flag = 0
    train_temporal_flag = 0
    svm_flag = 0
    finetuning_flag = 0
    cam_visualizer_flag = 0
    channel_flag = 0

    if flag == 'st':
        train_spatial_flag = 1
        train_temporal_flag = 1
        finetuning_flag = 1
    elif flag == 's':
        train_spatial_flag = 1
        finetuning_flag = 1
    elif flag == 't':
        train_temporal_flag = 1
    elif flag == 'nofine':
        svm_flag = 1
    elif flag == 'scratch':
        train_spatial_flag = 1
        train_temporal_flag = 1
    elif flag == 'st4':
        train_spatial_flag = 1
        train_temporal_flag = 1
        channel_flag = 1
    elif flag == 'st7':
        train_spatial_flag = 1
        train_temporal_flag = 1
        channel_flag = 2
    elif flag == 'st4vis':
        train_spatial_flag = 1
        train_temporal_flag = 1
        channel_flag = 3
    #########################################

    ############ Reading Images and Labels ################
    SubperdB = Read_Input_Images(inputDir, listOfIgnoredSamples, dB,
                                 resizedFlag, table, workplace, spatial_size,
                                 channel)
    print("Loaded Images into the tray...")
    labelperSub = label_matching(workplace, dB, subjects, VidPerSubject)
    print("Loaded Labels into the tray...")

    if channel_flag == 1:
        inputDir = root_db_path + dB + "/" + dB + "/"

        SubperdB_strain = Read_Input_Images(
            root_db_path + 'CASME2_Strain_TIM10' + '/' +
            'CASME2_Strain_TIM10' + '/', listOfIgnoredSamples,
            'CASME2_Strain_TIM10', resizedFlag, table, workplace, spatial_size,
            3)
        SubperdB_gray = Read_Input_Images(
            root_db_path + 'CASME2_TIM' + '/' + 'CASME2_TIM' + '/',
            listOfIgnoredSamples, 'CASME2_TIM', resizedFlag, table, workplace,
            spatial_size, 3)

    elif channel_flag == 3:
        inputDir_strain = '/media/ice/OS/Datasets/CASME2_Strain_TIM10/CASME2_Strain_TIM10/'
        SubperdB_strain = Read_Input_Images(inputDir_strain,
                                            listOfIgnoredSamples,
                                            'CASME2_Strain_TIM10', resizedFlag,
                                            table, workplace, spatial_size, 3)
        inputDir_gray = '/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/'
        SubperdB_gray = Read_Input_Images(inputDir_gray, listOfIgnoredSamples,
                                          'CASME2_TIM', resizedFlag, table,
                                          workplace, spatial_size, 3)

    elif channel_flag == 2:
        SubperdB_strain = Read_Input_Images(inputDir, listOfIgnoredSamples,
                                            'CASME2_Strain_TIM10', resizedFlag,
                                            table, workplace, spatial_size, 1)
        SubperdB_gray = Read_Input_Images(inputDir, listOfIgnoredSamples,
                                          'CASME2_TIM', resizedFlag, table,
                                          workplace, spatial_size, 3)
    #######################################################

    ########### Model Configurations #######################
    sgd = optimizers.SGD(lr=0.0001, decay=1e-7, momentum=0.9, nesterov=True)
    adam = optimizers.Adam(lr=0.00001, decay=0.000001)

    # Different Conditions for Temporal Learning ONLY
    if train_spatial_flag == 0 and train_temporal_flag == 1 and dB != 'CASME2_Optical':
        data_dim = spatial_size * spatial_size
    elif train_spatial_flag == 0 and train_temporal_flag == 1 and dB == 'CASME2_Optical':
        data_dim = spatial_size * spatial_size * 3
    else:
        data_dim = 8192

    ########################################################

    ########### Image Data Generator ##############
    image_generator = ImageDataGenerator(zca_whitening=True,
                                         rotation_range=0.2,
                                         width_shift_range=0.2,
                                         height_shift_range=0.2,
                                         zoom_range=0.2,
                                         horizontal_flip=True,
                                         rescale=1.5)
    ###############################################

    ########### Training Process ############
    # Todo:
    # 1) LOSO (done)
    # 2) call model (done)
    # 3) saving model architecture
    # 4) Saving Checkpoint (done)
    # 5) make prediction (done)
    if tensorboard_flag == 1:
        tensorboard_path = "/home/ice/Documents/Micro-Expression/tensorboard/"

    # total confusion matrix to be used in the computation of f1 score
    tot_mat = np.zeros((n_exp, n_exp))

    weights_dir = '/media/ice/OS/Datasets/Weights/53/'
    image_path = '/home/ice/Documents/Micro-Expression/image/'
    table_count = 0
    for sub in range(subjects):
        ############### Reinitialization & weights reset of models ########################

        temporal_model_weights = weights_dir + 'temporal_enrichment_ID_' + str(
            train_id) + '_' + str(dB) + '_' + str(sub) + '.h5'
        vgg_model_weights = weights_dir + 'vgg_spatial_' + str(
            train_id) + '_' + str(dB) + '_' + str(sub) + '.h5'
        vgg_model_strain_weights = weights_dir + 'vgg_spatial_strain_' + str(
            train_id) + '_' + str(dB) + '_' + str(sub) + '.h5'
        conv_ae_weights = weights_dir + 'autoencoder_' + str(
            train_id) + '_' + str(dB) + '_' + str(sub) + '.h5'
        conv_ae_strain_weights = weights_dir + 'autoencoder_strain_' + str(
            train_id) + '_' + str(dB) + '_' + str(sub) + '.h5'

        temporal_model = temporal_module(data_dim=data_dim,
                                         timesteps_TIM=timesteps_TIM,
                                         weights_path=temporal_model_weights)
        temporal_model.compile(loss='categorical_crossentropy',
                               optimizer=adam,
                               metrics=[metrics.categorical_accuracy])

        conv_ae = convolutional_autoencoder(spatial_size=spatial_size,
                                            weights_path=conv_ae_weights)
        conv_ae.compile(loss='binary_crossentropy', optimizer=adam)

        conv_ae_strain = convolutional_autoencoder(
            spatial_size=spatial_size, weights_path=conv_ae_strain_weights)
        conv_ae_strain.compile(loss='binary_crossentropy', optimizer=adam)

        vgg_model = VGG_16(spatial_size=spatial_size,
                           classes=classes,
                           weights_path=vgg_model_weights)
        vgg_model.compile(loss='categorical_crossentropy',
                          optimizer=adam,
                          metrics=[metrics.categorical_accuracy])

        vgg_model_strain = VGG_16(spatial_size=spatial_size,
                                  classes=classes,
                                  weights_path=vgg_model_strain_weights)
        vgg_model_strain.compile(loss='categorical_crossentropy',
                                 optimizer=adam,
                                 metrics=[metrics.categorical_accuracy])

        svm_classifier = SVC(kernel='linear', C=1)
        ####################################################################################

        Train_X, Train_Y, Test_X, Test_Y, Test_Y_gt = data_loader_with_LOSO(
            sub, SubperdB, labelperSub, subjects, classes)

        # Rearrange Training labels into a vector of images, breaking sequence
        Train_X_spatial = Train_X.reshape(Train_X.shape[0] * timesteps_TIM, r,
                                          w, channel)
        Test_X_spatial = Test_X.reshape(Test_X.shape[0] * timesteps_TIM, r, w,
                                        channel)

        # Special Loading for 4-Channel
        if channel_flag == 1 or channel_flag == 3:
            Train_X_Strain, _, Test_X_Strain, _, _ = data_loader_with_LOSO(
                sub, SubperdB_strain, labelperSub, subjects, classes)
            Train_X_Strain = Train_X_Strain.reshape(
                Train_X_Strain.shape[0] * timesteps_TIM, r, w, 3)
            Test_X_Strain = Test_X_Strain.reshape(
                Test_X.shape[0] * timesteps_TIM, r, w, 3)

            Train_X_Gray, _, Test_X_Gray, _, _ = data_loader_with_LOSO(
                sub, SubperdB_gray, labelperSub, subjects, classes)
            Test_X_Gray = Test_X_Gray.reshape(Test_X_Gray.shape[0] * 10, r, w,
                                              3)
            # print(Train_X_Strain.shape)
            # Train_X_Strain = Train_X_Strain[0]
            # Train_X_Strain = Train_X_Strain.reshape((224, 224, 3, 1))
            # Train_X_Strain = Train_X_Strain.reshape((224, 224, 3))

            # cv2.imwrite('steveharvey.png', Train_X_Strain)
            # Concatenate Train X & Train_X_Strain
            # Train_X_spatial = np.concatenate((Train_X_spatial, Train_X_Strain), axis=3)
            # Test_X_spatial = np.concatenate((Test_X_spatial, Test_X_Strain), axis=3)

            total_channel = 4

        # Extend Y labels 10 fold, so that all images have labels
        Train_Y_spatial = np.repeat(Train_Y, timesteps_TIM, axis=0)
        Test_Y_spatial = np.repeat(Test_Y, timesteps_TIM, axis=0)

        ##################### Training & Testing #########################

        # print(Train_X_spatial.shape)

        test_X = Test_X_spatial.reshape(Test_X_spatial.shape[0], channel, r, w)
        test_y = Test_Y_spatial.reshape(Test_Y_spatial.shape[0], classes)
        normalized_test_X = test_X.astype('float32') / 255.

        Test_X_Strain = Test_X_Strain.reshape(Test_X_Strain.shape[0], channel,
                                              r, w)
        Test_X_Gray = Test_X_Gray.reshape(Test_X_Gray.shape[0], channel, r, w)
        # test_y = Test_Y_spatial.reshape(Test_Y_spatial.shape[0], classes)
        normalized_test_X_strain = Test_X_Strain.astype('float32') / 255.

        # print(X.shape)

        ###### conv weights must be frozen for transfer learning ######
        if finetuning_flag == 1:
            for layer in vgg_model.layers[:33]:
                layer.trainable = False

        if train_spatial_flag == 1 and train_temporal_flag == 1:

            # vgg
            model = Model(inputs=vgg_model.input,
                          outputs=vgg_model.layers[35].output)
            plot_model(model,
                       to_file="spatial_module_FULL_TRAINING.png",
                       show_shapes=True)
            output = model.predict(test_X)

            # vgg strain
            model_strain = Model(inputs=vgg_model_strain.input,
                                 outputs=vgg_model_strain.layers[35].output)
            plot_model(model_strain,
                       to_file="spatial_module_FULL_TRAINING_strain.png",
                       show_shapes=True)
            output_strain = model_strain.predict(Test_X_Strain)

            # ae
            # model_ae = Model(inputs=conv_ae.input, outputs=conv_ae.output)
            # plot_model(model_ae, to_file='autoencoders.png', show_shapes=True)
            # output_ae = model_ae.predict(normalized_test_X)
            # output_ae = model.predict(output_ae)

            # ae strain
            # model_ae_strain = Model(inputs=conv_ae_strain.input, outputs=conv_ae_strain.output)
            # plot_model(model_ae, to_file='autoencoders.png', show_shapes=True)
            # output_ae_strain = model_ae_strain.predict(normalized_test_X_strain)
            # output_ae_strain = model_ae_strain.predict(output_ae_strain)

            # concatenate features
            output = np.concatenate((output, output_strain), axis=1)
            features = output.reshape(int(Test_X.shape[0]), timesteps_TIM,
                                      output.shape[1])

            # temporal
            predict = temporal_model.predict_classes(features,
                                                     batch_size=batch_size)

            # visualize cam
            countcam = 0
            file = open(
                workplace + 'Classification/' + 'Result/' + dB + '/log_hde' +
                str(train_id) + '.txt', 'a')
            file.write(str(sub + 1) + "\n")
            for item_idx in range(len(predict)):
                test_strain = Test_X_Gray[item_idx + countcam]
                test_strain = test_strain.reshape((224, 224, 3))
                item = test_strain

                cam_output = visualize_cam(model, 29, 0, item)
                cam_output2 = visualize_cam(model, 29, 1, item)
                cam_output3 = visualize_cam(model, 29, 2, item)
                cam_output4 = visualize_cam(model, 29, 3, item)
                cam_output5 = visualize_cam(model, 29, 4, item)

                overlaying_cam = overlay(item, cam_output)
                overlaying_cam2 = overlay(item, cam_output2)
                overlaying_cam3 = overlay(item, cam_output3)
                overlaying_cam4 = overlay(item, cam_output4)
                overlaying_cam5 = overlay(item, cam_output5)

                cv2.imwrite(
                    image_path + '_' + str(sub) + '_' + str(item_idx) + '_' +
                    str(predict[item_idx]) + '_' + str(Test_Y_gt[item_idx]) +
                    '_coverlayingcam0.png', overlaying_cam)
                cv2.imwrite(
                    image_path + '_' + str(sub) + '_' + str(item_idx) + '_' +
                    str(predict[item_idx]) + '_' + str(Test_Y_gt[item_idx]) +
                    '_coverlayingcam1.png', overlaying_cam2)
                cv2.imwrite(
                    image_path + '_' + str(sub) + '_' + str(item_idx) + '_' +
                    str(predict[item_idx]) + '_' + str(Test_Y_gt[item_idx]) +
                    '_coverlayingcam2.png', overlaying_cam3)
                cv2.imwrite(
                    image_path + '_' + str(sub) + '_' + str(item_idx) + '_' +
                    str(predict[item_idx]) + '_' + str(Test_Y_gt[item_idx]) +
                    '_coverlayingcam3.png', overlaying_cam4)
                cv2.imwrite(
                    image_path + '_' + str(sub) + '_' + str(item_idx) + '_' +
                    str(predict[item_idx]) + '_' + str(Test_Y_gt[item_idx]) +
                    '_coverlayingcam4.png', overlaying_cam5)

                countcam += 9

                ######## write the log file for megc 2018 ############

                result_string = table[table_count, 1] + ' ' + str(
                    int(Test_Y_gt[item_idx])) + ' ' + str(
                        predict[item_idx]) + '\n'
                file.write(result_string)
                ######################################################
                table_count += 1
        ##############################################################

        #################### Confusion Matrix Construction #############
        print(predict)
        print(Test_Y_gt)

        ct = confusion_matrix(Test_Y_gt, predict)
        # print(type(ct))
        # check the order of the CT
        order = np.unique(np.concatenate((predict, Test_Y_gt)))

        # create an array to hold the CT for each CV
        mat = np.zeros((n_exp, n_exp))
        # put the order accordingly, in order to form the overall ConfusionMat
        for m in range(len(order)):
            for n in range(len(order)):
                mat[int(order[m]), int(order[n])] = ct[m, n]

        tot_mat = mat + tot_mat

        ################################################################

        #################### cumulative f1 plotting ######################
        microAcc = np.trace(tot_mat) / np.sum(tot_mat)
        [f1, precision, recall] = fpr(tot_mat, n_exp)

        file = open(
            workplace + 'Classification/' + 'Result/' + dB + '/f1_' +
            str(train_id) + '.txt', 'a')
        file.write(str(f1) + "\n")
        file.close()

        ##################################################################

        ################# write each CT of each CV into .txt file #####################
        record_scores(workplace, dB, ct, sub, order, tot_mat, n_exp, subjects)
        ###############################################################################

    tot_mat_cm = np.asarray(tot_mat, dtype=int)

    plt.figure()
    classes_test = [0, 1, 2, 3, 4]
    plot_confusion_matrix(tot_mat_cm,
                          classes_test,
                          normalize=True,
                          title='Confusion matrix_single_db')

    plt.show()
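fpr in the LOSO loop above is a repository helper; given how tot_mat is built (true classes along the rows), it presumably performs the standard macro-averaged computation. A sketch under that assumption:

import numpy as np

def fpr(tot_mat, n_exp):
    """Macro-averaged f1, precision and recall from an n_exp x n_exp
    confusion matrix whose rows are the true classes."""
    assert tot_mat.shape == (n_exp, n_exp)
    tp = np.diag(tot_mat).astype(float)
    precision = tp / np.maximum(tot_mat.sum(axis=0), 1e-12)  # per predicted class
    recall = tp / np.maximum(tot_mat.sum(axis=1), 1e-12)     # per true class
    f1 = 2 * precision * recall / np.maximum(precision + recall, 1e-12)
    return [f1.mean(), precision.mean(), recall.mean()]
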
Example #9
# Imports assumed by this standalone snippet; plot_data, plot_loss_accuracy,
# plot_decision_boundary and plot_confusion_matrix come from the tutorial's
# helper module.
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.utils import plot_model

#X, y = make_moons(n_samples=1000, noise=0.05, random_state=0)
plot_data(X, y)

#single perceptron model for binary classification
model1 = Sequential()
model1.add(Dense(1, input_shape=(2, ), activation='sigmoid'))

model1.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
plot_model(model1, show_shapes=True, to_file='model1.png')

history1 = model1.fit(X, y, verbose=0, epochs=100)
plot_loss_accuracy(history1)
plot_decision_boundary(lambda x: model1.predict(x), X, y)

y_pred = model1.predict_classes(X, verbose=0)
plot_confusion_matrix(model1, X, y)

#mlp model for binary classification
model2 = Sequential()
model2.add(Dense(4, input_shape=(2, ), activation='tanh'))
model2.add(Dense(2, activation='tanh'))
model2.add(Dense(1, activation='sigmoid'))

model2.compile(Adam(lr=0.01), 'binary_crossentropy', metrics=['accuracy'])
plot_model(model2, show_shapes=True, to_file='model2.png')

history2 = model2.fit(X, y, verbose=0, epochs=50)

plot_loss_accuracy(history2)
plot_decision_boundary(lambda x: model2.predict(x), X, y)
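A sketch of the plot_loss_accuracy helper these Keras walkthroughs assume, reading the curves from the History object returned by fit; note the accuracy key is 'acc' in older Keras and 'accuracy' in newer releases, so this sketch checks for both.

import matplotlib.pyplot as plt

def plot_loss_accuracy(history):
    """Plot training loss and accuracy curves from a Keras History object."""
    hist = history.history
    acc_key = 'accuracy' if 'accuracy' in hist else 'acc'  # key varies by version
    fig, ax = plt.subplots()
    ax.plot(hist['loss'], label='loss')
    ax.plot(hist[acc_key], label='accuracy')
    ax.set_xlabel('epoch')
    ax.legend()
    plt.show()
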
Example #10
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from utilities import plot_confusion_matrix

true_labels = [2, 0, 0, 2, 4, 4, 1, 0, 3, 3, 3]
pred_labels = [2, 1, 0, 2, 4, 3, 1, 0, 1, 3, 3]

confusion_mat = confusion_matrix(true_labels, pred_labels)
print(confusion_mat)
targets = ['Class-0', 'Class-1', 'Class-2', 'Class-3', 'Class-4']
print('\n',
      classification_report(true_labels, pred_labels, target_names=targets))

plot_confusion_matrix(confusion_mat, true_labels, normalize=True)
Example #11
np.save('no_fs_time', no_fs_time)
np.save('hs_time', hs_time)
np.save('cs_time', cs_time)
np.save('dfa_time', dfa_time)

np.save('no_fs_score', no_fs_score)
np.save('hs_score', hs_score)
np.save('cs_score', cs_score)
np.save('dfa_score', dfa_score)

np.save('no_fs_f1', no_fs_f1)
np.save('hs_f1', hs_f1)
np.save('cs_f1', cs_f1)
np.save('dfa_f1', dfa_f1)

time_msg = "Times: %f(no_fs) %f(hs) %f(cs) %f(dfa)" % (
    no_fs_time.mean(), hs_time.mean(), cs_time.mean(), dfa_time.mean())
print(time_msg)

scores_msg = "Scores: %f(no_fs) %f(hs) %f(cs) %f(dfa)" % (
    no_fs_score.mean(), hs_score.mean(), cs_score.mean(), dfa_score.mean())
print(scores_msg)

f1_msg = "f1: %f(no_fs) %f(hs) %f(cs) %f(dfa)" % (
    no_fs_f1.mean(), hs_f1.mean(), cs_f1.mean(), dfa_f1.mean())
print(f1_msg)

plot_confusion_matrix(correct_y, no_fs_y, genres)
plot_confusion_matrix(correct_y, hs_y, genres)
plot_confusion_matrix(correct_y, cs_y, genres)
plot_confusion_matrix(correct_y, dfa_y, genres)
Example #12
                            fprs[label][median],
                            label='%s vs rest' % genre_list[label])

    all_pr_scores = np.asarray(list(pr_scores.values())).flatten()
    summary = (np.mean(scores), np.std(scores), np.mean(all_pr_scores),
               np.std(all_pr_scores))
    #print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)

    #saving the trained model to disk
    joblib.dump(clf, 'saved_model/model_ceps.pkl')

    return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)


if __name__ == "__main__":
    start = timeit.default_timer()
    print
    print " Starting of classification model \n"
    print " Classification is running ... \n"
    X, y = read_ceps(genre_list)
    train_avg, test_avg, cms = train_model(X, y, "ceps", plot=True)
    cm_avg = np.mean(cms, axis=0)
    cm_norm = cm_avg / np.sum(cm_avg, axis=0)
    print " Classification is done \n"
    stop = timeit.default_timer()
    print " Total time taken (s) = ", (stop - start)
    print "\n Plotting of confusion matrix ... \n"
    plot_confusion_matrix(cm_norm, genre_list, "ceps",
                          "CEPS classifier - Confusion matrix")
    print " Please check the plots in 'graphs' directory \n"
Example #13
        # Relative importance of each feature within the whole feature set
        importance_ratio = high_rank_fre / sum(high_rank_fre)
        trace = go.Bar(x=feature_names, y=importance_ratio, name=classifier)
        importance_traces.append(trace)

        # print accuracy of each classifier
        print("Accuracy of", classifier, "classifier is:",
              "%.4f" % accuracy_score(label_true, label_predicted))
        # plot confusion matrix
        cm = confusion_matrix(label_true, label_predicted)
        print("Confusion matrix:")
        print(cm)
        print()
        plot_confusion_matrix(cm=cm,
                              label_names=label_names,
                              title="Confusion matrix of " + classifier +
                              " classifier",
                              normalize=True)

    layout = dict(
        title=dict(
            text='Relative importance of features by different classifiers',
            x=0.5,
        ),
        yaxis=dict(title="ratio"),
        xaxis=dict(title='candy characteristic', ),
    )

    fig = dict(data=importance_traces, layout=layout)
    py.plot(fig, filename=importance_path,
            auto_open=False)  # show importance of features
Example #14
    loss_acc_plot.semilogy(epoch_vector, data, label=label)
loss_acc_plot.legend(loc="upper left")

# Plot confusion matrix

# Compute confusion matrix
y_pred = model.predict(X_test)
cnf_matrix = confusion_matrix(y_test.argmax(1), y_pred.argmax(1))

# Plot normalized confusion matrix
classes = [0]
classes.extend(df.digit.unique()[:-1])
classes = list(map(str, classes))
plt.figure()
plot_confusion_matrix(cnf_matrix,
                      classes=classes,
                      normalize=True,
                      title='Normalized confusion matrix',
                      cmap=plt.cm.Greys)

plt.show()

# Calculate the F1 score
print("F1 score per label: " + ', '.join(
    map(
        str,
        np.around(f1_score(y_test.argmax(1), y_pred.argmax(1), average=None),
                  2))))
print("Global F1 score: %.2f" %
      f1_score(y_test.argmax(1), y_pred.argmax(1), average="micro"))
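One property worth noting about the "Global F1 score" above: for single-label multiclass predictions, micro-averaged precision, recall and F1 all coincide with plain accuracy, so the micro-F1 line doubles as an accuracy report. A quick sanity check with toy labels:

import numpy as np
from sklearn.metrics import accuracy_score, f1_score

y_true = np.array([0, 1, 2, 2, 1])   # toy labels for illustration
y_pred = np.array([0, 2, 2, 2, 1])
assert np.isclose(f1_score(y_true, y_pred, average='micro'),
                  accuracy_score(y_true, y_pred))
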
Example #15
def inference_validation_data(args):

    # Arguments & parameters
    dataset_dir = args.dataset_dir
    subdir = args.subdir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename
    cuda = args.cuda

    labels = config.labels

    if 'mobile' in subdir:
        devices = ['a', 'b', 'c']
    else:
        devices = ['a']

    validation = True
    classes_num = len(labels)

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', subdir,
                             'development.h5')

    dev_train_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                 'fold1_train.txt')
                                 
    dev_validate_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                    'fold{}_evaluate.txt'.format(holdout_fold))

    model_path = os.path.join(workspace, 'models', subdir, filename,
                              'holdout_fold={}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    # Load model
    model = Model(classes_num)
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Predict & evaluate
    for device in devices:

        print('Device: {}'.format(device))

        # Data generator
        generator = DataGenerator(hdf5_path=hdf5_path,
                                  batch_size=batch_size,
                                  dev_train_csv=dev_train_csv,
                                  dev_validate_csv=dev_validate_csv)

        generate_func = generator.generate_validate(data_type='validate', 
                                                     devices=device, 
                                                     shuffle=False)

        # Inference
        results = forward(model=model,
                          generate_func=generate_func,
                          cuda=cuda,
                          return_target=True)

        outputs = results['output']    # (audios_num, classes_num)
        targets = results['target']    # (audios_num, classes_num)

        predictions = np.argmax(outputs, axis=-1)

        classes_num = outputs.shape[-1]
        
        # Evaluate
        confusion_matrix = calculate_confusion_matrix(
            targets, predictions, classes_num)
            
        class_wise_accuracy = calculate_accuracy(targets, predictions, 
                                                 classes_num)

        # Print
        print_accuracy(class_wise_accuracy, labels)
        print('confusion_matrix: \n', confusion_matrix)

        # Plot confusion matrix
        plot_confusion_matrix(
            confusion_matrix,
            title='Device {}'.format(device.upper()), 
            labels=labels,
            values=class_wise_accuracy)
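calculate_confusion_matrix here is repository code rather than sklearn's. A plain-numpy sketch of what such an accumulator plausibly does, assuming integer class indices (if targets is one-hot, as the shape comment suggests, argmax it first):

import numpy as np

def calculate_confusion_matrix(targets, predictions, classes_num):
    """Count (true, predicted) pairs into a classes_num x classes_num matrix."""
    cm = np.zeros((classes_num, classes_num), dtype=int)
    for t, p in zip(targets, predictions):
        cm[int(t), int(p)] += 1   # rows: true class, columns: predicted class
    return cm
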
Example #16
def train(lr, batchSize, epochs, tvweightdetection, tvweightsegmentation,
          saveImages):
    avDev = getDev()
    print(avDev)

    seed = 1
    setSeed(seed)

    from utilities import showImagesDetection, showImagesSegmentation, showDetectedImages, visualiseSegmented

    from dataloader import CudaVisionDataLoader

    listTransforms_train = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ]
    listTransforms_test = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ]

    data = CudaVisionDataLoader()
    parentDir = './small_data'
    dirDetectionDataset = os.path.join(parentDir, 'detection')
    dirSegmentationDataset = os.path.join(parentDir, 'segmentation')

    train_loader_detection = data.__call__(
        os.path.join(dirDetectionDataset, 'train'), "detection",
        listTransforms_train, batchSize)
    validate_loader_detection = data.__call__(
        os.path.join(dirDetectionDataset, 'validate'), "detection",
        listTransforms_test, batchSize)
    test_loader_detection = data.__call__(
        os.path.join(dirDetectionDataset, 'test'), "detection",
        listTransforms_test, batchSize)
    train_loader_segmentation = data.__call__(
        os.path.join(dirSegmentationDataset, 'train'), "segmentation",
        listTransforms_train, batchSize)
    validate_loader_segmentation = data.__call__(
        os.path.join(dirSegmentationDataset, 'validate'), "segmentation",
        listTransforms_test, batchSize)
    test_loader_segmentation = data.__call__(
        os.path.join(dirSegmentationDataset, 'test'), "segmentation",
        listTransforms_test, batchSize)

    from model import soccerSegment
    from metrics import det_metrics, segmentationAccuracy, seg_iou, det_confusion_matrix, seg_confusion_matrix
    from utilities import get_colored_image, get_predected_centers, plot_confusion_matrix

    import torchvision.models as models
    print("Loading pretrained ResNet18")
    resnet18 = models.resnet18(pretrained=True)
    model = soccerSegment(resnet18, [5, 6, 7, 8], [64, 128, 256, 256, 0],
                          [512, 256, 256, 128], [512, 512, 256], 256)
    model.to(avDev)

    criterionDetected = nn.MSELoss()
    criterionSegmented = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    checkpoint_path = './checkpoints/checkpoint'

    val_patience = 10
    val_counter = 0
    best_acc = np.NINF
    epoch = 0
    color_classes = [[255, 0, 0], [0, 0, 255], [0, 255, 0]]
    detlosses = []
    seglosses = []
    valdetlosses = []
    valseglosses = []
    print("Load saved model if any")
    if os.path.exists(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch'] + 1
        seglosses = checkpoint['seglosses']
        detlosses = checkpoint['detlosses']
        valseglosses = checkpoint['valseglosses']
        valdetlosses = checkpoint['valdetlosses']
        print("Checkpoint Loaded")

    print("Specified epochs for training: ", epoch,
          " The patience value for early stopping: ", val_patience,
          "Batch Size: ", batchSize, " Learning Rate: ", lr)
    print("Starting training")
    while epoch < epochs:
        detlosstrain = 0
        seglosstrain = 0
        print("Epoch: ", epoch)
        det_train_acc = 0.0
        seg_train_acc = 0.0

        model.train()
        steps = 0
        fullDataCovered = 0
        dataiter_detection = iter(train_loader_detection)
        dataiter_segmentation = iter(train_loader_segmentation)

        while fullDataCovered != 1:
            for i in range(4):
                try:
                    images, targets, target_centers = next(dataiter_detection)
                except StopIteration:
                    fullDataCovered = 1
                    dataiter_detection = iter(train_loader_detection)
                    images, targets, target_centers = next(dataiter_detection)
                images = images.to(avDev)
                targets = targets.to(avDev)
                optimizer.zero_grad()
                segmented, detected = model(images)
                tvLoss = TVLossDetect(tvweightdetection)
                total_variation_loss = tvLoss.forward(detected)
                mseloss = criterionDetected(detected, targets)
                loss = mseloss + total_variation_loss
                detlosstrain += loss.item()
                steps += 1
                # Getting gradients w.r.t. parameters
                loss.backward()
                # Updating parameters
                optimizer.step()

                ground_truth_centers = get_predected_centers(target_centers)
                colored_images = get_colored_image(detected)
                det_train_acc += np.average(
                    det_metrics(ground_truth_centers, colored_images,
                                color_classes)[0])
                i += 1
            try:
                images, targets = next(dataiter_segmentation)
            except StopIteration:
                fullDataCovered = 1
                dataiter_segmentation = iter(train_loader_segmentation)
                images, targets = next(dataiter_segmentation)
            images = images.to(avDev)
            targets = targets.to(avDev)
            optimizer.zero_grad()
            segmented, detected = model(images)
            segmentedLabels = torch.argmax(segmented, dim=1)
            accuracies = segmentationAccuracy(segmentedLabels.long(), targets,
                                              [0, 1, 2])
            tvLoss = TVLossSegment(tvweightsegmentation)
            total_variation_loss = tvLoss.forward(segmented)
            cross_entropy_loss = criterionSegmented(segmented, targets)
            loss = cross_entropy_loss + total_variation_loss
            seglosstrain += loss.item()
            steps += 1
            # Getting gradients w.r.t. parameters
            loss.backward()
            # Updating parameters
            optimizer.step()
            seg_train_acc += accuracies[3]

        det_train_acc /= len(train_loader_detection)
        seg_train_acc /= len(train_loader_segmentation)
        detlosstrain /= len(train_loader_detection)
        seglosstrain /= len(train_loader_segmentation)
        print("Training finished , starting with validation")
        det_val_acc = 0.0
        seg_val_acc = 0.0
        model.eval()
        steps = 0
        detlosses.append(detlosstrain)
        seglosses.append(seglosstrain)
        detlossval = 0
        seglossval = 0
        for images, targets, target_centers in validate_loader_detection:
            with torch.no_grad():
                images = images.to(avDev)
                targets = targets.to(avDev)

                segmented, detected = model(images)
                tvLoss = TVLossDetect(tvweightdetection)
                total_variation_loss = tvLoss.forward(detected)
                mseloss = criterionDetected(detected, targets)
                detloss = mseloss + total_variation_loss
                detlossval += detloss.item()
                steps += 1

                ground_truth_centers = get_predected_centers(target_centers)
                colored_images = get_colored_image(detected)
                det_val_acc += np.average(
                    det_metrics(ground_truth_centers, colored_images,
                                color_classes)[0])

        for images, targets in validate_loader_segmentation:
            model.eval()
            with torch.no_grad():
                images = images.to(avDev)
                targets = targets.to(avDev)

                segmented, detected = model(images)

                segmentedLabels = torch.argmax(segmented, dim=1)
                tvLoss = TVLossSegment(tvweightsegmentation)
            total_variation_loss = tvLoss.forward(segmented)
            entropy_loss = criterionSegmented(segmented, targets.long())
            segloss = entropy_loss + total_variation_loss
            seglossval += segloss.item()
            accuracies = segmentationAccuracy(segmentedLabels.long(), targets,
                                              [0, 1, 2])
            seg_val_acc += accuracies[3]

        det_val_acc /= len(validate_loader_detection)
        seg_val_acc /= len(validate_loader_segmentation)
        detlossval /= len(validate_loader_detection)
        seglossval /= len(validate_loader_segmentation)
        valdetlosses.append(detlossval)
        valseglosses.append(seglossval)
        print("Epoch: ", epoch, " Finished")
        print("Epoch: ", epoch, " Detection Train Accuracy: ", det_train_acc,
              " Detection Validation Accuracy: ", det_val_acc,
              "Detection Loss Train:", detlosstrain)
        print("Epoch: ", epoch, " Segmentation Train Accuracy: ",
              det_train_acc, " Segmentation Validation Accuracy: ",
              seg_val_acc, "Segmentation Train Accuracy:", seglosstrain)

        acc = (det_val_acc + seg_val_acc) / 2
        if acc > best_acc:
            val_counter = 0
            best_acc = acc
            print("New Best Validation Accuracy Detection: ", det_val_acc,
                  " Segmentation: ", seg_val_acc)
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'detlosses': detlosses,
                    'seglosses': seglosses,
                    'valdetlosses': valdetlosses,
                    'valseglosses': valseglosses
                }, checkpoint_path)
        elif val_counter < val_patience:
            val_counter += 1
            print("Best Validation Accuracy Not Updated ")
        else:
            print("Full Patience Reached ")
            break
        epoch += 1

    count = 0
    num = 0
    plot_learning_curve(detlosses, valdetlosses, "detection")
    plot_learning_curve(seglosses, valseglosses, "segmentation")
    det_test_metric = np.zeros((5, len(color_classes)))
    confusiondet = np.zeros((4, 4))
    confusionseg = np.zeros((3, 3))
    print("Training finished starting with testdatatset")
    print("Starting with detection")
    for images, targets, target_center in test_loader_detection:
        count += 1
        model.eval()
        with torch.no_grad():
            images = images.to(avDev)
            targets = targets.to(avDev)

            segmented, detected = model(images)

            tvLoss = TVLossDetect(tvweightdetection)
            total_variation_loss = tvLoss.forward(detected)
            mseloss = criterionDetected(detected, targets)
            loss = mseloss + total_variation_loss
            print("Detection Test loss: ", loss.item())
            ground_truth_centers = get_predected_centers(target_center)
            colored_images = get_colored_image(detected)
            det_test_metric += det_metrics(ground_truth_centers,
                                           colored_images, color_classes)
            confusiondet += det_confusion_matrix(ground_truth_centers,
                                                 colored_images, color_classes)
            if (saveImages):
                for j in range(batchSize):
                    num += 1
                    showImagesDetection(images[j], num)
                    showDetectedImages(detected[j], num, "output")
                    showDetectedImages(targets[j], num, "truth")

    det_test_metric /= len(test_loader_detection)
    plot_confusion_matrix(confusiondet, "detection")
    print('Test Detection Overall Accuracy: {}.'.format(
        np.average(det_test_metric[0])))
    print('Ball Accuracy:', det_test_metric[0][0])
    print('Goal Pillar Accuracy:', det_test_metric[0][1])
    print('Robot Accuracy:', det_test_metric[0][2])

    print('Test Detection Overall Recall: {}.'.format(
        np.average(det_test_metric[1])))
    print('Ball Recall:', det_test_metric[1][0])
    print('Goal Pillar Recall:', det_test_metric[1][1])
    print('Robot Recall:', det_test_metric[1][2])

    print('Test Detection Overall Precision: {}.'.format(
        np.average(det_test_metric[2])))
    print('Ball Precision:', det_test_metric[2][0])
    print('Goal Pillar Precision:', det_test_metric[2][1])
    print('Robot Precision:', det_test_metric[2][2])

    print('Test Detection Overall F1score: {}.'.format(
        np.average(det_test_metric[3])))
    print('Ball F1score:', det_test_metric[3][0])
    print('Goal Pillar F1score:', det_test_metric[3][1])
    print('Robot F1score:', det_test_metric[3][2])

    print('Test Detection Overall False Rate: {}.'.format(
        np.average(det_test_metric[4])))
    print('Ball False Rate:', det_test_metric[4][0])
    print('Goal Pillar False Rate:', det_test_metric[4][1])
    print('Robot False Rate:', det_test_metric[4][2])

    accuracies = [0, 0, 0, 0]
    iou = [0, 0, 0, 0]
    num = 0
    count = 0
    for images, targets in test_loader_segmentation:
        count += 1
        model.eval()
        with torch.no_grad():

            images = images.to(avDev)
            targets = targets.to(avDev)

            segmented, detected = model(images)

            segmentedLabels = torch.argmax(segmented, dim=1)
            tvLoss = TVLossSegment(tvweightsegmentation)
            total_variation_loss = tvLoss.forward(segmented)
            entropy_loss = criterionSegmented(segmented, targets.long())
            loss = entropy_loss + total_variation_loss
            print("Segmentation Test loss: ", loss.item())
        confusionseg += seg_confusion_matrix(targets, segmentedLabels)
        accuracies_returned = segmentationAccuracy(segmentedLabels.long(),
                                                   targets, [0, 1, 2])
        iou_returned = seg_iou(targets, segmentedLabels.long(), [0, 1, 2])
        accuracies = list(map(add, accuracies, accuracies_returned))
        iou = list(map(add, iou, iou_returned))
        if (saveImages):
            for j in range(batchSize):
                num += 1
                showImagesSegmentation(images[j], num)
                visualiseSegmented(segmentedLabels[j], num, "output")
                visualiseSegmented(targets[j], num, "truth")
    plot_confusion_matrix(confusionseg, "segmentation")
    print('Test Segmentation Accuracy: {}.'.format(accuracies[3] / count))
    print('Field Accuracy: ', accuracies[2] / count)
    print('Line Accuracy: ', accuracies[1] / count)
    print('Background Accuracy: ', accuracies[0] / count)
    print('Iou: ', iou[3] / count)
    print('Iou Field: ', iou[2] / count)
    print('Iou Line: ', iou[1] / count)
    print('Iou Background: ', iou[0] / count)
Example #17
        loss = mseloss + total_variation_loss
        print("Detection Test loss: ", loss.item())
        ground_truth_centers = get_predected_centers(target_center)
        colored_images = get_colored_image(detected)
        det_test_metric += det_metrics(ground_truth_centers, colored_images,
                                       color_classes)
        confusiondet += det_confusion_matrix(ground_truth_centers,
                                             colored_images, color_classes)
        for j in range(batchSize):
            num += 1
            showImagesDetection(images[j], num)
            showDetectedImages(detected[j], num, "output")
            showDetectedImages(targets[j], num, "truth")

det_test_metric /= len(test_loader_detection)
plot_confusion_matrix(confusiondet, "detection")
print('Test Detection Overall Accuracy: {}.'.format(np.average(det_test_metric[0])))
print('Ball Accuracy:', det_test_metric[0][0])
print('Goal Pillar Accuracy:', det_test_metric[0][1])
print('Robot Accuracy:', det_test_metric[0][2])

print('Test Detection Overall Recall: {}.'.format(np.average(det_test_metric[1])))
print('Ball Recall:', det_test_metric[1][0])
print('Goal Pillar Recall:', det_test_metric[1][1])
print('Robot Recall:', det_test_metric[1][2])

print('Test Detection Overall Precision: {}.'.format(np.average(det_test_metric[2])))
print('Ball Precision:', det_test_metric[2][0])
print('Goal Pillar Precision:', det_test_metric[2][1])
print('Robot Precision:', det_test_metric[2][2])