def log(self):
     file = open(self.filename, 'w')
     accuracy_max = max(self.accuracys)
     accuracy_max_index = self.accuracys.index(accuracy_max)
     iter_max = self.values[accuracy_max_index]
     file.write('max:\n\t{}: {} - a: {}\n'.format(self.iter_name, iter_max,
                                                  accuracy_max) + '\n')
     plot_confusion_matrix(self.matrixes[accuracy_max_index], self.classes,
                           self.imagename)
     plot_confusion_matrix(self.matrixes[accuracy_max_index], self.classes,
                           self.imagename_normalized, True)
     precision_max = max(self.precisions)
     iter_max = self.values[self.precisions.index(precision_max)]
     file.write('max:\n\t{}: {} - p: {}\n'.format(self.iter_name, iter_max,
                                                  precision_max) + '\n')
     for i in range(len(self.accuracys)):
         file.write('{}: {} - a: {} - p: {}'.format(
             self.iter_name, self.values[i], self.accuracys[i],
             self.precisions[i]) + '\n')
     for line in self.logs:
         if '    [[' in line:
             line = line.replace('    [[', '[[')
         file.write(line + '\n')
     file.close()
     if os.path.exists(self.matrixes_dir):
         shutil.rmtree(self.matrixes_dir)
     os.makedirs(self.matrixes_dir)
     for i in range(len(self.matrixes)):
         file = open(self.matrixes_dir + str(self.values[i]), 'wb')  # pickle needs binary mode
         pickle.dump(self.matrixes[i], file, -1)
         file.close()
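Throughout these examples, plot_confusion_matrix is a project-local helper rather than a library function, and its signature varies from example to example. For the call pattern used above (matrix, class names, output image path, optional normalize flag), a minimal matplotlib sketch could look like the following; the body is an assumption inferred from the calls, not any repository's actual implementation:

import numpy as np
import matplotlib.pyplot as plt

def plot_confusion_matrix(cm, classes, imagename, normalize=False):
    # Hypothetical helper matching the (matrix, classes, path[, normalize]) calls above.
    cm = np.asarray(cm, dtype=float)
    if normalize:
        cm = cm / cm.sum(axis=1, keepdims=True)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap='Blues')
    fig.colorbar(im)
    ax.set_xticks(np.arange(len(classes)))
    ax.set_yticks(np.arange(len(classes)))
    ax.set_xticklabels(classes, rotation=90)
    ax.set_yticklabels(classes)
    ax.set_xlabel('Predicted label')
    ax.set_ylabel('True label')
    fig.tight_layout()
    fig.savefig(imagename)
    plt.close(fig)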
Example #2
def plot_full_evaluation(all_y_test, all_y_pred, model_name):
    final_result = confusion_matrix(all_y_test,
                                    all_y_pred,
                                    labels=np.arange(nb_emotions))

    accuracy = (all_y_test == all_y_pred).sum() / len(all_y_pred)
    average = 'macro'

    precision = precision_score(all_y_test, all_y_pred, average=average)
    recall = recall_score(all_y_test, all_y_pred, average=average)
    f1 = f1_score(all_y_test, all_y_pred, average=average)

    comments = ['\nMODEL EVALUATION:']
    comments.append('  Accuracy  : {:.5f}'.format(accuracy))
    comments.append('  Precision : {:.5f}'.format(precision))
    comments.append('  Recall    : {:.5f}'.format(recall))
    comments.append('  F1-score  : {:.5f}'.format(f1))

    comments = '\n'.join(comments)

    plot_confusion_matrix(final_result,
                          emotions,
                          title="Confusion_Matrix_" + model_name,
                          normalize=True,
                          comments=comments)

    return final_result, comments
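For reference, the manual accuracy above is exactly what sklearn's accuracy_score computes, assuming all_y_test and all_y_pred are equal-length 1-D NumPy arrays:

from sklearn.metrics import accuracy_score

# Equivalent to (all_y_test == all_y_pred).sum() / len(all_y_pred)
accuracy = accuracy_score(all_y_test, all_y_pred)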
Example #3
def modes(mode, model, weightsFile, x_test, y_test):
    model.load_weights(weightsFile)
    if mode == 'test':
        score = model.evaluate(x_test, y_test, verbose=0)
        print('\n', 'Test accuracy:', score[1])
    elif mode == 'pred':
        a = np.full((1, 28, 28, 1), 0)
        a[0] = x_test[0]
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        model.predict(a)
        end = time.perf_counter()
        # Only a single image is predicted here, so report the elapsed time directly
        # rather than dividing by the size of the whole test set.
        print("Time per image prediction: {} ".format(end - start))
    elif mode == 'arc':
        plot_model(model, to_file='model.png', show_shapes=True)
    elif mode == 'vis':
        plot.visualize_accuracy(model, x_test, y_test)
    elif mode == 'cm':
        ypred_onehot = model.predict(x_test)
        ypred = np.argmax(ypred_onehot, axis=1)
        ytrue = np.argmax(y_test, axis=1)

        # compute and plot the confusion matrix
        confusion_mtx = confusion_matrix(ytrue, ypred)
        plot.plot_confusion_matrix(confusion_mtx)
Example #4
def NSC_k_NN(df_treatment, embeds_cols, plot_conf=False, savepath=None):
    # Create classes for each moa
    class_dict = dict(zip(df_treatment['moa'].unique(), np.arange(len(df_treatment['moa'].unique()))))
    df_treatment['moa_class'] = df_treatment['moa'].map(class_dict)

    # Create nearest neighbors classifier
    predictions = list()
    labels = list()
    label_names = list()
    for comp in df_treatment['compound'].unique():
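        # Not-same-compound (NSC): each treatment is classified using neighbors
        # drawn only from the other compounds.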
        df_ = df_treatment.loc[df_treatment['compound'] != comp, :]
        knn = KNeighborsClassifier(n_neighbors=4, algorithm='brute', metric='cosine')
        knn.fit(df_.loc[:, embeds_cols], df_.loc[:, 'moa_class'])

        # kneighbors returns (distances, indices); nn[1] holds row positions into df_
        nn = knn.kneighbors(df_treatment.loc[df_treatment['compound'] == comp, embeds_cols])
        for p in range(nn[1].shape[0]):
            predictions.append(list(df_.iloc[nn[1][p]]['moa_class']))
        labels.extend(df_treatment.loc[df_treatment['compound'] == comp, 'moa_class'])
        label_names.extend(df_treatment.loc[df_treatment['compound'] == comp, 'moa'])

    predictions = np.asarray(predictions)
    k_nn_acc = [accuracy_score(labels, predictions[:, 0]),
                accuracy_score(labels, predictions[:, 1]),
                accuracy_score(labels, predictions[:, 2]),
                accuracy_score(labels, predictions[:, 3])]

    if plot_conf:
        print('There are {} treatments'.format(len(df_treatment)))
        print('NSC is: {:.2f}%'.format(accuracy_score(labels, predictions[:, 0]) * 100))
        plot_confusion_matrix(labels, predictions[:, 0], class_dict, 'NSC', savepath)
    return k_nn_acc
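A minimal synthetic sanity check for NSC_k_NN, with made-up data (assuming the function's own dependencies, pandas, numpy, KNeighborsClassifier and accuracy_score, are already imported):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
embeds_cols = ['e0', 'e1', 'e2']
toy = pd.DataFrame(rng.normal(size=(8, 3)), columns=embeds_cols)
toy['moa'] = ['a'] * 4 + ['b'] * 4                                  # two MOAs
toy['compound'] = ['c1', 'c1', 'c2', 'c2', 'c3', 'c3', 'c4', 'c4']  # four compounds
acc_at_k = NSC_k_NN(toy, embeds_cols)  # top-1..top-4 not-same-compound accuracies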
Example #5
def print_test_accuracy(dataset,
                        show_example_errors=False,
                        show_confusion_matrix=False):

    # For all the images in the test-set,
    # calculate the predicted classes and whether they are correct.
    correct, cls_pred = predict_cls(images=dataset['test_images'],
                                    labels=dataset['test_labels'],
                                    cls_true=dataset['test_cls'])

    # Classification accuracy and the number of correct classifications.
    acc, num_correct = classification_accuracy(correct)

    # Number of images being classified.
    num_images = len(correct)

    # Print the accuracy.
    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, num_correct, num_images))

    # Plot some examples of mis-classifications, if desired.
    if show_example_errors:
        print("Example errors:")
        plot.plot_example_errors(cls_pred=cls_pred,
                                 correct=correct,
                                 dataset=dataset)

    # Plot the confusion matrix, if desired.
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot.plot_confusion_matrix(cls_pred=cls_pred, dataset=dataset)
Example #6
def print_stats(name, Y_pred, Y_test, y_final, initName, key):
    # Stats when running through the results
    print(name)
    print("Confusion Matrix: ")
    cm = confusion_matrix(Y_test, Y_pred)
    print(cm)
    plot_confusion_matrix(cm, name, initName, key)
    print('Accuracy Score: %.2f' % accuracy_score(Y_test, Y_pred))
    print()
Example #7
 def plot(self, cellLabel=True, fileName=''):
     "Save plot to faile. (Only support .png)"
     plot.plot_confusion_matrix(cm=np.asarray(self.statistics),
                                normalize=False,
                                target_names=self.vocTable.category,
                                title="Confusion Matrix",
                                cell_label=cellLabel,
                                filepath=fileName)
     print('Plot saved to: ' + fileName)
Example #8
def image_matrix(folder, name):
    classes_dir = 'TestSet 2' if '_b' in folder or '_b_bin' in folder else 'TestSet'
    m = matrix(folder, name)
    image_folder = './logs/all_images/' + folder + '/'
    if not os.path.exists(image_folder):
        os.makedirs(image_folder)
    plot_confusion_matrix(m, getClassNames(classes_dir),
                          image_folder + folder + '.png')
    plot_confusion_matrix(m, getClassNames(classes_dir),
                          image_folder + folder + '_normalized.png', True)
Example #9
 def display_scores(self, plot_flag=False):
     Scorer.display_scores(self)
     print(self.report)
     if plot_flag:
         try:
             if self.metric != 'set':  # 'is not' compares identity, not equality
                 plot.plot_confusion_matrix(self.cnf,
                                            self.classes,
                                            title=self.metric)
         except Exception:
             print("Cannot plot.")
Example #10
def report(y_test, y_pred, model):
    print('Accuracy Score of ' + model + ' : ' +
          str(accuracy_score(y_test, y_pred)))
    print('Precision Score of ' + model + ' : ' +
          str(precision_score(y_test, y_pred)))
    print('Recall Score of ' + model + ' : ' + str(recall_score(y_test, y_pred)))
    print('F1 Score of ' + model + ' : ' + str(f1_score(y_test, y_pred)))
    print('Fb Score of ' + model + ' : ' +
          str(fbeta_score(y_test, y_pred, beta=1.5)))
    print('AUC Score of ' + model + ' : ' + str(roc_auc_score(y_test, y_pred)))
    plot.plot_confusion_matrix(y_test, y_pred, model)
    plt.show()
Example #11
def KNN_confusion_matrix(X, y, test_Size):
    name = "train/test : " + str(int(100 - 100 * test_Size)) + "/" + str(
        int(100 * test_Size))

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        random_state=0,
                                                        test_size=test_Size)

    # Build a KNN classifier with k = 10
    knn = KNeighborsClassifier(n_neighbors=10)

    # Train
    knn.fit(X_train, y_train)

    # Compute accuracy
    accuracy = knn.score(X_test, y_test)

    # Build the confusion matrix
    y_pred = knn.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)

    # Compute precision (macro-averaged over classes)
    precision = precision_score(y_test, y_pred, average=None)
    precision = sum(precision) / len(precision)
    # Compute recall
    recall = recall_score(y_test, y_pred, average=None)
    recall = sum(recall) / len(recall)
    # Compute F1
    f1 = f1_score(y_test, y_pred, average=None)
    f1 = sum(f1) / len(f1)
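    # Note: averaging the per-class scores by hand, as above, is equivalent to
    # passing average='macro' directly, e.g. f1_score(y_test, y_pred, average='macro').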
    plot.plot_confusion_matrix(y_test,
                               y_pred,
                               classes=CLASSNAME,
                               title='Confusion matrix. ' + name)
    # plot.plot_confusion_matrix(y_test, y_pred, classes=CLASSNAME, normalize=True, title='Normalized confusion matrix'+name)
    plt.show()

    print("---")
    print(name)
    print('   accuracy  : ' + str(accuracy))
    print('   precision : ' + str(precision))
    print('   recall    : ' + str(recall))
    print('   f1        : ' + str(f1))
Example #12
def all_images():
    matrixes_images_folder = './logs/all_images/'
    matrixes_folder = './logs/matrixes/'
    if os.path.exists(matrixes_images_folder):
        shutil.rmtree(matrixes_images_folder)
    os.makedirs(matrixes_images_folder)
    for matrix_folder in os.listdir(matrixes_folder):
        if '.DS_Store' in matrix_folder:
            continue
        matrix_folder += '/'
        images_folder = matrixes_images_folder + matrix_folder
        os.makedirs(images_folder)
        classes_dir = 'TestSet 2' if '_b.' in matrix_folder or '_b_bin' in matrix_folder else 'TestSet'
        classes = getClassNames(classes_dir)
        for matrix_file in os.listdir(matrixes_folder + matrix_folder):
            imagename = matrixes_images_folder + matrix_folder + matrix_file + '.png'
            print('Going to generate matrix {}{}'.format(
                matrix_folder, matrix_file))
            m = matrix(matrix_folder, matrix_file)
            plot_confusion_matrix(m, classes, imagename)
Example #13
    def plot(self, dirname):
        """ Generate plots """
        # Plot learning curve
        print(f"{self.name}: Plotting learning curve")
        learning_curve = [
            x for x in self.output if x["type"] == "learning_curve"
        ]
        if learning_curve:
            fig = plot.plot_learning_curve(
                self.title,
                learning_curve[0]["data"]["train_sizes"],
                learning_curve[0]["data"]["train_scores"],
                learning_curve[0]["data"]["test_scores"],
                learning_curve[0]["data"]["fit_times"],
            )
            fig.savefig(
                os.path.join(dirname, f"{self.name}_learning_curve.png"))

        # Plot validation curve(s)
        validation_curve = [
            x for x in self.output if x["type"] == "validation_curve"
        ]
        if validation_curve:
            for param in validation_curve:
                fig = plot.plot_validation_curve(
                    self.title,
                    param["data"]["parameter"],
                    param["data"]["range"],
                    param["data"]["train_scores"],
                    param["data"]["test_scores"],
                )
                fig.savefig(
                    os.path.join(
                        dirname,
                        f'{self.name}_{param["data"]["parameter"]}_validation_curve.png',
                    ))

        # If NN plot loss curve
        if self.title == "Neural Networks Classifier":
            fig = plot.plot_loss_curve(self.model)
            fig.savefig(os.path.join(
                dirname,
                f'{self.name}_loss_curve.png',
            ))

        # Plot confusion matrix
        confusion = [x for x in self.output if x["type"] == "confusion_matrix"]
        if confusion:
            fig = plot.plot_confusion_matrix(self.model, self.data.x_test,
                                             self.data.y_test)
            fig.savefig(
                os.path.join(dirname, f"{self.name}_confusion_matrix.png"))
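plot() consumes self.output as a list of typed records. A hypothetical learning_curve entry, with made-up values but field names mirroring the lookups above, would be shaped like this:

# Hypothetical record; only the field names are taken from the code above.
record = {
    "type": "learning_curve",
    "data": {
        "train_sizes": [50, 500, 5000],
        "train_scores": [[0.91, 0.92], [0.94, 0.95], [0.97, 0.96]],
        "test_scores": [[0.70, 0.72], [0.80, 0.79], [0.85, 0.86]],
        "fit_times": [[0.01, 0.02], [0.12, 0.11], [1.3, 1.2]],
    },
}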
Example #14
def NSB_k_NN(df_treatment, embeds_cols, plot_conf=False, savepath=None):
    # Remove moa with only 1 plate
    df_treatment = df_treatment[df_treatment['moa'] != 'Cholesterol-lowering']
    df_treatment = df_treatment[df_treatment['moa'] != 'Kinase inhibitors']
    df_treatment = df_treatment.reset_index(drop=True)

    class_dict = dict(zip(df_treatment['moa'].unique(), np.arange(len(df_treatment['moa'].unique()))))
    df_treatment['moa_class'] = df_treatment['moa'].map(class_dict)

    predictions = list()
    labels = list()
    label_names = list()
    for batch in df_treatment['table_nr'].unique():
        for comp in df_treatment.loc[df_treatment['table_nr'] == batch, 'compound'].unique():
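            # NSCB (not-same-compound-and-batch): the training pool excludes both the
            # held-out compound and all rows from the same batch (table_nr).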
            df_ = df_treatment.loc[(df_treatment['compound'] != comp) & (df_treatment['table_nr'] != batch), :]
            knn = KNeighborsClassifier(n_neighbors=4, algorithm='brute', metric='cosine')
            knn.fit(df_.loc[:, embeds_cols], df_.loc[:, 'moa_class'])

            nn = knn.kneighbors(
                df_treatment.loc[(df_treatment['compound'] == comp) & (df_treatment['table_nr'] == batch), embeds_cols])
            for p in range(nn[1].shape[0]):
                predictions.append(list(df_.iloc[nn[1][p]]['moa_class']))
            labels.extend(
                df_treatment.loc[(df_treatment['compound'] == comp) & (df_treatment['table_nr'] == batch), 'moa_class'])
            label_names.extend(
                df_treatment.loc[(df_treatment['compound'] == comp) & (df_treatment['table_nr'] == batch), 'moa'])

    predictions = np.asarray(predictions)
    k_nn_acc = [accuracy_score(labels, predictions[:, 0]),
                accuracy_score(labels, predictions[:, 1]),
                accuracy_score(labels, predictions[:, 2]),
                accuracy_score(labels, predictions[:, 3])]

    if plot_conf:
        print('There are {} treatments'.format(len(df_treatment)))
        print('NSCB is: {:.2f}%'.format(accuracy_score(labels, predictions[:, 0]) * 100))
        plot_confusion_matrix(labels, predictions[:, 0], class_dict, 'NSCB', savepath)
    return k_nn_acc
Example #15
def train(model_name, category_type, dump=False):
    clf = tfidf_pipeline.make(model_name)

    categories = names.categories[category_type]

    print('Loading data...')
    data = data_loader.load('full', categories)
    train_X, train_y, test_X, test_y = data_loader.split(data, 0.1)
    print('Done.')

    print('Training...')
    clf.fit(train_X, train_y)
    print('Done.')

    print('Testing...')
    predicted = clf.predict(test_X)

    if model_name in ['svr', 'linreg']:
        predicted = np.clip(np.round(predicted), 0, 7)
        accuracy = scorers.err1(test_y, predicted)
        print('Off-by-one accuracy: ' + str(accuracy))
    else:
        accuracy = scorers.err0(test_y, predicted)
        print('Exact accuracy: ' + str(accuracy))
        print(classification_report(test_y, predicted, target_names=categories))
    cm = confusion_matrix(test_y, predicted)
    print(cm)
    plot.plot_confusion_matrix(cm, category_type)

    if dump:
        print('Saving classifier...')
        if not exists('dumps'):
            makedirs('dumps')
        joblib.dump(clf, join('dumps', category_type + '_' + model_name + '_classifier.pkl'))
        print('Done.')

    return clf
Example #16
def nn(X,
       y,
       act='relu',
       validation_split=0.33,
       epoch=50,
       output_file=None,
       title=None):
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=1)  # 70% training and 30% test
    model = Sequential()
    model.add(Dense(20, input_dim=X.shape[1], activation=act))
    model.add(Dense(5, activation=act))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', auc])
    start = time.time()
    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        validation_split=validation_split,
                        batch_size=10,
                        verbose=0)
    y_pred = model.predict(X_test)
    y_pred = y_pred.round()
    cm = confusion_matrix(y_test, y_pred)

    plt.figure()
    plot.plot_confusion_matrix(cm, ['no_default', 'default'], title=title)
    plt.savefig(output_file)

    metrics = model.evaluate(X_test, y_test)
    end = time.time()
    duration = end - start
    return model, history.history, metrics, duration
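Note that the auc entry in the metrics list is not defined in this snippet. One plausible stand-in, assuming a TensorFlow 2.x Keras setup, is the built-in AUC metric:

import tensorflow as tf

# Hypothetical definition of the `auc` referenced by nn() above.
auc = tf.keras.metrics.AUC(name='auc')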
Example #17
                                                 save_best_model=save_best_model,
                                                 model_path=model_path)

  plot_histories(histories, 'DenseNet-LSTM, {}-fold cross-validation'.format(n_folds))

else:
  lstm_densenet = load_model(densenet_lstm_model_path)


## TESTING ##

# Get back the train/test split used
skf = StratifiedKFold(n_splits=5, shuffle=False)
labels = np.argmax(y, axis=1)
train_test = [(train, test) for (train, test) in skf.split(y, labels)]
train_idx, test_idx = zip(*train_test)

# Get emotion predictions
test_indices = test_idx[1]
y_predict = lstm_densenet.predict_classes(densenet_features[test_indices])
y_true = np.argmax(y[test_indices], axis=1)

# Computes the accuracy
acc = (y_predict == y_true).sum() / len(y_predict)
print('Test accuracy : {:.4f}'.format(acc))

# Plot the confusion matrix
cm = confusion_matrix(np.argmax(y[test_indices], axis=1), y_predict)
plot_confusion_matrix(cm, emotions, title='DenseNet-LSTM', normalize=True)

Example #18
    models = [model_per_class(i, labelled_training_data) for i in range(1, 7)]
    print("Models prepared.\n")

    # make predictions for testing data
    print("Making predictions.\n")
    predictions = labelled_testing_data.map(
        lambda x: (float(np.argmax([model.predict(x.features) for model in models]) + 1), x.label))
    print("Predictions completed.\n")

    # calculate precision, recall, and f-measure
    print("Calculating evaluation metrics for feature set 1.\n")
    metrics = MulticlassMetrics(predictions)

    print("F-Measure: ", metrics.fMeasure())
    print("Confusion matrix\n\n")
    plot.plot_confusion_matrix(metrics.confusionMatrix().toArray(), "cm1_refactored.png")

    for i in range(1, 7):
        print("Precision for ", i, " is ", metrics.precision(i))
        print("Recall for ", i, " is ", metrics.recall(i))
        print("f-measure for ", i, " is ", metrics.fMeasure(float(i)), "\n")
        precision.append(metrics.precision(i))
        recall.append(metrics.recall(i))
        fmeasure.append(metrics.fMeasure(float(i)))
    plot.plot_per_activity_metric(precision, recall, fmeasure, "fs1_refactored.png")
    precision = []
    recall = []
    fmeasure = []

    # Feature Set 2: fetch all features
    print("FEATURE SET 2: FETCHING All THE FEATURES")
Example #19
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

lda = LinearDiscriminantAnalysis(n_components=2)
X_lda = lda.fit(exp_data_data, exp_data_labels).transform(exp_data_data)
pred = lda.predict(features_test)

# Print Confusion Matrix
class_names = ["is not book", "is book"]
cnf_matrix = confusion_matrix(labels_test, pred)
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix,
                      classes=class_names,
                      title='Confusion matrix, without normalization - LDA')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix,
                      classes=class_names,
                      normalize=True,
                      title='Normalized confusion matrix - LDA')
plt.show()

from sklearn.metrics import accuracy_score
# Print Accuracy Score
print "Accuracy is", accuracy_score(pred, labels_test)
print "The number of correct predictions is", accuracy_score(pred,
                                                             labels_test,
                                                             normalize=False)
Example #20
	# Create the callbacks
	custom_verbose = CustomVerbose(epochs)
	early_stop = EarlyStopping(patience=100)

	callbacks = [custom_verbose, early_stop]

	lstm_sift, skf, histories = train_crossval(create_lstm_sift_model,
															vgg_sift_features,
															y,
															batch_size=batch_size,
															epochs=epochs,
															callbacks=callbacks,
															n_folds=n_folds,
															save_best_model=save_best_model,
															model_path=trained_model_path)

	print("\nTraining complete.")
	plot_histories(histories, 'VGG-SIFT-LSTM, {}-fold cross-validation'.format(n_folds))


## TESTING ##
model_path = trained_model_path if train else vgg_sift_lstm_model_path

y_pred, y_true = evaluate_model(vgg_sift_features, y, model_path, n_splits=5)

# Plot confusion matrix
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, emotions, title='VGG-SIFT-LSTM  -  MUG', normalize=True)

Example #21
    averaged_tta_predictions_proba = tta_inference(
        model=model,
        data_folder=data_folder,
        input_dim=input_dim,
        passes=ModelConfig.tta_augmentation_passes,
    )
    averaged_tta_predictions = np.argmax(averaged_tta_predictions_proba,
                                         axis=1)
    per_sample_tta_inference_time = (time.time() -
                                     time_anchor) / test_generator_inference.n
    print("TTA inference done")
    print("Metrics computation and plots generation...")
    run.log("Final test loss", log_loss(y_true, y_pred_proba))
    run.log("Final test accuracy", accuracy_score(y_true, y_pred))
    run.log("Final TTA test accuracy",
            accuracy_score(y_true, averaged_tta_predictions))
    run.log("Training time", training_time)
    run.log("Per sample inference time", per_sample_inference_time)
    run.log("Per sample TTA inference time", per_sample_tta_inference_time)
    cm = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(
        cm,
        y_true,
        y_pred,
        np.asarray(list(train_generator.class_indices.keys())),
        path=PathsConfig.confusion_matrix_path,
    )
    run.log_image(name="Confusion matrix",
                  path=PathsConfig.confusion_matrix_path)
    print("Metrics and plots done")
Example #22
        print("Making predictions.\n")
        predictions = labelled_testing_data.map(lambda x: (float(
            np.argmax([model.predict(x.features)
                       for model in models]) + 1), x.label))
        print("Predictions completed.\n")

        # calculate precision, recall, and f-measure
        print("Calculating evaluation metrics.\n")
        metrics = MulticlassMetrics(predictions)

        print("F-Measure: ", metrics.fMeasure())
        results_dictionary[participant] = metrics.fMeasure()
        print("Confusion matrix\n\n")
        confusion_matrix_filename = str(
            participant) + "_" + "cm_refactored.png"
        plot.plot_confusion_matrix(metrics.confusionMatrix().toArray(),
                                   confusion_matrix_filename)

        # print Precision and Recall for all the activities
        for i in range(1, 7):
            print("Precision for ", i, " is ", metrics.precision(i))
            print("Recall for ", i, " is ", metrics.recall(i))
            print("f-measure for ", i, " is ", metrics.fMeasure(float(i)),
                  "\n")
            precision.append(metrics.precision(i))
            recall.append(metrics.recall(i))
            fmeasure.append(metrics.fMeasure(float(i)))
        fscore_filename = str(participant) + "_" + "fs_refactored.png"
        plot.plot_per_activity_metric(precision, recall, fmeasure,
                                      fscore_filename)
        precision = []
        recall = []
    
Example #23
for epoch in range(start_epoch, opt.nepoch):
    # features, labels = seenDataset.epochData(include_bg=False)
    features, labels = seenDataset.epochData(include_bg=True)
    # train GAN
    trainFGGAN(epoch, features, labels)
    # synthesize features
    syn_feature, syn_label = trainFGGAN.generate_syn_feature(unseen_att_labels, unseen_attributes, num=opt.syn_num)
    num_of_bg = opt.syn_num*2

    real_feature_bg, real_label_bg = seenDataset.getBGfeats(num_of_bg)

    # concatenate synthesized + real bg features
    syn_feature = np.concatenate((syn_feature.data.numpy(), real_feature_bg))
    syn_label = np.concatenate((syn_label.data.numpy(), real_label_bg))
    
    trainCls(syn_feature, syn_label, gan_epoch=epoch)

    # -----------------------------------------------------------------------------------------------------------------------
    # plots
    classes = np.concatenate((['background'], get_unseen_class_labels(opt.dataset, split=opt.classes_split)))
    plot_confusion_matrix(np.load(f'{opt.outname}/confusion_matrix_Train.npy'), classes, classes, opt, dataset='Train', prefix=opt.class_embedding.split('/')[-1])
    plot_confusion_matrix(np.load(f'{opt.outname}/confusion_matrix_Test.npy'), classes, classes, opt, dataset='Test', prefix=opt.class_embedding.split('/')[-1])
    plot_acc(np.vstack(trainCls.val_accuracies), opt, prefix=opt.class_embedding.split('/')[-1])

    # save models
    if trainCls.isBestIter:
        trainFGGAN.save_checkpoint(state='best')

    trainFGGAN.save_checkpoint(state='latest')
Example #24
# print(model.predict_classes(x_test))  # predicted classes
# print(model.predict_proba(x_test))  # class probabilities

pred = []
y_pred = model.predict_classes(x_test)
for i in range(len(x_test)):
    pred.append(y_pred[i])

cnf_matrix = confusion_matrix(label, pred)
print(cnf_matrix)

import matplotlib.pyplot as plt
from plot import plot_confusion_matrix

# # Compute confusion matrix
# cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
# plt.figure()
plot_confusion_matrix(cnf_matrix,
                      classes=range(0, 10),
                      title='Confusion matrix, without normalization')

# # Plot normalized confusion matrix
# plt.figure()
# plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
#                       title='Normalized confusion matrix')

plt.show()
Example #25
from load import load_x,load_y
from grid import grid_search
from plot import plot_confusion_matrix

if __name__ == '__main__':
    X_train = load_x('X_train.csv')
    y_train = load_y('Y_train.csv')
    X_test = load_x('X_test.csv')
    y_test = load_y('Y_test.csv')

    log2c = log2g = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
    confusion_matrix = grid_search(log2c, log2g, X_train, y_train, X_test, y_test)

    plot_confusion_matrix(confusion_matrix, log2c, log2g)
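The grid module is not shown. A minimal sketch of what grid_search plausibly does here, sweeping an RBF-SVM over C = 2**c and gamma = 2**g and returning a matrix of test accuracies for the heatmap (an assumption, not the module's actual code):

import numpy as np
from sklearn.svm import SVC

def grid_search(log2c, log2g, X_train, y_train, X_test, y_test):
    # One test-set accuracy per (C, gamma) pair on the log2 grid.
    scores = np.zeros((len(log2c), len(log2g)))
    for i, c in enumerate(log2c):
        for j, g in enumerate(log2g):
            clf = SVC(C=2.0 ** c, gamma=2.0 ** g, kernel='rbf')
            clf.fit(X_train, y_train)
            scores[i, j] = clf.score(X_test, y_test)
    return scores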
Example #26
import itertools

import matplotlib
matplotlib.use("Agg")  # save figures without displaying them
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from pylab import rcParams

epoch_list = [i for i in range(config.epoch)]
class_names = [i for i in range(3)]
pred_label = model.predict(test_data)
print(pred_label)
# for i in pred_label:
#     i = i * (max(new) - min(new)) + min(new)
print(pred_label)
pred_label = list(itertools.chain.from_iterable(pred_label))
test_label = list(itertools.chain.from_iterable(test_label))
print(pred_label)
print(test_label)


cnf_matrix = confusion_matrix(test_label, pred_label)
# np.set_printoptions(precision=2)
plot.plot_confusion_matrix(cnf_matrix, class_names, False, "Confusion matrix", "/Users/nicole/Desktop/python/finalproject")
plot.plot_curve(epoch_list, train_loss_list, "Accuracy curve", "/Users/wei-jer-chang/Desktop/final project", training=True, accuracy=False)
   

Example #27
def image(matrix, filename=None, size='b'):
    classes_dir = 'TestSet 2' if size == 'b' else 'TestSet'
    plot_confusion_matrix(matrix, getClassNames(classes_dir), filename)
Example #28
    print("Preparing models\n")
    models = [model_per_class(i, labelled_training_data) for i in range(1, 7)]
    print("Models prepared.\n")

    # make predictions for testing data
    print("Making predictions.\n")
    predictions = labelled_testing_data.map(lambda x: (float(np.argmax([model.predict(x.features) for model in models]) + 1), x.label))
    print("Predictions completed.\n")

    # calculate precision, recall, and f-measure
    print("Calculating evaluation metrics for feature set 1.\n")
    metrics = MulticlassMetrics(predictions)

    print("F-Measure: ", metrics.fMeasure())
    print("Confusion matrix\n\n")
    plot.plot_confusion_matrix(metrics.confusionMatrix().toArray(), "cm1_normal.png")

    for i in range(1, 7):
        print("Precision for ", i, " is ", metrics.precision(i))
        print("Recall for ", i, " is ", metrics.recall(i))
        print("f-measure for ", i, " is ", metrics.fMeasure(float(i)), "\n")
        precision.append(metrics.precision(i))
        recall.append(metrics.recall(i))
        fmeasure.append(metrics.fMeasure(float(i)))
    plot.plot_per_activity_metric(precision, recall, fmeasure, "fs1_normal.png")
    precision = []
    recall = []
    fmeasure = []

    # Feature Set 2: fetch all features
    print("FEATURE SET 2: FETCHING All THE FEATURES")
Example #29
def draw_histos(vis=False, filepath='/output/evalSave/'):
    print(filepath)
    if not os.path.exists(filepath):
        print('path created')
        os.makedirs(filepath)
    
    with open(os.path.join(filepath, 'eval.json'), 'r') as j:
        data = json.load(j)
        
    # build precision-recall curves
    prec_03, rec_03 = eval_plot.sort_prec_rec(data['evaluation']['prec']['0.3'], data['evaluation']['rec']['0.3'])
    eval_plot.plot_prec_rec(rec_03, prec_03, 'Thrs: 0.3', color=(1, 0, 1))
        
    prec_05, rec_05 = eval_plot.sort_prec_rec(data['evaluation']['prec']['0.5'], data['evaluation']['rec']['0.5'])
    eval_plot.plot_prec_rec(rec_05, prec_05, 'Thrs: 0.5', color=(0, 1, 0))
    
    # build smoothed precision-recall curves
    # smooth PR-curve as described in http://cs229.stanford.edu/section/evaluation_metrics.pdf#
    smoothed_prec_03 = [max(prec_03[idx:]) for idx, _ in enumerate(prec_03)]
    smoothed_prec_05 = [max(prec_05[idx:]) for idx, _ in enumerate(prec_05)]
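    # (Each comprehension above recomputes max(prec[idx:]) for every index, which is O(n^2);
    # an equivalent vectorized form is np.maximum.accumulate(np.asarray(prec_03)[::-1])[::-1].)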
    
    eval_plot.plot_prec_rec(rec_03, smoothed_prec_03, 'Thrs_smoothed: 0.3', color=(1, 0, 1), linestyle="--")
    eval_plot.plot_prec_rec(rec_05, smoothed_prec_05, 'Thrs_smoothed: 0.5', color=(0, 1, 0), linestyle="--")
    
    #auc_03 = np.trapz(smoothed_prec_03, rec_03)
    #auc_05 = np.trapz(smoothed_prec_05, rec_05)

    auc_03 = np.trapz(prec_03, rec_03)
    auc_05 = np.trapz(prec_05, rec_05)
    
    title = "conf. stride: {:0.2f}, max. conf.: {:0.4f}, AUC_03: {:0.2f}, AUC_05: {:0.2f}".\
                                                format(data['confidence stride'], data['max_conf'], auc_03, auc_05)

    eval_plot.config(data['name'], title=title)
 
    
    eval_plot.savefig(os.path.join(filepath, (data['name'] + '.svg')))
    if vis:
        eval_plot.show()
    else:
        eval_plot.clearfig()

    # build mean iou - recall curves        
    postfix = ['', '_low', '_mid', '_high']
    
    for p in postfix:        
        eval_plot.plot_prec_rec(data['evaluation']['rec' + p]['0.3'], data['evaluation']['m_iou' + p]['0.3'], 'halt au', color=(1, 0, 1))
                
        eval_plot.plot_prec_rec(data['evaluation']['rec' + p]['0.5'], data['evaluation']['m_iou' + p]['0.5'], 'halt au', color=(0, 1, 0))
        
        title = "conf. stride: {:0.2f}, max. conf.: {:0.4f},\nAUC_03: {:0.2f}, AUC_05: {:0.2f}".\
                                                    format(data['confidence stride'], data['max_conf'], auc_03, auc_05)
               
        eval_plot.config(data['name'], title=title)
        
        eval_plot.savefig(os.path.join(filepath, 'm_iou' + p + '.svg'))
        if vis:
            eval_plot.show()
        else:
            eval_plot.clearfig()
    
    classes = ["Background", "Ferry", "Buoy", "Vessel/ship", "Speed boat", "Boat", "Kayak", "Sail boat", "Swimming person", "Flying bird/plane", "Other"]        
    
    num_classes = data['evaluation']['conf_mat']["0.3"][0][1]
    cm = np.array(data['evaluation']['conf_mat']["0.3"][0][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_03_05.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.3"][1][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_03_075.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.5"][0][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_05_05.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.5"][1][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_05_075.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
Example #30
import numpy as np
import matplotlib.pyplot as plt
# Assumed source of `mnist` below; the original snippet does not show this import
from keras.datasets import mnist
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Confusion matrix plotting module
from plot import plot_confusion_matrix

# Load data set
(x_traino, y_train), (x_testo, y_test) = mnist.load_data()
x_train = np.reshape(x_traino, (60000, 28 * 28))
x_test = np.reshape(x_testo, (10000, 28 * 28))
x_train, x_test = x_train / 255.0, x_test / 255.0

logreg = LogisticRegression(solver='saga',
                            multi_class='multinomial',
                            max_iter=100,
                            verbose=2)
est = logreg.fit(x_train, y_train)

y_pred = logreg.predict(x_test)

# Accuracy
score = accuracy_score(y_test, y_pred)
print("Accuracy score: %.4f" % score)

# Show confusion matrix for prediction
conf = plot_confusion_matrix(y_test,
                             y_pred,
                             np.array(range(10)),
                             normalize=True)
plt.show()
Example #31
plot_histories(hist_phrnn, 'PHRNN Model - ADAS&ME')
plot_histories(hist_tcnn, 'TCNN Model - ADAS&ME')

#### TESTING ####

print('\nTesting model...')
merge_weight = 0.45

# set tcnn_model_path to None to test only PHRNN and vice-versa
y_pred, y_true = evaluate_tcnn_phrnn_model(tcnn_model_path,
                                           phrnn_model_path,
                                           tcnn_features,
                                           phrnn_features,
                                           subjects,
                                           data_path,
                                           frames_data_path,
                                           merge_weight=merge_weight)
print_model_eval_metrics(y_pred, y_true)

# Plot confusion matrix
cm = confusion_matrix(y_true, y_pred, labels=list(range(nb_emotions)))
plot_confusion_matrix(cm,
                      emotions,
                      title='Late Fusion Model  -  ADAS&ME',
                      normalize=True)
plot_confusion_matrix(cm,
                      emotions,
                      title='Late Fusion Model  -  ADAS&ME',
                      normalize=False)