Example #1
File: mlp.py Project: pajkossy/nn
    def report_accuracy(self, y, a2, test=False):
        # a2 holds the network outputs, y the one-hot target labels;
        # the original names were swapped (predictions taken from y)
        pred = np.argmax(a2, axis=1)
        true = np.argmax(y, axis=1)
        ratio = float(np.sum(pred == true)) / y.shape[0]
        confusion_matrix = get_confusion_matrix(pred, true)
        logging.info("Correctly classified: {}%".format(ratio * 100))
        logging.info("Confusion matrix:\n{}".format(confusion_matrix))
        return ratio, confusion_matrix
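
Every example on this page calls a project-local get_confusion_matrix rather than a library routine, and the signatures vary between projects (Example 6's version below also returns an accuracy, for instance). As a point of reference, a minimal NumPy sketch, an assumption rather than any of these projects' actual code, taking integer class labels and returning a matrix with rows indexed by the true class:

import numpy as np

def get_confusion_matrix(true, pred, n_classes=None):
    # Rows = true classes, columns = predicted classes (assumed convention)
    true = np.asarray(true, dtype=int)
    pred = np.asarray(pred, dtype=int)
    if n_classes is None:
        n_classes = int(max(true.max(), pred.max())) + 1
    matrix = np.zeros((n_classes, n_classes), dtype=int)
    # np.add.at accumulates correctly even when a (true, pred) pair repeats
    np.add.at(matrix, (true, pred), 1)
    return matrix

When scikit-learn is available, sklearn.metrics.confusion_matrix computes the same matrix with the same (y_true, y_pred) argument order.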
Example #2
def evaluateResults(estimator, test_set, test_labels, estimator_name='Unknown', file_name=''):
    result = estimator.predict(test_set)

    aux = result == test_labels
    correct = int(aux.sum())
    _accuracy = (correct * 100.0) / len(test_set)  # float division, also under Python 2

    cm = ut.get_confusion_matrix(test_labels, result, estimator_name, file_name=file_name)
    print()
    _precision, _recall, _f1score, _support = ut.get_measures_for_each_class(test_labels, result)
    print('Estimator', estimator_name)
    print('Average Accuracy:\t', _accuracy)
    print('Average Precision:\t', _precision)
    print('Average Recall:\t', _recall)
    print('Average F1 Measure:\t', _f1score)
    print()
    return result, _accuracy, _precision, _recall, _f1score
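
The ut.get_measures_for_each_class helper is also project code. If scikit-learn is available, precision_recall_fscore_support returns the same four per-class arrays, so a drop-in sketch (assuming the helper mirrors sklearn's semantics) is:

from sklearn.metrics import precision_recall_fscore_support

# With average=None (the default), each returned value is a per-class array
_precision, _recall, _f1score, _support = precision_recall_fscore_support(
    test_labels, result)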
Example #3
    def test(self, test_data, test_label, _intv=None):
        '''test_data should also be an RDD object'''

        import numpy as np
        from utils import get_confusion_matrix
        pred_tuple = self.predict(test_data, _intv)

        # Get actual labels
        true_tuple = test_label.collect()

        pred, true = np.zeros(len(pred_tuple)), np.zeros(len(pred_tuple))
        for i in range(len(pred)):
            # Unpack predictions and labels into distinct names; the original
            # second unpack overwrote the first, filling pred with true labels
            pred_idx, pred_cl = pred_tuple[i]
            true_idx, true_cl = true_tuple[i]
            if _intv is None:
                pred[int(pred_idx)] = int(pred_cl)
                true[int(true_idx)] = int(true_cl)
            else:
                pred[_intv.index(pred_idx)] = int(pred_cl)
                true[_intv.index(true_idx)] = int(true_cl)

        confusion_matrix = get_confusion_matrix(pred, true)
        return pred_tuple, confusion_matrix
Example #4
json_str3 = json.dumps(tr_los_dict, indent=4)
json_str4 = json.dumps(te_los_dict, indent=4)
if not os.path.exists('results'):
    os.mkdir('results')
with open('results/tr_acc_epoch.json', 'w') as json_file:
    json_file.write(json_str1)
with open('results/te_acc_epoch.json', 'w') as json_file:
    json_file.write(json_str2)
with open('results/tr_los_epoch.json', 'w') as json_file:
    json_file.write(json_str3)
with open('results/te_los_epoch.json', 'w') as json_file:
    json_file.write(json_str4)

# plot the accuracy and loss figures
plot_accuracy_curve(train_accuracy, test_accuracy, show=True)
plot_loss_curve(train_loss, test_loss, show=True)
# plot confusion matrix of the predicted results of the latest model
net.eval()
predict_list = np.array([]).astype(int)
label_list = np.array([]).astype(int)
with torch.no_grad():
    for images, labels in test_loader:
        images = images.to(device)  # Variable wrappers are unnecessary in modern PyTorch
        labels = labels.to(device)
        outputs = net(images)
        predict_y = torch.max(outputs, dim=1)[1]
        predict_list = np.hstack((predict_list, predict_y.cpu().numpy()))
        label_list = np.hstack((label_list, labels.cpu().numpy()))
cm = get_confusion_matrix(classes, predict_list, label_list)
plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=True)
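
plot_confusion_matrix is likewise undefined in the snippets that call it (Examples 4, 5 and 8). A minimal matplotlib sketch with the same (cm, classes, title, normalize) signature, offered as an assumption about what such a helper does rather than any project's actual code:

import matplotlib.pyplot as plt
import numpy as np

def plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=False):
    if normalize:
        # Row-normalize so each row sums to 1 (per-true-class rates)
        cm = cm.astype(float) / cm.sum(axis=1, keepdims=True)
    fig, ax = plt.subplots()
    ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    ax.set(xticks=np.arange(len(classes)), yticks=np.arange(len(classes)),
           xticklabels=classes, yticklabels=classes,
           xlabel='Predicted label', ylabel='True label', title=title)
    plt.setp(ax.get_xticklabels(), rotation=45, ha='right')
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            text = format(cm[i, j], '.2f') if normalize else format(int(cm[i, j]), 'd')
            ax.text(j, i, text, ha='center', va='center')
    fig.tight_layout()
    plt.show()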
Example #5
test_y = test_y.reshape(10000)
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
# creating a classifier object
clf = KNeighborsClassifier(n_neighbors=50, n_jobs=-1)
print("Training ...... ")
#clf.fit(train_x, train_y)

# clf.fit(X_transformed, cifar10_labels)
# print("start to predict")
# pred_labels = clf.predict(X_test_transformed)
# print(np.mean(pred_labels == cifar10_test_label))

# loading the previously trained model instead of refitting
#utils.save_model(clf, "knn_cifar10.pickle")
clf = utils.load_model("knn_cifar10.pickle")

pred = clf.predict(test_x)
acc = get_accuracy(test_y, pred)

print("Accuracy : ", acc)
print("Test DATA Prediction ", pred)
cm = utils.get_confusion_matrix(actual=test_y,
                                pred=pred,
                                n_classes=10,
                                class_names=text_labels)
print("Confusion Matrix")
print(cm)
utils.plot_confusion_matrix(cm,
                            classes=text_labels,
                            title='Confusion matrix, without normalization')
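
utils.save_model and utils.load_model are not shown either; given the .pickle filename, a plain-pickle sketch (hypothetical helpers matching the calls above, not the project's actual implementation) would be:

import pickle

def save_model(model, path):
    # Serialize any picklable estimator to disk
    with open(path, 'wb') as f:
        pickle.dump(model, f)

def load_model(path):
    with open(path, 'rb') as f:
        return pickle.load(f)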
Example #6
characterisation_latency = []
for i in range(len(predC)):
    del_st = (Y_pos[i] - 800) // 30 + 1
    currC = predC[i][del_st:]
    currR = predR[i][del_st:]

    cls_out, cls_loc = cls_strategy.get_prediction(currC)

    if cls_out == 1:
        currR = currR[cls_loc:]
        reg_out, reg_loc = reg_strategy.get_prediction(currR)
        strategy_out.append([reg_out])
        characterisation_latency.append(cls_loc + reg_loc)
    else:
        strategy_out.append([0.])

print("\n\nRESULTS")
print("-------\n\n")

print("Average Latency (in sec) : ", np.mean(characterisation_latency) * 30)

strategy_out = scalerY.inverse_transform(strategy_out)
Yr = scalerY.inverse_transform([ele[0] for ele in Yr])

acc, conf = get_confusion_matrix(strategy_out, Yc)
print("Classification accuracy : %.2f%%" % (acc * 100))
print("Classification confusion matrix\n", conf)

print("Overall MAE error : ", get_mae(strategy_out, Yr))
print("Overall RMSE error : ", get_rmse(strategy_out, Yr))
Example #7
    print("Validation after {} iterations".format(iteration))
    print("Val accuracy: {:.3f}".format(accuracy))
    print("Val miou: {:.3f}".format(miou))


if __name__ == '__main__':
    args = parser.parse_args()
    resnet50_weights = download_resnet50_weights()

    resnet50, deeplab = create_deeplab_model(resnet50_weights,
                                             args.deeplab_weights)

    # NeuPy's ">" operator composes networks, feeding resnet50 into deeplab
    deeplab = resnet50 > deeplab
    confusion = np.zeros((21, 21))

    print("Start validation")
    for i, (image, annotation) in tqdm(enumerate(iter_data(VALIDATION_SET))):
        annotation = make_annotation_one_hot_encoded(annotation)
        annotation = np.expand_dims(annotation, axis=0)

        image = subtract_channel_mean(image)
        image = np.expand_dims(image, axis=0).astype(np.float32)

        segmentation = deeplab.predict(image)
        confusion += get_confusion_matrix(annotation, segmentation)

        if i % 20 == 0 and i != 0:
            summarize(confusion, i + 1)

    summarize(confusion, i + 1)
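
summarize apparently reduces the accumulated 21x21 confusion matrix to the pixel accuracy and mean IoU printed at the top of this example; Example 9 below calls a segmentation_metrics helper directly for the same purpose. A sketch using the standard formulas, assuming rows index the true class:

import numpy as np

def segmentation_metrics(confusion):
    # Pixel accuracy: correctly classified pixels over all pixels
    accuracy = np.trace(confusion) / confusion.sum()
    # Per-class IoU: TP / (TP + FP + FN), then averaged over classes
    tp = np.diag(confusion)
    fp = confusion.sum(axis=0) - tp
    fn = confusion.sum(axis=1) - tp
    iou = tp / np.maximum(tp + fp + fn, 1e-12)
    return accuracy, iou.mean()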
Example #8
train_y = data['train_y']
test_x = data['test_x']
test_y = data['test_y']
text_labels = data['text_labels']
#from sklearn import decomposition
#pca = decomposition.PCA(n_components=768)
#pca.fit(train_x)
#utils.save_model(pca,"pca.pkl")
#pca=utils.load_model("pca.pkl")
#train_x = pca.transform(train_x)
#test_x = pca.transform(test_x)
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
# normalize with training-set statistics (avoid shadowing the max/min builtins)
x_max = np.max(train_x)
x_min = np.min(train_x)
x_mean = np.mean(train_x)
train_x = (train_x - x_mean) / (x_max - x_min)
test_x = (test_x - x_mean) / (x_max - x_min)

pred, accuracy = cnn(train_x, train_y, test_x, test_y)
print("Accuracy : ", (accuracy))
print("Test DATA Prediction ", (pred))
cm = utils.get_confusion_matrix(actual=utils.back_from_onehot(test_y[:500]),
                                pred=pred,
                                n_classes=10,
                                class_names=text_labels)
print("Confusion Matrix")
print(cm)
utils.plot_confusion_matrix(cm,
                            classes=text_labels,
                            title='Confusion matrix, without normalization')
Example #9
        deeplab,
        error='categorical_crossentropy',
        step=0.00001,
        verbose=True,
        addons=[algorithms.WeightDecay],
        decay_rate=0.0001,
    )

    for i in range(args.epochs):
        print("Epoch #{}".format(i + 1))

        for x_batch, y_batch in training_iterator():
            x_batch = resnet50.predict(x_batch)
            optimizer.train(x_batch, y_batch, epochs=1, summary='inline')

        print("Start validation")
        val_images, val_annotations = next(validation_iterator())
        segmentation = deeplab.predict(resnet50.predict(val_images))
        confusion = get_confusion_matrix(val_annotations, segmentation)

        accuracy, miou = segmentation_metrics(confusion)
        print("Val accuracy: {:.3f}".format(accuracy))
        print("Val miou: {:.3f}".format(miou))

        filename = 'deeplab_{:0>3}_{:.3f}_{:.3f}.hdf5'.format(
            i, accuracy, miou)
        filepath = os.path.join(storage_folder, filename)

        print("Saved: {}".format(filepath))
        storage.save(deeplab, filepath)
Example #10
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
train_writer = tf.summary.create_file_writer("logs/train/")
test_writer = tf.summary.create_file_writer("logs/test/")
train_step = test_step = 0

for epoch in range(num_epochs):
    confusion = np.zeros((len(class_names), len(class_names)))

    # Iterate through training set
    for batch_idx, (x, y) in enumerate(ds_train):
        with tf.GradientTape() as tape:
            y_pred = model(x, training=True)
            loss = loss_fn(y, y_pred)

        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        acc_metric.update_state(y, y_pred)
        confusion += get_confusion_matrix(y, y_pred, class_names)

    with train_writer.as_default():
        tf.summary.image(
            "Confusion Matrix",
            plot_confusion_matrix(confusion / (batch_idx + 1), class_names),  # batch_idx is zero-based
            step=epoch,
        )

    # Reset accuracy between epochs (and between the train and test phases)
    acc_metric.reset_states()
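
For tf.summary.image to accept the plot, this plot_confusion_matrix must return a 4-D image tensor rather than display a figure. The usual matplotlib-to-tensor conversion, sketched here as an assumption about the helper rather than its actual code:

import io
import matplotlib.pyplot as plt
import tensorflow as tf

def plot_confusion_matrix(cm, class_names):
    fig, ax = plt.subplots()
    ax.imshow(cm, cmap=plt.cm.Blues)
    ax.set(xticks=range(len(class_names)), yticks=range(len(class_names)),
           xticklabels=class_names, yticklabels=class_names,
           xlabel='Predicted', ylabel='True')
    # Render the figure into a PNG buffer, then decode it as a [1, H, W, 4] tensor
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    plt.close(fig)
    buf.seek(0)
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    return tf.expand_dims(image, 0)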
Example #11
    def score(self, input_data, label):
        input_pad = self.prepare_input(input_data)
        label = self.prepare_label(label)
        prediction = self.model.predict(input_pad)
        get_score_senti(label, prediction)
        get_confusion_matrix(label, prediction)  # also report the confusion matrix