Example #1
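This snippet assumes a camera handle `cap`, a trained `model`, and the cv2/numpy imports are created elsewhere. A minimal sketch of that setup, assuming a Keras model trained on 28x28 grayscale letter images (the file name is a placeholder, not from the original):

import cv2
import numpy as np
from tensorflow.keras.models import load_model  # assumption: the model is a Keras model

model = load_model('sign_language_model.h5')  # placeholder path
cap = cv2.VideoCapture(0)  # default webcam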
while True:
    # cap.read() combines cap.grab() and cap.retrieve():
    # ret is the success flag, frame is the captured image
    ret, frame = cap.read()
    if not ret:
        break

    # convert the frame to grayscale
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # resize to the model's 28x28 input before reshaping
    frame = cv2.resize(frame, (28, 28))
    cv2.imshow('object detect', frame)
    # reshape to the input tensor dimensions (batch, height, width, channels)
    gray = np.reshape(frame, (-1, 28, 28, 1))
    print(gray.shape)
    # run the model once and reuse the probabilities
    probs = model.predict(gray)
    print(probs)
    pred = np.argmax(probs)

    # map the class index to a letter; 'J' is skipped (0-8 -> A-I, 9-24 -> K-Z)
    if pred in range(9):
        print(chr(pred + 65))
    elif pred in range(9, 25):
        print(chr(pred + 66))

    # press 'q' to close the window and exit the loop
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break

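# once the loop exits, release the camera handle (assumes cap is a cv2.VideoCapture)
cap.release()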
# input = cv2.imread('B.jpg',0)
# input = cv2.resize(input, (28,28))
# cv2.imshow('image', input)
Example #2
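The imports are not part of this snippet; based on the names used below, they would look roughly like the following (get_data, my_model, onenothot_labels and plot_confusion_matrix are project-local helpers whose modules are not shown, so they are only noted in comments):

import time
import numpy as np
import matplotlib.pyplot as plt
from tflearn import DNN
from sklearn.metrics import confusion_matrix
# project-local helpers assumed to exist elsewhere in the repo:
# get_data, my_model, onenothot_labels, plot_confusion_matrix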
def main():
    a = time.time()
    x, y, x_test, y_test, img_prep, img_aug = get_data()  # modified for spectra
    b = time.time()

    x = x.reshape((x.shape[0], x.shape[1], 1))
    x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))

    print('data time: {}'.format(b - a))
    #     with tf.device('/gpu:0'):
    #         with tf.contrib.framework.arg_scope([tflearn.variables.variable], device='/cpu:0'):
    model = my_model(img_prep, img_aug)
    network = DNN(model)
    a = time.time()
    network.fit(x,
                y,
                n_epoch=1,
                shuffle=True,
                validation_set=(x_test, y_test),
                show_metric=True,
                batch_size=32,
                run_id='aa2')
    print(network.evaluate(x_test[0:32, :], y_test[0:32, :]))
    b = time.time()
    print('total time: {}'.format(b - a))

    # evali= model.evaluate(x_test, y_test)
    # print("Accuracy of the model is :", evali)
    # predict the test set in chunks to limit memory use, then one-hot encode the predictions
    divideby = 100
    dindex = int(x_test.shape[0] / divideby)
    labels = np.zeros((x_test.shape[0], 7))
    for i in range(divideby):
        start = i * dindex
        end = start + dindex
        prob_y = network.predict(x_test[start:end, :])
        predictions = np.argmax(prob_y, axis=1)
        for j in range(len(predictions)):
            labels[start + j, predictions[j]] = 1

    # append one dummy sample per class so every class shows up in the confusion matrix
    appendme = np.array([0, 1, 2, 3, 4, 5, 6])
    y_test_decode = onenothot_labels(y_test)
    y_test_decode = np.concatenate((y_test_decode, appendme))
    labels_decode = onenothot_labels(labels)
    labels_decode = np.concatenate((labels_decode, appendme))

    # note: the appended dummy samples always match, so this slightly overstates accuracy
    accuracy = float(np.sum(labels_decode == y_test_decode)) / float(
        y_test_decode.shape[0])
    print('test accuracy: {}'.format(accuracy))

    class_names = [
        'Background', 'HEU', 'WGPu', 'I131', 'Co60', 'Tc99', 'HEUandTc99'
    ]

    cnf_matrix = confusion_matrix(y_test_decode, labels_decode)

    # print("The predicted labels are :", lables[f])
    # prediction = model.predict(testImages)
    # print("The predicted probabilities are :", prediction[f])
    fig = plt.figure()
    plot_confusion_matrix(cnf_matrix,
                          classes=class_names,
                          normalize=True,
                          title='Normalized confusion matrix')
    fig.savefig('classification_confusion_matrix.png')
    return
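onenothot_labels is not shown in the snippet; judging from how it is used (one-hot rows in, a 1-D array of class indices out), a minimal sketch could be:

def onenothot_labels(one_hot):
    # invert the one-hot encoding: index of the 1 in each row
    return np.argmax(one_hot, axis=1)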
Example #3
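The softmax, adam and accu objects passed to tflearn.regression below are built outside the snippet. A minimal sketch of how they are typically constructed with TFLearn (the input width, hidden size, class count and learning rate are assumptions, not values from the original):

import tflearn
from tflearn import DNN

net_in = tflearn.input_data(shape=[None, 784])                        # assumed input width
hidden = tflearn.fully_connected(net_in, 128, activation='relu')      # assumed hidden layer
softmax = tflearn.fully_connected(hidden, 10, activation='softmax')   # assumed class count
adam = tflearn.optimizers.Adam(learning_rate=0.001)
accu = tflearn.metrics.Accuracy()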
net = tflearn.regression(softmax, optimizer=adam, metric=accu)
# optimizer = tflearn.optimizers.Optimizer(learning_rate=0.01, False, "")
# loss = tf.reduce_mean(tf.nn.log_poisson_loss(logits=softmax, labels=Y))
# optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
# init = tf.global_variables_initializer()


dnn = DNN(net, clip_gradients=5.0, tensorboard_verbose=0,
          tensorboard_dir='/tmp/tflearn_logs/', checkpoint_path=None,
          best_checkpoint_path=None, max_checkpoints=None,
          session=None, best_val_accuracy=0.0)

dnn.fit(train_X, train_Y, 10, validation_set=(test_X, test_Y),
        show_metric=True, run_id="dense_model")

pre_Y = dnn.predict(test_X)
# pre_pro_Y = dnn.predict_proba(test_X)


# pre_Y = clf.predict(test_X)
# pre_pro_Y = clf.predict_proba(test_X)
#
# def accurcy(test_Y, pre_Y):
#     print(len(pre_Y), len(test_Y))
#     all_size = len(test_X)
#     right = 0
#     pre_not0 = 0
#     test_not0 = 0
#     for i in range(all_size):
#         if test_Y[i] == pre_Y[i]:
#             right += 1
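The main() below relies on several imports that are not shown in the snippet; the likely set is:

import glob
import time
import numpy as np
from tflearn import DNN
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from sklearn.multiclass import OneVsRestClassifier
# get_data and my_model are project-local helpers assumed to be defined elsewhere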
def main():
    x, y, x_test, y_test, img_prep, img_aug = get_data()
    #     with tf.device('/gpu:0'):
    #         with tf.contrib.framework.arg_scope([tflearn.variables.variable], device='/cpu:0'):
    model, features = my_model(img_prep, img_aug)
    network = DNN(model)
    # network.fit(x, y, n_epoch=100, shuffle=True, validation_set=(x_test, y_test), show_metric=True,
    #             batch_size=100, run_id='aa2')
    # task 3 stuff
    network.load('./lenet5_run.tflearn')
    feature_generator = DNN(features, session=network.session)
    # cache the 512-dim CNN features for the SVM; rebuild them only if the .npy file is missing
    if len(glob.glob('./lenet5_svm_features.npy')) != 1:
        svm_features = np.zeros((0, 512))
        for i in range(x.shape[0]):
            if i % 1000 == 0:
                print(i, svm_features.shape)
            chuckmein = x[i, :, :].reshape(
                (1, x.shape[1], x.shape[2], x.shape[3]))
            svm_features = np.vstack(
                (svm_features, feature_generator.predict(chuckmein)))
        np.save('./lenet5_svm_features.npy', svm_features)
    else:
        svm_features = np.load('./lenet5_svm_features.npy')

    if len(glob.glob('./lenet5_svm_features_test.npy')) != 1:
        svm_features_test = np.zeros((0, 512))
        for i in range(x_test.shape[0]):
            chuckmein = x_test[i, :, :].reshape(
                (1, x_test.shape[1], x_test.shape[2], x_test.shape[3]))
            svm_features_test = np.vstack(
                (svm_features_test, feature_generator.predict(chuckmein)))
        np.save('./lenet5_svm_features_test.npy', svm_features_test)
    else:
        svm_features_test = np.load('./lenet5_svm_features_test.npy')
    #  from here it's y vs. y_predict
    # convert the one-hot labels back to integer class indices for the SVM
    svm_y = np.argmax(y, axis=1)
    svm_y_test = np.argmax(y_test, axis=1)

    # clf = svm.SVC()
    # clf.fit(svm_features, svm_y)
    # predicted_y = clf.predict(svm_features_test)
    # accuracy_mask = svm_y_test == predicted_y
    # accuracy = float(len(list(compress(range(len(accuracy_mask)), accuracy_mask)))) / float(len(accuracy_mask))
    # print(accuracy)

    # one-vs-rest over bagged linear SVCs; training on subsets lets the fit run across n_jobs workers
    n_estimators = 10
    n_jobs = 4
    start = time.time()
    clf = OneVsRestClassifier(
        BaggingClassifier(SVC(kernel='linear',
                              probability=True,
                              class_weight=None),
                          max_samples=1.0 / n_estimators,
                          n_estimators=n_estimators,
                          n_jobs=n_jobs))
    clf.fit(svm_features, svm_y)
    end = time.time()
    print("Bagging SVC", end - start, clf.score(svm_features_test, svm_y_test))

    # y_test vs. predicted_y metric

    return
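One hedged way to finish the '# y_test vs. predicted_y metric' TODO inside main(), before the final return, using scikit-learn calls on names already in scope (not part of the original):

    predicted_y = clf.predict(svm_features_test)
    print(confusion_matrix(svm_y_test, predicted_y))
    print(classification_report(svm_y_test, predicted_y))

Here classification_report and confusion_matrix would come from sklearn.metrics.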