Example #1
    def test_ae_class(self, objData, normalize=False):

        if normalize is True:
            objData.normalization(self.normalization_max)

        # Evaluate one sample per batch; the original minibatch size is restored afterwards
        minibatch_aux = objData.minibatch
        objData.change_minibatch(1)

        y_true = objData.labels
        y_result = []

        print('\n# TEST AUTOENCODER BY CLASS TRAINED')
        for ix in range(objData.total_batchs_complete):
            x_, label = objData.generate_batch()
            # Reconstruction cost of the sample under each per-class autoencoder
            cost_class = []
            for class_i in range(self.num_class):
                cost_i = self.sess.run(self.AEclass[class_i].cost,
                                       feed_dict={self.x_batch: x_})
                cost_class.append(cost_i)

            # Predicted class: the autoencoder with the lowest reconstruction cost
            y_result.append(np.argsort(cost_class)[0])
            objData.next_batch_test()

        objData.change_minibatch(minibatch_aux)
        utils.metrics_multiclass(y_true, y_result)
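Every snippet on this page defers the final scoring to utils.metrics_multiclass, whose implementation is not shown. Below is a minimal sketch of what such a helper could look like, assuming it takes two 1-D sequences of integer class labels and reports a confusion matrix plus overall accuracy via scikit-learn; the real utils module may differ.

import numpy as np
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

def metrics_multiclass(y_true, y_pred):
    # Coerce both inputs to flat integer arrays before scoring
    y_true = np.asarray(y_true).astype(int)
    y_pred = np.asarray(y_pred).astype(int)
    print(confusion_matrix(y_true, y_pred))
    print(classification_report(y_true, y_pred))
    acc = accuracy_score(y_true, y_pred)
    print('Accuracy: {:.4f}'.format(acc))
    return acc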
Example #2
    def test_vgg(self, objData, normalize=False):

        if normalize is True:
            objData.normalization(self.normalization_max)

        count_success = 0
        prob_predicted = []
        plot_predicted = []

        label_total = []
        # Empty (0, num_class) accumulator for the per-batch probability matrices
        prob_total = np.empty((0, self.num_class))

        print('\n# TEST VGG TRAINED')
        for i in range(objData.total_batchs_complete):
            batch, label = objData.generate_batch()
            prob = self.sess.run(self.probVGG, feed_dict={self.x_batch: batch})

            label_total = np.concatenate((label_total, label), axis=0)
            prob_total = np.concatenate((prob_total, prob), axis=0)

            # Accumulate the number of correct predictions per batch so we can average at the end
            count, prob_predicted, plot_predicted = utils.process_prob(
                label,
                prob,
                predicted=prob_predicted,
                plot_predicted=plot_predicted)
            count_success = count_success + count
            objData.next_batch_test()

        # Compute the overall accuracy
        print('\n# STATUS:')
        y_true = objData.labels
        y_prob = prob_predicted
        utils.metrics_multiclass(y_true, y_prob)
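utils.process_prob is another helper that is only called, never defined, in these examples. A plausible sketch, assuming it turns the batch's softmax probabilities into argmax predictions, extends the running lists passed in through predicted and plot_predicted, and returns how many predictions in the batch matched the true labels; the actual helper may track different quantities.

import numpy as np

def process_prob(label, prob, predicted=None, plot_predicted=None):
    predicted = [] if predicted is None else predicted
    plot_predicted = [] if plot_predicted is None else plot_predicted

    # Predicted class per sample and its winning probability (kept for plotting)
    pred_labels = np.argmax(prob, axis=1)
    predicted.extend(pred_labels.tolist())
    plot_predicted.extend(np.max(prob, axis=1).tolist())

    count = int(np.sum(pred_labels == np.asarray(label)))
    return count, predicted, plot_predicted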
Example #3
def test_model(net, session, objData, writer=None):
    label_total = []
    label_total_pred = []

    print('\n     # PHASE: Test classification')
    for i in range(objData.total_batchs_complete):
        batch, label = objData.generate_batch()
        # target = tf.one_hot(label, on_value=1, off_value=0, depth=net.num_class)
        # target = list(session.run(target))
        label_pred, acc, count, fc6 = session.run(
            [net.labels_pred, net.accuracy, net.correct_count, net.fc6],
            feed_dict={vgg_batch: batch, vgg_label: label, train_mode: False})

        label_total = np.concatenate((label_total, label), axis=0)
        label_total_pred = np.concatenate((label_total_pred, label_pred), axis=0)

        total_batch = len(label)
        objData.next_batch_test()
        # Record the fc6 activations and labels of this batch
        net.add_row_data_by_save(fc6, label, i)
        # writer.add_summary(summary, i)
        print('     results[ Total:' + str(total_batch) + ' | True:' + str(count) +
              ' | False:' + str(total_batch - count) + ' | Accuracy:' + str(acc) + ' ]')

    # Compute the overall accuracy
    print('\n     # STATUS:')
    y_true = label_total
    y_prob = label_total_pred
    accuracy_final = metrics_multiclass(y_true, y_prob)
    return accuracy_final
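All of these test loops lean on the same small data-feeder interface: total_batchs_complete, generate_batch(), next_batch_test(), labels, and, in the autoencoder example, minibatch / change_minibatch() / normalization(). The toy in-memory stand-in below satisfies that interface and can be used to dry-run the loops; it is an illustration only, not the project's actual dataset class.

import numpy as np

class ArrayData(object):
    # Toy stand-in for the objData argument used by the test loops above
    def __init__(self, x, labels, minibatch=32):
        self.x = np.asarray(x, dtype=np.float32)
        self.labels = np.asarray(labels)
        self.total_images = len(self.labels)
        self.change_minibatch(minibatch)

    def change_minibatch(self, size):
        # Reset the cursor and recompute how many batches the loop will visit
        self.minibatch = size
        self._cursor = 0
        self.total_batchs_complete = int(np.ceil(self.total_images / float(size)))

    def normalization(self, max_value):
        # Scale features into [0, 1] given a dataset-wide maximum
        self.x = self.x / float(max_value)

    def generate_batch(self):
        start, end = self._cursor, self._cursor + self.minibatch
        return self.x[start:end], self.labels[start:end]

    def next_batch_test(self):
        self._cursor += self.minibatch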
def test_model(net, sess_test, objData):
    total = objData.total_images
    count_success = 0
    count_by_class = np.zeros([net.num_class, net.num_class])

    prob_predicted = []
    plot_predicted = []
    label_total = []
    # Empty (0, num_class) accumulator for the per-batch probability matrices
    prob_total = np.empty((0, net.num_class))

    print('\n# PHASE: Test classification')
    for i in range(objData.total_batchs_complete):
        batch, label = objData.generate_batch()
        prob, layer = sess_test.run([net.prob, net.relu6],
                                    feed_dict={vgg_batch: batch, train_mode: False})

        label_total = np.concatenate((label_total, label), axis=0)
        prob_total = np.concatenate((prob_total, prob), axis=0)

        # save output of a layer
        # utils.save_layer_output(layer, label, name='Train_SNC4_relu6', dir='../data/features/')
        # utils.save_layer_output_by_class(layer, label, name='Train_SNC4', dir='../data/features/')

        count, prob_predicted, plot_predicted = utils.process_prob(label, prob, predicted=prob_predicted,
                                                                   plot_predicted=plot_predicted)
        count_success = count_success + count
        objData.next_batch_test()

    # Compute the overall accuracy
    print('\n# STATUS:')
    y_true = objData.labels
    y_prob = prob_predicted
    accuracy_final = utils.metrics_multiclass(y_true, y_prob)

    return accuracy_final
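The commented-out lines above mention utils.save_layer_output, a helper for dumping intermediate activations to disk. A rough sketch under the assumption that it appends one CSV row per sample (label first, then the flattened activations) to a file named after the layer; the real helper might use a different layout or format.

import os
import numpy as np

def save_layer_output(layer, label, name='layer', dir='../data/features/'):
    # One row per sample: the label followed by the flattened activations
    layer = np.asarray(layer).reshape(len(label), -1)
    rows = np.column_stack([np.asarray(label).reshape(-1, 1), layer])
    with open(os.path.join(dir, name + '.csv'), 'a') as fh:
        np.savetxt(fh, rows, delimiter=',', fmt='%g')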
def test_model_all(net, sess_test, objData, numClass):

    y_true = objData.labels
    y_result = []

    for ix in range(objData.total_batchs_complete):

        x_, label = objData.generate_batch()
        cost_class = []
        for class_i in range(numClass):

            cost_i = sess_test.run(net[class_i].cost, feed_dict={x_batch: x_})
            cost_class.append(cost_i)

        # Predicted class: the model with the lowest cost
        y_result.append(np.argsort(cost_class)[0])
        objData.next_batch_test()

    utils.metrics_multiclass(y_true, y_result)
def test_model_all(net, sess_test, objData, numClass):
    y_true = objData.labels
    y_result = []
    for ix in range(objData.total_batchs_complete):
        x_, label = objData.generate_batch()
        cost_class = []
        for class_i in range(numClass):
            cost_i = sess_test.run(net[class_i].cost, feed_dict={x_batch: x_})
            cost_class.append(cost_i)

        y_result.append(np.argsort(cost_class)[0])
        objData.next_batch_test()

    # print(np.shape(y_true))
    #     ax = np.reshape(objData.labels.values, [objData.total_inputs])
    #     ax = list(ax)
    #     f = open("PaperCIARP.csv", "a+")
    #     f.write(",".join(map(str, ax)) + "\n")
    #     f.write(",".join(map(str, y_result)) + "\n")
    #     f.close()

    utils.metrics_multiclass(y_true, y_result)
    for i in range(0, 3):
        path_data_train_csv, path_data_test_csv, path_max_csv, name = path_datasets(i)

        print('\n[NAME:', name, ']')
        Damax = utils.load_max_csvData(path_max_csv)

        # Method 1
        # X_train, X_test, y_train, y_test, total_train, total_test = get_data_split(path_data_test_csv, Damax, 0.3)
        # Method 2
        X_train, X_test, y_train, y_test, total_train, total_test = get_data_all(
            path_data_train_csv, path_data_test_csv, Damax)
        print(np.shape(X_train), np.shape(X_test))

        knn = neighbors.KNeighborsClassifier()
        print("     Train model...")
        knn.fit(X_train, y_train)
        print("     Test model...")
        Z = knn.predict(X_test)

        acc = utils.metrics_multiclass(y_test, Z)

        print('     Save result...')
        output = [name, total_test, acc, path_data_test_csv]
        f.write(','.join(map(str, output)) + '\n')
        f.write(','.join(map(str, y_test)) + '\n')
        f.write(','.join(map(str, Z)) + '\n')

    f.close()
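The last snippet builds a k-NN baseline on features stored in CSV files, using helpers (path_datasets, utils.load_max_csvData, get_data_all) that are not shown, and it assumes a file handle f opened earlier. As a rough guide, get_data_all could look like the sketch below, assuming each CSV holds one sample per row with the class label in the last column and Damax is a per-feature maximum vector used for normalization; the real loader may differ.

import numpy as np

def get_data_all(path_train_csv, path_test_csv, Damax):
    def load(path):
        data = np.loadtxt(path, delimiter=',')
        x, y = data[:, :-1], data[:, -1].astype(int)
        # Normalize every feature by its dataset-wide maximum
        return x / np.asarray(Damax, dtype=np.float64), y

    X_train, y_train = load(path_train_csv)
    X_test, y_test = load(path_test_csv)
    return X_train, X_test, y_train, y_test, len(y_train), len(y_test)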