Beispiel #1
0
    def test(self):
        """Evaluate the model on the held-out test set.

        Restores the latest checkpoint (best effort), runs the test
        summary/loss/accuracy/prediction ops in the current session, prints
        the accuracy, and appends one CSV-style line of metrics
        (accuracy, precision, recall, F1, FT) to ``resnet.txt``.
        """
        tf.global_variables_initializer().run()

        self.saver = tf.train.Saver()
        # The checkpoint counter is only needed when resuming training,
        # not for a one-shot evaluation.
        could_load, _ = self.load(self.checkpoint_dir)

        if could_load:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        # NOTE(review): attribute is spelled "test_inptus" elsewhere in the
        # class, so the typo must be kept to match the placeholder's name.
        test_feed_dict = {
            self.test_inptus: self.test_x,
            self.test_labels: self.test_y
        }

        # The summary string and loss are computed but not consumed here;
        # only accuracy and the predicted/true labels are used below.
        _, test_loss, test_accuracy, p, t = self.sess.run(
            [
                self.test_summary, self.test_loss, self.test_accuracy,
                self.test_plab, self.test_tlab
            ],
            feed_dict=test_feed_dict)
        import metrics
        print("test_accuracy: {}".format(test_accuracy))
        # Append this run's metrics as one line: "i-j,acc,prec,rec,f1,ft"
        with open('resnet.txt', 'a') as f:
            f.write(
                str(self.i) + '-' + str(self.j) + ',' +
                str(metrics.accuracy(t, p)) + ',' +
                str(metrics.precision(t, p)) + ',' +
                str(metrics.recall(t, p)) + ',' + str(metrics.f1score(t, p)) +
                ',' + str(metrics.ft(t, p)) + '\n')
Beispiel #2
0
def evaluate(path):
    """Print per-query retrieval metrics for the results stored at *path*.

    For each query, the relevance of the top-1000 retrieved items is scored
    with precision, recall, F1, average precision, NDCG and the two
    nearest-neighbour tier measures, and each value is printed as computed.
    """
    queries = read_dataset('queries.csv')
    targets = read_dataset('targets.csv')
    freqs = freq_count(targets)
    results = load_results(path, queries, targets)
    cutoff = 1000

    precisions, recalls, f1scores = [], [], []
    aps, gains, nnt1s, nnt2s = [], [], [], []

    for queried, retrieved in results:
        rel = categories_to_rel(queried, retrieved)[:cutoff]
        n_relevant = freqs[queried[0]]

        prec = precision(rel)
        rec = recall(rel, n_relevant)
        f1 = f1score(rel, n_relevant)
        gain = ndcg(rel)
        ap = average_precision(rel, n_relevant)
        t1 = nnt1(rel, n_relevant)
        t2 = nnt2(rel, n_relevant)

        precisions.append(prec)
        recalls.append(rec)
        f1scores.append(f1)
        gains.append(gain)
        aps.append(ap)
        nnt1s.append(t1)
        nnt2s.append(t2)

        print('precision:', prec)
        print('recall:', rec)
        print('F1 score:', f1)
        print('average precision:', ap)
        print('NDCG:', gain)
        print('nearest neighbor:', t1, t2)
Beispiel #3
0
def evaluate(path):
    """Evaluate CAD retrieval for RGB-D queries and plot a mean PR curve.

    Prints the mean of each retrieval metric (precision, recall, F1, AP,
    NDCG, NNT1, NNT2), where each query's ranking is truncated at its
    category frequency, then shows the precision-recall curve averaged
    over all queries for the first 1000 ranks.
    """
    cad = read_dataset('cad.csv')
    rgbd = read_dataset('rgbd.csv')
    freqs = freq_count(cad)
    results = load_results(path, rgbd, cad)

    # Running sums of each metric over all queries.
    mP = mR = mF = mAP = mNDCG = mNNT1 = mNNT2 = 0.0

    for queried, retrieved in results:
        f = freqs[queried[0]]
        x = categories_to_rel(queried, retrieved)[:f]
        mP += precision(x)
        mR += recall(x, f)
        mF += f1score(x, f)
        mNDCG += ndcg(x)
        mAP += average_precision(x, f)
        mNNT1 += nnt1(x, f)
        mNNT2 += nnt2(x, f)

    n = len(results)
    print('num queries:', n)
    print('mean precision:', mP / n)
    print('mean recall:', mR / n)
    print('mean F1:', mF / n)
    print('mean AP:', mAP / n)
    print('mean NDCG: ', mNDCG / n)
    print('mean NNT1: ', mNNT1 / n)
    print('mean NNT2: ', mNNT2 / n)

    # Precision-recall curve averaged over all queries, padded to `cutoff`
    # ranks so every query contributes the same number of points.
    cutoff = 1000
    mean_precisions = np.zeros(cutoff, np.float64)
    mean_recalls = np.zeros(cutoff, np.float64)
    for queried, retrieved in results:
        rel = categories_to_rel(queried, retrieved)[:cutoff]
        rel = np.pad(rel, (0, cutoff - len(rel)), 'constant',
                     constant_values=(0))
        n_relevant = freqs[queried[0]]
        mean_precisions += [precision(rel[:k + 1]) for k in range(len(rel))]
        mean_recalls += [recall(rel[:k + 1], n_relevant)
                         for k in range(len(rel))]
    mean_precisions /= len(results)
    mean_recalls /= len(results)

    plt.plot(mean_recalls, mean_precisions)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.axis([0, 1, 0, 1.05])
    plt.show()
Beispiel #4
0
def __scores(clf, testset):
    """Return (accuracy, precision, recall, F1) for *clf* on *testset*."""
    acc = accuracy(clf, testset)
    prec, rec = precision_recall(clf, testset)
    return acc, prec, rec, f1score(prec, rec)
Beispiel #5
0
def evaluate(path):
    """Print mean retrieval metrics and plot the averaged PR curve.

    Scores each query's top-1000 retrieved items with precision, recall,
    F1, NDCG, average precision and the nearest-neighbour tier measures,
    prints the mean of each metric, then shows the precision-recall curve
    averaged over all queries.
    """
    queries = read_dataset('queries.csv')
    targets = read_dataset('targets.csv')
    freqs = freq_count(targets)
    results = load_results(path, queries, targets)
    cutoff = 1000

    precisions, recalls, f1scores = [], [], []
    aps, gains, nnt1s, nnt2s = [], [], [], []
    for queried, retrieved in results:
        rel = categories_to_rel(queried, retrieved)[:cutoff]
        n_relevant = freqs[queried[0]]
        precisions.append(precision(rel))
        recalls.append(recall(rel, n_relevant))
        f1scores.append(f1score(rel, n_relevant))
        gains.append(ndcg(rel))
        aps.append(average_precision(rel, n_relevant))
        nnt1s.append(nnt1(rel, n_relevant))
        nnt2s.append(nnt2(rel, n_relevant))

    print('mean precision:', numpy.mean(precisions))
    print('mean recall:', numpy.mean(recalls))
    print('mean F1 score:', numpy.mean(f1scores))
    print('mAP:', numpy.mean(aps))
    print('mean NDCG:', numpy.mean(gains))
    print('mean nearest neighbor:', numpy.mean(nnt1s), numpy.mean(nnt2s))

    # Precision-recall curve averaged over all queries; each ranking is
    # zero-padded to `cutoff` so every query contributes equally.
    mean_precisions = numpy.zeros(cutoff, numpy.float64)
    mean_recalls = numpy.zeros(cutoff, numpy.float64)
    for queried, retrieved in results:
        rel = categories_to_rel(queried, retrieved)[:cutoff]
        rel = numpy.pad(rel, (0, cutoff - len(rel)), 'constant',
                        constant_values=(0))
        mean_precisions += [precision(rel[:k + 1]) for k in range(len(rel))]
        mean_recalls += [recall(rel[:k + 1], freqs[queried[0]])
                         for k in range(len(rel))]
    mean_precisions /= len(results)
    mean_recalls /= len(results)
    plt.plot(mean_recalls, mean_precisions)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.axis([0, 1, 0, 1.05])
    plt.show()
Beispiel #6
0
def model_save_load(name, model, x, y=None):
    """Fit *model* on (x, y) and cache it, or load a previously cached fit.

    The fitted estimator is persisted as ``data/model/<name>.pkl`` via
    joblib. If that file already exists the cached estimator is loaded and
    returned instead of refitting.

    Args:
        name: basename (without extension) for the pickle file.
        model: an unfitted scikit-learn style estimator.
        x: training features passed to ``model.fit``.
        y: optional training labels.

    Returns:
        The fitted (or loaded) estimator.
    """
    model_path = os.path.join('data', 'model', name + '.pkl')
    # Direct existence check instead of scanning the whole directory
    # listing (the original `in os.listdir(...)` was O(n) per call).
    if os.path.exists(model_path):
        return joblib.load(model_path)
    model.fit(X=x, y=y)
    joblib.dump(model, model_path)
    return model


dg = dg()
for i in range(10):
    for j in range(10):
        train_data, test_data, train_labels, test_labels = dg.dsift_only(j)
        svm = SVC(kernel='poly', degree=3)
        # Flatten each sample to a 1-D feature vector for the SVM.
        flat_train = train_data.reshape([train_data.shape[0], -1])
        svm = model_save_load('svm' + str(i) + '-' + str(j), svm,
                              flat_train, train_labels)
        flat_test = test_data.reshape([test_data.shape[0], -1])
        plab = p = svm.predict(flat_test).reshape(-1, 1).tolist()
        t = test_labels.reshape(-1, 1).tolist()
        print(str(metrics.accuracy(t, p)))
        # Append one CSV line of metrics for this (i, j) run.
        with open('denseSIFTsvm.txt', 'a') as f:
            f.write(
                str(i) + '-' + str(j) + ',' + str(metrics.accuracy(t, p)) +
                ',' + str(metrics.precision(t, p)) + ',' +
                str(metrics.recall(t, p)) + ',' + str(metrics.f1score(t, p)) +
                ',' + str(metrics.ft(t, p)) + '\n')
Beispiel #7
0
print("XvalidT.shape: {}".format(XvalidT.shape))
print("YvalidT.shape: {}".format(YvalidT.shape))
# (2) Define model: two tanh hidden layers + sigmoid output (binary task)
nfeature = Xtrain.shape[0]
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(200, input_shape=(nfeature,), activation="tanh"),
    tf.keras.layers.Dense(50, activation="tanh"),
    tf.keras.layers.Dense(1, activation="sigmoid")])
# (3) Compile model
# `learning_rate` is the supported keyword; the old `lr` alias is
# deprecated and removed in newer Keras releases.
optimizer = tf.keras.optimizers.Adam(learning_rate=0.02, beta_1=0.9,
                                     beta_2=0.999, epsilon=1e-7)
model.compile(optimizer=optimizer, loss="binary_crossentropy",
              metrics=["accuracy"])
model.summary()
# (4) Train model (full-batch: batch_size equals the training-set size)
epochs = 20
time_start = time.time()
ntrain = XtrainT.shape[0]
history = model.fit(XtrainT, YtrainT, epochs=epochs, batch_size=ntrain,
                    validation_data=(XvalidT, YvalidT))
time_end = time.time()
print("Train time: {}".format(time_end - time_start))
# (5) Predictions and plotting
# confusion matrix on the validation set (rounded sigmoid outputs)
Afinal = model.predict(XvalidT).T
Yvalid_pred = np.round(Afinal)
metrics.confusion_matrix(Yvalid, Yvalid_pred, 2)
f1score, precision, recall = metrics.f1score(Yvalid, Yvalid_pred)
print("F1Score: {} - Precision: {} - Recall: {}".format(f1score, precision, recall))
text_results.text_results(Yvalid, Yvalid_pred, Xvalid_raw)
# plot loss and accuracy
plot_results.plot_results_history(history.history, ["loss", "val_loss"])
plot_results.plot_results_history(history.history, ["accuracy", "val_accuracy"])
plt.show()
Beispiel #8
0
def train():
    """Train SEGNET with CAMVID dataset.

    Builds the training and validation input pipelines, constructs the
    selected change-detection model (CDNet or eLSTM), trains for
    ``FLAGS.num_epochs`` epochs with a validation pass at the end of each
    epoch, writes TensorBoard summaries, and checkpoints via ``monitor``.

    Python 3 fixes relative to the original: ``print`` as a function,
    ``range`` instead of ``xrange``, and integer (``//``) division for the
    batch counts, which feed ``range()`` and ``Progbar(target=...)``.
    """
    with tf.Graph().as_default():

        train_dataset = Dataset(FLAGS.train_list_file)
        train_dataset.build(num_class=FLAGS.num_class,
                            height=FLAGS.img_height,
                            width=FLAGS.img_width,
                            batch_size=FLAGS.batch_size,
                            num_epochs=FLAGS.num_epochs,
                            shuffle=FLAGS.buffer_size)

        train_x1, train_x2, train_y, train_y_ohe = train_dataset.get_next()
        # Integer division: the count is used as a range() bound below.
        train_batches_per_epoch = len(train_dataset) // FLAGS.batch_size

        val_dataset = Dataset(FLAGS.val_list_file)
        val_dataset.build(num_class=FLAGS.num_class,
                          height=FLAGS.img_height,
                          width=FLAGS.img_width,
                          batch_size=FLAGS.val_batch_size,
                          num_epochs=FLAGS.num_epochs,
                          shuffle=FLAGS.buffer_size)

        val_x1, val_x2, val_y, val_y_ohe = val_dataset.get_next()
        val_batches_per_epoch = len(val_dataset) // FLAGS.val_batch_size

        tf.logging.info(' Model: {}'.format(FLAGS.model))
        tf.logging.info(' Number of training examples: {}'.format(
            len(train_dataset)))
        tf.logging.info(' Number of validation examples: {}'.format(
            len(val_dataset)))
        tf.logging.info(
            ' Number of batches per epoch: {}'.format(train_batches_per_epoch))
        tf.logging.info(' Number of validation batches per epoch: {}'.format(
            val_batches_per_epoch))

        # Median frequency balancing class weights, precomputed offline to
        # avoid recomputing on every run (see the commented-out call).
        print("Compute Median Frequency Balancing")
        #class_weights = median_frequency_balancing(train_dataset.y)
        class_weights = [
            0.027638146768482457, 1.9435447608367058, 1.0, 0.2272722416661377,
            15.64842042833608, 0.286085652525553, 1.5602306923731635,
            0.8940138739340321, 1.4694906910406547, 6.248954806477947,
            0.287822264363265
        ]
        print(class_weights)

        if FLAGS.model == 'CDNet':
            # CDNet takes the two epochs concatenated along channels.
            train_inputs = tf.concat([train_x1, train_x2], axis=-1)
            val_inputs = tf.concat([val_x1, val_x2], axis=-1)

            model = CDNet(num_class=FLAGS.num_class,
                          is_training=True,
                          init_kernel=tf.glorot_normal_initializer())
        elif FLAGS.model == 'eLSTM':
            # eLSTM takes the two epochs as a length-2 time dimension:
            # (batch, time, height, width, channels).
            train_inputs = tf.transpose(tf.stack([train_x1, train_x2]),
                                        [1, 0, 2, 3, 4])
            val_inputs = tf.transpose(tf.stack([val_x1, val_x2]),
                                      [1, 0, 2, 3, 4])

            model = eLSTM(num_class=FLAGS.num_class,
                          is_training=True,
                          init_kernel=tf.glorot_normal_initializer())
        else:
            raise ValueError('No Model found!')

        train_prob, train_logits = model.forward(train_inputs)
        train_loss = cross_entropy(train_logits,
                                   train_y_ohe,
                                   class_weights=class_weights)
        train_loss_sum = tf.summary.scalar('loss', train_loss)

        #BackPrpagation
        train_op = model.backward(train_loss)

        # Streaming precision/recall: the *_stream ops update the running
        # totals; the bare tensors read the current value.
        train_precision, train_precision_stream = precision(
            train_prob, train_y)
        train_recall, train_recall_stream = recall(train_prob, train_y)
        train_stream_op = tf.group(train_precision_stream, train_recall_stream)

        train_f1score = f1score(train_precision, train_recall)
        train_f1score_sum = tf.summary.scalar('f1score', train_f1score)

        train_summary_op = tf.summary.merge(
            [train_loss_sum, train_f1score_sum])

        # Validation graph reuses the training variables.
        val_prob, val_logits = model.forward(val_inputs, reuse=True)
        val_loss = cross_entropy(val_logits, val_y_ohe)
        val_loss_sum = tf.summary.scalar('loss', val_loss)

        val_precision, val_precision_stream = precision(val_prob, val_y)
        val_recall, val_recall_stream = recall(val_prob, val_y)
        val_stream_op = tf.group(val_precision_stream, val_recall_stream)

        val_f1score = f1score(val_precision, val_recall)
        val_f1score_sum = tf.summary.scalar('f1score', val_f1score)

        # Image summaries: inputs, ground truth and predicted change map.
        x1_sum = tf.summary.image('X1', val_x1, max_outputs=3)
        x2_sum = tf.summary.image('X2', val_x2, max_outputs=3)
        gt = tf.cast(val_y, dtype=tf.float32)
        gt_sum = tf.summary.image('GT', gt, max_outputs=3)
        predictions = tf.argmax(val_prob, -1)
        predictions = tf.cast(predictions, dtype=tf.float32)
        predictions = tf.reshape(
            predictions, shape=[-1, FLAGS.img_height, FLAGS.img_width, 1])
        pred_sum = tf.summary.image('Prediction', predictions, max_outputs=3)

        val_summary_op = tf.summary.merge(
            [val_loss_sum, val_f1score_sum, x1_sum, x2_sum, gt_sum, pred_sum])

        init = tf.global_variables_initializer()
        init_local = tf.local_variables_initializer()

        def train_step(step):
            """Run one optimization step; return (loss, f1, summary)."""
            _, _, _loss, _summary = sess.run(
                [train_op, train_stream_op, train_loss, train_summary_op],
                feed_dict={tf.keras.backend.learning_phase(): 1})
            # f1 is read after the streaming counters were updated above.
            _f1score = sess.run(train_f1score)

            return _loss, _f1score, _summary

        def validation_step(step):
            """Run one validation batch; return (loss, f1, summary)."""
            _, _loss, _summary = sess.run(
                [val_stream_op, val_loss, val_summary_op],
                feed_dict={tf.keras.backend.learning_phase(): 0})
            _f1score = sess.run(val_f1score)

            return _loss, _f1score, _summary

        with tf.Session() as sess:
            sess.run(init)
            sess.run(init_local)
            sess.run(train_dataset.init())
            sess.run(val_dataset.init())

            # Start each run with a clean log directory for this model.
            if os.path.exists(FLAGS.logdir + '/' + FLAGS.model):
                shutil.rmtree(FLAGS.logdir + '/' + FLAGS.model)

            train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' +
                                                 FLAGS.model + '/train',
                                                 graph=tf.get_default_graph())
            val_writer = tf.summary.FileWriter(FLAGS.logdir + '/' +
                                               FLAGS.model + '/val')

            for epoch in range(1, FLAGS.num_epochs + 1):
                print("Epoch:({}/{})".format(epoch, FLAGS.num_epochs))
                progbar = tf.keras.utils.Progbar(
                    target=train_batches_per_epoch)
                print("Training")
                for step in range(1, train_batches_per_epoch + 1):
                    try:
                        _loss, _f1score, _summary = train_step(step)
                    except tf.errors.OutOfRangeError:
                        print("End of training dataset.")
                        sys.exit(0)
                    progbar.update(step, [('loss', _loss),
                                          ('f1score', _f1score)])
                    if step == train_batches_per_epoch:
                        # Validation pass at the end of each epoch.
                        print("Validation")
                        progbar = tf.keras.utils.Progbar(
                            target=val_batches_per_epoch)
                        for val_step in range(1, val_batches_per_epoch + 1):
                            _val_loss, _val_f1score, _val_summary = validation_step(
                                val_step)
                            progbar.update(val_step,
                                           [('val_loss', _val_loss),
                                            ('val_f1score', _val_f1score)])

                # Checkpoint / early-stopping hook keyed on validation F1.
                monitor(value=_val_f1score,
                        sess=sess,
                        epoch=epoch,
                        name=FLAGS.model,
                        logdir=FLAGS.logdir + '/' + FLAGS.model)
                train_writer.add_summary(_summary, epoch)
                train_writer.flush()
                val_writer.add_summary(_val_summary, epoch)
                val_writer.flush()

            train_writer.close()
            val_writer.close()