Example #1
0
def main(_):
    """Evaluate a NetVLAD model on 11 test images and print their pairwise L2 distances."""
    with tf.device('/gpu:0'):
        sess = tf.Session()

        # Placeholders; fed below by tensor name ('query_image:0', 'train_mode:0').
        query_image = tf.placeholder(tf.float32, [None, 224, 224, 3],
                                     name='query_image')
        train_mode = tf.placeholder(tf.bool, name='train_mode')

        model = netvlad.Netvlad(FLAGS.model_path)
        model.build(query_image, train_mode)

        print("number of total parameters in the model is %d\n" %
              model.get_var_count())

        sess.run(tf.global_variables_initializer())
        print("evaluation begins!\n")

        # Load the 11 test images test/1.JPG ... test/11.JPG as one batch.
        batch = np.zeros((11, 224, 224, 3))
        for i in range(1, 12):
            batch[i - 1, :, :, :] = eva_utils.load_image("test/%s.JPG" % (i))
        descriptor = sess.run(model.vlad_output,
                              feed_dict={
                                  'query_image:0': batch,
                                  'train_mode:0': False
                              })

        # Pairwise squared L2 distances: ||a||^2 + ||b||^2 - 2*a.b.
        # B is a (11, 1) column and C a (11,) row, so B + C broadcasts
        # to the full (11, 11) matrix.
        A = np.dot(descriptor, descriptor.transpose())
        B = np.sum(descriptor**2, axis=1, keepdims=True)
        C = np.sum(descriptor**2, axis=1)
        # Compute D once instead of re-evaluating the expression for printing.
        D = B + C - 2 * A
        print(D)
        # Clamp tiny negative values caused by floating-point error before sqrt.
        D = np.maximum(D, 0)
        L2_distance = np.sqrt(D)
        print(L2_distance)
Example #2
0
def main(_):
    """Run retrieval evaluation of a NetVLAD model, with optional data preparation."""
    qList, dbList = eva_init.get_List(FLAGS.mat_path)

    # One-off preprocessing steps, each toggled by a command-line flag.
    if FLAGS.initH5:
        eva_init.h5_initial(FLAGS.eva_h5File)
    if FLAGS.computeDist:
        eva_init.compute_dist(FLAGS.mat_path, FLAGS.eva_h5File)
    if FLAGS.loadImage:
        eva_init.multipro_load_image(FLAGS.data_dir, FLAGS.eva_h5File,
                                     qList, dbList)

    with tf.device('/gpu:0'):
        sess = tf.Session()

        query_image = tf.placeholder(tf.float32, [None, 224, 224, 3],
                                     name='query_image')
        train_mode = tf.placeholder(tf.bool, name='train_mode')

        model = netvlad.Netvlad(FLAGS.model_path)
        model.build(query_image, train_mode)

        print("number of total parameters in the model is %d\n" %
              model.get_var_count())

        sess.run(tf.global_variables_initializer())
        print("evaluation begins!\n")
        # evaluate() drives the whole evaluation loop over queries/database.
        eva_utils.evaluate(sess, model, FLAGS.batch_size, FLAGS.eva_h5File,
                           qList, dbList, FLAGS.numRecall)
Example #3
0
def main(_):
    """Measure classification accuracy of the chosen model on CIFAR-10 test batches."""
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.device('/gpu:0'):
        with tf.Session(config=config) as sess:
            X = tf.placeholder(tf.float32, [None, 32, 32, 3], name='X')
            Y = tf.placeholder(tf.int64, [None], name='Y')

            # Pick the VLAD-pooled variant or the plain model.
            model = (netvlad_1.Netvlad(FLAGS.modelPath) if FLAGS.use_vlad
                     else netvlad.Netvlad(FLAGS.modelPath))
            model.build(X)
            print("number of total parameters in the model is %d\n"
                  % model.get_var_count())

            # Accuracy = mean of per-sample argmax-vs-label agreement.
            predictions = tf.argmax(tf.nn.softmax(model.fc3), axis=1)
            correct_prediction = tf.equal(predictions, Y)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

            sess.run(tf.global_variables_initializer())

            Acc = 0.0
            acc = 0.0
            numBatch = 10000 / FLAGS.batch_size

            print("evaluation begins!\n")
            count = 0.0
            batches = eva_utils.next_batch(FLAGS.batch_size,
                                           'cifar-10-batches-py')
            for images, labels in batches:
                count += 1
                acc = sess.run(accuracy,
                               feed_dict={X: images, Y: labels})
                Acc += acc
                if count % FLAGS.print_every == 0:
                    # Report running batch accuracy and cumulative average.
                    print("progress: %.4f      current accuracy = %.6f      total accuracy = %.6f\n"
                          % (count / numBatch, acc, Acc / count))
Example #4
0
def main(_):
    """Train the classifier on CIFAR-10 with SGD and exponential learning-rate decay.

    Saves model weights every FLAGS.save_every epochs into FLAGS.checkpoint_dir.
    """
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.device('/gpu:0'):
        with tf.Session(config=config) as sess:
            X = tf.placeholder(tf.float32, [FLAGS.batch_size, 32, 32, 3],
                               name='X')
            Y = tf.placeholder(tf.int64, [FLAGS.batch_size], name='Y')

            if FLAGS.use_vlad:
                model = netvlad_1.Netvlad('vgg16.npy')
            else:
                model = netvlad.Netvlad(FLAGS.modelPath)
            model.build(X)
            print("number of total parameters in the model is %d\n" %
                  model.get_var_count())

            # Either angular-margin softmax (A-Softmax) or standard
            # softmax cross-entropy over 10 CIFAR classes.
            if FLAGS.use_a_softmax:
                print('using angular softmax')
                loss = A_softmax.A_softmax(model.fc1, tf.one_hot(Y, depth=10),
                                           model.fc3, FLAGS.m)
            else:
                print('not using angular softmax')
                loss = tf.losses.softmax_cross_entropy(tf.one_hot(Y, depth=10),
                                                       model.fc3)

            global_step = tf.Variable(0, trainable=False)
            starter_learning_rate = FLAGS.lr
            learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                                       global_step,
                                                       100000,
                                                       0.96,
                                                       staircase=True)
            # BUG FIX: pass global_step to minimize() so it is incremented on
            # every training step; without it exponential_decay never advances
            # and the learning rate stays at its starter value forever.
            train = tf.train.GradientDescentOptimizer(learning_rate).minimize(
                loss, global_step=global_step)

            correct_prediction = tf.equal(
                tf.argmax(tf.nn.softmax(model.fc3), axis=1), Y)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

            sess.run(tf.global_variables_initializer())

            train_loss = 0
            # Used only for the progress ratio in the log line.
            numBatch = 50000 / FLAGS.batch_size

            print("training begins!\n")
            for i in range(FLAGS.numEpoch):
                count = 0.0
                for x, y in train_utils.next_batch(FLAGS.batch_size,
                                                   'cifar-10-batches-py'):
                    count += 1
                    _, train_loss, acc = sess.run([train, loss, accuracy],
                                                  feed_dict={
                                                      X: x,
                                                      Y: y
                                                  })
                    if count % FLAGS.print_every == 0:
                        print(
                            "Epoch: %s    progress: %.4f  accuracy = %.4f      training_loss = %.6f\n"
                            % (i, count / numBatch, acc, train_loss))
                # Periodic checkpoint; filename embeds epoch and last loss.
                if (i + 1) % FLAGS.save_every == 0:
                    model.save_npy(
                        sess, "%s/epoch_%d_loss_%.6f" %
                        (FLAGS.checkpoint_dir, i, train_loss))
Example #5
0
def main(_):
    """Train NetVLAD with a triplet loss, periodically rebuilding the retrieval index.

    The hard-negative index is refreshed every `update_index_every` batches,
    and that interval is doubled after each checkpoint save.
    """
    qList, dbList = train_init.get_List(FLAGS.mat_path)
    # Batches between index refreshes (~1000 images' worth).
    # NOTE(review): under Python 3 this is float division; the `>=` comparison
    # below still works, but `//` would match integer-count semantics — confirm.
    update_index_every = 1000 / FLAGS.batch_size

    # One-off preprocessing steps, each toggled by a command-line flag.
    if FLAGS.initH5:
        train_init.h5_initial(FLAGS.train_h5File)
    if FLAGS.computeDist:
        train_init.compute_dist(FLAGS.mat_path, FLAGS.train_h5File)
    if FLAGS.initIndex:
        train_init.index_initial(FLAGS.train_h5File, qList, dbList)
    if FLAGS.loadImage:
        train_init.multipro_load_image(FLAGS.data_dir, FLAGS.train_h5File,
                                       qList, dbList)

    with tf.device('/gpu:0'):
        sess = tf.Session()

        query_image = tf.placeholder(tf.float32, [None, 224, 224, 3],
                                     name='query_image')
        # Triplet supervision tensor; presumably (batch, descriptor_dim=32768,
        # 60 positives/negatives per query) — TODO confirm against next_batch().
        labels = tf.placeholder(tf.float32, [None, 32768, 60])
        train_mode = tf.placeholder(tf.bool, name='train_mode')

        # Start from VGG-16 weights and fine-tune.
        model = netvlad.Netvlad('vgg16.npy')
        model.build(query_image, train_mode)

        print("number of total parameters in the model is %d\n" %
              model.get_var_count())

        # triplet_loss also returns the minimum L2 distances to the positive
        # and negative sets, logged below for monitoring. Margin = 0.2.
        loss, mindistP, mindistN = triplet_loss(model.vlad_output, labels, 0.2)
        train = tf.train.RMSPropOptimizer(FLAGS.lr).minimize(loss)
        sess.run(tf.global_variables_initializer())
        train_loss = 0

        count = 0
        print("training begins!\n")
        for i in range(FLAGS.numEpoch):

            # z is a progress percentage reported by next_batch (see print below).
            for x, y, z in train_utils.next_batch(sess, model,
                                                  FLAGS.batch_size,
                                                  FLAGS.train_h5File,
                                                  FLAGS.randomStartIdx, qList,
                                                  dbList):
                count = count + 1
                # Refresh the hard-negative index once enough batches have
                # passed; resetting count restarts the interval (and makes
                # the print below fire on the same iteration, since 0 %
                # print_every == 0).
                if count >= update_index_every:
                    count = 0
                    train_utils.index_update(sess, model,
                                             FLAGS.batch_size * 30,
                                             FLAGS.train_h5File, qList, dbList)

                _, train_loss, minP, minN = sess.run(
                    [train, loss, mindistP, mindistN],
                    feed_dict={
                        query_image: x,
                        labels: y,
                        train_mode: True
                    })
                if count % FLAGS.print_every == 0:
                    print(
                        "Epoch: %d    progress: %.4f%%  training_loss = %.6f\n"
                        % (i, z, train_loss))
                    print("Minimum L2 distance of P and N is %s   %s\n" %
                          (minP, minN))
            # Periodic checkpoint; after each save, refresh the index half
            # as often (the model changes more slowly as training proceeds).
            if (i + 1) % FLAGS.save_every == 0:
                model.save_npy(
                    sess, "%s/netvlad_epoch_%d_loss_%.6f" %
                    (FLAGS.checkpoint_dir, i, train_loss))
                update_index_every *= 2