train_op = tf.train.AdamOptimizer(learning_rate,
                                  beta1=0.9,
                                  beta2=0.999,
                                  epsilon=1e-08,
                                  use_locking=False).minimize(
                                      triplet_loss, global_step=global_step)
#train_op = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(cost,global_step=global_step)
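# Assumed upstream context (not shown in this excerpt): `sess`, `global_step`,
# `learning_rate`, the street/shop/nopair placeholders, the triplet_loss /
# dist_pair / dist_nopair tensors, the `merged` summary op, `train_writer`
# and the `input` data-loading module are all defined earlier in the file.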

sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated

ISOTIMEFORMAT = '%Y-%m-%d %X'
iter_show = 0
for epoch in range(n_epoch):
    start_time = time.time()
    iter_per_epoch = 2
    for iter in range(iter_per_epoch):  # xrange is Python 2 only
        street_batch, shop_batch, nopair_batch, _, _, _ = input.load_batchsize_images(
            batch_size)
        feed_dict = {
            x_street: street_batch,
            x_shop: shop_batch,
            x_nopair: nopair_batch,
            train_mode: True
        }
        #conv1, conv2, conv3, conv4, conv5, fc8, fc7, fc6, pool3 = sess.run([network.conv1, network.conv2, network.conv3, network.conv4, network.conv5, network.fc8, network.fc7, network.fc6, network.pool3], feed_dict=feed_dict)
        _, err, d_pair, d_nopair, lr, train_summary = sess.run(
            [
                train_op, triplet_loss, dist_pair, dist_nopair, learning_rate,
                merged
            ],
            feed_dict=feed_dict)
        iter_show += 1
        train_writer.add_summary(train_summary, iter_show)
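# Hedged sketch (assumption, not from the original source): the triplet_loss,
# dist_pair and dist_nopair tensors fetched above typically come from a
# margin-based triplet formulation like the one below; the embedding tensors
# f_street / f_shop / f_nopair, their dimension and the margin are hypothetical.
import tensorflow as tf

f_street = tf.placeholder(tf.float32, [None, 128])  # street-photo embeddings
f_shop = tf.placeholder(tf.float32, [None, 128])    # matching shop-photo embeddings
f_nopair = tf.placeholder(tf.float32, [None, 128])  # non-matching embeddings
margin = 0.2  # hypothetical margin value

# Squared Euclidean distances between anchor and positive / negative samples.
dist_pair = tf.reduce_sum(tf.square(f_street - f_shop), axis=1)
dist_nopair = tf.reduce_sum(tf.square(f_street - f_nopair), axis=1)
# Hinge on the gap: matching pairs should be closer than non-pairs by `margin`.
triplet_loss = tf.reduce_mean(tf.maximum(dist_pair - dist_nopair + margin, 0.0))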
Example #2
 #loss = tf.reduce_mean(-tf.reduce_sum(true_out*tf.log(nin.prob),[1]))
 #loss = tf.reduce_mean(tf.reduce_sum((nin.prob-true_out)**2,[1]))
 #train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
 # train = tf.train.AdamOptimizer(1e-4).minimize(loss)
 global_step = tf.Variable(0, trainable=False)  # step counter, excluded from training
 starter_learning_rate = 0.0001
 learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                            global_step,
                                            1000,
                                            0.96,
                                            staircase=True)
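 # With staircase=True the decayed rate is
 #   lr = 1e-4 * 0.96 ** (global_step // 1000),
 # i.e. the learning rate drops by 4% once every 1000 steps.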
 optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(
     loss, global_step=global_step)
 sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
 for i in range(10000):  # xrange is Python 2 only
     x_batch, y_batch = input.load_batchsize_images(batch_size=64)
     #cost = tf.reduce_sum((vgg.prob - true_out) ** 2)
     batch_size = y_batch.shape[0]
     #loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(nin.prob,true_out))
     #print(loss.eval(feed_dict={images: x_batch, true_out: y_batch, train_mode: True}))
     #optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
     lr, prob, f, pool, cost, _ = sess.run(
         [learning_rate, nin.prob, nin.final, nin.pool4, loss, optimizer],
         feed_dict={
             images: x_batch,
             true_out: y_batch,
             train_mode: True
         })
     #print "np.argsort:"
     #print np.argsort(prob,axis=1).shape
     index = np.argsort(prob, axis=1)[:, 49:50]
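     # The slice [:, 49:50] hard-codes a 50-class output; np.argmax(prob, axis=1)
     # yields the same top-1 index without that assumption. A hedged accuracy
     # check, assuming y_batch is one-hot:
     #   acc = np.mean(np.argmax(prob, axis=1) == np.argmax(y_batch, axis=1))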
Example #3
street_nin.print_params()
street_nin.print_layers()

shop_nin.print_params()
shop_nin.print_layers()

nopair_nin.print_params()
nopair_nin.print_layers()
print('   batch_size: %d' % batch_size)

iter_show = 0
for epoch in range(n_epoch):
    start_time = time.time()
    iter_per_epoch = 2
    for iter in range(iter_per_epoch):  # xrange is Python 2 only
        street_batch, shop_batch, nopair_batch, y_street_batch, y_shop_batch, y_nopair_batch = input.load_batchsize_images(
            batch_size)
        #print "street_batch.shape:"
        #print street_batch.shape
        #print "iter:"
        #print iter
        feed_dict = {
            x_street: street_batch,
            x_shop: shop_batch,
            x_nopair: nopair_batch
        }
        feed_dict.update(
            {**street_nin.all_drop, **shop_nin.all_drop}
        )  # enable all dropout/dropconnect/denoising layers (Python 3 dict merge)
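        # Assumption: the *_nin objects are TensorLayer networks, so at eval
        # time the same dict can be neutralized with
        # tl.utils.dict_to_one(street_nin.all_drop) to set every keep
        # probability to 1.0 (dropout disabled).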
        err, d_pair, d_nopair, t_lr, _, train_summary = sess.run(
            [
                triplet_loss, dist_pair, dist_nopair, triplet_lr, train_op,