Example #1
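# The snippet begins mid-function; the imports and the head of
# get_train_test_dataset below are hedged reconstructions (X and Y are
# assumed to be module-level image/label arrays).
import numpy as np
import tensorflow as tf

def get_train_test_dataset(train_test_ratio):
    shuffle_indices = np.random.permutation(len(Y))
    x_shuffled = []
    y_shuffled = []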
    for index in shuffle_indices:
        x_shuffled.append(X[index])
        y_shuffled.append(Y[index])

    size_of_dataset = len(x_shuffled)
    n_train = int(np.ceil(size_of_dataset * train_test_ratio))
    # The first n_train shuffled samples form the training split and the rest
    # form the test split; slicing from n_train (not n_train + 1) avoids
    # silently dropping the sample at index n_train from both splits.
    return (np.asarray(x_shuffled[:n_train]), np.asarray(x_shuffled[n_train:]),
            np.asarray(y_shuffled[:n_train]), np.asarray(y_shuffled[n_train:]))

train_images, test_images, train_label, test_label = get_train_test_dataset(0.7)
model_path = './model_triplet/'
model = TripletLoss()

# Input and output tensor
img_placeholder = tf.placeholder(tf.float32, [None, 28, 28, 3], name='img')
net = model.conv_net(img_placeholder, reuse=False)
print(net)

# generate random index from test image corpus and display the image
idx = np.random.randint(0, len(test_images))
im = test_images[idx]

# show the test image
print('************Query Image**************')
#show_image(idx, test_images)

# Find the k nearest neighbors using cosine similarity:
# compute a vector representation for each training image and normalize it
def generate_db_normed_vectors():
    saver = tf.train.Saver()
    with tf.Session() as sess:
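        # --- The original snippet is truncated here. A minimal sketch of how
        # --- the body might continue, reusing model_path, net, img_placeholder,
        # --- and train_images from above; the checkpoint-restore call is an
        # --- assumption, not the project's confirmed code.
        saver.restore(sess, tf.train.latest_checkpoint(model_path))
        # Embed every training image in one forward pass.
        train_vectors = sess.run(net, feed_dict={img_placeholder: train_images})
        # L2-normalize each row so cosine similarity reduces to a dot product
        # against the normalized query embedding at retrieval time.
        normed_train_vectors = train_vectors / np.linalg.norm(
            train_vectors, axis=1, keepdims=True)
        return normed_train_vectors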
Example #2
    print("placeholder_shape", placeholder_shape)

    # Setup Network
    next_batch = dataset.get_triplets_batch
    anchor_input = tf.placeholder(tf.float32,
                                  placeholder_shape,
                                  name='anchor_input')
    positive_input = tf.placeholder(tf.float32,
                                    placeholder_shape,
                                    name='positive_input')
    negative_input = tf.placeholder(tf.float32,
                                    placeholder_shape,
                                    name='negative_input')

    margin = 0.5
    anchor_output = model.conv_net(anchor_input, reuse=False)
    positive_output = model.conv_net(positive_input, reuse=True)
    negative_output = model.conv_net(negative_input, reuse=True)
    loss = model.triplet_loss(anchor_output, positive_output, negative_output,
                              margin)

    # Setup Optimizer
    global_step = tf.Variable(0, trainable=False)

    train_step = tf.train.MomentumOptimizer(FLAGS.learning_rate,
                                            FLAGS.momentum,
                                            use_nesterov=True).minimize(
                                                loss, global_step=global_step)

    # Start Training
    saver = tf.train.Saver()
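model.triplet_loss is not shown in these snippets. A minimal sketch of the
standard formulation it presumably implements, where a hinge pushes each
negative at least `margin` farther from the anchor than the positive (the
squared-Euclidean distance and mean reduction are assumptions):

import tensorflow as tf

def triplet_loss_sketch(anchor, positive, negative, margin=0.5):
    # Squared Euclidean distance of each anchor to its positive and negative.
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=1)
    # Hinge loss: zero once the negative is margin farther than the positive.
    return tf.reduce_mean(tf.maximum(0.0, pos_dist - neg_dist + margin))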
Example #3
# Assumes TensorFlow 1.x; PreProcessing and TripletLoss come from the
# project's own modules (their import paths are not shown in this snippet).
import tensorflow as tf

def train():
    data_src = './ChallengeImages2/'
    learning_rate = 0.01
    train_iter = 250
    batch_size = 128
    momentum = 0.99
    step = 50
    # Setup Dataset
    dataset = PreProcessing(data_src)
    print(dataset.images_test)
    print(dataset.labels_test)
    model = TripletLoss()
    height = 50
    width = 150
    dims = [height, width, 3]
    placeholder_shape = [None] + dims
    print("placeholder_shape", placeholder_shape)

    # Setup Network
    next_batch = dataset.get_triplets_batch
    anchor_input = tf.placeholder(tf.float32,
                                  placeholder_shape,
                                  name='anchor_input')
    positive_input = tf.placeholder(tf.float32,
                                    placeholder_shape,
                                    name='positive_input')
    negative_input = tf.placeholder(tf.float32,
                                    placeholder_shape,
                                    name='negative_input')

    margin = 0.5
    # Will be of size N x 28
    anchor_output = model.conv_net(anchor_input, reuse=tf.AUTO_REUSE)
    positive_output = model.conv_net(positive_input, reuse=tf.AUTO_REUSE)
    negative_output = model.conv_net(negative_input, reuse=tf.AUTO_REUSE)
    # Use the static shape for a sanity check at graph-construction time;
    # tf.shape() only returns a tensor that would need a session run. The
    # stray `return` that followed the print made the rest of the function
    # unreachable, so it is removed.
    print(anchor_output.shape)
    # Cosine similarity between the positive and negative outputs; a value
    # below 0.25 indicates the pair is well separated. computeCosSimilarity
    # is defined elsewhere in the project (see the sketch after this example).
    similarity_pos_neg = computeCosSimilarity(positive_output, negative_output)
    similarity_pos_anchor = computeCosSimilarity(positive_output,
                                                 anchor_output)

    loss = model.triplet_loss(anchor_output, positive_output, negative_output,
                              margin)

    # Setup Optimizer
    global_step = tf.Variable(0, trainable=False)

    train_step = tf.train.MomentumOptimizer(learning_rate,
                                            momentum,
                                            use_nesterov=True).minimize(
                                                loss, global_step=global_step)

    # Start Training
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Setup Tensorboard
        tf.summary.scalar('step', global_step)
        tf.summary.scalar('loss', loss)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter('train.log', sess.graph)

        # Train iter
        for i in range(train_iter):
            batch_anchor, batch_positive, batch_negative = next_batch(
                batch_size)

            _, l, summary_str = sess.run(
                [train_step, loss, merged],
                feed_dict={
                    anchor_input: batch_anchor,
                    positive_input: batch_positive,
                    negative_input: batch_negative
                })

            #pNAccuracy = computeAccuracyLess(0.25, similarity_pos_neg)
            #pAAccuracy = computeAccuracyGreat(0.75, similarity_pos_anchor)
            writer.add_summary(summary_str, i)
            print("#%d - Loss" % i, l)
            #print("#%d - P and A Accuracy" % i, pAAccuracy)
            #print("#%d - N and A Accuracy" % i, pNAccuracy)

            if (i + 1) % step == 0:
                saver.save(sess, "model_triplet/model.ckpt")
        saver.save(sess, "model_triplet/model.ckpt")
    print('Training completed successfully.')
    return dataset
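computeCosSimilarity is called in Example #3 but not defined in the snippet. A
plausible, hypothetical implementation computes the row-wise cosine similarity
between two batches of embeddings (the real project helper may differ):

import tensorflow as tf

def computeCosSimilarity(a, b):
    # Normalize each row; the dot product of unit vectors is the cosine.
    a_norm = tf.nn.l2_normalize(a, axis=1)
    b_norm = tf.nn.l2_normalize(b, axis=1)
    return tf.reduce_sum(a_norm * b_norm, axis=1)

Values near 1 mean a pair is close in embedding space; the commented-out
accuracy checks in the training loop test these similarities against the 0.25
and 0.75 thresholds.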