Example #1
0
# Load the question/answer training data (load_data2 is defined elsewhere
# in the file).
qa_data = load_data2()
print('done loading data...\n\n')
batch_no = 0
batch_size = 50
#while batch_no*batch_size < len(qa_data['training']):
# Outer loop: 20 passes (epochs) over the training set.
for eidx in range(20):
    #while batch_no < 6500:
    #if eidx == 0:
    #   batch_no = 6500
    #else:
    # Restart from the first batch at the start of every epoch.
    batch_no = 0

    # Walk the training set in steps of batch_size, stopping one batch short
    # of the end so that batch_no + 1 below is always a valid batch index.
    while batch_no * batch_size < len(qa_data['training']) - batch_size:
        print('batch = ' + str(batch_no))
        # "True" (matched) question / answer / image-feature triple.
        (questions_in_true, answer_in_true,
         im_feat_true) = get_training_batch(batch_no, batch_size, qa_data)

        # "False" (mismatched) triple taken from the NEXT batch, wrapping to
        # batch 0 at the end — presumably negative examples for a
        # discriminator; confirm against the model definition elsewhere.
        if batch_no * batch_size < len(qa_data['training']) - 1:
            (questions_in_false, answer_in_false,
             im_feat_false) = get_training_batch(batch_no + 1, batch_size,
                                                 qa_data)
        else:
            (questions_in_false, answer_in_false,
             im_feat_false) = get_training_batch(0, batch_size, qa_data)

        # One discriminator training step (TF1 session API). s_r / s_w / s_f
        # look like real/wrong/fake scores — TODO confirm from the graph
        # definition. NOTE(review): this fragment is truncated mid feed_dict;
        # the remaining feeds and the batch_no increment are not visible here.
        _, loss_val, sr, sw, sf = sess.run(
            [train_op_d, loss, s_r, s_w, s_f],
            feed_dict={
                answers_true: answer_in_true,
                image: im_feat_true,
                question: questions_in_true,
Example #2
0
# Restore previously trained generator weights from a checkpoint file.
saver.restore(sess, "./train_g_v3.3")
#quit()

print('answers 1000....')
print('loading data...\n\n')
# Load the question/answer training data (load_data2 is defined elsewhere
# in the file).
qa_data = load_data2()
print('done loading data...\n\n')
batch_size = 50
#while batch_no*batch_size < len(qa_data['training']):
# 10 passes (epochs) over the training set.
for train_loops in range(10):
    batch_no = 0
    # Walk the training set in steps of batch_size.
    while batch_no * batch_size < len(qa_data['training']):
        print('batch = ' + str(batch_no))
        (questions_in_true, answer_in_true,
         im_feat_true) = get_training_batch(batch_no, batch_size, qa_data)

        # Gaussian noise input for the generator: one 2048-dim vector per
        # example, standard deviation 0.3.
        noise_in = np.random.normal(scale=0.3, size=[batch_size, 2048])

        # One generator training step (TF1 session API); scores_emb is the
        # generator output fetched alongside the loss — TODO confirm its
        # semantics from the graph definition elsewhere in the file.
        _, loss_val, g_out = sess.run(
            [train_op, loss, scores_emb],
            feed_dict={
                noise: noise_in,
                answers_true: answer_in_true,
                image: im_feat_true,
                question: questions_in_true,
            })

        print('loss = ' + str(loss_val))
        # loss_vals must be defined earlier in the (unseen) part of the file;
        # NameError otherwise. NOTE(review): batch_no is never incremented in
        # this visible span — confirm the increment exists elsewhere, or this
        # inner loop never advances.
        loss_vals.append(loss_val)
        #np.save('loss_vals_g_v' + save_ver, loss_vals)
Example #3
0
   # Adam optimizer step on the model loss. This line is indented — it sits
   # inside an enclosing scope whose opening is above this fragment and not
   # visible here.
   train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

# init variables
sess.run(tf.global_variables_initializer())




print('loading data...\n\n')
qa_data = load_data()
print('done loading data...\n\n')
# NOTE(review): batch_no's initialization is commented out below, yet
# batch_no is read in the while condition — NameError unless it is assigned
# earlier in the (unseen) part of the file.
#batch_no = 0
batch_size = 50
# Walk the training set in steps of batch_size.
while batch_no*batch_size < len(qa_data['training']):
   print('batch = ' + str(batch_no))
   (questions_in_true, answer_in_true, im_feat_true) = get_training_batch(batch_no, batch_size, qa_data)

   # Standard-normal noise input for the generator: one 2048-dim vector per
   # example.
   noise_in = np.random.normal(size=[batch_size,2048])

   # One optimizer step via Operation.run, which uses the implicit default
   # session.
   train_step.run(feed_dict={
      noise: noise_in, 
      answers_true: answer_in_true,
      cnn_out_true: im_feat_true,
      questions_true: questions_in_true,
   })

   # Re-evaluate the loss with a second forward pass over the same feeds.
   # NOTE(review): this fragment is truncated mid feed_dict; the close of
   # this call and any batch_no increment are not visible here.
   loss_val = sess.run(loss, feed_dict={
      noise: noise_in, 
      answers_true: answer_in_true,
      cnn_out_true: im_feat_true,
      questions_true: questions_in_true,