Example #1
 def __init__(self,
              num_hops=1,
              batch_size=50,
              validation_size=300,
              sentence_embed_dim=200,
              num_variants=5,
              valid_num_variants=10):
     self.valid_num_variants = valid_num_variants
     # All embedding sizes and the memory size are tied to one dimension.
     self.sentence_output_embed_dim = self.sentence_input_embed_dim = \
         self.question_embed_dim = self.memory_dim = sentence_embed_dim
     self.batch_generator = batch_generator.BatchGenerator(
         batch_size, num_variants=num_variants)
     self.num_variants = self.batch_generator.num_variants
     self.num_hops = num_hops
     self.vocabulary_size = len(self.batch_generator.encode_dict)
     self.batch_size = batch_size
     self.validation_size = validation_size
     self.session = tf.Session()
Example #2
 def __init__(self,
              num_hops=1,
              batch_size=100,
              validation_size=400,
              sentence_input_embed_dim=200,
              sentence_output_embed_dim=200):
     self.batch_generator = batch_generator.BatchGenerator(batch_size)
     self.num_hops = num_hops
     self.question_embed_dim = sentence_input_embed_dim
     self.sentence_input_embed_dim = sentence_input_embed_dim
     self.sentence_output_embed_dim = sentence_output_embed_dim
     self.vocabulary_size = len(self.batch_generator.encode_dict)
     self.memory_dim = sentence_output_embed_dim
     self.batch_size = batch_size
     self.validation_size = validation_size
     self.session = tf.Session()
     # Bias terms for the question, input-memory and output-memory embeddings.
     self.bb = tf.Variable(
         tf.truncated_normal([1, self.question_embed_dim], stddev=0.1))
     self.ba = tf.Variable(
         tf.truncated_normal([1, self.sentence_input_embed_dim], stddev=0.1))
     self.bc = tf.Variable(
         tf.truncated_normal([1, self.sentence_output_embed_dim], stddev=0.1))
     # B embeds the question; each A[i]/C[i] pair embeds the story sentences
     # into input/output memories for hop i; W maps the final state back to
     # vocabulary logits.
     self.A = []
     self.B = tf.Variable(
         tf.truncated_normal(
             [self.vocabulary_size, self.question_embed_dim], stddev=0.1))
     self.C = []
     self.W = tf.Variable(
         tf.truncated_normal(
             [self.sentence_output_embed_dim, self.vocabulary_size],
             stddev=0.1))
     for i in range(self.num_hops):
         self.A.append(
             tf.Variable(
                 tf.truncated_normal(
                     [self.vocabulary_size, self.sentence_input_embed_dim],
                     stddev=0.1)))
         self.C.append(
             tf.Variable(
                 tf.truncated_normal(
                     [self.vocabulary_size, self.sentence_output_embed_dim],
                     stddev=0.1)))
     self.init = tf.global_variables_initializer()  # op that initializes all model variables
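
The forward pass that consumes these matrices is not part of the excerpt. For reference, here is a minimal NumPy sketch of a single memory hop in the standard end-to-end memory network formulation, which is what the shapes of A, B, C and W above suggest; it assumes, as in both examples, that all embedding sizes are equal, and all names below are local to the sketch (bag-of-words encodings stand in for the real batch pipeline):

import numpy as np

def memory_hop(story_bow, question_bow, A, B, C, W):
    """One hop of an end-to-end memory network over bag-of-words inputs.

    story_bow:    (num_sentences, vocab)  multi-hot story sentences
    question_bow: (vocab,)                multi-hot question
    A, C:         (vocab, embed_dim)      input / output memory embeddings
    B:            (vocab, embed_dim)      question embedding
    W:            (embed_dim, vocab)      projection to answer logits
    """
    u = question_bow @ B        # question representation
    m = story_bow @ A           # input memories
    c = story_bow @ C           # output memories
    p = np.exp(m @ u)
    p /= p.sum()                # attention over sentences (softmax)
    o = p @ c                   # attended output memory
    return (o + u) @ W          # answer scores over the vocabulary

With num_hops > 1, one A[i]/C[i] pair is used per hop and the updated state o + u becomes the question representation for the next hop, which is what the per-hop lists built in the loop above are for.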
Example #3
    FLAGS = tf.flags.FLAGS

    is_result_sharpen = False
    model = draw_model.DrawModel(with_attention, with_attention)

    sess = tf.InteractiveSession()
    saver = tf.train.Saver()  # saves variables learned during training
    tf.global_variables_initializer().run()

    # restore the trained model from the checkpoint file
    ckpt_file = os.path.join(FLAGS.data_dir, "drawmodel.ckpt")
    print(ckpt_file)
    saver.restore(sess, ckpt_file)

    # set up the input image generator
    img_generator = batch_gen.BatchGenerator(const.batch_size, FLAGS.data_dir)

    xtrain, ytrain = img_generator.next(direction=direction, debug=True)

    print('xtrain.shape ', xtrain.shape)
    #sys.exit(1)
    feed_dict = {model.x: xtrain, model.y: ytrain}
    canvases = sess.run(model.cs, feed_dict)  # generate some examples
    canvases = np.array(canvases)  # T x batch x img_size

    # visualize results
    T, batch_size, img_size = canvases.shape
    y_recons = 1.0 / (1.0 + np.exp(-canvases))  # reconstruction = sigmoid(canvas)
    print(y_recons.shape)
    B = A = int(np.sqrt(img_size))
    prefix = save_path + '/myattn_deploy3_withoutattenlc'
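
The canvases hold one reconstruction per DRAW timestep. A minimal sketch of writing the final-step reconstructions to disk, assuming square grayscale patches; matplotlib and the file naming below are illustrative additions, not part of the original script:

import matplotlib.pyplot as plt

final = y_recons[-1]                      # (batch_size, img_size), values in (0, 1)
for i in range(min(8, final.shape[0])):   # cap at a few examples for illustration
    patch = final[i].reshape(A, B)        # A == B == sqrt(img_size)
    plt.imsave(prefix + '_recon_%d.png' % i, patch,
               cmap='gray', vmin=0.0, vmax=1.0)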
Example #4
                    str(texture_id) + '_reconstructed.png',
                    output_img[:, :, :, idx] * 255)
                generateTile.gif_step += 1


if __name__ == '__main__':
    # set up constants and the input batch generator
    generateTile.gif_step = 0
    dir = const.Direction.UP.value
    size = const.A
    with_attention = False
    is_result_sharpen = False
    B = A = const.A

    FLAGS = tf.flags.FLAGS
    img_generator = batch_gen.BatchGenerator(const.batch_size)

    xtrain, ytrain = img_generator.next(direction=dir, debug=True)

    radius = 3
    # Tile around center
    texture_list = [3]

    input_img = np.reshape(np.transpose(xtrain[texture_list, :]),
                           (A, B, const.num_channels, len(texture_list)))

    output_img = np.zeros(
        (B * (2 * radius + 1), B * (2 * radius + 1), const.num_channels,
         len(texture_list)))
    output_img[B * radius:B * radius + B,
               B * radius:B * radius + B, :, :] = input_img
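
The slice assignment above writes the input patch into the centre cell of a (2*radius+1) x (2*radius+1) mosaic of B x B tiles. Below is a small helper for the same index arithmetic at an arbitrary grid offset; place_tile is a hypothetical name introduced here for illustration:

def place_tile(mosaic, tile, dy, dx, tile_size, radius):
    """Write `tile` into the mosaic cell at grid offset (dy, dx) from the centre.

    dy and dx range over [-radius, radius]; (0, 0) is the centre cell,
    which reproduces the assignment in the snippet above.
    """
    row = tile_size * (radius + dy)
    col = tile_size * (radius + dx)
    mosaic[row:row + tile_size, col:col + tile_size, ...] = tile
    return mosaic

# Equivalent to the centre placement above:
# output_img = place_tile(output_img, input_img, 0, 0, B, radius)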
Example #5
if __name__ == '__main__':
    # set up constants and the input batch generator

    dir = const.Direction.UP.value
    size = const.A
    with_attention = False
    is_result_sharpen = False
    B = A = const.A
    prefix = './output/myattn_deploy3'
    radius = 3
    save_path = os.path.join(
        "./train/",
        'simple_d' + str(dir) + '_s' + str(size) + '_a' + str(with_attention))
    FLAGS = tf.flags.FLAGS
    img_generator = batch_gen.BatchGenerator(const.batch_size, save_path)

    xtrain, ytrain = img_generator.next(direction=dir, debug=True)

    texture_list = [0, 4, 10]
    input_img = np.reshape(np.transpose(xtrain[texture_list, :]),
                           (A, B, len(texture_list)))

    output_img = np.zeros(
        (B * (2 * radius + 1), B * (2 * radius + 1), len(texture_list)))

    output_img[B * radius:B * radius + B,
               B * radius:B * radius + B, :] = input_img * 255
    #gt_img =
    ## Do Left to right
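
Once further tiles have been generated and placed, the assembled mosaic can be written out per texture. A minimal sketch; imageio and the output naming are illustrative, not taken from the original script:

import imageio
import numpy as np

# output_img: (B*(2*radius+1), B*(2*radius+1), len(texture_list)), values in [0, 255]
for idx, texture_id in enumerate(texture_list):
    mosaic = output_img[:, :, idx].astype(np.uint8)
    imageio.imwrite(prefix + '_texture%d_mosaic.png' % texture_id, mosaic)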