Example No. 1
    def test_loss(self):
        batch_size = 1
        img_size = 64
        channels = 4

        dataset = ginput.Input()
        input_a, input_b = dataset.get_batch(batch_size, img_size, channels)
        # Random binary labels in {0.0, 1.0} with shape [batch_size, 1].
        labels = tf.cast(tf.random_uniform([batch_size, 1],
                                           minval=0,
                                           maxval=2,
                                           dtype=tf.int32),
                         dtype=tf.float32)

        model = gmodel.Model(model_type='early_fusion', is_train=True, grid_size=1)
        model.build(img_a=input_a, img_b=input_b, labels=labels)

        # Define Loss
        loss_fn = gloss.Loss(model=model)

        train_op = gtrain_op.TrainOp(model=model,
                                     loss_fn=loss_fn,
                                     learning_rate=0.0001,
                                     decay_steps=100,
                                     beta=0.5,
                                     staircase=False)

        op = train_op.create_train_op()

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())

            for i in range(100):
                np_loss, _ = sess.run([loss_fn.loss, op])
                print('Loss value: %s' % np_loss)
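
The TrainOp wrapper above is opaque in this excerpt. As a rough guide, a train op with these parameters (learning_rate, decay_steps, beta, staircase) could be assembled as below. This is a hedged sketch, not the project's actual implementation; the decay_rate value and the mapping of beta to Adam's beta1 are assumptions.

import tensorflow as tf

def make_train_op(loss, learning_rate=0.0001, decay_steps=100,
                  beta=0.5, staircase=False):
    # Exponentially decayed learning rate driven by the global step.
    global_step = tf.train.get_or_create_global_step()
    lr = tf.train.exponential_decay(learning_rate,
                                    global_step,
                                    decay_steps=decay_steps,
                                    decay_rate=0.96,  # assumed, not shown above
                                    staircase=staircase)
    # beta is assumed to map to Adam's beta1, as in many GAN codebases.
    optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=beta)
    return optimizer.minimize(loss, global_step=global_step)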
Example No. 2
    def test_build_latefusion(self):
        print('\ntest_build_latefusion')
        batch_size = 64
        img_size = 64
        channels = 4

        dataset = inputs.Input()
        img_a, img_b = dataset.get_batch(batch_size=batch_size,
                                         img_size=img_size,
                                         channels=channels)

        late_fusion = nets.LateFusion(name='late_fusion', is_train=True)
        end_points, outputs = late_fusion(inputs_a=img_a, inputs_b=img_b)

        print('\nEnd points\n')
        for end_point in end_points:
            print('##############')
            print(end_point)
            print(end_points[end_point].get_shape().as_list())

        assert outputs.get_shape().as_list() == [batch_size, 1]
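
For readers unfamiliar with the term, "late fusion" keeps the two inputs in separate convolutional streams and only merges them near the decision layer. The toy sketch below illustrates the idea only; the real nets.LateFusion layer sizes and weight sharing are not shown in this excerpt and are assumptions here.

import tensorflow as tf

def toy_late_fusion(img_a, img_b):
    def stream(x, reuse):
        # One convolutional stream per input image, with shared weights.
        with tf.variable_scope('stream', reuse=reuse):
            x = tf.layers.conv2d(x, 32, 3, strides=2, activation=tf.nn.relu)
            x = tf.layers.conv2d(x, 64, 3, strides=2, activation=tf.nn.relu)
            return tf.layers.flatten(x)

    feat_a = stream(img_a, reuse=False)
    feat_b = stream(img_b, reuse=True)           # reuse the same weights
    fused = tf.concat([feat_a, feat_b], axis=1)  # fusion happens late
    return tf.layers.dense(fused, 1)             # [batch_size, 1] logit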
Example No. 3
    def test_loss(self):
        batch_size = 64
        img_size = 64
        channels = 4

        dataset = ginput.Input()
        input_a, input_b = dataset.get_batch(batch_size, img_size, channels)
        labels = tf.cast(tf.random_uniform([batch_size, 1],
                                           minval=0,
                                           maxval=2,
                                           dtype=tf.int32),
                         dtype=tf.float32)

        model = gmodel.Model(model_type='early_fusion', is_train=True, grid_size=1)
        model.build(img_a=input_a, img_b=input_b, labels=labels)

        # Define Loss
        loss_fn = gloss.Loss(model=model)

        loss = loss_fn.compute_loss()

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            np_loss = sess.run(loss)

            assert np_loss.shape == ()
            print('Loss value: %s' % np_loss)
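
Given that the model emits a [batch_size, 1] output and the labels are 0/1 floats, the scalar loss asserted above is consistent with a mean sigmoid cross-entropy. Whether gloss.Loss actually does this is an assumption; the sketch below only shows a loss with the same input and output shapes.

import tensorflow as tf

def binary_loss(logits, labels):
    # Per-example sigmoid cross-entropy, shape [batch_size, 1].
    per_example = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                          logits=logits)
    # Reduce to a scalar, matching np_loss.shape == () in the test.
    return tf.reduce_mean(per_example)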
Example No. 4
    def test_build_fnet(self):
        print('\ntest_build_fnet')

        batch_size = 64
        img_size = 64
        channels = 4

        dataset = inputs.Input()
        img_a, img_b = dataset.get_batch(batch_size=batch_size,
                                         img_size=img_size,
                                         channels=channels)

        fnet = nets.FeatureNet(name='feature_net', is_train=True)
        end_points_a, outputs_a = fnet(inputs=img_a)
        end_points_b, outputs_b = fnet(inputs=img_b)

        print('\nEnd points A\n')
        for end_point in end_points_a:
            print('##############')
            print(end_point)
            print(end_points_a[end_point].get_shape().as_list())

        print('\nEnd points B\n')
        for end_point in end_points_b:
            print('##############')
            print(end_point)
            print(end_points_b[end_point].get_shape().as_list())

        # Each feature-map stage halves the spatial resolution, so each side
        # of the output is img_size // 2**len(features). Integer division is
        # required: `/` would yield floats that never equal the integer
        # entries returned by get_shape().as_list().
        features = nets.get_fnet_feature_map()
        assert outputs_a.get_shape().as_list() == [
            batch_size, img_size // 2**len(features),
            img_size // 2**len(features), features[-1]
        ]
        assert outputs_b.get_shape().as_list() == [
            batch_size, img_size // 2**len(features),
            img_size // 2**len(features), features[-1]
        ]
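
The shape arithmetic is easy to sanity-check by hand. Assuming each entry of get_fnet_feature_map() corresponds to one stride-2 stage, a 64x64 input with three stages ends at 8x8, which also matches the 8x8x512 features fed to the decision net in Example No. 5. The feature list below is hypothetical.

features = [128, 256, 512]                # hypothetical stage widths
img_size, batch_size = 64, 64
spatial = img_size // 2 ** len(features)  # 64 // 8 == 8
print([batch_size, spatial, spatial, features[-1]])  # [64, 8, 8, 512]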
Example No. 5
    def test_build_dnet(self):
        print('\ntest_build_dnet')

        batch_size = 64
        feature_size = 8
        channels = 512

        dataset = inputs.Input()
        feature_a, feature_b = dataset.get_batch(batch_size=batch_size,
                                                 img_size=feature_size,
                                                 channels=channels)

        dnet = nets.DecisionNet(name='decision_net', is_train=True)
        end_points, outputs = dnet(inputs_a=feature_a,
                                   inputs_b=feature_b,
                                   concat=True)

        print('\nEnd points\n')
        for end_point in end_points:
            print('##############')
            print(end_point)
            print(end_points[end_point].get_shape().as_list())

        assert outputs.get_shape().as_list() == [batch_size, 1]
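
A plausible reading of concat=True is that the two feature maps are joined along the channel axis before the decision layers; this is an assumption, since DecisionNet's body is not shown. In TensorFlow that merge is a one-liner:

import tensorflow as tf

feature_a = tf.zeros([64, 8, 8, 512])
feature_b = tf.zeros([64, 8, 8, 512])
joint = tf.concat([feature_a, feature_b], axis=-1)  # shape [64, 8, 8, 1024]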
Example No. 6
def main(_):
    utils.create_folder(folder_list=[FLAGS.dataset_dir, FLAGS.train_log_dir])

    # Force all input processing onto CPU in order to reserve the GPU for
    # the forward inference and back-propagation.
    with tf.name_scope('inputs'):
        with tf.device('/cpu:0'):
            dataset = ginput.Input()
            input_a, input_b = dataset.get_batch(FLAGS.batch_size,
                                                 FLAGS.img_size,
                                                 FLAGS.channels)
            labels = tf.cast(tf.random_uniform([FLAGS.batch_size, 1],
                                               minval=0,
                                               maxval=2,
                                               dtype=tf.int32),
                             dtype=tf.float32)

    # Define the Model
    model = gmodel.Model(model_type=FLAGS.model_type,
                         is_train=True,
                         grid_size=FLAGS.grid_size)
    model.build(img_a=input_a, img_b=input_b, labels=labels)

    # Define Loss
    loss_fn = gloss.Loss(model=model,
                         weight_decay_coeff=FLAGS.weight_decay_coeff)

    # Get global step and the respective operation
    global_step_inc = utils.create_global_step()

    # Create training operation
    train_op = gtrain_op.TrainOp(model=model,
                                 loss_fn=loss_fn,
                                 learning_rate=FLAGS.learning_rate,
                                 decay_steps=FLAGS.decay_steps,
                                 beta=FLAGS.beta,
                                 staircase=FLAGS.staircase)

    op = train_op.create_train_op()

    status_message = tf.string_join(
        ['Starting train step: ',
         tf.as_string(tf.train.get_or_create_global_step())],
        name='status_message')

    hooks = [
        tf.train.LoggingTensorHook([status_message], every_n_iter=10),
        tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
        utils.RunTrainOpsHook(train_ops=op)
    ]

    # The real training ops run inside RunTrainOpsHook above; the top-level
    # train_op passed here only increments the global step each iteration.
    training.train(train_op=global_step_inc,
                   logdir=FLAGS.train_log_dir,
                   master='',
                   is_chief=True,
                   scaffold=None,
                   hooks=hooks,
                   chief_only_hooks=None,
                   save_checkpoint_secs=FLAGS.save_checkpoint_secs,
                   save_summaries_steps=FLAGS.save_summaries_steps,
                   config=None,
                   max_wait_secs=7200)
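
The script reads a number of FLAGS whose definitions fall outside this excerpt. A plausible reconstruction with tf.app.flags is shown below; the flag names come from the code above, but every default value and help string is an assumption.

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('dataset_dir', '/tmp/dataset', 'Input data directory.')
flags.DEFINE_string('train_log_dir', '/tmp/train_log', 'Checkpoint and summary directory.')
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_integer('img_size', 64, 'Input image size.')
flags.DEFINE_integer('channels', 4, 'Input image channels.')
flags.DEFINE_string('model_type', 'early_fusion', 'Fusion architecture.')
flags.DEFINE_integer('grid_size', 1, 'Model grid size.')
flags.DEFINE_float('weight_decay_coeff', 0.0001, 'Weight decay coefficient.')
flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.')
flags.DEFINE_integer('decay_steps', 100, 'Learning-rate decay steps.')
flags.DEFINE_float('beta', 0.5, 'Optimizer beta parameter.')
flags.DEFINE_boolean('staircase', False, 'Use staircase learning-rate decay.')
flags.DEFINE_integer('max_number_of_steps', 10000, 'Maximum training steps.')
flags.DEFINE_integer('save_checkpoint_secs', 600, 'Seconds between checkpoints.')
flags.DEFINE_integer('save_summaries_steps', 100, 'Steps between summaries.')
FLAGS = flags.FLAGS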