Example #1
    def train_setup(self):
        tf.set_random_seed(self.conf.random_seed)

        # Create queue coordinator.
        self.coord = tf.train.Coordinator()

        # Input size
        h, w = (self.conf.input_height, self.conf.input_width)
        input_size = (h, w)

        # Devices
        gpu_list = get_available_gpus()
        zip_encoder, zip_decoder_b, zip_decoder_w = [], [], []
        restore_vars = []
        self.loaders = []

        self.im_list = []

        for i in range(len(gpu_list)):
            with tf.device(gpu_list[i]):
                # Load reader
                with tf.name_scope("create_inputs"):
                    reader = ImageReader(self.conf.data_dir,
                                         self.conf.data_list, input_size,
                                         self.conf.random_scale,
                                         self.conf.random_mirror,
                                         self.conf.ignore_label, IMG_MEAN,
                                         self.coord)
                    self.image_batch, self.label_batch, names = reader.dequeue(
                        self.conf.batch_size)
                    self.im_list.append(self.image_batch)
                    image_batch_075 = tf.image.resize_images(
                        self.image_batch,
                        [int(h * 0.75), int(w * 0.75)])
                    image_batch_05 = tf.image.resize_images(
                        self.image_batch,
                        [int(h * 0.5), int(w * 0.5)])

                # Create network
                with tf.variable_scope('', reuse=False):
                    net = Deeplab_v2(self.image_batch, self.conf.num_classes,
                                     True)
                with tf.variable_scope('', reuse=True):
                    net075 = Deeplab_v2(image_batch_075, self.conf.num_classes,
                                        True)
                with tf.variable_scope('', reuse=True):
                    net05 = Deeplab_v2(image_batch_05, self.conf.num_classes,
                                       True)
                # Variables that load from pre-trained model.
                restore_var = [
                    v for v in tf.global_variables() if 'fc' not in v.name
                ]
                restore_vars.append(restore_var)
                # Trainable Variables
                all_trainable = tf.trainable_variables()
                # Fine-tune part
                encoder_trainable = [
                    v for v in all_trainable if 'fc' not in v.name
                ]  # lr * 1.0
                # Decoder part
                decoder_trainable = [
                    v for v in all_trainable if 'fc' in v.name
                ]

                decoder_w_trainable = [
                    v for v in decoder_trainable
                    if 'weights' in v.name or 'gamma' in v.name
                ]  # lr * 10.0
                decoder_b_trainable = [
                    v for v in decoder_trainable
                    if 'biases' in v.name or 'beta' in v.name
                ]  # lr * 20.0
                # Check
                assert (len(all_trainable) == len(decoder_trainable) +
                        len(encoder_trainable))
                assert (len(decoder_trainable) == len(decoder_w_trainable) +
                        len(decoder_b_trainable))

                # Network raw output
                raw_output100 = net.outputs
                raw_output075 = net075.outputs
                raw_output05 = net05.outputs
                raw_output = tf.reduce_max(tf.stack([
                    raw_output100,
                    tf.image.resize_images(raw_output075,
                                           tf.shape(raw_output100)[1:3]),
                    tf.image.resize_images(raw_output05,
                                           tf.shape(raw_output100)[1:3])
                ]), axis=0)

                # Ground truth: ignore all labels greater than or equal to num_classes
                label_proc = prepare_label(self.label_batch,
                                           tf.stack(
                                               raw_output.get_shape()[1:3]),
                                           num_classes=self.conf.num_classes,
                                           one_hot=False)  # [batch_size, h, w]
                label_proc075 = prepare_label(
                    self.label_batch,
                    tf.stack(raw_output075.get_shape()[1:3]),
                    num_classes=self.conf.num_classes,
                    one_hot=False)
                label_proc05 = prepare_label(
                    self.label_batch,
                    tf.stack(raw_output05.get_shape()[1:3]),
                    num_classes=self.conf.num_classes,
                    one_hot=False)

                raw_gt = tf.reshape(label_proc, [-1])
                raw_gt075 = tf.reshape(label_proc075, [-1])
                raw_gt05 = tf.reshape(label_proc05, [-1])

                indices = tf.squeeze(
                    tf.where(tf.less_equal(raw_gt, self.conf.num_classes - 1)),
                    1)
                indices075 = tf.squeeze(
                    tf.where(
                        tf.less_equal(raw_gt075, self.conf.num_classes - 1)),
                    1)
                indices05 = tf.squeeze(
                    tf.where(tf.less_equal(raw_gt05,
                                           self.conf.num_classes - 1)), 1)

                gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
                gt075 = tf.cast(tf.gather(raw_gt075, indices075), tf.int32)
                gt05 = tf.cast(tf.gather(raw_gt05, indices05), tf.int32)

                raw_prediction = tf.reshape(raw_output,
                                            [-1, self.conf.num_classes])
                raw_prediction100 = tf.reshape(raw_output100,
                                               [-1, self.conf.num_classes])
                raw_prediction075 = tf.reshape(raw_output075,
                                               [-1, self.conf.num_classes])
                raw_prediction05 = tf.reshape(raw_output05,
                                              [-1, self.conf.num_classes])

                prediction = tf.gather(raw_prediction, indices)
                prediction100 = tf.gather(raw_prediction100, indices)
                prediction075 = tf.gather(raw_prediction075, indices075)
                prediction05 = tf.gather(raw_prediction05, indices05)

                # Pixel-wise softmax_cross_entropy loss
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=prediction, labels=gt)
                loss100 = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=prediction100, labels=gt)
                loss075 = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=prediction075, labels=gt075)
                loss05 = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=prediction05, labels=gt05)
                # L2 regularization
                l2_losses = [
                    self.conf.weight_decay * tf.nn.l2_loss(v)
                    for v in all_trainable if 'weights' in v.name
                ]
                # Loss function
                self.reduced_loss = tf.reduce_mean(loss) + tf.reduce_mean(
                    loss100) + tf.reduce_mean(loss075) + tf.reduce_mean(
                        loss05) + tf.add_n(l2_losses)

                # Define optimizers
                # 'poly' learning rate
                base_lr = tf.constant(self.conf.learning_rate)
                self.curr_step = tf.placeholder(dtype=tf.float32, shape=())
                learning_rate = tf.scalar_mul(
                    base_lr,
                    tf.pow((1 - self.curr_step / self.conf.num_steps),
                           self.conf.power))
                # We use several optimizers to emulate Caffe's per-layer lr_mult
                # parameter, which scales the effective learning rate of each
                # layer.
                opt_encoder = tf.train.MomentumOptimizer(
                    learning_rate, self.conf.momentum)
                opt_decoder_w = tf.train.MomentumOptimizer(
                    learning_rate * 10.0, self.conf.momentum)
                opt_decoder_b = tf.train.MomentumOptimizer(
                    learning_rate * 20.0, self.conf.momentum)

                # Gradient accumulation
                # Define a variable to accumulate gradients.
                accum_grads = [
                    tf.Variable(tf.zeros_like(v.initialized_value()),
                                trainable=False) for v in encoder_trainable +
                    decoder_w_trainable + decoder_b_trainable
                ]
                # Define an operation to clear the accumulated gradients for next batch.
                self.zero_op = [
                    v.assign(tf.zeros_like(v)) for v in accum_grads
                ]
                # To make sure each layer gets updated by different lr's, we do not use 'minimize' here.
                # Instead, we separate the steps compute_grads+update_params.
                # Compute grads
                grads = tf.gradients(
                    self.reduced_loss, encoder_trainable +
                    decoder_w_trainable + decoder_b_trainable)
                # Accumulate and normalise the gradients.
                self.accum_grads_op = [
                    accum_grads[i].assign_add(grad /
                                              self.conf.grad_update_every)
                    for i, grad in enumerate(grads)
                ]

                # Slice the accumulated gradients back into the three parameter
                # groups (the tf.gradients result above is already folded into
                # accum_grads, so there is no need to recompute it here).
                grads_encoder = accum_grads[:len(encoder_trainable)]
                grads_decoder_w = accum_grads[len(encoder_trainable):(
                    len(encoder_trainable) + len(decoder_w_trainable))]
                grads_decoder_b = accum_grads[(len(encoder_trainable) +
                                               len(decoder_w_trainable)):]

                zip_encoder.append(list(zip(grads_encoder, encoder_trainable)))
                zip_decoder_w.append(
                    list(zip(grads_decoder_w, decoder_w_trainable)))
                zip_decoder_b.append(
                    list(zip(grads_decoder_b, decoder_b_trainable)))

        avg_grads_encoder = average_gradients(zip_encoder)
        avg_grads_decoder_w = average_gradients(zip_decoder_w)
        avg_grads_decoder_b = average_gradients(zip_decoder_b)

        for i in range(len(gpu_list)):
            with tf.device(gpu_list[i]):
                # Update params
                train_op_conv = opt_encoder.apply_gradients(avg_grads_encoder)
                train_op_fc_w = opt_decoder_w.apply_gradients(
                    avg_grads_decoder_w)
                train_op_fc_b = opt_decoder_b.apply_gradients(
                    avg_grads_decoder_b)

        # Finally, get the train_op!
        update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS
        )  # for collecting moving_mean and moving_variance
        with tf.control_dependencies(update_ops):
            self.train_op = tf.group(train_op_conv, train_op_fc_w,
                                     train_op_fc_b)

        # Saver for storing checkpoints of the model
        self.saver = tf.train.Saver(var_list=tf.global_variables(),
                                    max_to_keep=0)

        # Loader for loading the pre-trained model
        for i in range(len(gpu_list)):
            with tf.device(gpu_list[i]):
                #print(restore_var)
                #print("restoring gpu ", i)
                self.loaders.append(tf.train.Saver(var_list=restore_vars[i]))
                #print("restored gpu ", i)

        # Training summary
        # Processed predictions: for visualisation.
        raw_output_up = tf.image.resize_bilinear(raw_output, input_size)
        raw_output_up = tf.argmax(raw_output_up, axis=3)
        self.pred = tf.expand_dims(raw_output_up, axis=3)
        # Image summary.
        images_summary = tf.py_func(inv_preprocess,
                                    [self.image_batch, 1, IMG_MEAN], tf.uint8)
        labels_summary = tf.py_func(
            decode_labels, [self.label_batch, 1, self.conf.num_classes],
            tf.uint8)
        preds_summary = tf.py_func(decode_labels,
                                   [self.pred, 1, self.conf.num_classes],
                                   tf.uint8)
        self.total_summary = tf.summary.image(
            'images',
            tf.concat(axis=2,
                      values=[images_summary, labels_summary, preds_summary]),
            max_outputs=1)  # Concatenate row-wise.
        if not os.path.exists(self.conf.logdir):
            os.makedirs(self.conf.logdir)
        self.summary_writer = tf.summary.FileWriter(
            self.conf.logdir, graph=tf.get_default_graph())
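
Example #1's accumulate-then-apply pattern can be distilled into a short standalone sketch. This is an illustration under assumed values (a toy loss, grad_update_every = 4, Momentum with made-up hyperparameters), not code from the example itself:

import numpy as np
import tensorflow as tf

# Toy model: the point is the accumulation pattern, not the model.
x = tf.placeholder(tf.float32, [None, 4])
w = tf.Variable(tf.zeros([4, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - 1.0))

grad_update_every = 4  # sub-batches per effective batch (assumed)
tvars = tf.trainable_variables()

# One non-trainable accumulator per variable.
accum_grads = [tf.Variable(tf.zeros_like(v.initialized_value()),
                           trainable=False) for v in tvars]
zero_op = [a.assign(tf.zeros_like(a)) for a in accum_grads]

# Divide each sub-batch gradient so the accumulated sum equals the mean.
grads = tf.gradients(loss, tvars)
accum_op = [a.assign_add(g / grad_update_every)
            for a, g in zip(accum_grads, grads)]

opt = tf.train.MomentumOptimizer(1e-3, 0.9)
train_op = opt.apply_gradients(list(zip(accum_grads, tvars)))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10):
        sess.run(zero_op)                   # clear accumulators
        for _ in range(grad_update_every):  # accumulate sub-batches
            sess.run(accum_op, {x: np.random.rand(8, 4)})
        sess.run(train_op)                  # single parameter update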
Example #2
def train(args):
    ## set hyperparameters
    img_mean = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    tf.set_random_seed(args.random_seed)
    coord = tf.train.Coordinator()
    LAMBDA = 10

    print("d_model_name:", args.d_name)
    print("lambda:", args.lamb)
    print("learning_rate:", args.learning_rate)
    print("is_val:", args.is_val)
    print("---------------------------------")

    ## load data
    with tf.name_scope("create_inputs"):
        reader = ImageReader(args.data_dir, args.img_size, args.random_scale,
                             args.random_mirror, args.random_crop,
                             args.ignore_label, args.is_val, img_mean, coord)
        image_batch, label_batch = reader.dequeue(args.batch_size)
        print("Data is ready!")

    ## load model
    g_net = choose_generator(args.g_name, image_batch)
    score_map = g_net.get_output()
    fk_batch = tf.nn.softmax(score_map, axis=-1)
    gt_batch = tf.one_hot(label_batch, args.num_classes, dtype=tf.float32)
    x_batch = tf.train.batch([(reader.image + img_mean) / 255.],
                             args.batch_size,
                             dynamic_pad=True)  # normalization
    d_fk_net, d_gt_net = choose_discriminator(args.d_name, fk_batch, gt_batch,
                                              x_batch)
    d_fk_pred = d_fk_net.get_output()  # fake segmentation result in d
    d_gt_pred = d_gt_net.get_output()  # ground-truth result in d

    label, logits = convert_to_calculateloss(score_map, args.num_classes,
                                             label_batch)
    predict_label = tf.argmax(logits, axis=1)
    predict_batch = g_net.topredict(score_map, tf.shape(image_batch)[1:3])
    print("The model has been created!")

    ## get all kinds of variables list
    g_restore_var = [
        v for v in tf.global_variables() if 'discriminator' not in v.name
    ]
    vgg_restore_var = [
        v for v in tf.global_variables()
        if 'discriminator' in v.name and 'image' in v.name
    ]
    g_var = [
        v for v in tf.trainable_variables() if 'discriminator' not in v.name
    ]
    d_var = [
        v for v in tf.trainable_variables()
        if 'discriminator' in v.name and 'image' not in v.name
    ]
    # g_trainable_var = [v for v in g_var if 'beta' not in v.name or 'gamma' not in v.name] #batch_norm training open
    g_trainable_var = g_var
    d_trainable_var = d_var

    ## set loss
    mce_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,
                                                       logits=logits))
    g_bce_loss = -tf.reduce_mean(d_fk_pred)
    g_loss = mce_loss + args.lamb * g_bce_loss

    fk_score_var = tf.reduce_mean(d_fk_pred)
    gt_score_var = tf.reduce_mean(d_gt_pred)
    d_loss = fk_score_var - gt_score_var

    alpha = tf.random_uniform(shape=tf.shape(gt_batch), minval=0., maxval=1.)
    differences = fk_batch - gt_batch
    interpolates = gt_batch + (alpha * differences)
    gradients = tf.gradients(
        Discriminator_add_vgg({
            'seg': interpolates,
            'data': x_batch
        },
                              reuse=True).get_output(), [interpolates])[0]
    slopes = tf.sqrt(
        tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes - 1.)**2)
    d_loss += gradient_penalty

    mce_loss_var, mce_loss_op = tf.metrics.mean(mce_loss)
    g_bce_loss_var, g_bce_loss_op = tf.metrics.mean(g_bce_loss)
    g_loss_var, g_loss_op = tf.metrics.mean(g_loss)
    d_loss_var, d_loss_op = tf.metrics.mean(d_loss)
    iou_var, iou_op = tf.metrics.mean_iou(label, predict_label,
                                          args.num_classes)
    accuracy_var, acc_op = tf.metrics.accuracy(label, predict_label)
    metrics_op = tf.group(mce_loss_op, g_bce_loss_op, g_loss_op, d_loss_op,
                          iou_op, acc_op)

    ## set optimizer
    iterstep = tf.placeholder(dtype=tf.float32,
                              shape=[],
                              name='iteration_step')

    base_lr = tf.constant(args.learning_rate, dtype=tf.float32, shape=[])
    lr = tf.scalar_mul(base_lr,
                       tf.pow((1 - iterstep / args.num_steps),
                              args.power))  # 'poly' learning rate decay over time

    g_gradients = tf.train.AdamOptimizer(learning_rate=lr).compute_gradients(
        g_loss, g_trainable_var)
    d_gradients = tf.train.AdamOptimizer(
        learning_rate=lr * 10).compute_gradients(d_loss, d_trainable_var)
    grad_fk_oi = tf.gradients(d_fk_pred, fk_batch, name='grad_fk_oi')[0]
    grad_gt_oi = tf.gradients(d_gt_pred, gt_batch, name='grad_gt_oi')[0]
    grad_fk_img_oi = tf.gradients(d_fk_pred,
                                  image_batch,
                                  name='grad_fk_img_oi')[0]
    grad_gt_img_oi = tf.gradients(d_gt_pred,
                                  image_batch,
                                  name='grad_gt_img_oi')[0]

    train_g_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(
        g_loss, var_list=g_trainable_var)
    train_d_op = tf.train.AdamOptimizer(learning_rate=lr * 10).minimize(
        d_loss, var_list=d_trainable_var)

    ## set summary
    vs_image = tf.py_func(inv_preprocess,
                          [image_batch, args.save_num_images, img_mean],
                          tf.uint8)
    vs_label = tf.py_func(
        decode_labels, [label_batch, args.save_num_images, args.num_classes],
        tf.uint8)
    vs_predict = tf.py_func(
        decode_labels, [predict_batch, args.save_num_images, args.num_classes],
        tf.uint8)
    tf.summary.image(name='image collection_train',
                     tensor=tf.concat(axis=2,
                                      values=[vs_image, vs_label, vs_predict]),
                     max_outputs=args.save_num_images)
    tf.summary.scalar('fk_score', tf.reduce_mean(d_fk_pred))
    tf.summary.scalar('gt_score', tf.reduce_mean(d_gt_pred))
    tf.summary.scalar('g_loss_train', g_loss_var)
    tf.summary.scalar('d_loss_train', d_loss_var)
    tf.summary.scalar('mce_loss_train', mce_loss_var)
    tf.summary.scalar('g_bce_loss_train', -1. * g_bce_loss_var)
    tf.summary.scalar('iou_train', iou_var)
    tf.summary.scalar('accuracy_train', accuracy_var)
    tf.summary.scalar('grad_fk_oi', tf.reduce_mean(tf.abs(grad_fk_oi)))
    tf.summary.scalar('grad_gt_oi', tf.reduce_mean(tf.abs(grad_gt_oi)))
    tf.summary.scalar('grad_fk_img_oi', tf.reduce_mean(tf.abs(grad_fk_img_oi)))
    tf.summary.scalar('grad_gt_img_oi', tf.reduce_mean(tf.abs(grad_gt_img_oi)))

    for grad, var in g_gradients + d_gradients:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(args.log_dir,
                                           graph=tf.get_default_graph(),
                                           max_queue=3)

    ## set session
    print("GPU index:" + str(os.environ['CUDA_VISIBLE_DEVICES']))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    global_init = tf.global_variables_initializer()
    local_init = tf.local_variables_initializer()
    sess.run(global_init)
    sess.run(local_init)

    ## set saver
    saver_all = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=2)
    trained_step = 0
    if os.path.exists(args.restore_from + 'checkpoint'):
        trained_step = load_weight(args.restore_from, saver_all, sess)
    else:
        load_weight(args.baseweight_from['d_vgg'], vgg_restore_var, sess)
        saver_g = tf.train.Saver(var_list=g_restore_var, max_to_keep=2)
        load_weight(args.baseweight_from['g'], saver_g, sess)

    threads = tf.train.start_queue_runners(sess, coord)
    print("all setting has been done,training start!")

    ## start training
    def auto_setting_train_steps(mode):
        if mode == 0:
            return 5, 1
        elif mode == 1:
            return 1, 5
        else:
            return 1, 1

    d_train_steps = 5
    g_train_steps = 1
    flags = [0 for i in range(3)]
    for step in range(args.num_steps):
        now_step = int(
            trained_step) + step if trained_step is not None else step
        feed_dict = {iterstep: now_step}

        for i in range(d_train_steps):
            _, _ = sess.run([train_d_op, metrics_op], feed_dict)

        for i in range(g_train_steps):
            g_loss_, mce_loss_, g_bce_loss_, d_loss_, _, _ = sess.run([
                g_loss_var, mce_loss_var, g_bce_loss_var, d_loss_var,
                train_g_op, metrics_op
            ], feed_dict)

        ########################
        fk_score_, gt_score_ = sess.run([fk_score_var, gt_score_var],
                                        feed_dict)
        if fk_score_ > 0.48 and fk_score_ < 0.52:
            flags[0] += 1
            flags[1] = flags[2] = 0
        elif gt_score_ - fk_score_ > 0.3:
            flags[1] += 1
            flags[0] = flags[2] = 0
        else:
            flags[2] += 1
            flags[0] = flags[1] = 0
        if max(flags) > 100:
            d_train_steps, g_train_steps = auto_setting_train_steps(
                flags.index(max(flags)))
        ########################

        if step > 0 and step % args.save_pred_every == 0:
            save_weight(args.restore_from, saver_all, sess, now_step)

        if step % 50 == 0 or step == args.num_steps - 1:
            print('step={} d_loss={} g_loss={} mce_loss={} g_bce_loss_={}'.
                  format(now_step, d_loss_, g_loss_, mce_loss_, g_bce_loss_))
            summary_str = sess.run(summary_op, feed_dict)
            summary_writer.add_summary(summary_str, now_step)
            sess.run(local_init)

    ## end training
    coord.request_stop()
    coord.join(threads)
    print('end....')
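
The interpolation block in Example #2 is the WGAN-GP gradient penalty. Below is a minimal standalone sketch of the same term, with a hypothetical `critic` standing in for `Discriminator_add_vgg`; note the canonical formulation scales the penalty by a coefficient (the example defines LAMBDA = 10 but adds the penalty unscaled):

import tensorflow as tf

def critic(x, reuse=False):
    # Hypothetical stand-in for the discriminator network.
    with tf.variable_scope('critic', reuse=reuse):
        h = tf.layers.dense(tf.layers.flatten(x), 64, tf.nn.relu)
        return tf.layers.dense(h, 1)

real = tf.placeholder(tf.float32, [None, 8, 8, 3])
fake = tf.placeholder(tf.float32, [None, 8, 8, 3])

d_loss = tf.reduce_mean(critic(fake)) - tf.reduce_mean(critic(real, reuse=True))

# Gradient penalty at random interpolates between real and fake samples.
alpha = tf.random_uniform([tf.shape(real)[0], 1, 1, 1], 0., 1.)
interpolates = real + alpha * (fake - real)
grads = tf.gradients(critic(interpolates, reuse=True), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)

LAMBDA = 10.  # penalty coefficient from the WGAN-GP paper
d_loss += LAMBDA * gradient_penalty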
Example #3
    def train_setup(self):
        tf.set_random_seed(self.conf.random_seed)
        
        # Create queue coordinator.
        self.coord = tf.train.Coordinator()

        # Input size
        input_size = (self.conf.input_height, self.conf.input_width)
        
        # Load reader
        with tf.name_scope("create_inputs"):
            reader = ImageReader(
                self.conf.data_dir,
                self.conf.data_list,
                input_size,
                self.conf.random_scale,
                self.conf.random_mirror,
                self.conf.ignore_label,
                IMG_MEAN,
                self.coord)
            self.image_batch, self.label_batch = reader.dequeue(self.conf.batch_size)
        
        # Create network
        if self.conf.encoder_name not in ['res101', 'res50', 'deeplab']:
            print('encoder_name ERROR!')
            print("Please input: res101, res50, or deeplab")
            sys.exit(-1)
        elif self.conf.encoder_name == 'deeplab':
            net = Deeplab_v2(self.image_batch, self.conf.num_classes, True)
            # Variables that load from pre-trained model.
            restore_var = [v for v in tf.global_variables() if 'fc' not in v.name]
            # Trainable Variables
            all_trainable = tf.trainable_variables()
            # Fine-tune part
            encoder_trainable = [v for v in all_trainable if 'fc' not in v.name] # lr * 1.0
            # Decoder part
            decoder_trainable = [v for v in all_trainable if 'fc' in v.name]
        else:
            net = ResNet_segmentation(self.image_batch, self.conf.num_classes, True, self.conf.encoder_name)
            # Variables that load from pre-trained model.
            restore_var = [v for v in tf.global_variables() if 'resnet_v1' in v.name]
            # Trainable Variables
            all_trainable = tf.trainable_variables()
            # Fine-tune part
            encoder_trainable = [v for v in all_trainable if 'resnet_v1' in v.name] # lr * 1.0
            # Decoder part
            decoder_trainable = [v for v in all_trainable if 'decoder' in v.name]
        
        decoder_w_trainable = [v for v in decoder_trainable if 'weights' in v.name or 'gamma' in v.name] # lr * 10.0
        decoder_b_trainable = [v for v in decoder_trainable if 'biases' in v.name or 'beta' in v.name] # lr * 20.0
        # Check
        assert(len(all_trainable) == len(decoder_trainable) + len(encoder_trainable))
        assert(len(decoder_trainable) == len(decoder_w_trainable) + len(decoder_b_trainable))

        # Network raw output
        raw_output = net.outputs # [batch_size, h, w, 21]

        # Output size
        output_shape = tf.shape(raw_output)
        output_size = (output_shape[1], output_shape[2])

        # Ground truth: ignore all labels greater than or equal to num_classes
        label_proc = prepare_label(self.label_batch, output_size, num_classes=self.conf.num_classes, one_hot=False)
        raw_gt = tf.reshape(label_proc, [-1])
        indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, self.conf.num_classes - 1)), 1)
        gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
        raw_prediction = tf.reshape(raw_output, [-1, self.conf.num_classes])
        prediction = tf.gather(raw_prediction, indices)

        # Pixel-wise softmax_cross_entropy loss
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
        # L2 regularization
        l2_losses = [self.conf.weight_decay * tf.nn.l2_loss(v) for v in all_trainable if 'weights' in v.name]
        # Loss function
        self.reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)

        # Define optimizers
        # 'poly' learning rate
        base_lr = tf.constant(self.conf.learning_rate)
        self.curr_step = tf.placeholder(dtype=tf.float32, shape=())
        learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - self.curr_step / self.conf.num_steps), self.conf.power))
        # We use several optimizers to emulate Caffe's per-layer lr_mult
        # parameter, which scales the effective learning rate of each layer.
        opt_encoder = tf.train.MomentumOptimizer(learning_rate, self.conf.momentum)
        opt_decoder_w = tf.train.MomentumOptimizer(learning_rate * 10.0, self.conf.momentum)
        opt_decoder_b = tf.train.MomentumOptimizer(learning_rate * 20.0, self.conf.momentum)
        # To make sure each layer gets updated by different lr's, we do not use 'minimize' here.
        # Instead, we separate the steps compute_grads+update_params.
        # Compute grads
        grads = tf.gradients(self.reduced_loss, encoder_trainable + decoder_w_trainable + decoder_b_trainable)
        grads_encoder = grads[:len(encoder_trainable)]
        grads_decoder_w = grads[len(encoder_trainable) : (len(encoder_trainable) + len(decoder_w_trainable))]
        grads_decoder_b = grads[(len(encoder_trainable) + len(decoder_w_trainable)):]
        # Update params
        train_op_conv = opt_encoder.apply_gradients(zip(grads_encoder, encoder_trainable))
        train_op_fc_w = opt_decoder_w.apply_gradients(zip(grads_decoder_w, decoder_w_trainable))
        train_op_fc_b = opt_decoder_b.apply_gradients(zip(grads_decoder_b, decoder_b_trainable))
        # Finally, get the train_op!
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # for collecting moving_mean and moving_variance
        with tf.control_dependencies(update_ops):
            self.train_op = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)

        # Saver for storing checkpoints of the model
        self.saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=0)

        # Loader for loading the pre-trained model
        self.loader = tf.train.Saver(var_list=restore_var)

        # Training summary
        # Processed predictions: for visualisation.
        raw_output_up = tf.image.resize_bilinear(raw_output, input_size)
        raw_output_up = tf.argmax(raw_output_up, axis=3)
        self.pred = tf.expand_dims(raw_output_up, axis=3)
        # Image summary.
        images_summary = tf.py_func(inv_preprocess, [self.image_batch, 2, IMG_MEAN], tf.uint8)
        labels_summary = tf.py_func(decode_labels, [self.label_batch, 2, self.conf.num_classes], tf.uint8)
        preds_summary = tf.py_func(decode_labels, [self.pred, 2, self.conf.num_classes], tf.uint8)
        self.total_summary = tf.summary.image('images',
            tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]),
            max_outputs=2) # Concatenate row-wise.
        if not os.path.exists(self.conf.logdir):
            os.makedirs(self.conf.logdir)
        self.summary_writer = tf.summary.FileWriter(self.conf.logdir, graph=tf.get_default_graph())
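
Example #3's optimizer section combines a 'poly' learning-rate schedule with Caffe-style lr_mult groups. A compact sketch under assumed hyperparameters (base_lr = 2.5e-4, num_steps = 20000, power = 0.9, momentum = 0.9):

import tensorflow as tf

base_lr, num_steps, power, momentum = 2.5e-4, 20000., 0.9, 0.9
curr_step = tf.placeholder(tf.float32, shape=())

# 'poly' decay: lr = base_lr * (1 - step / num_steps) ** power
learning_rate = tf.scalar_mul(tf.constant(base_lr),
                              tf.pow(1 - curr_step / num_steps, power))

# One optimizer per lr_mult group; each later receives apply_gradients()
# on its own (gradient, variable) pairs, and the resulting update ops are
# grouped into a single train_op.
opt_encoder = tf.train.MomentumOptimizer(learning_rate, momentum)            # lr * 1
opt_decoder_w = tf.train.MomentumOptimizer(learning_rate * 10.0, momentum)   # lr * 10
opt_decoder_b = tf.train.MomentumOptimizer(learning_rate * 20.0, momentum)   # lr * 20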
Example #4
    def train_setup(self):
        tf.set_random_seed(self.conf.random_seed)

        # Create queue coordinator.
        self.coord = tf.train.Coordinator()

        # Input size
        h, w = (self.conf.input_height, self.conf.input_width)
        input_size = (h, w)

        # Devices
        gpu_list = get_available_gpus()
        zip_encoder, zip_decoder_b, zip_decoder_w, zip_crf = [], [], [], []
        previous_crf_names = []
        restore_vars = []
        self.loaders = []

        self.im_list = []

        for i in range(len(gpu_list)):
            with tf.device(gpu_list[i]):
                # Load reader
                with tf.name_scope("create_inputs"):
                    reader = ImageReader(self.conf.data_dir,
                                         self.conf.data_list, input_size,
                                         self.conf.random_scale,
                                         self.conf.random_mirror,
                                         self.conf.ignore_label, IMG_MEAN,
                                         self.coord)
                    self.image_batch, self.label_batch, self.sp_batch = reader.dequeue(
                        self.conf.batch_size)
                    self.im_list.append(self.image_batch)
                    image_batch_075 = tf.image.resize_images(
                        self.image_batch,
                        [int(h * 0.75), int(w * 0.75)])
                    image_batch_05 = tf.image.resize_images(
                        self.image_batch,
                        [int(h * 0.5), int(w * 0.5)])
                    sp_batch_075 = tf.image.resize_images(
                        self.sp_batch,
                        [int(h * 0.75), int(w * 0.75)])
                    sp_batch_05 = tf.image.resize_images(
                        self.sp_batch,
                        [int(h * 0.5), int(w * 0.5)])

                #for i in range(1):
                #    self.image_batch = tf.Print(self.image_batch, [self.image_batch[i]], message = 'image batch ', summarize=5)

                #for i in range(1):
                #    self.label_batch = tf.Print(self.label_batch, [self.label_batch[i]], message = 'label batch ', summarize=5)

                #for i in range(1):
                #    self.sp_batch = tf.Print(self.sp_batch, [self.sp_batch[i]], message = 'sp batch ', summarize=5)

                # Create network
                with tf.variable_scope('', reuse=False):
                    if self.conf.crf_type == 'crf':
                        net = Deeplab_v2(self.image_batch,
                                         self.conf.num_classes,
                                         True,
                                         rescale075=False,
                                         rescale05=False,
                                         crf_type=self.conf.crf_type)
                    else:
                        net = Deeplab_v2(self.image_batch,
                                         self.conf.num_classes,
                                         True,
                                         rescale075=False,
                                         rescale05=False,
                                         crf_type=self.conf.crf_type,
                                         superpixels=self.sp_batch)
                '''
                with tf.variable_scope('', reuse=True):
                    if self.conf.crf_type == 'crfSP':
                        net075 = Deeplab_v2(image_batch_075, self.conf.num_classes, True, rescale075=True, rescale05=False, crf_type = self.conf.crf_type, superpixels=sp_batch_075)
                    else:
                        net075 = Deeplab_v2(image_batch_075, self.conf.num_classes, True, rescale075=True, rescale05=False, crf_type = self.conf.crf_type)

                with tf.variable_scope('', reuse=True):
                    if self.conf.crf_type == 'crfSP':
                        net05 = Deeplab_v2(image_batch_05, self.conf.num_classes, True, rescale075=False, rescale05=True, crf_type = self.conf.crf_type, superpixels=sp_batch_05)
                    else:
                        net05 = Deeplab_v2(image_batch_05, self.conf.num_classes, True, rescale075=False, rescale05=True, crf_type = self.conf.crf_type)
                '''
                # Variables that load from pre-trained model.
                restore_var = [
                    v for v in tf.global_variables()
                    if ('fc' not in v.name and 'crfrnn' not in v.name)
                ]  # used when we don't want to restore previously trained crf weights
                #restore_var = [v for v in tf.global_variables() if ('fc' not in v.name and 'superpixel' not in v.name)]
                restore_vars.append(restore_var)

                # Trainable Variables
                all_trainable = tf.trainable_variables()
                # Fine-tune part
                # Removing items from a list while iterating over it skips
                # elements; filter with a comprehension instead.
                all_trainable = [
                    v for v in all_trainable
                    if v.name not in previous_crf_names
                ]

                crf_trainable = [
                    v for v in all_trainable
                    if ('crfrnn' in v.name and v.name not in previous_crf_names
                        )
                ]
                previous_crf_names.extend(v.name for v in crf_trainable)

                encoder_trainable = [
                    v for v in all_trainable
                    if 'fc' not in v.name and 'crfrnn' not in v.name
                ]  # lr * 1.0

                # Remove encoder_trainable from all_trainable
                #all_trainable = [v for v in all_trainable if v not in encoder_trainable]

                # Decoder part
                decoder_trainable = [
                    v for v in all_trainable
                    if 'fc' in v.name and 'crfrnn' not in v.name
                ]

                decoder_w_trainable = [
                    v for v in decoder_trainable
                    if ('weights' in v.name or 'gamma' in v.name)
                    and 'crfrnn' not in v.name
                ]  # lr * 10.0
                decoder_b_trainable = [
                    v for v in decoder_trainable
                    if ('biases' in v.name or 'beta' in v.name)
                    and 'crfrnn' not in v.name
                ]  # lr * 20.0
                # Check
                assert (len(all_trainable) == len(encoder_trainable) +
                        len(decoder_trainable) + len(crf_trainable)
                        )  #+ len(encoder_trainable)
                assert (len(decoder_trainable) == len(decoder_w_trainable) +
                        len(decoder_b_trainable))

                # Network raw output
                raw_output100 = net.outputs
                raw_output = raw_output100
                '''
                raw_output075 = net075.outputs
                raw_output05 = net05.outputs
                raw_output = tf.reduce_max(tf.stack([raw_output100,
                                                     tf.image.resize_images(raw_output075, tf.shape(raw_output100)[1:3,]),
                                                     tf.image.resize_images(raw_output05, tf.shape(raw_output100)[1:3,])]), axis=0)
                '''
                # Ground truth: ignore all labels greater than or equal to num_classes
                label_proc = prepare_label(self.label_batch,
                                           tf.stack(
                                               raw_output.get_shape()[1:3]),
                                           num_classes=self.conf.num_classes,
                                           one_hot=True)  # [batch_size, h, w, num_classes]
                '''
                label_proc075 = prepare_label(self.label_batch, tf.stack(raw_output075.get_shape()[1:3]), num_classes=self.conf.num_classes, one_hot=True)
                label_proc05 = prepare_label(self.label_batch, tf.stack(raw_output05.get_shape()[1:3]), num_classes=self.conf.num_classes, one_hot=True)
                '''
                raw_gt = tf.reshape(label_proc, [-1])
                '''
                raw_gt075 = tf.reshape(label_proc075, [-1,])
                raw_gt05 = tf.reshape(label_proc05, [-1,])
                '''
                indices = tf.squeeze(
                    tf.where(tf.less_equal(raw_gt, self.conf.num_classes - 1)),
                    1)
                '''
                indices075 = tf.squeeze(tf.where(tf.less_equal(raw_gt075, self.conf.num_classes - 1)), 1)
                indices05 = tf.squeeze(tf.where(tf.less_equal(raw_gt05, self.conf.num_classes - 1)), 1)
                '''
                gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
                '''
                gt075 = tf.cast(tf.gather(raw_gt075, indices075), tf.int32)
                gt05 = tf.cast(tf.gather(raw_gt05, indices05), tf.int32)
                '''
                raw_prediction = tf.reshape(raw_output,
                                            [-1, self.conf.num_classes])
                raw_prediction100 = tf.reshape(raw_output100,
                                               [-1, self.conf.num_classes])
                '''
                raw_prediction075 = tf.reshape(raw_output075, [-1, self.conf.num_classes])
                raw_prediction05 = tf.reshape(raw_output05, [-1, self.conf.num_classes])
                '''
                prediction = tf.gather(raw_prediction, indices)
                prediction100 = tf.gather(raw_prediction100, indices)
                '''
                prediction075 = tf.gather(raw_prediction075, indices075)
                prediction05 = tf.gather(raw_prediction05, indices05)
                '''
                # Pixel-wise softmax_cross_entropy loss
                #loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
                loss = tf.nn.softmax_cross_entropy_with_logits(
                    logits=raw_prediction,
                    labels=tf.reshape(label_proc[0],
                                      (h * w, self.conf.num_classes)))
                # NOTE used to be loss=tf.nn.softmax_cross_entropy_with_logits_v2
                '''
                coefficients = [0.01460247, 1.25147725, 2.88479363, 1.20348121, 1.65261654, 1.67514772,
                                0.62338799, 0.7729363,  0.42038501, 0.98557268, 1.31867536, 0.85313332,
                                0.67227604, 1.21317965, 1.        , 0.24263748, 1.80877607, 1.3082213,
                                0.79664027, 0.72543945, 1.27823374]
                '''
                #loss = weighted_loss(self.conf.num_classes, coefficients, labels=tf.reshape(label_proc[0], (h*w, self.conf.num_classes)), logits=raw_prediction)
                #loss100 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction100, labels=gt)
                loss100 = tf.nn.softmax_cross_entropy_with_logits(
                    logits=raw_prediction100,
                    labels=tf.reshape(label_proc[0],
                                      (h * w, self.conf.num_classes)))
                # NOTE used to be loss=tf.nn.softmax_cross_entropy_with_logits_v2
                #loss100 = weighted_loss(self.conf.num_classes, coefficients, labels=tf.reshape(label_proc[0], (h*w, self.conf.num_classes)), logits=raw_prediction100)
                #loss075 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction075, labels=gt075)
                #loss075 = tf.nn.softmax_cross_entropy_with_logits_v2(logits=raw_prediction075, labels=tf.reshape(label_proc075[0], (int(h * 0.75) * int(w * 0.75), self.conf.num_classes)))
                #loss075 = weighted_loss(self.conf.num_classes, coefficients, labels=tf.reshape(label_proc075[0], (int(h * 0.75) * int(w * 0.75), self.conf.num_classes)), logits=raw_prediction075)
                #loss05 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction05, labels=gt05)
                #loss05 = tf.nn.softmax_cross_entropy_with_logits_v2(logits=raw_prediction05, labels=tf.reshape(label_proc05[0], (int(h * 0.5) * int(w * 0.5), self.conf.num_classes)))
                #loss05 = weighted_loss(self.conf.num_classes, coefficients, labels=tf.reshape(label_proc05[0], (int(h * 0.5) * int(w * 0.5), self.conf.num_classes)), logits=raw_prediction05)

                # L2 regularization
                l2_losses = [
                    self.conf.weight_decay * tf.nn.l2_loss(v)
                    for v in all_trainable if 'weights' in v.name
                ]

                # Loss function
                self.reduced_loss = tf.reduce_mean(loss) + tf.reduce_mean(
                    loss100
                )  #+ tf.reduce_mean(loss075) + tf.reduce_mean(loss05) + tf.add_n(l2_losses)

                # Define optimizers
                # 'poly' learning rate
                base_lr = tf.constant(self.conf.learning_rate)
                self.curr_step = tf.placeholder(dtype=tf.float32, shape=())
                learning_rate = tf.scalar_mul(
                    base_lr,
                    tf.pow((1 - self.curr_step / self.conf.num_steps),
                           self.conf.power))
                # We use several optimizers to emulate Caffe's per-layer lr_mult
                # parameter, which scales the effective learning rate of each
                # layer.
                opt_encoder = tf.train.MomentumOptimizer(
                    learning_rate, self.conf.momentum)
                opt_decoder_w = tf.train.MomentumOptimizer(
                    learning_rate * 10.0, self.conf.momentum)
                opt_decoder_b = tf.train.MomentumOptimizer(
                    learning_rate * 20.0, self.conf.momentum)
                opt_crf = tf.train.MomentumOptimizer(learning_rate,
                                                     self.conf.momentum)

                # Gradient accumulation
                # Define a variable to accumulate gradients.
                accum_grads = [
                    tf.Variable(tf.zeros_like(v.initialized_value()),
                                trainable=False) for v in encoder_trainable +
                    decoder_w_trainable + decoder_b_trainable + crf_trainable
                ]  #encoder_trainable +

                # Define an operation to clear the accumulated gradients for next batch.
                self.zero_op = [
                    v.assign(tf.zeros_like(v)) for v in accum_grads
                ]
                # To make sure each layer gets updated by different lr's, we do not use 'minimize' here.
                # Instead, we separate the steps compute_grads+update_params.
                # Compute grads
                grads = tf.gradients(self.reduced_loss,
                                     encoder_trainable + decoder_w_trainable +
                                     decoder_b_trainable +
                                     crf_trainable)  #encoder_trainable +
                # Accumulate and normalise the gradients.
                self.accum_grads_op = [
                    accum_grads[i].assign_add(grad /
                                              self.conf.grad_update_every)
                    for i, grad in enumerate(grads)
                ]

                #'''
                grads_encoder = accum_grads[:len(encoder_trainable)]
                grads_decoder_w = accum_grads[len(encoder_trainable
                                                  ):len(encoder_trainable) +
                                              len(decoder_w_trainable)]
                grads_decoder_b = accum_grads[(
                    len(encoder_trainable) +
                    len(decoder_w_trainable)):(len(encoder_trainable) +
                                               len(decoder_w_trainable) +
                                               len(decoder_b_trainable))]
                grads_crf = accum_grads[
                    len(encoder_trainable) + len(decoder_w_trainable) +
                    len(decoder_b_trainable
                        ):]  # assuming crf gradients are appended to the end
                #'''
                '''
                grads_decoder_w = accum_grads[: len(decoder_w_trainable)]
                grads_decoder_b = accum_grads[(len(decoder_w_trainable)):(len(decoder_w_trainable)+len(decoder_b_trainable))]
                grads_crf = accum_grads[len(decoder_w_trainable)+len(decoder_b_trainable):] # assuming crf gradients are appended to the end
                '''

                zip_encoder.append(list(zip(grads_encoder, encoder_trainable)))
                zip_decoder_b.append(
                    list(zip(grads_decoder_b, decoder_b_trainable)))
                zip_decoder_w.append(
                    list(zip(grads_decoder_w, decoder_w_trainable)))
                zip_crf.append(list(zip(grads_crf, crf_trainable)))

        avg_grads_encoder = average_gradients(zip_encoder)
        avg_grads_decoder_w = average_gradients(zip_decoder_w)
        avg_grads_decoder_b = average_gradients(zip_decoder_b)
        avg_grads_crf = average_gradients(zip_crf)

        for i in range(len(gpu_list)):
            with tf.device(gpu_list[i]):
                # Update params
                train_op_conv = opt_encoder.apply_gradients(avg_grads_encoder)
                train_op_fc_w = opt_decoder_w.apply_gradients(
                    avg_grads_decoder_w)
                train_op_fc_b = opt_decoder_b.apply_gradients(
                    avg_grads_decoder_b)
                train_op_crf = opt_crf.apply_gradients(avg_grads_crf)

        # Finally, get the train_op!
        update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS
        )  # for collecting moving_mean and moving_variance
        with tf.control_dependencies(update_ops):
            self.train_op = tf.group(train_op_conv, train_op_fc_w,
                                     train_op_fc_b,
                                     train_op_crf)  # train_op_conv

        # Saver for storing checkpoints of the model
        self.saver = tf.train.Saver(var_list=tf.global_variables(),
                                    max_to_keep=0)

        # Loader for loading the pre-trained model
        for i in range(len(gpu_list)):
            with tf.device(gpu_list[i]):
                self.loaders.append(tf.train.Saver(var_list=restore_vars[i]))
                #self.loaders.append(tf.train.Saver(var_list=tf.global_variables()))

        # Training summary
        # Processed predictions: for visualisation.
        raw_output_up = tf.image.resize_bilinear(raw_output, input_size)
        raw_output_up = tf.argmax(raw_output_up, axis=3)
        self.pred = tf.expand_dims(raw_output_up, axis=3)
        # Image summary.
        images_summary = tf.py_func(inv_preprocess,
                                    [self.image_batch, 1, IMG_MEAN], tf.uint8)
        labels_summary = tf.py_func(
            decode_labels, [self.label_batch, 1, self.conf.num_classes],
            tf.uint8)
        preds_summary = tf.py_func(decode_labels,
                                   [self.pred, 1, self.conf.num_classes],
                                   tf.uint8)
        self.total_summary = tf.summary.image(
            'images',
            tf.concat(axis=2,
                      values=[images_summary, labels_summary, preds_summary]),
            max_outputs=1)  # Concatenate row-wise.
        if not os.path.exists(self.conf.logdir):
            os.makedirs(self.conf.logdir)
        self.summary_writer = tf.summary.FileWriter(
            self.conf.logdir, graph=tf.get_default_graph())
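
A recurring building block in Examples #1, #3, and #4 is masking out 'ignore' pixels before the loss: flatten the labels, keep only indices whose label is a valid class id, and gather the matching logits. A self-contained sketch, assuming num_classes = 21 and 255 as the ignore label:

import tensorflow as tf

num_classes = 21
logits = tf.placeholder(tf.float32, [None, 64, 64, num_classes])
labels = tf.placeholder(tf.int32, [None, 64, 64])  # 255 marks ignored pixels

raw_gt = tf.reshape(labels, [-1])
# Indices of pixels whose label is in [0, num_classes - 1]; 255 is dropped.
indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, num_classes - 1)), 1)
gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)

raw_prediction = tf.reshape(logits, [-1, num_classes])
prediction = tf.gather(raw_prediction, indices)

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction,
                                                   labels=gt))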
Example #5
random_scale = False
random_mirror = True
random_crop = True
batch_size = 8
learning_rate = 0.00001
power = 0.9
num_steps = 300000
restore_from = './weights/dvn/20171119/'
g_weight_from = ''
d_weight_from = ''
data_dir = '/data/rui.wu/irfan/gan_seg/dvn/data/'
is_train = True
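# NOTE: img_size, crop_size, coord, and ImageReader are defined outside
# this excerpt.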
with tf.name_scope("create_inputs"):
    reader = ImageReader(data_dir, img_size, crop_size, random_scale,
                         random_mirror, random_crop, is_train, coord)
    image_batch, label_batch = reader.dequeue(batch_size)
    print("Data is ready!")

## load model
label_batch = tf.cast(label_batch, tf.uint8)
image_batch = tf.cast(image_batch, tf.float32)
# b = tf.zeros(label_batch.get_shape())
# a = tf.ones(label_batch.get_shape())
# label_batch_b = tf.where(tf.greater(label_batch, 0.5), a, b)

real_iou = tf.placeholder(tf.float32, [batch_size, 1])
train_seg = tf.placeholder(tf.float32, [batch_size, 128, 128, 1])
train_image = tf.placeholder(tf.float32, [batch_size, 128, 128, 3])
train_seg_new = tf.cast(train_seg, tf.uint8)
train_seg_new = tf.squeeze(train_seg_new, axis=[3])  # squeeze_dims is a deprecated alias for axis
train_seg_new = tf.one_hot(train_seg_new, 2)
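
The last three lines of Example #5 turn a [batch, 128, 128, 1] binary mask into a two-channel one-hot tensor. The same transform in isolation, with assumed shapes and a cast to int32 for the indices:

import numpy as np
import tensorflow as tf

seg = tf.placeholder(tf.float32, [8, 128, 128, 1])       # 0/1 mask
seg_idx = tf.squeeze(tf.cast(seg, tf.int32), axis=[3])   # -> [8, 128, 128]
seg_onehot = tf.one_hot(seg_idx, 2)                      # -> [8, 128, 128, 2]

with tf.Session() as sess:
    out = sess.run(seg_onehot, {seg: np.zeros([8, 128, 128, 1], np.float32)})
    print(out.shape)  # (8, 128, 128, 2)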
Example #6
    def build(self):
        config = self.__dict__.copy()
        num_labels      = self.num_labels    #for segmentation (pixel labels)
        ignore_label    = 255   #for segmentation (pixel labels)
        random_seed     = 1234
        generator       = self.resnetG
        discriminator   = self.resnetD
        GEN_A2B_NAME = 'GEN_A2B'
        GEN_B2A_NAME = 'GEN_B2A'
        DIS_A_NAME   = 'DIS_A'
        DIS_B_NAME   = 'DIS_B'

        global_step = tf.train.get_or_create_global_step()
        slim.add_model_variable(global_step)
        global_step_update = tf.assign_add(global_step, 1, name='global_step_update')

        def resize_and_onehot(tensor, shape, depth):
            with tf.device('/device:CPU:0'):
                onehot_tensor = tf.one_hot(tf.squeeze( 
                                        tf.image.resize_nearest_neighbor(
                                            tf.cast(tensor, tf.int32), shape), -1), depth=depth)
                return onehot_tensor
        def convert_to_labels(onehot_seg, crop_size=None):
            fake_segments_output = onehot_seg
            print ('%s | ' % fake_segments_output.device, fake_segments_output)
            if crop_size:
                fake_segments_output = tf.image.resize_bilinear(fake_segments_output, crop_size) #tf.shape(source_segments_batch)[1:3])
            fake_segments_output = tf.argmax(fake_segments_output, axis=-1) # generate segment indices matrix
            fake_segments_output = tf.expand_dims(fake_segments_output, axis=-1) # Create 4-d tensor.
            return fake_segments_output

        target_data_queue = []
        tf.set_random_seed(random_seed)
        coord = tf.train.Coordinator()
        with tf.name_scope("create_inputs"):
            for i, data in enumerate([config['source_data']] + config['target_data']):
                reader = ImageReader(
                    data['data_dir'],
                    data['data_list'],
                    config['crop_size'],                    # Original size: [1024, 2048]
                    random_scale=config['random_scale'],
                    random_mirror=True,
                    ignore_label=ignore_label,
                    img_mean=0,                             # set IMG_MEAN to centralize image pixels (set NONE for automatic choosing)
                    img_channel_format='RGB',               # Default: BGR in deeplab_v2. See here: https://github.com/zhengyang-wang/Deeplab-v2--ResNet-101--Tensorflow/issues/30
                    coord=coord,
                    rgb_label=False)
                data_queue = reader.dequeue(config['batch_size'])

                if i == 0:
                    # ---[ source: training data
                    source_images_batch    = data_queue[0]  #A: 3 channels
                    source_segments_batch  = data_queue[1]  #B: 1-label channels

                    source_images_batch    = tf.cast(source_images_batch, tf.float32) / 127.5 - 1.

                    source_images_batch    = tf.image.resize_bilinear(source_images_batch, config['resize'])  #A: 3 channels
                    source_segments_batch  = tf.image.resize_nearest_neighbor(source_segments_batch, config['resize'])  #B: 1-label channels

                    source_segments_batch  = tf.cast(tf.one_hot(tf.squeeze(source_segments_batch, -1), depth=num_labels), tf.float32) - 0.5 #B: 19 channels

                else:
                    # ---[ target: validation data / testing data
                    target_images_batch    = data_queue[0]  #A: 3 channels
                    target_segments_batch  = data_queue[1]  #B: 1-label channels

                    target_images_batch    = tf.cast(target_images_batch, tf.float32) / 127.5 - 1.

                    target_images_batch    = tf.image.resize_bilinear(target_images_batch, config['resize'])  #A: 3 channels
                    target_segments_batch  = tf.image.resize_nearest_neighbor(target_segments_batch, config['resize'])  #B: 1-label channels

                    target_segments_batch  = tf.cast(tf.one_hot(tf.squeeze(target_segments_batch, -1), depth=num_labels), tf.float32) - 0.5 #B: 19 channels
                    target_data_queue.append([target_images_batch, target_segments_batch])


        size_list = cuttool(config['batch_size'], config['gpus'])
        source_images_batches    = tf.split(source_images_batch,   size_list)
        source_segments_batches  = tf.split(source_segments_batch, size_list)
        fake_1_segments_output   = [None] * len(size_list)
        fake_2_segments_output   = [None] * len(size_list)
        fake_1_images_output     = [None] * len(size_list)
        fake_2_images_output     = [None] * len(size_list)
        d_real_img_output        = [None] * len(size_list)
        d_fake_img_output        = [None] * len(size_list)
        d_real_seg_output        = [None] * len(size_list)
        d_fake_seg_output        = [None] * len(size_list)

        for gid, (source_images_batch, source_segments_batch) in \
                enumerate(zip(source_images_batches, source_segments_batches)):
            # ---[ Generator A2B & B2A
            with tf.device('/device:GPU:{}'.format((gid-1) % config['gpus'])):
                fake_seg  = generator(source_images_batch, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_A2B_NAME)
                fake_seg  = tf.nn.softmax(fake_seg) - 0.5
                fake_img_ = generator(fake_seg, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_B2A_NAME)
                fake_img_ = tf.nn.tanh(fake_img_)
                fake_img  = generator(source_segments_batch, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_B2A_NAME)
                fake_img  = tf.nn.tanh(fake_img)
                fake_seg_ = generator(fake_img, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_A2B_NAME)
                fake_seg_ = tf.nn.softmax(fake_seg_) - 0.5

            # ---[ Discriminator A & B
            with tf.device('/device:GPU:{}'.format((gid-1) % config['gpus'])):
                d_real_img = discriminator(source_images_batch,   reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_A_NAME)
                d_fake_img = discriminator(fake_img, reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_A_NAME)
                d_real_seg = discriminator(source_segments_batch, reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_B_NAME)
                d_fake_seg = discriminator(fake_seg, reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_B_NAME)
                #d_fake_img_val = discriminator(fake_img_val, reuse=tf.AUTO_REUSE, phase_train=False, scope=DIS_A_NAME)
                #d_fake_seg_val = discriminator(fake_seg_val, reuse=tf.AUTO_REUSE, phase_train=False, scope=DIS_B_NAME)


                fake_1_segments_output [gid]  = fake_seg
                fake_2_segments_output [gid]  = fake_seg_
                fake_1_images_output [gid]    = fake_img
                fake_2_images_output [gid]    = fake_img_

                d_real_img_output [gid]       = d_real_img
                d_fake_img_output [gid]       = d_fake_img
                d_real_seg_output [gid]       = d_real_seg
                d_fake_seg_output [gid]       = d_fake_seg

        source_images_batch    = tf.concat(source_images_batches, axis=0)   # range [-1, 1]
        source_segments_batch  = tf.concat(source_segments_batches, axis=0) # one-hot, range [-0.5, +0.5]
        fake_1_segments_output = tf.concat(fake_1_segments_output, axis=0)  ;   print('fake_1_segments_output', fake_1_segments_output)
        fake_2_segments_output = tf.concat(fake_2_segments_output, axis=0)  ;   print('fake_2_segments_output', fake_2_segments_output)
        fake_1_images_output   = tf.concat(fake_1_images_output  , axis=0)  ;   print('fake_1_images_output  ', fake_1_images_output  )
        fake_2_images_output   = tf.concat(fake_2_images_output  , axis=0)  ;   print('fake_2_images_output  ', fake_2_images_output  )
        d_real_img_output      = tf.concat(d_real_img_output , axis=0)
        d_fake_img_output      = tf.concat(d_fake_img_output , axis=0)
        d_real_seg_output      = tf.concat(d_real_seg_output , axis=0)
        d_fake_seg_output      = tf.concat(d_fake_seg_output , axis=0)

        source_data_color = [
            (1.+source_images_batch   ) / 2.                                                                ,         # source_images_batch_color
            sgtools.decode_labels(tf.cast(convert_to_labels(source_segments_batch + 0.5), tf.int32),  num_labels),    # source_segments_batch_color
            sgtools.decode_labels(tf.cast(convert_to_labels(fake_1_segments_output + 0.5), tf.int32),  num_labels),   # fake_1_segments_output_color
            sgtools.decode_labels(tf.cast(convert_to_labels(fake_2_segments_output + 0.5), tf.int32),  num_labels),   # fake_2_segments_output_color
            (1.+fake_1_images_output  ) / 2.                                                                ,         # fake_1_images_output_color
            (1.+fake_2_images_output  ) / 2.                                                                ,         # fake_2_images_output_color
            ]

        # ---[ Validation Model
        target_data_color_queue = []
        for target_data in target_data_queue:
            target_images_batch, target_segments_batch = target_data
            # run the validation graph on GPU 2 (modulo the number of GPUs)
            with tf.device('/device:GPU:{}'.format(2 % config['gpus'])):
                fake_seg  = generator(target_images_batch, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_A2B_NAME)
                fake_seg  = tf.nn.softmax(fake_seg) - 0.5
                fake_img_ = generator(fake_seg, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_B2A_NAME)
                fake_img_ = tf.nn.tanh(fake_img_)
                fake_img  = generator(target_segments_batch, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_B2A_NAME)
                fake_img  = tf.nn.tanh(fake_img)
                fake_seg_ = generator(fake_img, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_A2B_NAME)
                fake_seg_ = tf.nn.softmax(fake_seg_) - 0.5

            target_data_color_queue.append([
                    (1.+target_images_batch   ) / 2.                                                          , # target_images_batch_color
                    sgtools.decode_labels(tf.cast(convert_to_labels(target_segments_batch + 0.5), tf.int32),  num_labels)    , # target_segments_batch_color
                    sgtools.decode_labels(tf.cast(convert_to_labels(fake_seg  + 0.5), tf.int32),  num_labels) , # val_fake_1_segments_output_color
                    sgtools.decode_labels(tf.cast(convert_to_labels(fake_seg_ + 0.5), tf.int32),  num_labels) , # val_fake_2_segments_output_color
                    (1.+fake_img  ) / 2.                                                                      , # val_fake_1_images_output_color
                    (1.+fake_img_ ) / 2.                                                                      , # val_fake_2_images_output_color
                    ])

        # ---[ Segment-level loss: pixelwise loss
        # d_seg_batch = tf.image.resize_nearest_neighbor(seg_gt, tf.shape(_d_real['segment'])[1:3])
        # d_seg_batch = tf.squeeze(d_seg_batch, -1)
        # d_seg_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=d_seg_batch, logits=_d_real['segment'], name='segment_pixelwise_loss')   # pixel-wise loss
        # d_seg_loss = tf.reduce_mean(d_seg_loss)
        # d_seg_loss = tf.identity(d_seg_loss, name='d_seg_loss')

        # ---[ GAN Loss: critic loss (WGAN-GP, commented out)
        #d_loss_old = - (tf.reduce_mean(d_source_output['critic']) - tf.reduce_mean(d_target_output['critic']))
        #g_loss = - (tf.reduce_mean(d_target_output['critic']))
        ## gradient penalty
        #LAMBDA = 10
        ##alpha = tf.placeholder(tf.float32, shape=[None], name='alpha')
        #alpha = tf.random_uniform([config['batch_size']], 0.0, 1.0, dtype=tf.float32)
        #for _ in source_segments_batch.shape[1:]:
            #alpha = tf.expand_dims(alpha, axis=1)   #shape=[None,1,1,1]
        #interpolates = alpha * source_segments_batch + (1.-alpha) * target_segments_output
        #print ('source_segments_batch:', source_segments_batch)
        #print ('target_segments_output:',target_segments_output)
        #print ('interpolates:', interpolates)
        #interpolates = resize_and_onehot(interpolates, target_raw_segments_output.shape.as_list()[1:3], num_labels)
        #print ('interpolates:', interpolates)
        #_d_intp = discriminator(interpolates, reuse=True, phase_train=True, scope=DIS_NAME)
        #intp_grads = tf.gradients(_d_intp['critic'], [interpolates])[0]
        #slopes = tf.sqrt(tf.reduce_sum(tf.square(intp_grads), reduction_indices=[1]))   #L2-distance
        #grads_penalty = tf.reduce_mean(tf.square(slopes-1), name='grads_penalty')
        #d_loss = d_loss_old + LAMBDA * grads_penalty


        def sigmoid_cross_entropy(labels, logits):
            return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits) )
        def least_square(labels, logits):
            return tf.reduce_mean( (labels - logits) ** 2 )

        if config['loss_mode'] == 'lsgan':
            # ---[ GAN loss: LSGAN loss (least-square, a.k.a. Pearson chi-square)
            loss_func = least_square
        else:
            # ---[ GAN loss: sigmoid BCE loss
            loss_func = sigmoid_cross_entropy
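        # least_square pushes D's raw outputs toward the target values 0/1 (LSGAN),
        # while sigmoid_cross_entropy treats the raw D output as a logit (vanilla GAN)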

        # ---[ LOSS
        _img_recovery = config['L1_lambda'] * tf.reduce_mean( tf.abs(source_images_batch - fake_2_images_output))
        #_seg_recovery = config['L1_lambda'] * tf.reduce_mean( tf.abs(source_segments_batch - fake_1_segments_output))   #r1.0: error
        _seg_recovery = config['L1_lambda'] * tf.reduce_mean( tf.abs(source_segments_batch - fake_2_segments_output))    #r2.0
        #r2.0.5 compared the color-decoded maps instead, but decode_labels() is not differentiable, so no gradient would flow


        g_loss_a2b = \
                loss_func( labels=tf.ones_like(d_fake_seg_output), logits=d_fake_seg_output ) + \
                _img_recovery + _seg_recovery
        g_loss_b2a = \
                loss_func( labels=tf.ones_like(d_fake_img_output), logits=d_fake_img_output ) + \
                _img_recovery + _seg_recovery
        g_loss = \
                loss_func( labels=tf.ones_like(d_fake_seg_output), logits=d_fake_seg_output ) + \
                loss_func( labels=tf.ones_like(d_fake_img_output), logits=d_fake_img_output ) + \
                _img_recovery + _seg_recovery

        da_loss = \
                loss_func( labels=tf.ones_like(d_real_img_output), logits=d_real_img_output ) + \
                loss_func( labels=tf.zeros_like(d_fake_img_output), logits=d_fake_img_output )
        db_loss = \
                loss_func( labels=tf.ones_like(d_real_seg_output), logits=d_real_seg_output ) + \
                loss_func( labels=tf.zeros_like(d_fake_seg_output), logits=d_fake_seg_output )
        d_loss = \
                (da_loss + db_loss) / 2.

        # D will output [BATCH_SIZE, 32, 32, 1]
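        # sigmoid + mean over each output map gives one score per sample;
        # count the samples D classifies correctly: real scores > 0.5, fake scores < 0.5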
        num_da_real_img_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_real_img_output), axis=[1,2,3]) > 0.5)[:,0], name='num_da_real_img_acc' )
        num_da_fake_img_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_fake_img_output), axis=[1,2,3]) < 0.5)[:,0], name='num_da_fake_img_acc' )
        num_db_real_seg_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_real_seg_output), axis=[1,2,3]) > 0.5)[:,0], name='num_db_real_seg_acc' )
        num_db_fake_seg_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_fake_seg_output), axis=[1,2,3]) < 0.5)[:,0], name='num_db_fake_seg_acc' )

        ## L2 weight regularization: pull weights toward 0 (disabled)
        #g_weight_regularizer = [0.0001 * tf.nn.l2_loss(v) for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GEN_NAME) if 'weight' in v.name]
        #g_weight_regularizer = tf.add_n(g_weight_regularizer, name='g_weight_regularizer_loss')
        #g_loss += g_weight_regularizer
        #d_weight_regularizer = [0.0001 * tf.nn.l2_loss(v) for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, DIS_NAME) if 'weight' in v.name]
        #d_weight_regularizer = tf.add_n(d_weight_regularizer, name='d_weight_regularizer_loss')
        #d_loss += d_weight_regularizer

        d_loss = tf.identity(d_loss, name='d_loss')
        g_loss = tf.identity(g_loss, name='g_loss')

        ## --- Training Set Validation ---
        # Predictions.
        #pred_gt = tf.reshape(target_segments_batch, [-1,])
        #pred    = tf.reshape(target_segments_output, [-1,])
        #indices = tf.squeeze(tf.where(tf.not_equal(pred_gt, ignore_label)), 1)
        #pred_gt = tf.cast(tf.gather(pred_gt, indices), tf.int32)
        #pred    = tf.cast(tf.gather(pred, indices), tf.int32)
        ## mIoU
        ### Allowing to use indices matrices in mean_iou() with `num_classes=indices.max()`
        #weights = tf.cast(tf.less_equal(pred_gt, num_labels), tf.int32) # Ignoring all labels greater than or equal to n_classes.
        #mIoU, mIoU_update_op = tf.metrics.mean_iou(pred, pred_gt, num_classes=num_labels, weights=weights)

        # ---[ Variables
        g_a2b_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GEN_A2B_NAME)
        g_b2a_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GEN_B2A_NAME)
        d_a_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, DIS_A_NAME)
        d_b_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, DIS_B_NAME)
        g_vars = g_a2b_vars + g_b2a_vars
        d_vars = d_a_vars + d_b_vars

        print_list(g_a2b_vars, GEN_A2B_NAME)
        print_list(g_b2a_vars, GEN_B2A_NAME)
        print_list(d_a_vars, DIS_A_NAME)
        print_list(d_b_vars, DIS_B_NAME)

        # ---[ Optimizer
        ## `colocate_gradients_with_ops=True` reduces GPU memory usage and speeds up training
        OPT_NAME = 'Optimizer'
        g_opts = []; d_opts = []
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
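        # run batch-norm moving-average updates before every optimizer step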
        with tf.control_dependencies(update_ops):
            with tf.variable_scope(OPT_NAME):
                #with tf.device('/device:GPU:{}'.format(config['gpus']-1)):
                if True:
                    if len(g_vars) > 0:
                        g_opt = tf.train.AdamOptimizer(learning_rate=config['g_lr'], beta1=0.5, beta2=0.9).minimize(g_loss,
                            var_list=g_vars, colocate_gradients_with_ops=True)
                        g_opts.append(g_opt)
                    if len(d_vars) > 0:
                        d_opt = tf.train.AdamOptimizer(learning_rate=config['d_lr'], beta1=0.5, beta2=0.9).minimize(d_loss,
                            var_list=d_vars, colocate_gradients_with_ops=True)
                        d_opts.append(d_opt)

        g_opt = tf.group(*g_opts)
        d_opt = tf.group(*d_opts)
        opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, OPT_NAME)
        print_list(opt_vars, OPT_NAME)

        # --- [ Summary
        scalars   = [d_loss, g_loss]
        #scalars  += [mIoU]
        scalars  += [num_da_real_img_acc, num_da_fake_img_acc, num_db_real_seg_acc, num_db_fake_seg_acc]
        scalars  += [g_loss_a2b, g_loss_b2a, da_loss, db_loss]
        writer, summarys = create_summary(summary_dir=config['summary_dir'], name=config['suffix'],
                scalar = scalars,
                )

        '''
        Training
        '''
        with tf.Session(config=GpuConfig) as sess:
            sess.run(tf.global_variables_initializer())  # do NOT run after ``saver.restore`` (it would overwrite restored weights)
            sess.run(tf.local_variables_initializer())   # do NOT run after ``saver.restore`` (it would overwrite restored weights)
            saver = tf.train.Saver(g_vars + d_vars, max_to_keep=1)
            #g_saver = tf.train.Saver(g_vars, max_to_keep=1)
            #d_saver = tf.train.Saver(d_vars, max_to_keep=1)
            #if self.ckpt:
                #saver.restore(sess, self.ckpt)
                #print ("Training starts at %d iteration..." % sess.run(global_step))

            feeds = {}

            # Start queue threads.
            threads = tf.train.start_queue_runners(coord=coord, sess=sess)

            inside_epoch  = int(config['print_epoch']) if config['print_epoch'] < config['max_epoch'] else int(config['max_epoch'])
            outside_epoch = int(config['max_epoch'] / inside_epoch)
            start = int(sess.run(global_step) / inside_epoch)
            if start >= outside_epoch:
                raise ValueError("initial iteration:%d >= max iteration:%d. please reset '--max_epoch' value." % (sess.run(global_step), config['max_epoch']))

            start_time = time.time()
            for epo in range(start, outside_epoch):
                bar = IncrementalBar('[epoch {:<4d}/{:<4d}]'.format(epo, outside_epoch), max=inside_epoch)
                for epi in range(inside_epoch):
                    iters = sess.run(global_step)
                    # save summary
                    if epo == 0:
                        save_summarys = sess.run(summarys, feed_dict=feeds)
                        writer.add_summary(save_summarys, iters)

                    for _ in range(config['d_epoch']):
                        sess.run(d_opt, feed_dict=feeds)

                    if iters > self.pretrain_D_epoch:
                        for _ in range(config['g_epoch']):
                            sess.run(g_opt, feed_dict=feeds)

                    sess.run(global_step_update)
                    bar.next()

                duration = time.time() - start_time
                disc_loss, gen_loss = \
                        sess.run([d_loss, g_loss], feed_dict=feeds)
                na_real, na_fake, nb_real, nb_fake = \
                        sess.run([num_da_real_img_acc, num_da_fake_img_acc, num_db_real_seg_acc, num_db_fake_seg_acc], feed_dict=feeds)

                #sess.run(mIoU_update_op, feed_dict=feeds)
                #miou = sess.run(mIoU, feed_dict=feeds)
                print (' -',
                        'DLoss: %-8.2e' % disc_loss,
                        #'(W: %-8.2e)' % disc_wloss,
                        'GLoss: %-8.2e' % gen_loss,
                        #'(W: %-8.2e)' % gen_wloss,
                        '|',
                        '[Da_img] #real: %d, #fake: %d' % (na_real, na_fake),
                        '[Db_seg] #real: %d, #fake: %d' % (nb_real, nb_fake),
                        '|',
                        #'[train_mIoU] %.2f' % miou,
                        '[Elapsed] %s' % format_time(duration)
                        )
                bar.finish()

                iters = sess.run(global_step)
                # save checkpoint
                if epo % 2 == 0:
                    saver_path = os.path.join(config['ckpt_dir'], '{}.ckpt'.format(config['name']))
                    saver.save(sess, save_path=saver_path, global_step=global_step)
                # save summary
                if epo % 1 == 0:
                    save_summarys = sess.run(summarys, feed_dict=feeds)
                    writer.add_summary(save_summarys, iters)
                # output samples
                if epo % 5 == 0:
                    img_gt, seg_gt, seg_1, seg_2, img_1, img_2 = sess.run(source_data_color)
                    print ("Range %10s:" % "seg_gt", seg_gt.min(), seg_gt.max())
                    print ("Range %10s:" % "seg_1", seg_1.min(), seg_1.max())
                    print ("Range %10s:" % "seg_2", seg_2.min(), seg_2.max())
                    print ("Range %10s:" % "img_gt", img_gt.min(), img_gt.max())
                    print ("Range %10s:" % "img_1", img_1.min(), img_1.max())
                    print ("Range %10s:" % "img_2", img_2.min(), img_2.max())
                    _output = np.concatenate([img_gt, seg_gt, seg_1, img_1, img_2, seg_2], axis=0)
                    save_visualization(_output, save_path=os.path.join(config['result_dir'], 'tr-{}.jpg'.format(iters)), size=[3, 2*config['batch_size']])
                    #seg_output = np.concatenate([seg_gt, seg_2, seg_1], axis=0)
                    #img_output = np.concatenate([img_gt, img_2, img_1], axis=0)
                    #save_visualization(seg_output, save_path=os.path.join(config['result_dir'], 'tr-seg-1gt_2mapback_3map-{}.jpg'.format(iters)), size=[3, config['batch_size']])
                    #save_visualization(img_output, save_path=os.path.join(config['result_dir'], 'tr-img-1gt_2mapback_3map-{}.jpg'.format(iters)), size=[3, config['batch_size']])
                    for i,target_data_color in enumerate(target_data_color_queue):
                        val_img_gt, val_seg_gt, val_seg_1, val_seg_2, val_img_1, val_img_2 = sess.run(target_data_color)
                        print ("Val Range %10s:" % "seg_gt", val_seg_gt.min(), val_seg_gt.max())
                        print ("Val Range %10s:" % "seg_1", val_seg_1.min(), val_seg_1.max())
                        print ("Val Range %10s:" % "seg_2", val_seg_2.min(), val_seg_2.max())
                        print ("Val Range %10s:" % "img_gt", val_img_gt.min(), val_img_gt.max())
                        print ("Val Range %10s:" % "img_1", val_img_1.min(), val_img_1.max())
                        print ("Val Range %10s:" % "img_2", val_img_2.min(), val_img_2.max())
                        _output = np.concatenate([val_img_gt, val_seg_gt, val_seg_1, val_img_1, val_img_2, val_seg_2], axis=0)
                        save_visualization(_output, save_path=os.path.join(config['result_dir'], 'val{}-{}.jpg'.format(i,iters)), size=[3, 2*config['batch_size']])
                        #val_seg_output = np.concatenate([val_seg_gt, val_seg_2, val_seg_1], axis=0)
                        #val_img_output = np.concatenate([val_img_gt, val_img_2, val_img_1], axis=0)
                        #save_visualization(seg_output, save_path=os.path.join(config['result_dir'], 'val{}-seg-1gt_2mapback_3map-{}.jpg'.format(i,iters)), size=[3, config['batch_size']])
                        #save_visualization(img_output, save_path=os.path.join(config['result_dir'], 'val{}-img-1gt_2mapback_3map-{}.jpg'.format(i,iters)), size=[3, config['batch_size']])

                writer.flush()
            writer.close()
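
The example above selects between two GAN objectives through config['loss_mode']. Below is a minimal, self-contained sketch of that choice (d_real/d_fake stand for raw discriminator outputs; the names are illustrative, not taken from the code above):

import tensorflow as tf

def gan_losses(d_real, d_fake, mode='lsgan'):
    # least-square (LSGAN) objective when mode == 'lsgan',
    # otherwise the sigmoid cross-entropy (vanilla GAN) objective
    if mode == 'lsgan':
        loss = lambda labels, logits: tf.reduce_mean((labels - logits) ** 2)
    else:
        loss = lambda labels, logits: tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))
    # D: push real outputs toward 1 and fake outputs toward 0
    d_loss = loss(tf.ones_like(d_real), d_real) + loss(tf.zeros_like(d_fake), d_fake)
    # G: make D score fakes as real
    g_loss = loss(tf.ones_like(d_fake), d_fake)
    return d_loss, g_loss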
Example #7
0
def train(args):
    ## set hyperparameters
    img_mean = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    tf.set_random_seed(args.random_seed)
    coord = tf.train.Coordinator()
    print("g_model_name:", args.g_name)
    print("lambda:", args.lambd)
    print("learning_rate:", args.learning_rate)
    print("is_val:", args.is_val)
    print("---------------------------------")

    ## load data
    with tf.name_scope("create_inputs"):
        reader = ImageReader(args.data_dir, args.img_size, args.random_scale,
                             args.random_mirror, args.random_crop,
                             args.ignore_label, args.is_val, img_mean, coord)
        image_batch, label_batch = reader.dequeue(args.batch_size)
        print("Data is ready!")

    ## load model
    g_net = choose_generator(args.g_name, image_batch)
    score_map = g_net.get_output()  # [batch_size, h, w, num_classes]

    label, logits = convert_to_calculateloss(score_map, args.num_classes,
                                             label_batch)
    predict_label = tf.argmax(logits, axis=1)
    predict_batch = g_net.topredict(score_map, tf.shape(image_batch)[1:3])
    print("The model has been created!")

    ## get all kinds of variables list
    if '50' not in args.g_name:  # vgg16 branch
        g_restore_var = [
            v for v in tf.global_variables()
            if 'generator' in v.name and 'image' in v.name
        ]
        g_trainable_var = [
            v for v in tf.trainable_variables()
            if 'generator' in v.name and 'upscore' not in v.name
        ]
    else:  # resnet50 branch
        g_restore_var = [
            v for v in tf.global_variables() if 'fc' not in v.name
        ]
        g_trainable_var = [
            v for v in tf.trainable_variables()
            if 'beta' not in v.name and 'gamma' not in v.name
        ]  # keep batch-norm beta/gamma frozen

    ## set loss
    mce_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,
                                                       logits=logits))
    # l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]
    # g_loss = tf.reduce_mean(mce_loss) + tf.add_n(l2_losses)
    g_loss = mce_loss  # skip the L2 weight penalty

    g_loss_var, g_loss_op = tf.metrics.mean(g_loss)
    iou_var, iou_op = tf.metrics.mean_iou(label, predict_label,
                                          args.num_classes)
    accuracy_var, acc_op = tf.metrics.accuracy(label, predict_label)
    metrics_op = tf.group(g_loss_op, iou_op, acc_op)

    ## set optimizer
    iterstep = tf.placeholder(dtype=tf.float32,
                              shape=[],
                              name='iteration_step')
    base_lr = tf.constant(args.learning_rate, dtype=tf.float32, shape=[])
    lr = tf.scalar_mul(base_lr,
                       tf.pow(
                           (1 - iterstep / args.num_steps),
                           args.power))  # polynomial learning-rate decay
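    # i.e. lr = base_lr * (1 - iterstep / num_steps) ** power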

    # g_gradients = tf.train.MomentumOptimizer(learning_rate=lr, momentum=args.momentum).compute_gradients(g_loss,
    #                                                                                                      g_trainable_var)
    train_g_op = tf.train.MomentumOptimizer(learning_rate=lr,
                                            momentum=args.momentum).minimize(
                                                g_loss,
                                                var_list=g_trainable_var)
    train_all_op = train_g_op

    ## set summary
    vs_image = tf.py_func(inv_preprocess,
                          [image_batch, args.save_num_images, img_mean],
                          tf.uint8)
    vs_label = tf.py_func(
        decode_labels, [label_batch, args.save_num_images, args.num_classes],
        tf.uint8)
    vs_predict = tf.py_func(
        decode_labels, [predict_batch, args.save_num_images, args.num_classes],
        tf.uint8)
    tf.summary.image(name='image_collection_train',
                     tensor=tf.concat(axis=2,
                                      values=[vs_image, vs_label, vs_predict]),
                     max_outputs=args.save_num_images)

    tf.summary.scalar('g_loss_train', g_loss_var)
    tf.summary.scalar('iou_train', iou_var)
    tf.summary.scalar('accuracy_train', accuracy_var)
    # for grad, var in g_gradients:
    #     tf.summary.histogram(var.op.name + "/gradients", grad)
    #
    # for var in tf.trainable_variables():
    #     tf.summary.histogram(var.op.name + "/values", var)

    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(args.log_dir,
                                           graph=tf.get_default_graph(),
                                           max_queue=10)

    ## set session
    print("GPU index:" + str(os.environ['CUDA_VISIBLE_DEVICES']))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    global_init = tf.global_variables_initializer()
    local_init = tf.local_variables_initializer()
    sess.run(global_init)
    sess.run(local_init)

    ## set saver
    saver_all = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=5)
    trained_step = 0
    if os.path.exists(args.restore_from + 'checkpoint'):
        trained_step = load_weight(args.restore_from, saver_all, sess)
    else:
        if '50' in args.g_name:
            saver_g = tf.train.Saver(var_list=g_restore_var)
            load_weight(args.baseweight_from['res50'], saver_g, sess)
        elif 'vgg' in args.g_name:
            load_weight(args.baseweight_from['vgg16'], g_restore_var, sess)

    threads = tf.train.start_queue_runners(sess, coord)
    print("all setting has been done,training start!")

    ## start training
    for step in range(args.num_steps):
        now_step = int(
            trained_step) + step if trained_step is not None else step
        feed_dict = {iterstep: now_step}
        _, _, g_loss_ = sess.run([train_all_op, metrics_op, g_loss], feed_dict)

        if step > 0 and step % args.save_pred_every == 0:
            save_weight(args.restore_from, saver_all, sess, now_step)

        if step % 50 == 0 or step == args.num_steps - 1:
            print('step={} g_loss={}'.format(now_step, g_loss_))
            summary_str = sess.run(summary_op, feed_dict)
            summary_writer.add_summary(summary_str, now_step)
            sess.run(local_init)

    ## end training
    coord.request_stop()
    coord.join(threads)
    print('end....')
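
Example #7 leans on tf.metrics.* accumulators: each call returns a value tensor plus an update op, the value only moves when the update op runs, and re-running the local-variable initializer resets the running average (which is why the loop above re-runs local_init after each summary write). A minimal sketch of that pattern, with made-up data:

import tensorflow as tf

labels = tf.constant([0, 1, 1, 0])
preds  = tf.constant([0, 1, 0, 0])
# the value tensor reads the accumulator; the update op folds in a new batch
acc_value, acc_update = tf.metrics.accuracy(labels, preds)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric state lives in local vars
    sess.run(acc_update)                        # accumulate one batch
    print(sess.run(acc_value))                  # 0.75
    sess.run(tf.local_variables_initializer())  # reset the running metric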
Example #8
0
def train(args):
    ## set hyperparameters
    img_mean = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    tf.set_random_seed(args.random_seed)
    coord = tf.train.Coordinator()
    print("g_name:", args.g_name)
    print("d_name:", args.d_name)
    print("lambda:", args.lambd)
    print("learning_rate:", args.learning_rate)
    print("is_val:", args.is_val)
    print("---------------------------------")

    ## load data
    with tf.name_scope("create_inputs"):
        reader = ImageReader(args.data_dir, args.img_size, args.random_scale,
                             args.random_mirror, args.random_crop,
                             args.ignore_label, args.is_val, img_mean, coord)
        image_batch, label_batch = reader.dequeue(args.batch_size)
        print("Data is ready!")

    ## load model
    image_normal_batch = tf.train.batch([
        (reader.image + img_mean) / 255.,
    ],
                                        args.batch_size,
                                        dynamic_pad=True)
    g_net, g_net_x = choose_generator(args.g_name, image_batch,
                                      image_normal_batch)
    score_map = g_net.get_output()
    fk_batch = tf.nn.softmax(score_map, axis=-1)
    pre_batch = tf.expand_dims(tf.cast(tf.argmax(fk_batch, axis=-1), tf.uint8),
                               axis=-1)
    gt_batch = tf.image.resize_nearest_neighbor(label_batch,
                                                tf.shape(score_map)[1:3])
    gt_batch = tf.where(tf.equal(gt_batch, args.ignore_label), pre_batch,
                        gt_batch)
    gt_batch = convert_to_scaling(fk_batch, args.num_classes, gt_batch)
    x_batch = g_net_x.get_appointed_layer('generator/image_conv5_3')
    d_fk_net, d_gt_net = choose_discriminator(args.d_name, fk_batch, gt_batch,
                                              x_batch)
    d_fk_pred = d_fk_net.get_output()  # fake segmentation result in d
    d_gt_pred = d_gt_net.get_output()  # ground-truth result in d

    label, logits = convert_to_calculateloss(score_map, args.num_classes,
                                             label_batch)
    predict_label = tf.argmax(logits, axis=1)
    predict_batch = g_net.topredict(score_map, tf.shape(image_batch)[1:3])
    print("The model has been created!")

    ## get all kinds of variables list
    g_restore_var = [
        v for v in tf.global_variables() if 'discriminator' not in v.name
    ]
    g_var = [
        v for v in tf.trainable_variables()
        if 'generator' in v.name and 'deconv' not in v.name
    ]
    d_var = [v for v in tf.trainable_variables() if 'discriminator' in v.name]
    # g_trainable_var = [v for v in g_var if 'beta' not in v.name or 'gamma' not in v.name]  # batch_norm training open
    g_trainable_var = g_var
    d_trainable_var = d_var

    ## set loss
    mce_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,
                                                       logits=logits))
    # l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]
    # mce_loss = tf.reduce_mean(mce_loss) + tf.add_n(l2_losses)
    # g_bce_loss = tf.reduce_mean(tf.log(d_fk_pred + eps))
    g_bce_loss = args.lambd * tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_fk_pred),
                                                logits=d_fk_pred))
    g_loss = mce_loss + g_bce_loss
    # d_loss = tf.reduce_mean(tf.constant(-1.0) * [tf.log(d_gt_pred + eps) + tf.log(1. - d_fk_pred + eps)])
    d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_gt_pred), logits=d_gt_pred) \
                            + tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_fk_pred),
                                                                      logits=d_fk_pred))

    fk_score_var = tf.reduce_mean(tf.sigmoid(d_fk_pred))
    gt_score_var = tf.reduce_mean(tf.sigmoid(d_gt_pred))
    mce_loss_var, mce_loss_op = tf.metrics.mean(mce_loss)
    g_bce_loss_var, g_bce_loss_op = tf.metrics.mean(g_bce_loss)
    g_loss_var, g_loss_op = tf.metrics.mean(g_loss)
    d_loss_var, d_loss_op = tf.metrics.mean(d_loss)
    iou_var, iou_op = tf.metrics.mean_iou(label, predict_label,
                                          args.num_classes)
    accuracy_var, acc_op = tf.metrics.accuracy(label, predict_label)
    metrics_op = tf.group(mce_loss_op, g_bce_loss_op, g_loss_op, d_loss_op,
                          iou_op, acc_op)

    ## set optimizer
    iterstep = tf.placeholder(dtype=tf.float32,
                              shape=[],
                              name='iteration_step')

    base_lr = tf.constant(args.learning_rate, dtype=tf.float32, shape=[])
    lr = tf.scalar_mul(base_lr,
                       tf.pow(
                           (1 - iterstep / args.num_steps),
                           args.power))  # polynomial learning-rate decay

    # g_gradients = tf.train.MomentumOptimizer(learning_rate=lr,
    #                                          momentum=args.momentum).compute_gradients(g_loss,
    #                                                                                    var_list=g_trainable_var)
    g_gradients = tf.train.AdamOptimizer(learning_rate=lr).compute_gradients(
        g_loss, var_list=g_trainable_var)
    d_gradients = tf.train.MomentumOptimizer(
        learning_rate=lr * 10,
        momentum=args.momentum).compute_gradients(d_loss,
                                                  var_list=d_trainable_var)
    grad_fk_oi = tf.gradients(d_fk_pred, fk_batch, name='grad_fk_oi')[0]
    grad_gt_oi = tf.gradients(d_gt_pred, gt_batch, name='grad_gt_oi')[0]
    grad_fk_img_oi = tf.gradients(d_fk_pred,
                                  image_batch,
                                  name='grad_fk_img_oi')[0]
    grad_gt_img_oi = tf.gradients(d_gt_pred,
                                  image_batch,
                                  name='grad_gt_img_oi')[0]
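    # the four gradient tensors above feed the |grad| summaries below,
    # to monitor how strongly D reacts to segmentations vs. raw images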

    train_g_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(
        g_loss, var_list=g_trainable_var)
    train_d_op = tf.train.MomentumOptimizer(learning_rate=lr * 10,
                                            momentum=args.momentum).minimize(
                                                d_loss,
                                                var_list=d_trainable_var)

    ## set summary
    vs_image = tf.py_func(inv_preprocess,
                          [image_batch, args.save_num_images, img_mean],
                          tf.uint8)
    vs_label = tf.py_func(
        decode_labels, [label_batch, args.save_num_images, args.num_classes],
        tf.uint8)
    vs_predict = tf.py_func(
        decode_labels, [predict_batch, args.save_num_images, args.num_classes],
        tf.uint8)
    tf.summary.image(name='image_collection_train',
                     tensor=tf.concat(axis=2,
                                      values=[vs_image, vs_label, vs_predict]),
                     max_outputs=args.save_num_images)
    tf.summary.scalar('fk_score', fk_score_var)
    tf.summary.scalar('gt_score', gt_score_var)
    tf.summary.scalar('g_loss_train', g_loss_var)
    tf.summary.scalar('d_loss_train', d_loss_var)
    tf.summary.scalar('mce_loss_train', mce_loss_var)
    tf.summary.scalar('g_bce_loss_train', g_bce_loss_var)
    tf.summary.scalar('iou_train', iou_var)
    tf.summary.scalar('accuracy_train', accuracy_var)
    tf.summary.scalar('grad_fk_oi', tf.reduce_mean(tf.abs(grad_fk_oi)))
    tf.summary.scalar('grad_gt_oi', tf.reduce_mean(tf.abs(grad_gt_oi)))
    tf.summary.scalar('grad_fk_img_oi', tf.reduce_mean(tf.abs(grad_fk_img_oi)))
    tf.summary.scalar('grad_gt_img_oi', tf.reduce_mean(tf.abs(grad_gt_img_oi)))

    for grad, var in g_gradients + d_gradients:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(args.log_dir,
                                           graph=tf.get_default_graph(),
                                           max_queue=3)

    ## set session
    print("GPU index:" + str(os.environ['CUDA_VISIBLE_DEVICES']))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    global_init = tf.global_variables_initializer()
    local_init = tf.local_variables_initializer()
    sess.run(global_init)
    sess.run(local_init)

    ## set saver
    saver_all = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=2)
    trained_step = 0
    if os.path.exists(args.restore_from + 'checkpoint'):
        trained_step = load_weight(args.restore_from, saver_all, sess)
    else:
        saver_g = tf.train.Saver(var_list=g_restore_var, max_to_keep=2)
        load_weight(args.baseweight_from['g'], saver_g,
                    sess)  # the checkpoint holds the complete g model

    threads = tf.train.start_queue_runners(sess, coord)
    print("all setting has been done,training start!")

    ## start training
    # def auto_setting_train_steps(mode):
    #     if mode == 0:
    #         return 5, 1
    #     elif mode == 1:
    #         return 1, 5
    #     else:
    #         return 1, 1

    d_train_steps = 10
    g_train_steps = 1
    # flags = [0 for i in range(3)]

    for step in range(args.num_steps):
        now_step = int(
            trained_step) + step if trained_step is not None else step
        feed_dict = {iterstep: step}
        for i in range(d_train_steps):
            _, _ = sess.run([train_d_op, metrics_op], feed_dict)

        for i in range(g_train_steps):
            g_loss_, mce_loss_, g_bce_loss_, d_loss_, _, _ = sess.run([
                g_loss_var, mce_loss_var, g_bce_loss_var, d_loss_var,
                train_g_op, metrics_op
            ], feed_dict)

        ########################
        # fk_score_, gt_score_ = sess.run([fk_score_var, gt_score_var], feed_dict)
        # if fk_score_ > 0.48 and fk_score_ < 0.52:
        #     flags[0] += 1
        #     flags[1] = flags[2] = 0
        # elif gt_score_ - fk_score_ > 0.3:
        #     flags[1] += 1
        #     flags[0] = flags[2] = 0
        # else:
        #     flags[2] += 1
        #     flags[0] = flags[1] = 0
        # if max(flags) > 100:
        #     d_train_steps, g_train_steps = auto_setting_train_steps(flags.index(max(flags)))
        ########################

        if step > 0 and step % args.save_pred_every == 0:
            save_weight(args.restore_from, saver_all, sess, now_step)

        if step % 50 == 0 or step == args.num_steps - 1:
            print('step={} d_loss={} g_loss={} mce_loss={} g_bce_loss_={}'.
                  format(now_step, d_loss_, g_loss_, mce_loss_, g_bce_loss_))
            summary_str = sess.run(summary_op, feed_dict)
            summary_writer.add_summary(summary_str, now_step)
            sess.run(local_init)

    ## end training
    coord.request_stop()
    coord.join(threads)
    print('end....')
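
Both GAN examples interleave several discriminator updates with each generator update (d_train_steps=10 vs g_train_steps=1 in Example #8). Stripped of the graph-building details, the schedule reduces to the skeleton below (sess, iterstep, train_d_op, train_g_op, and num_steps stand for the objects built above):

d_train_steps, g_train_steps = 10, 1
for step in range(num_steps):
    feed_dict = {iterstep: step}
    for _ in range(d_train_steps):      # several critic updates ...
        sess.run(train_d_op, feed_dict)
    for _ in range(g_train_steps):      # ... per generator update
        sess.run(train_g_op, feed_dict)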