Code Example #1
    def train(self, assign_model_path=None):
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        # config.log_device_placement = False
        with tf.Session(config=config) as self.sess:
            self.train_writer = tf.summary.FileWriter(
                os.path.join(MODEL_DIR, 'train'), self.sess.graph)
            init = tf.global_variables_initializer()
            self.sess.run(init)

            # restore the model
            saver = tf.train.Saver(max_to_keep=6)
            restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
                MODEL_DIR)
            global LOG_FOUT
            if restore_epoch == 0:
                LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
                LOG_FOUT.write(str(socket.gethostname()) + '\n')
                LOG_FOUT.write(str(FLAGS) + '\n')
            else:
                LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')
                saver.restore(self.sess, checkpoint_path)

            # Assign the generator weights from another model file
            if assign_model_path is not None:
                print("Load pre-trained model from %s" % assign_model_path)
                assign_saver = tf.train.Saver(var_list=[
                    var for var in tf.trainable_variables()
                    if var.name.startswith("generator")
                ])
                assign_saver.restore(self.sess, assign_model_path)

            # Read the training data
            self.fetchworker = data_provider.Fetcher(BATCH_SIZE, NUM_POINT,
                                                     USE_DATA_NORM)
            self.fetchworker.start()
            for epoch in tqdm(range(restore_epoch, MAX_EPOCH + 1), ncols=55):
                log_string('**** EPOCH %03d ****\t' % (epoch))
                self.train_one_epoch()
                if epoch % 20 == 0:
                    saver.save(self.sess,
                               os.path.join(MODEL_DIR, "model"),
                               global_step=epoch)
            self.fetchworker.shutdown()
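
All three examples call a log_string helper that is defined elsewhere in main.py. A minimal sketch of what it presumably does, assuming it appends to the global LOG_FOUT handle opened above and echoes to stdout (note that Code Example #3 instead passes LOG_FOUT explicitly as the first argument, suggesting a two-argument variant there):

def log_string(out_str):
    # Hypothetical sketch; the actual helper in the PU-Net project may differ.
    global LOG_FOUT
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)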
Code Example #2
File: main.py Project: whuchenlin/PU-Net
def train(assign_model_path=None):
    is_training = True
    bn_decay = 0.95
    step = tf.Variable(0, trainable=False)
    learning_rate = BASE_LEARNING_RATE
    tf.summary.scalar('bn_decay', bn_decay)
    tf.summary.scalar('learning_rate', learning_rate)

    # get placeholder
    pointclouds_pl, pointclouds_gt, pointclouds_gt_normal, pointclouds_radius = MODEL_GEN.placeholder_inputs(
        BATCH_SIZE, NUM_POINT, UP_RATIO)

    # create the generator model
    pred, _ = MODEL_GEN.get_gen_model(pointclouds_pl,
                                      is_training,
                                      scope='generator',
                                      bradius=pointclouds_radius,
                                      reuse=None,
                                      use_normal=False,
                                      use_bn=False,
                                      use_ibn=False,
                                      bn_decay=bn_decay,
                                      up_ratio=UP_RATIO)

    # get the Earth Mover's Distance (EMD) loss
    gen_loss_emd, matchl_out = model_utils.get_emd_loss(
        pred, pointclouds_gt, pointclouds_radius)

    # get the repulsion loss (pushes generated points apart for a more uniform distribution)
    if USE_REPULSION_LOSS:
        gen_repulsion_loss = model_utils.get_repulsion_loss4(pred)
        tf.summary.scalar('loss/gen_repulsion_loss', gen_repulsion_loss)
    else:
        gen_repulsion_loss = 0.0

    # total loss: EMD term (weighted by 100) + repulsion + weight regularization
    pre_gen_loss = (100 * gen_loss_emd + gen_repulsion_loss +
                    tf.losses.get_regularization_loss())

    # collect the generator's update ops (e.g. batch-norm moving averages) and trainable variables
    gen_update_ops = [
        op for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if op.name.startswith("generator")
    ]
    gen_tvars = [
        var for var in tf.trainable_variables()
        if var.name.startswith("generator")
    ]

    with tf.control_dependencies(gen_update_ops):
        pre_gen_train = tf.train.AdamOptimizer(
            learning_rate,
            beta1=0.9).minimize(pre_gen_loss,
                                var_list=gen_tvars,
                                colocate_gradients_with_ops=True,
                                global_step=step)
    # merge scalar summaries and add point-cloud image summaries
    tf.summary.scalar('loss/gen_emd', gen_loss_emd)
    tf.summary.scalar('loss/regularization', tf.losses.get_regularization_loss())
    tf.summary.scalar('loss/pre_gen_total', pre_gen_loss)
    pretrain_merged = tf.summary.merge_all()

    pointclouds_image_input = tf.placeholder(tf.float32,
                                             shape=[None, 500, 1500, 1])
    pointclouds_input_summary = tf.summary.image('pointcloud_input',
                                                 pointclouds_image_input,
                                                 max_outputs=1)
    pointclouds_image_pred = tf.placeholder(tf.float32,
                                            shape=[None, 500, 1500, 1])
    pointclouds_pred_summary = tf.summary.image('pointcloud_pred',
                                                pointclouds_image_pred,
                                                max_outputs=1)
    pointclouds_image_gt = tf.placeholder(tf.float32,
                                          shape=[None, 500, 1500, 1])
    pointclouds_gt_summary = tf.summary.image('pointcloud_gt',
                                              pointclouds_image_gt,
                                              max_outputs=1)
    image_merged = tf.summary.merge([
        pointclouds_input_summary, pointclouds_pred_summary,
        pointclouds_gt_summary
    ])
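
    # The three image placeholders above are presumably fed off-graph renderings
    # of the point clouds (e.g. three orthographic views concatenated into a
    # 500x1500 grayscale image) each time image_merged is evaluated.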

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    with tf.Session(config=config) as sess:
        train_writer = tf.summary.FileWriter(os.path.join(MODEL_DIR, 'train'),
                                             sess.graph)
        init = tf.global_variables_initializer()
        sess.run(init)
        ops = {
            'pointclouds_pl': pointclouds_pl,
            'pointclouds_gt': pointclouds_gt,
            'pointclouds_gt_normal': pointclouds_gt_normal,
            'pointclouds_radius': pointclouds_radius,
            'pointclouds_image_input': pointclouds_image_input,
            'pointclouds_image_pred': pointclouds_image_pred,
            'pointclouds_image_gt': pointclouds_image_gt,
            'pretrain_merged': pretrain_merged,
            'image_merged': image_merged,
            'gen_loss_emd': gen_loss_emd,
            'pre_gen_train': pre_gen_train,
            'pred': pred,
            'step': step,
        }
        # restore the model
        saver = tf.train.Saver(max_to_keep=6)
        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
            MODEL_DIR)
        global LOG_FOUT
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        # Assign the generator weights from another model file
        if assign_model_path is not None:
            print("Load pre-trained model from %s" % assign_model_path)
            assign_saver = tf.train.Saver(var_list=[
                var for var in tf.trainable_variables()
                if var.name.startswith("generator")
            ])
            assign_saver.restore(sess, assign_model_path)

        # Read the training data
        input_data, gt_data, data_radius, _ = data_provider.load_patch_data(
            skip_rate=1,
            num_point=NUM_POINT,
            norm=USE_DATA_NORM,
            use_randominput=USE_RANDOM_INPUT)

        fetchworker = data_provider.Fetcher(input_data, gt_data, data_radius,
                                            BATCH_SIZE, NUM_POINT,
                                            USE_RANDOM_INPUT, USE_DATA_NORM)
        fetchworker.start()
        for epoch in tqdm(range(restore_epoch, MAX_EPOCH + 1), ncols=55):
            log_string('**** EPOCH %03d ****\t' % (epoch))
            train_one_epoch(sess, ops, fetchworker, train_writer)
            if epoch % 20 == 0:
                saver.save(sess,
                           os.path.join(MODEL_DIR, "model"),
                           global_step=epoch)
        fetchworker.shutdown()
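
train_one_epoch is also defined elsewhere in main.py. Based on the ops dictionary built above, a minimal sketch of one epoch could look like the following; the fetch() signature, the num_batches attribute, and the slicing of the ground truth to its xyz channels are assumptions, not the project's actual code:

import numpy as np

def train_one_epoch(sess, ops, fetchworker, train_writer):
    # Hypothetical sketch of the training loop body.
    losses = []
    for _ in range(fetchworker.num_batches):          # assumed attribute
        batch_input, batch_gt, batch_radius = fetchworker.fetch()  # assumed signature
        feed_dict = {
            ops['pointclouds_pl']: batch_input,
            ops['pointclouds_gt']: batch_gt[:, :, 0:3],  # assumed: keep xyz only
            ops['pointclouds_radius']: batch_radius,
        }
        # one optimizer step; log the merged summaries at the current global step
        summary, step, _, loss = sess.run(
            [ops['pretrain_merged'], ops['step'],
             ops['pre_gen_train'], ops['gen_loss_emd']],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        losses.append(loss)
    log_string('mean gen_loss_emd: %f' % np.mean(losses))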
Code Example #3
def train(assign_model_path=None, bn_decay=0.95):
    step = tf.Variable(0, trainable=False)
    learning_rate = BASE_LEARNING_RATE
    # get placeholder
    pointclouds_pl, pointclouds_gt, pointclouds_gt_normal, pointclouds_radius = MODEL_GEN.placeholder_inputs(
        BATCH_SIZE, NUM_POINT, UP_RATIO)
    # create discriminator
    if FLAGS.gan:
        d_real = MODEL_GEN.get_discriminator(pointclouds_gt,
                                             True,
                                             'd_1',
                                             reuse=False,
                                             use_bn=False,
                                             use_ibn=False,
                                             use_normal=False,
                                             bn_decay=bn_decay)
    # create the generator model
    pred, _ = MODEL_GEN.get_gen_model(pointclouds_pl,
                                      True,
                                      scope='generator',
                                      reuse=False,
                                      use_normal=False,
                                      use_bn=False,
                                      use_ibn=False,
                                      bn_decay=bn_decay,
                                      up_ratio=UP_RATIO)
    if FLAGS.gan:
        d_fake = MODEL_GEN.get_discriminator(pred,
                                             True,
                                             'd_1',
                                             reuse=True,
                                             use_bn=False,
                                             use_ibn=False,
                                             use_normal=False,
                                             bn_decay=bn_decay)
    # get the Chamfer distance (CD) loss
    gen_loss_cd, _ = model_utils.get_cd_loss(pred, pointclouds_gt,
                                             pointclouds_radius, 1.0)
    # least-squares GAN (LSGAN) losses: the discriminator pushes real outputs
    # toward 1 and fake outputs toward 0; the generator pushes fake toward 1
    if FLAGS.gan:
        d_loss_real = tf.reduce_mean((d_real - 1)**2)
        d_loss_fake = tf.reduce_mean(d_fake**2)
        d_loss = 0.5 * (d_loss_real + d_loss_fake)
        g_loss = tf.reduce_mean((d_fake - 1)**2)
    # get total loss function
    pre_gen_loss = gen_loss_cd
    if FLAGS.gan:
        pre_gen_loss = g_loss + FLAGS.lambd * pre_gen_loss
    """ Training """
    # divide trainable variables into a group for D and a group for G
    t_vars = tf.trainable_variables()
    if FLAGS.gan:
        d_vars = [var for var in t_vars if 'd_' in var.name]
    g_vars = [var for var in t_vars if 'generator' in var.name]
    # optimizers
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        if FLAGS.gan:
            d_optim = tf.train.AdamOptimizer(
                learning_rate,
                beta1=0.5).minimize(d_loss,
                                    var_list=d_vars,
                                    colocate_gradients_with_ops=True)
        if assign_model_path:
            learning_rate = learning_rate / 10
        pre_gen_train = tf.train.AdamOptimizer(
            learning_rate,
            beta1=0.9).minimize(pre_gen_loss,
                                var_list=g_vars,
                                colocate_gradients_with_ops=True,
                                global_step=step)
    # clip discriminator weights to [-0.01, 0.01] (WGAN-style stabilization)
    if FLAGS.gan:
        clip_D = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in d_vars]

    # merge scalar summaries and add point-cloud image summaries
    tf.summary.scalar('bn_decay', bn_decay)
    tf.summary.scalar('learning_rate', learning_rate)
    tf.summary.scalar('loss/gen_cd', gen_loss_cd)
    tf.summary.scalar('loss/regularization', tf.losses.get_regularization_loss())
    tf.summary.scalar('loss/pre_gen_total', pre_gen_loss)
    if FLAGS.gan:
        tf.summary.scalar('loss/d_loss_real', d_loss_real)
        tf.summary.scalar('loss/d_loss_fake', d_loss_fake)
        tf.summary.scalar('loss/d_loss', d_loss)
        tf.summary.scalar('loss/g_loss', g_loss)
    pretrain_merged = tf.summary.merge_all()

    pointclouds_image_input = tf.placeholder(tf.float32,
                                             shape=[None, 500, 1500, 1])
    pointclouds_input_summary = tf.summary.image('pointcloud_input',
                                                 pointclouds_image_input,
                                                 max_outputs=1)
    pointclouds_image_pred = tf.placeholder(tf.float32,
                                            shape=[None, 500, 1500, 1])
    pointclouds_pred_summary = tf.summary.image('pointcloud_pred',
                                                pointclouds_image_pred,
                                                max_outputs=1)
    pointclouds_image_gt = tf.placeholder(tf.float32,
                                          shape=[None, 500, 1500, 1])
    pointclouds_gt_summary = tf.summary.image('pointcloud_gt',
                                              pointclouds_image_gt,
                                              max_outputs=1)
    image_merged = tf.summary.merge([
        pointclouds_input_summary, pointclouds_pred_summary,
        pointclouds_gt_summary
    ])

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    with tf.Session(config=config) as sess:
        train_writer = tf.summary.FileWriter(os.path.join(MODEL_DIR, 'train'),
                                             sess.graph)
        init = tf.global_variables_initializer()
        sess.run(init)
        ops = {
            'pointclouds_pl': pointclouds_pl,
            'pointclouds_gt': pointclouds_gt,
            'pointclouds_gt_normal': pointclouds_gt_normal,
            'pointclouds_radius': pointclouds_radius,
            'pointclouds_image_input': pointclouds_image_input,
            'pointclouds_image_pred': pointclouds_image_pred,
            'pointclouds_image_gt': pointclouds_image_gt,
            'pretrain_merged': pretrain_merged,
            'image_merged': image_merged,
            'gen_loss_cd': gen_loss_cd,
            'pre_gen_train': pre_gen_train,
            'd_optim': d_optim if FLAGS.gan else None,
            'pred': pred,
            'step': step,
            'clip': clip_D if FLAGS.gan else None,
        }
        # restore the model
        saver = tf.train.Saver(max_to_keep=6)
        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
            MODEL_DIR)
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        # Assign the generator weights from another model file
        if assign_model_path is not None:
            print("Load pre-trained model from %s" % assign_model_path)
            assign_saver = tf.train.Saver(var_list=[
                var for var in tf.trainable_variables()
                if var.name.startswith("generator")
            ])
            assign_saver.restore(sess, assign_model_path)

        # Read the training data
        input_data, gt_data, data_radius, _ = data_provider.load_patch_data(
            FLAGS.dataset,
            skip_rate=1,
            norm=USE_DATA_NORM,
            use_randominput=USE_RANDOM_INPUT)

        fetchworker = data_provider.Fetcher(input_data, gt_data, data_radius,
                                            BATCH_SIZE, NUM_POINT,
                                            USE_RANDOM_INPUT, USE_DATA_NORM)
        fetchworker.start()
        for epoch in tqdm(range(restore_epoch, MAX_EPOCH + 1), ncols=55):
            log_string(LOG_FOUT, '**** EPOCH %03d ****\t' % epoch)
            train_one_epoch(sess, ops, fetchworker, train_writer, LOG_FOUT,
                            FLAGS.gan)
            if epoch % 20 == 0:
                saver.save(sess,
                           os.path.join(MODEL_DIR, "model"),
                           global_step=epoch)
        fetchworker.shutdown()
        LOG_FOUT.close()
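
When FLAGS.gan is set, the ops dictionary additionally exposes d_optim and clip, which implies an alternating per-batch schedule inside train_one_epoch. A hedged sketch of that inner step, reusing the names from the ops dictionary above and a feed_dict built as in the non-GAN sketch (the one-D-step-then-one-G-step ordering is an assumption):

# Hypothetical inner step of train_one_epoch when FLAGS.gan is True.
if ops['d_optim'] is not None:
    # one discriminator update on real vs. generated patches,
    # followed by WGAN-style weight clipping
    sess.run(ops['d_optim'], feed_dict=feed_dict)
    sess.run(ops['clip'])
# one generator update (CD loss plus the LSGAN g_loss term)
summary, step, _, loss = sess.run(
    [ops['pretrain_merged'], ops['step'],
     ops['pre_gen_train'], ops['gen_loss_cd']],
    feed_dict=feed_dict)
train_writer.add_summary(summary, step)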