Example #1
def test(args):
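    """Complete a single partial point cloud with a trained completion model.

    Restores the checkpoint, normalizes the input using its bounding box
    (center, yaw, scale), runs the network, de-normalizes the completion
    back to world coordinates, and saves the result plus a comparison plot.
    """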
    inputs = tf.placeholder(tf.float32, (1, None, 3))
    npts = tf.placeholder(tf.int32, (1, ))
    gt = tf.placeholder(tf.float32, (1, args.num_gt_points, 3))
    model_module = importlib.import_module('.%s' % args.model_type, 'models')
    model = model_module.Model(inputs, npts, gt, tf.constant(1.0))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)

    saver = tf.train.Saver()
    saver.restore(sess, args.checkpoint)

    t0 = time.time()

    partial = read_pcd(args.pcd_file)
    bbox = np.loadtxt(args.bbox_file)

    # Calculate center, rotation and scale
    center = (bbox.min(0) + bbox.max(0)) / 2
    bbox -= center
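    # Yaw is estimated from the bbox edge between corners 0 and 3, which
    # assumes a fixed corner ordering in the bbox file.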
    yaw = np.arctan2(bbox[3, 1] - bbox[0, 1], bbox[3, 0] - bbox[0, 0])
    rotation = np.array([[np.cos(yaw), -np.sin(yaw), 0],
                         [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]])
    bbox = np.dot(bbox, rotation)
    scale = bbox[3, 0] - bbox[0, 0]
    bbox /= scale

    partial = np.dot(partial - center, rotation) / scale
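    # Swap the Y and Z axes; the network appears to expect a different
    # up-axis convention (the swap is undone on the output below).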
    partial = np.dot(partial, [[1, 0, 0], [0, 0, 1], [0, 1, 0]])

    completion = sess.run(model.outputs,
                          feed_dict={
                              inputs: [partial],
                              npts: [partial.shape[0]]
                          })

    completion = completion[0]

    completion_w = np.dot(completion, [[1, 0, 0], [0, 0, 1], [0, 1, 0]])
    completion_w = np.dot(completion_w * scale, rotation.T) + center

    filename = os.path.basename(args.pcd_file)
    result_path = os.path.join(args.results_dir, filename)
    save_pcd(result_path, completion_w)

    plot_path = os.path.join(args.results_dir, 'plots.png')
    plot_pcd_three_views(plot_path, [partial, completion], ['input', 'output'],
                         '%d input points' % partial.shape[0], [5, 0.5])
    sess.close()
    t1 = time.time()  # avoid naming this 'tf', which would shadow the tensorflow module
    print('Total time: {}'.format(t1 - t0))
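A minimal sketch of how this example might be invoked. The flag names mirror the attributes that test() reads; the default values are illustrative assumptions, not the original script's:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    # These attribute names are exactly the ones accessed inside test();
    # the defaults are assumptions for illustration only.
    parser.add_argument('--model_type', default='pcn_cd')
    parser.add_argument('--checkpoint', default='data/trained_models/pcn_cd')
    parser.add_argument('--num_gt_points', type=int, default=16384)
    parser.add_argument('--pcd_file', required=True)
    parser.add_argument('--bbox_file', required=True)
    parser.add_argument('--results_dir', default='results')
    test(parser.parse_args())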
Example #2
def test(args):
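    """Batch-complete every partial car cloud in args.pcd_dir.

    Each cloud is normalized with its bounding box, completed, written to
    results_dir/completions, and periodically plotted; average input size
    and inference time are printed at the end.
    """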
    inputs = tf.placeholder(tf.float32, (1, None, 3))
    npts = tf.placeholder(tf.int32, (1,))
    gt = tf.placeholder(tf.float32, (1, args.num_gt_points, 3))
    model_module = importlib.import_module('.%s' % args.model_type, 'models')
    model = model_module.Model(inputs, npts, gt, tf.constant(1.0))

    os.makedirs(os.path.join(args.results_dir, 'plots'), exist_ok=True)
    os.makedirs(os.path.join(args.results_dir, 'completions'), exist_ok=True)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)

    saver = tf.train.Saver()
    saver.restore(sess, args.checkpoint)

    car_ids = [filename.split('.')[0] for filename in os.listdir(args.pcd_dir)]
    total_time = 0
    total_points = 0
    for i, car_id in enumerate(car_ids):
        partial = read_pcd(os.path.join(args.pcd_dir, '%s.pcd' % car_id))
        bbox = np.loadtxt(os.path.join(args.bbox_dir, '%s.txt' % car_id))
        total_points += partial.shape[0]

        # Calculate center, rotation and scale
        center = (bbox.min(0) + bbox.max(0)) / 2
        bbox -= center
        yaw = np.arctan2(bbox[3, 1] - bbox[0, 1], bbox[3, 0] - bbox[0, 0])
        rotation = np.array([[np.cos(yaw), -np.sin(yaw), 0],
                            [np.sin(yaw), np.cos(yaw), 0],
                            [0, 0, 1]])
        bbox = np.dot(bbox, rotation)
        scale = bbox[3, 0] - bbox[0, 0]
        bbox /= scale

        partial = np.dot(partial - center, rotation) / scale
        partial = np.dot(partial, [[1, 0, 0], [0, 0, 1], [0, 1, 0]])

        start = time.time()
        completion = sess.run(model.outputs,
                              feed_dict={
                                  inputs: [partial],
                                  npts: [partial.shape[0]]
                              })
        total_time += time.time() - start
        completion = completion[0]

        completion_w = np.dot(completion, [[1, 0, 0], [0, 0, 1], [0, 1, 0]])
        completion_w = np.dot(completion_w * scale, rotation.T) + center
        pcd_path = os.path.join(args.results_dir, 'completions', '%s.pcd' % car_id)
        save_pcd(pcd_path, completion_w)

        if i % args.plot_freq == 0:
            plot_path = os.path.join(args.results_dir, 'plots', '%s.png' % car_id)
            plot_pcd_three_views(plot_path, [partial, completion], ['input', 'output'],
                                 '%d input points' % partial.shape[0], [5, 0.5])
    print('Average # input points:', total_points / len(car_ids))
    print('Average time:', total_time / len(car_ids))
    sess.close()
Example #3
def test(args):
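    """Complete partial LiDAR clouds enumerated by an Analyzer.

    For each (data_type, track_no, car_no) triple, reads the partial cloud
    and its bounding box, normalizes, runs the network, and writes the
    de-normalized completion and a three-view comparison plot.
    """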
    inputs = tf.placeholder(tf.float32, (1, None, 3))
    npts = tf.placeholder(tf.int32, (1, ))
    gt = tf.placeholder(tf.float32, (1, args.num_gt_points, 3))
    model_module = importlib.import_module('.%s' % args.model_type, 'models')
    model = model_module.Model(inputs, npts, gt, tf.constant(1.0))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)

    saver = tf.train.Saver()
    saver.restore(sess, args.checkpoint)

    an = Analyzer(dr.get_PLIDAR_predicted_path)
    argument_list = an.get_datatype_trackno_carno(data_types=["train"])

    for i, (data_type, track_no, car_no) in enumerate(argument_list):
        # Input - partial point cloud .pcd
        pcd_file = dr.get_pcn_lidar_reference_partial_path(
            data_type, track_no, car_no, extension='pcd')
        # Input - bbox 8 corners .txt
        bbox_file = dr.get_pcn_bbox_lidar_path(data_type, track_no, car_no)

        # Result - complete point cloud .pcd
        result_path = dr.get_pcn_lidar_reference_complete_path(
            data_type, track_no, car_no)
        # Result - plot of input and output of pointcloud
        plot_path = dr.get_pcn_plot_lidar_path(data_type, track_no, car_no)

        partial = read_pcd(pcd_file)
        bbox = np.loadtxt(bbox_file)

        # Calculate center, rotation and scale
        center = (bbox.min(0) + bbox.max(0)) / 2
        bbox -= center
        yaw = np.arctan2(bbox[3, 1] - bbox[0, 1], bbox[3, 0] - bbox[0, 0])
        rotation = np.array([[np.cos(yaw), -np.sin(yaw), 0],
                             [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]])
        bbox = np.dot(bbox, rotation)
        scale = bbox[3, 0] - bbox[0, 0]
        bbox /= scale

        partial = np.dot(partial - center, rotation) / scale
        partial = np.dot(partial, [[1, 0, 0], [0, 0, 1], [0, 1, 0]])

        completion = sess.run(model.outputs,
                              feed_dict={
                                  inputs: [partial],
                                  npts: [partial.shape[0]]
                              })
        completion = completion[0]

        completion_w = np.dot(completion, [[1, 0, 0], [0, 0, 1], [0, 1, 0]])
        completion_w = np.dot(completion_w * scale, rotation.T) + center

        save_pcd(result_path, completion_w)

        plot_pcd_three_views(plot_path, [partial, completion],
                             ['input', 'output'],
                             '%d input points' % partial.shape[0], [5, 0.5])

        print('Finish {}/{}'.format(i + 1, len(argument_list)))

    sess.close()
Example #4
def train(args):
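    """Train a completion model fed by a TrainProvider input queue.

    Uses a piecewise-constant alpha schedule and optional learning-rate
    decay; evaluates periodically and saves a checkpoint only when the
    validation fine loss drops below the best seen so far (initially 1.0).
    """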
    min_loss_fine = 1.0
    is_training_pl = tf.placeholder(tf.bool, shape=(), name='is_training')
    global_step = tf.Variable(0, trainable=False, name='global_step')
    alpha = tf.train.piecewise_constant(global_step, [10000, 20000, 50000],
                                        [0.01, 0.1, 0.5, 1.0], 'alpha_op')

    provider = TrainProvider(args, is_training_pl)
    ids, inputs, gt = provider.batch_data
    num_eval_steps = provider.num_valid // args.batch_size

    model_module = importlib.import_module('.%s' % args.model_type, 'models')
    model = model_module.Model(inputs, gt, alpha, is_training_pl)
    add_train_summary('alpha', alpha)

    if args.lr_decay:
        learning_rate = tf.train.exponential_decay(args.base_lr,
                                                   global_step,
                                                   args.lr_decay_steps,
                                                   args.lr_decay_rate,
                                                   staircase=True,
                                                   name='lr')
        learning_rate = tf.maximum(learning_rate, args.lr_clip)
        add_train_summary('learning_rate', learning_rate)
    else:
        learning_rate = tf.constant(args.base_lr, name='lr')

    trainer = tf.train.AdamOptimizer(learning_rate)
    train_op = trainer.minimize(model.loss, global_step)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver(max_to_keep=15)
    if args.restore:
        saver.restore(sess, tf.train.latest_checkpoint(args.log_dir))
        #saver.restore(sess, 'data/trained_models/pcn_cd')
    else:
        if os.path.exists(args.log_dir):
            delete_key = input(
                colored('%s exists. Delete? [y (or enter)/N]' % args.log_dir,
                        'white', 'on_red'))
            if delete_key == 'y' or delete_key == "":
                os.system('rm -rf %s/*' % args.log_dir)
                os.makedirs(os.path.join(args.log_dir, 'plots'))
        else:
            os.makedirs(os.path.join(args.log_dir, 'plots'))
        with open(os.path.join(args.log_dir, 'args.txt'), 'w') as log:
            for arg in sorted(vars(args)):
                log.write(arg + ': ' + str(getattr(args, arg)) +
                          '\n')  # log of arguments
        os.system('cp models/%s.py %s' %
                  (args.model_type, args.log_dir))  # bkp of model def
        os.system('cp train.py %s' % args.log_dir)  # bkp of train procedure

    train_summary = tf.summary.merge_all('train_summary')
    valid_summary = tf.summary.merge_all('valid_summary')
    writer = tf.summary.FileWriter(args.log_dir, sess.graph)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    total_time = 0
    train_start = time.time()
    step = sess.run(global_step)
    while not coord.should_stop():
        step += 1
        epoch = step * args.batch_size // provider.num_train + 1
        start = time.time()
        _, loss, loss_fine, summary = sess.run(
            [train_op, model.loss, model.loss_fine, train_summary],
            feed_dict={is_training_pl: True})
        total_time += time.time() - start
        writer.add_summary(summary, step)
        if step % args.steps_per_print == 0:
            print(
                'epoch %d  step %d  loss %.8f loss_fine %.8f - time per batch %.4f'
                % (epoch, step, loss, loss_fine,
                   total_time / args.steps_per_print))
            total_time = 0
        if step < 100000:
            steps_per_eval = args.steps_per_eval * 10
        else:
            steps_per_eval = args.steps_per_eval
        if step % steps_per_eval == 0:
            print(colored('Testing...', 'grey', 'on_green'))
            total_loss = 0
            total_time = 0
            total_loss_fine = 0
            sess.run(tf.local_variables_initializer())
            for i in range(num_eval_steps):
                start = time.time()
                loss, loss_fine, _ = sess.run(
                    [model.loss, model.loss_fine, model.update],
                    feed_dict={is_training_pl: False})
                total_loss += loss
                total_loss_fine += loss_fine
                total_time += time.time() - start
            summary = sess.run(valid_summary,
                               feed_dict={is_training_pl: False})
            writer.add_summary(summary, step)
            print(
                colored(
                    'epoch %d  step %d  loss %.8f loss_fine %.8f - time per batch %.4f'
                    % (epoch, step, total_loss / num_eval_steps,
                       total_loss_fine / num_eval_steps,
                       total_time / num_eval_steps), 'grey', 'on_green'))
            total_time = 0
            if (total_loss_fine / num_eval_steps) < min_loss_fine:
                min_loss_fine = total_loss_fine / num_eval_steps
                saver.save(sess, os.path.join(args.log_dir, 'model'), step)
                print(
                    colored('Model saved at %s' % args.log_dir, 'white',
                            'on_blue'))
        if step % args.steps_per_visu == 0:
            model_id, pcds = sess.run([ids[0], model.visualize_ops],
                                      feed_dict={is_training_pl: True})
            model_id = model_id.decode('utf-8')
            plot_path = os.path.join(
                args.log_dir, 'plots',
                'epoch_%d_step_%d_%s.png' % (epoch, step, model_id))
            plot_pcd_three_views(plot_path, pcds, model.visualize_titles)
        #if step % args.steps_per_save == 0:
        #    saver.save(sess, os.path.join(args.log_dir, 'model'), step)
        #    print(colored('Model saved at %s' % args.log_dir, 'white', 'on_blue'))
        if step >= args.max_step:
            break
    print('Total time', datetime.timedelta(seconds=time.time() - train_start))
    coord.request_stop()
    coord.join(threads)
    sess.close()
Example #5
def test(args):
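    """Evaluate a trained model on a list of partial/complete pairs.

    Computes Chamfer and Earth Mover's distances per model and per
    category, writes them to results.csv, and optionally plots and saves
    the completed clouds.
    """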
    inputs = tf.placeholder(tf.float32, (1, None, 3))
    gt = tf.placeholder(tf.float32, (1, args.num_gt_points, 3))
    model_module = importlib.import_module('.%s' % args.model_type, 'models')
    model = model_module.Model(inputs, gt, tf.constant(1.0))

    output = tf.placeholder(tf.float32, (1, args.num_gt_points, 3))
    cd_op = chamfer(output, gt)
    emd_op = earth_mover(output, gt)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)

    saver = tf.train.Saver()
    saver.restore(sess, args.checkpoint)

    os.makedirs(args.results_dir, exist_ok=True)
    csv_file = open(os.path.join(args.results_dir, 'results.csv'), 'w')
    writer = csv.writer(csv_file)
    writer.writerow(['id', 'cd', 'emd'])

    with open(args.list_path) as file:
        model_list = file.read().splitlines()
    total_time = 0
    total_cd = 0
    total_emd = 0
    cd_per_cat = {}
    emd_per_cat = {}
    for i, model_id in enumerate(model_list):
        partial = read_pcd(
            os.path.join(args.data_dir, 'partial', '%s.pcd' % model_id))
        complete = read_pcd(
            os.path.join(args.data_dir, 'complete', '%s.pcd' % model_id))
        start = time.time()
        completion = sess.run(model.outputs, feed_dict={inputs: [partial]})
        total_time += time.time() - start
        cd, emd = sess.run([cd_op, emd_op],
                           feed_dict={
                               output: completion,
                               gt: [complete]
                           })
        total_cd += cd
        total_emd += emd
        writer.writerow([model_id, cd, emd])

        synset_id, model_id = model_id.split('/')
        cd_per_cat.setdefault(synset_id, []).append(cd)
        emd_per_cat.setdefault(synset_id, []).append(emd)

        if i % args.plot_freq == 0:
            os.makedirs(os.path.join(args.results_dir, 'plots', synset_id),
                        exist_ok=True)
            plot_path = os.path.join(args.results_dir, 'plots', synset_id,
                                     '%s.png' % model_id)
            plot_pcd_three_views(plot_path, [partial, completion[0], complete],
                                 ['input', 'output', 'ground truth'],
                                 'CD %.4f  EMD %.4f' % (cd, emd),
                                 [5, 0.5, 0.5])
        if args.save_pcd:
            os.makedirs(os.path.join(args.results_dir, 'pcds', synset_id),
                        exist_ok=True)
            save_pcd(
                os.path.join(args.results_dir, 'pcds', synset_id,
                             '%s.pcd' % model_id),
                completion[0])
    csv_file.close()
    sess.close()

    print('Average time: %f' % (total_time / len(model_list)))
    print('Average Chamfer distance: %f' % (total_cd / len(model_list)))
    print('Average Earth mover distance: %f' % (total_emd / len(model_list)))
    print('Chamfer distance per category')
    for synset_id in cd_per_cat.keys():
        print(synset_id, '%f' % np.mean(cd_per_cat[synset_id]))
    print('Earth mover distance per category')
    for synset_id in emd_per_cat.keys():
        print(synset_id, '%f' % np.mean(emd_per_cat[synset_id]))
Example #6
def train(args):
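    """Train a completion model with explicit feed_dict batches.

    Unlike the queue-based trainer, batches are pulled from lmdb_dataflow
    generators for both training and validation.
    """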
    is_training_pl = tf.placeholder(tf.bool, shape=(), name='is_training')
    global_step = tf.Variable(0, trainable=False, name='global_step')
    alpha = tf.train.piecewise_constant(global_step, [10000, 20000, 50000],
                                        [0.01, 0.1, 0.5, 1.0], 'alpha_op')
    inputs_pl = tf.placeholder(tf.float32, (1, None, 3), 'inputs')
    npts_pl = tf.placeholder(tf.int32, (args.batch_size, ), 'num_points')
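    # Point clouds are packed: all clouds of a batch are concatenated into a
    # single (1, N_total, 3) tensor, and npts_pl records each cloud's size.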
    gt_pl = tf.placeholder(tf.float32,
                           (args.batch_size, args.num_gt_points, 3),
                           'ground_truths')

    model_module = importlib.import_module('.%s' % args.model_type, 'models')

    model = model_module.Model(inputs_pl, npts_pl, gt_pl, alpha)
    add_train_summary('alpha', alpha)

    if args.lr_decay:
        learning_rate = tf.train.exponential_decay(args.base_lr,
                                                   global_step,
                                                   args.lr_decay_steps,
                                                   args.lr_decay_rate,
                                                   staircase=True,
                                                   name='lr')
        learning_rate = tf.maximum(learning_rate, args.lr_clip)
        add_train_summary('learning_rate', learning_rate)
    else:
        learning_rate = tf.constant(args.base_lr, name='lr')
    train_summary = tf.summary.merge_all('train_summary')
    valid_summary = tf.summary.merge_all('valid_summary')

    trainer = tf.train.AdamOptimizer(learning_rate)
    train_op = trainer.minimize(model.loss, global_step)

    df_train, num_train = lmdb_dataflow(args.lmdb_train,
                                        args.batch_size,
                                        args.num_input_points,
                                        args.num_gt_points,
                                        is_training=True)
    train_gen = df_train.get_data()
    df_valid, num_valid = lmdb_dataflow(args.lmdb_valid,
                                        args.batch_size,
                                        args.num_input_points,
                                        args.num_gt_points,
                                        is_training=False)
    valid_gen = df_valid.get_data()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    saver = tf.train.Saver()

    if args.restore:
        saver.restore(sess, tf.train.latest_checkpoint(args.log_dir))
        writer = tf.summary.FileWriter(args.log_dir)
    else:
        sess.run(tf.global_variables_initializer())
        if os.path.exists(args.log_dir):
            delete_key = input(
                colored('%s exists. Delete? [y (or enter)/N]' % args.log_dir,
                        'white', 'on_red'))
            if delete_key == 'y' or delete_key == "":
                os.system('rm -rf %s/*' % args.log_dir)
                os.makedirs(os.path.join(args.log_dir, 'plots'))
        else:
            os.makedirs(os.path.join(args.log_dir, 'plots'))
        with open(os.path.join(args.log_dir, 'args.txt'), 'w') as log:
            for arg in sorted(vars(args)):
                log.write(arg + ': ' + str(getattr(args, arg)) +
                          '\n')  # log of arguments
        os.system('cp models/%s.py %s' %
                  (args.model_type, args.log_dir))  # bkp of model def
        os.system('cp train.py %s' % args.log_dir)  # bkp of train procedure
        writer = tf.summary.FileWriter(args.log_dir, sess.graph)

    total_time = 0
    train_start = time.time()
    init_step = sess.run(global_step)
    for step in range(init_step + 1, args.max_step + 1):
        epoch = step * args.batch_size // num_train + 1
        ids, inputs, npts, gt = next(train_gen)
        start = time.time()
        feed_dict = {
            inputs_pl: inputs,
            npts_pl: npts,
            gt_pl: gt,
            is_training_pl: True
        }
        _, loss, summary = sess.run([train_op, model.loss, train_summary],
                                    feed_dict=feed_dict)
        total_time += time.time() - start
        writer.add_summary(summary, step)
        if step % args.steps_per_print == 0:
            print('epoch %d  step %d  loss %.8f - time per batch %.4f' %
                  (epoch, step, loss, total_time / args.steps_per_print))
            total_time = 0
        if step % args.steps_per_eval == 0:
            print(colored('Testing...', 'grey', 'on_green'))
            num_eval_steps = num_valid // args.batch_size
            total_loss = 0
            total_time = 0
            sess.run(tf.local_variables_initializer())
            for i in range(num_eval_steps):
                start = time.time()
                ids, inputs, npts, gt = next(valid_gen)
                feed_dict = {
                    inputs_pl: inputs,
                    npts_pl: npts,
                    gt_pl: gt,
                    is_training_pl: False
                }
                loss, _ = sess.run([model.loss, model.update],
                                   feed_dict=feed_dict)
                total_loss += loss
                total_time += time.time() - start
            summary = sess.run(valid_summary,
                               feed_dict={is_training_pl: False})
            writer.add_summary(summary, step)
            print(
                colored(
                    'epoch %d  step %d  loss %.8f - time per batch %.4f' %
                    (epoch, step, total_loss / num_eval_steps,
                     total_time / num_eval_steps), 'grey', 'on_green'))
            total_time = 0
            if step % args.steps_per_visu == 0:
                all_pcds = sess.run(model.visualize_ops, feed_dict=feed_dict)
                for i in range(0, args.batch_size, args.visu_freq):
                    plot_path = os.path.join(
                        args.log_dir, 'plots',
                        'epoch_%d_step_%d_%s.png' % (epoch, step, ids[i]))
                    pcds = [x[i] for x in all_pcds]
                    plot_pcd_three_views(plot_path, pcds,
                                         model.visualize_titles)
        if step % args.steps_per_save == 0:
            saver.save(sess, os.path.join(args.log_dir, 'model'), step)
            print(
                colored('Model saved at %s' % args.log_dir, 'white',
                        'on_blue'))

    print('Total time', datetime.timedelta(seconds=time.time() - train_start))
    sess.close()
Example #7
def train(args):
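    """Train an ITN pose-estimation model on TrainProvider batches.

    Logs to a timestamped directory derived from args.log_dir, and
    periodically evaluates, visualizes, and checkpoints the model.
    """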
    is_training_pl = tf.placeholder(tf.bool, shape=(), name='is_training')
    global_step = tf.Variable(0, trainable=False, name='global_step')
    provider = TrainProvider(args, is_training_pl)

    ids, inputs, gt_pose = provider.batch_data
    num_eval_steps = provider.num_valid // args.batch_size

    model = itn.ITN(inputs, gt_pose, args.iterations,
                    args.validation_iterations, args.no_batchnorm,
                    args.rot_representation, is_training_pl)

    if not args.no_lr_decay:
        learning_rate = tf.train.exponential_decay(args.base_lr,
                                                   global_step,
                                                   args.lr_decay_steps,
                                                   args.lr_decay_rate,
                                                   staircase=True,
                                                   name='lr')
        learning_rate = tf.maximum(learning_rate, args.lr_clip)
        add_train_summary('learning_rate', learning_rate)
    else:
        learning_rate = tf.constant(args.base_lr, name='lr')

    trainer = tf.train.AdamOptimizer(learning_rate)
    train_op = trainer.minimize(model.loss, global_step)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    saver = tf.train.Saver()

    now = datetime.datetime.now(pytz.timezone('US/Pacific'))
    log_dir = args.log_dir + now.strftime('_%H_%M_%d_%m_%Y')

    if args.restore:
        saver.restore(sess, tf.train.latest_checkpoint(log_dir))
    else:
        if os.path.exists(log_dir):
            delete_key = input(
                colored('%s exists. Delete? [y (or enter)/N]' % log_dir,
                        'white', 'on_red'))
            if delete_key == 'y' or delete_key == "":
                os.system('rm -rf %s' % log_dir)
                os.makedirs(log_dir)
                os.makedirs(os.path.join(log_dir, 'plots'))
        else:
            os.makedirs(os.path.join(log_dir, 'plots'))
        with open(os.path.join(log_dir, 'args.txt'), 'w') as log:
            for arg in sorted(vars(args)):
                log.write(arg + ': ' + str(getattr(args, arg)) +
                          '\n')  # log of arguments
        os.system('cp models/itn.py %s' % (log_dir))  # bkp of model def
        os.system('cp train.py %s' % log_dir)  # bkp of train procedure

    train_summary = tf.summary.merge_all('train_summary')
    valid_summary = tf.summary.merge_all('valid_summary')
    writer = tf.summary.FileWriter(log_dir, sess.graph)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    total_time = 0
    train_start = time.time()
    step = sess.run(global_step)

    while not coord.should_stop():
        step += 1
        epoch = step * args.batch_size // provider.num_train + 1
        start = time.time()
        __, loss, summary = sess.run([train_op, model.loss, train_summary],
                                     feed_dict={is_training_pl: True})
        total_time += time.time() - start
        writer.add_summary(summary, step)

        if step % args.steps_per_print == 0:
            print('epoch %d  step %d  loss %.8f - time per batch %.4f' %
                  (epoch, step, loss, total_time / args.steps_per_print))
            total_time = 0

        if step % args.steps_per_eval == 0:
            print(colored('Testing...', 'grey', 'on_green'))
            total_loss = 0
            total_time = 0
            sess.run(tf.local_variables_initializer())
            for i in range(num_eval_steps):
                start = time.time()
                loss, _ = sess.run([model.loss, model.update],
                                   feed_dict={is_training_pl: False})
                total_loss += loss
                total_time += time.time() - start
            summary = sess.run(valid_summary,
                               feed_dict={is_training_pl: False})
            writer.add_summary(summary, step)
            print(
                colored(
                    'epoch %d  step %d  loss %.8f - time per batch %.4f' %
                    (epoch, step, total_loss / num_eval_steps,
                     total_time / num_eval_steps), 'grey', 'on_green'))
            total_time = 0

        if step % args.steps_per_visu == 0:
            model_id, pcds = sess.run([ids[0], model.visualize_ops],
                                      feed_dict={is_training_pl: True})
            model_id = model_id.decode('utf-8')
            plot_path = os.path.join(
                log_dir, 'plots',
                'epoch_%d_step_%d_%s.png' % (epoch, step, model_id))
            plot_pcd_three_views(plot_path, pcds, model.visualize_titles)

        if step % args.steps_per_save == 0:
            saver.save(sess, os.path.join(log_dir, 'model'), step)
            print(colored('Model saved at %s' % log_dir, 'white', 'on_blue'))

        if step >= args.max_step:
            break

    print('Total time', datetime.timedelta(seconds=time.time() - train_start))
    coord.request_stop()
    coord.join(threads)
    sess.close()
Example #8
def test(args):
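    """Evaluate a two-stage completion model with extra per-point channels.

    Supports the shapenet and suncg data layouts and optional random
    rotation; writes metrics to results.csv, periodic plots, and colored
    .ply files for inputs, coarse/fine outputs, per-channel regions, and
    ground truth.
    """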
    inputs = tf.placeholder(tf.float32, (1, None, 3))
    npts = tf.placeholder(tf.int32, (1, ))
    gt = tf.placeholder(tf.float32, (1, args.num_gt_points, 6))
    model_module = importlib.import_module('.%s' % args.model_type, 'models')
    model = model_module.Model(inputs, npts, gt, tf.constant(1.0),
                               args.num_channel)

    output = tf.placeholder(tf.float32,
                            (1, args.num_gt_points, 3 + args.num_channel))
    cd_op = chamfer(output[:, :, 0:3], gt[:, :, 0:3])
    emd_op = earth_mover(output[:, :, 0:3], gt[:, :, 0:3])

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)

    saver = tf.train.Saver()
    saver.restore(sess, args.checkpoint)

    os.makedirs(args.results_dir, exist_ok=True)
    csv_file = open(os.path.join(args.results_dir, 'results.csv'), 'w')
    writer = csv.writer(csv_file)
    writer.writerow(['id', 'cd', 'emd'])

    with open(args.list_path) as file:
        model_list = file.read().splitlines()
    total_time = 0
    total_cd = 0
    total_emd = 0
    cd_per_cat = {}
    emd_per_cat = {}
    np.random.seed(1)
    for i, model_id in enumerate(model_list):
        if args.experiment == 'shapenet':
            synset_id, model_id = model_id.split('/')
            partial = read_pcd(
                os.path.join(args.data_dir, 'partial', synset_id,
                             '%s.pcd' % model_id))
            complete = read_pcd(
                os.path.join(args.data_dir, 'complete', synset_id,
                             '%s.pcd' % model_id))
        elif args.experiment == 'suncg':
            synset_id = 'all_rooms'
            partial = read_pcd(
                os.path.join(args.data_dir, 'pcd_partial',
                             '%s.pcd' % model_id))
            complete = read_pcd(
                os.path.join(args.data_dir, 'pcd_complete',
                             '%s.pcd' % model_id))
        if args.rotate:
            angle = np.random.rand(1) * 2 * np.pi
            partial = np.stack([
                np.cos(angle) * partial[:, 0] - np.sin(angle) * partial[:, 2],
                partial[:, 1],
                np.sin(angle) * partial[:, 0] + np.cos(angle) * partial[:, 2]
            ],
                               axis=-1)
            complete = np.stack([
                np.cos(angle) * complete[:, 0] -
                np.sin(angle) * complete[:, 2], complete[:, 1],
                np.sin(angle) * complete[:, 0] +
                np.cos(angle) * complete[:, 2], complete[:, 3], complete[:, 4],
                complete[:, 5]
            ],
                                axis=-1)
        partial = partial[:, :3]
        complete = resample_pcd(complete, 16384)
        start = time.time()
        completion1, completion2, mesh_out = sess.run(
            [model.outputs1, model.outputs2, model.gt_can],
            feed_dict={
                inputs: [partial],
                npts: [partial.shape[0]],
                gt: [complete]
            })
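        # Zero any trailing channels beyond xyz plus the num_channel semantic
        # channels before computing the evaluation metrics.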
        completion1[0][:, (3 + args.num_channel):] *= 0
        completion2[0][:, (3 + args.num_channel):] *= 0
        mesh_out[0][:, (3 + args.num_channel):] *= 0
        total_time += time.time() - start
        # NOTE: the EMD op is disabled here; the 'emd' values reported below
        # actually repeat the Chamfer distance. Restore [cd_op, emd_op] to
        # compute the true Earth Mover's distance.
        cd, emd = sess.run([cd_op, cd_op],
                           feed_dict={
                               output: completion2,
                               gt: [complete]
                           })
        total_cd += cd
        total_emd += emd
        cd_per_cat.setdefault(synset_id, []).append(cd)
        emd_per_cat.setdefault(synset_id, []).append(emd)
        writer.writerow([model_id, cd, emd])

        if i % args.plot_freq == 0:
            os.makedirs(
                os.path.join(args.results_dir, 'plots', synset_id),
                exist_ok=True)
            plot_path = os.path.join(args.results_dir, 'plots', synset_id,
                                     '%s.png' % model_id)
            plot_pcd_three_views(
                plot_path, [
                    partial, completion1[0], completion2[0], mesh_out[0],
                    complete
                ], ['input', 'coarse', 'fine', 'mesh', 'ground truth'],
                'CD %.4f  EMD %.4f' % (cd, emd), [5, 0.5, 0.5, 0.5, 0.5],
                num_channel=args.num_channel)
        if args.save_pcd:
            os.makedirs(
                os.path.join(args.results_dir, 'input', synset_id),
                exist_ok=True)
            pts_coord = partial[:, 0:3]
            pts_color = matplotlib.cm.cool((partial[:, 1]))[:, 0:3]
            # save_pcd(os.path.join(args.results_dir, 'input', synset_id, '%s.ply' % model_id), np.concatenate((pts_coord, pts_color), -1))
            pcd = PointCloud()
            pcd.points = Vector3dVector(pts_coord)
            pcd.colors = Vector3dVector(pts_color)
            write_point_cloud(
                os.path.join(args.results_dir, 'input', synset_id,
                             '%s.ply' % model_id),
                pcd,
                write_ascii=True)
            os.makedirs(
                os.path.join(args.results_dir, 'output1', synset_id),
                exist_ok=True)
            pts_coord = completion1[0][:, 0:3]
            pts_color = matplotlib.cm.Set1(
                (np.argmax(completion1[0][:, 3:3 + args.num_channel], -1) +
                 1) / args.num_channel - 0.5 / args.num_channel)[:, 0:3]
            # pts_color = matplotlib.cm.tab20((np.argmax(completion1[0][:, 3:3+args.num_channel], -1) + 1)/args.num_channel - 0.5/args.num_channel)[:,0:3]
            # save_pcd(os.path.join(args.results_dir, 'output1', synset_id, '%s.ply' % model_id), np.concatenate((pts_coord, pts_color), -1))
            pcd.points = Vector3dVector(pts_coord)
            pcd.colors = Vector3dVector(pts_color)
            write_point_cloud(
                os.path.join(args.results_dir, 'output1', synset_id,
                             '%s.ply' % model_id),
                pcd,
                write_ascii=True)
            os.makedirs(
                os.path.join(args.results_dir, 'output2', synset_id),
                exist_ok=True)
            pts_coord = completion2[0][:, 0:3]
            pts_color = matplotlib.cm.Set1(
                (np.argmax(completion2[0][:, 3:3 + args.num_channel], -1) +
                 1) / args.num_channel - 0.5 / args.num_channel)[:, 0:3]
            # pts_color = matplotlib.cm.tab20((np.argmax(completion2[0][:, 3:3+args.num_channel], -1) + 1)/args.num_channel - 0.5/args.num_channel)[:,0:3]
            # save_pcd(os.path.join(args.results_dir, 'output2', synset_id, '%s.ply' % model_id), np.concatenate((pts_coord, pts_color), -1))
            pcd.points = Vector3dVector(pts_coord)
            pcd.colors = Vector3dVector(pts_color)
            write_point_cloud(
                os.path.join(args.results_dir, 'output2', synset_id,
                             '%s.ply' % model_id),
                pcd,
                write_ascii=True)
            #######
            os.makedirs(
                os.path.join(args.results_dir, 'regions', synset_id),
                exist_ok=True)
            for idx in range(3, 3 + args.num_channel):
                val_min = np.min(completion2[0][:, idx])
                val_max = np.max(completion2[0][:, idx])
                pts_color = 0.8 * matplotlib.cm.Reds(
                    (completion2[0][:, idx] - val_min) /
                    (val_max - val_min))[:, 0:3]
                pts_color += 0.2 * matplotlib.cm.gist_gray(
                    (completion2[0][:, idx] - val_min) /
                    (val_max - val_min))[:, 0:3]
                pcd.colors = Vector3dVector(pts_color)
                write_point_cloud(
                    os.path.join(args.results_dir, 'regions', synset_id,
                                 '%s_%s.ply' % (model_id, idx - 3)),
                    pcd,
                    write_ascii=True)
            os.makedirs(
                os.path.join(args.results_dir, 'gt', synset_id), exist_ok=True)
            pts_coord = complete[:, 0:3]
            if args.experiment == 'shapenet':
                pts_color = matplotlib.cm.cool(complete[:, 1])[:, 0:3]
            elif args.experiment == 'suncg':
                pts_color = matplotlib.cm.Set1(complete[:, 3] -
                                               0.5 / args.num_channel)[:, 0:3]
            # save_pcd(os.path.join(args.results_dir, 'gt', synset_id, '%s.ply' % model_id), np.concatenate((pts_coord, pts_color), -1))
            pcd.points = Vector3dVector(pts_coord)
            pcd.colors = Vector3dVector(pts_color)
            write_point_cloud(
                os.path.join(args.results_dir, 'gt', synset_id,
                             '%s.ply' % model_id),
                pcd,
                write_ascii=True)
    sess.close()

    print('Average time: %f' % (total_time / len(model_list)))
    print('Average Chamfer distance: %f' % (total_cd / len(model_list)))
    print('Average Earth mover distance: %f' % (total_emd / len(model_list)))
    writer.writerow([
        total_time / len(model_list), total_cd / len(model_list),
        total_emd / len(model_list)
    ])
    print('Chamfer distance per category')
    for synset_id in cd_per_cat.keys():
        print(synset_id, '%f' % np.mean(cd_per_cat[synset_id]))
        writer.writerow([synset_id, np.mean(cd_per_cat[synset_id])])
    print('Earth mover distance per category')
    for synset_id in emd_per_cat.keys():
        print(synset_id, '%f' % np.mean(emd_per_cat[synset_id]))
        writer.writerow([synset_id, np.mean(emd_per_cat[synset_id])])
    csv_file.close()

def train(args):
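    """Despite its name, this function only evaluates a restored model.

    Restores args.model_path, computes the fine Chamfer loss per batch on
    the test split, aggregates it per (novel) category, and optionally
    plots each completion.
    """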
    is_training_pl = tf.placeholder(tf.bool, shape=(), name='is_training')
    global_step = tf.Variable(0, trainable=False, name='global_step')

    # Note that theta is a parameter used for progressive training
    theta = tf.train.piecewise_constant(global_step, [10000, 20000, 50000],
                                        [0.01, 0.1, 0.5, 1.0], 'theta_op')

    provider = TrainProvider(args, is_training_pl)
    ids, inputs, gt = provider.batch_data
    num_eval_steps = provider.num_test // args.batch_size

    print('provider.num_test', provider.num_test)
    print('num_eval_steps', num_eval_steps)

    model_module = importlib.import_module('.%s' % args.model_type, 'models')
    model = model_module.Model(inputs, gt, theta, False)
    add_train_summary('theta', theta)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver(max_to_keep=10)
    saver.restore(sess, args.model_path)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    start_time = time.time()
    while not coord.should_stop():
        print(colored('Testing...', 'grey', 'on_green'))
        total_time = 0
        total_loss_fine = 0
        cd_per_cat = {}
        sess.run(tf.local_variables_initializer())
        for j in range(num_eval_steps):
            start = time.time()
            ids_eval, inputs_eval, gt_eval, loss_fine, fine = sess.run(
                [ids, inputs, gt, model.loss_fine, model.fine],
                feed_dict={is_training_pl: False})
            synset_id = ids_eval[0].decode('utf-8').split('_')[0]
            total_loss_fine += loss_fine
            total_time += time.time() - start

            cd_per_cat.setdefault(synset_id, []).append(loss_fine)

            if args.plot:
                for i in range(args.batch_size):
                    model_id = ids_eval[i].decode('utf-8').split('_')[1]
                    os.makedirs(os.path.join(args.save_path, 'plots',
                                             synset_id),
                                exist_ok=True)
                    plot_path = os.path.join(args.save_path, 'plots',
                                             synset_id, '%s.png' % model_id)
                    plot_pcd_three_views(plot_path,
                                         [inputs_eval[i], fine[i], gt_eval[i]],
                                         ['input', 'output', 'ground truth'],
                                         'CD %.4f' % (loss_fine),
                                         [0.5, 0.5, 0.5])
        print('Average Chamfer distance: %f' %
              (total_loss_fine / num_eval_steps))
        print('Chamfer distance per category')
        dict_novel = {
            '02924116': 'bus',
            '02818832': 'bed',
            '02871439': 'bookshelf',
            '02828884': 'bench',
            '03467517': 'guitar',
            '03790512': 'motorbike',
            '04225987': 'skateboard',
            '03948459': 'pistol'
        }
        for synset_id in dict_novel.keys():
            print(dict_novel[synset_id],
                  ' %f' % np.mean(cd_per_cat[synset_id]))
        break
    print('Total time', datetime.timedelta(seconds=time.time() - start_time))
    coord.request_stop()
    coord.join(threads)
    sess.close()