Example #1
def benchmark_results(split, args):
    """Run the model over `split`, write each predicted cloud to an HDF5
    file and zip the per-class outputs into a submission archive."""
    data_queue, data_processes = data_setup(args,
                                            split,
                                            num_workers=1,
                                            repeat=False)
    L = len(data_processes[0].data_paths)
    Nb = int(L / args.batch_size)
    if Nb * args.batch_size < L:
        Nb += 1

    # iterate over dataset in batches
    for bidx in tqdm(range(Nb)):
        targets, clouds_data = data_queue.get()
        loss, dist1, dist2, emd_cost, outputs = args.step(
            args, targets, clouds_data)
        for idx in range(targets.shape[0]):
            fname = clouds_data[0][idx][:clouds_data[0][idx].rfind('.')]
            synset = fname.split('/')[-2]
            outp = outputs[idx:idx + 1, ...].squeeze()
            odir = args.odir + '/benchmark/%s' % (synset)
            if not os.path.isdir(odir):
                print("Creating %s ..." % (odir))
                os.makedirs(odir)
            ofile = os.path.join(odir, fname.split('/')[-1])
            print("Saving to %s ..." % (ofile))
            with h5py.File(ofile, "w") as f:
                f.create_dataset("data", data=outp)

    kill_data_processes(data_queue, data_processes)
    cur_dir = os.getcwd()
    subprocess.call("cd %s; zip -r submission.zip *; cd %s" %
                    (args.odir + '/benchmark', cur_dir),
                    shell=True)
    print("Submission file has been saved to %s/submission.zip" %
          (args.odir + '/benchmark'))
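The batch count in the loop above is a hand-rolled ceiling division, and the submission archive is produced by shelling out to zip. As a rough sketch of a portable equivalent using only the standard library (ceil_batches and make_submission_zip are illustrative helpers, not part of the project; writing the archive next to the benchmark directory avoids zipping the archive into itself):

import math
import os
import shutil

def ceil_batches(num_samples, batch_size):
    # Same value as the Nb computation above: one extra batch for any remainder.
    return math.ceil(num_samples / batch_size)

def make_submission_zip(benchmark_dir, out_dir):
    # Portable alternative to "cd ...; zip -r submission.zip *":
    # creates <out_dir>/submission.zip containing the contents of benchmark_dir.
    return shutil.make_archive(os.path.join(out_dir, 'submission'), 'zip',
                               root_dir=benchmark_dir)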
Example #2
def test_process():
    """Smoke test: spawn data worker processes and plot the partial input
    next to the ground-truth cloud for every batch they produce."""
    from multiprocessing import Queue
    parser = argparse.ArgumentParser(description='')
    args = parser.parse_args()
    args.dataset = 'facade'
    #    args.dataset = 'shapenet'
    args.nworkers = 1
    args.batch_size = 1
    args.pc_augm_scale = 0
    args.pc_augm_rot = 0
    args.pc_augm_mirror_prob = 0
    args.pc_augm_jitter = 0
    args.inpts = 2048
    args.scene = '01'
    data_processes = []
    data_queue = Queue(1)
    for i in range(args.nworkers):
        data_processes.append(
            ShapenetDataProcess(data_queue, args, split='train', repeat=False))
        data_processes[-1].start()

    i = 0
    for targets, clouds_data in get_while_running(data_processes, data_queue,
                                                  0.5):
        inp = clouds_data[1][0].squeeze().T
        targets = targets[0]
        print(i, inp.shape, targets.shape)
        plot_pcds(None, [inp.squeeze(), targets.squeeze()], ['partial', 'gt'],
                  use_color=[0, 0],
                  color=[None, None])
        i += 1

    kill_data_processes(data_queue, data_processes)
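ShapenetDataProcess and get_while_running are project-specific helpers; the smoke test above is, at its core, the standard multiprocessing producer/consumer pattern. A minimal self-contained sketch of that pattern with a toy worker (all names here are illustrative, not taken from the project):

import multiprocessing as mp

def toy_worker(queue, n_batches):
    # Producer: push a few fake (targets, clouds_data) pairs, then exit.
    for i in range(n_batches):
        queue.put(('targets_%d' % i, 'clouds_%d' % i))

if __name__ == '__main__':
    queue = mp.Queue(1)
    proc = mp.Process(target=toy_worker, args=(queue, 3))
    proc.start()
    for _ in range(3):
        targets, clouds_data = queue.get()
        print(targets, clouds_data)
    proc.join()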
Example #3
def test(split, args):
    """ Evaluated model on test set """
    print("Testing....")
    args.model.eval()

    data_queue, data_processes = data_setup(args,
                                            split,
                                            num_workers=1,
                                            repeat=False)

    meters = []
    lnm = ['loss']
    Nl = len(lnm)
    for i in range(Nl):
        meters.append(tnt.meter.AverageValueMeter())

    t0 = time.time()

    N = len(data_processes[0].data_paths)
    Nb = int(N / args.batch_size)
    if Nb * args.batch_size < N:
        Nb += 1
    # iterate over dataset in batches
    for bidx in tqdm(range(Nb)):
        targets, clouds_data = data_queue.get()
        t_loader = 1000 * (time.time() - t0)
        t0 = time.time()

        loss, dist1, dist2, emd_cost, outputs = args.step(
            args, targets, clouds_data)

        t_trainer = 1000 * (time.time() - t0)
        losses = [loss]
        for ix, l in enumerate(losses):
            meters[ix].add(l)

        logging.debug('Batch loss %f, Loader time %f ms, Trainer time %f ms.',
                      loss, t_loader, t_trainer)
        t0 = time.time()

    kill_data_processes(data_queue, data_processes)
    return [meters[ix].value()[0] for ix in range(Nl)]
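tnt.meter.AverageValueMeter comes from torchnet; the code only relies on add() accumulating scalar losses and value() returning the running mean as its first element, which is why the return statement indexes [0]. A minimal stand-in with the same two-method interface, as a sketch in case torchnet is unavailable (this is not the library's implementation):

class RunningAverageMeter(object):
    """Accumulate scalars with add(); value() returns a tuple whose first
    element is the running mean, matching the value()[0] usage above."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def add(self, value):
        self.sum += float(value)
        self.count += 1

    def value(self):
        mean = self.sum / self.count if self.count else float('nan')
        return (mean,)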
Example #4
def metrics(split, args, epoch=0):
    """Compute per-class generator distance and EMD errors on `split` and
    write them, together with the class means, to a results file in args.odir."""
    print("Metrics ....")
    db_name = split
    args.model.eval()
    Gerror = defaultdict(list)
    Gerror_emd = defaultdict(list)
    data_queue, data_processes = data_setup(args,
                                            split,
                                            num_workers=1,
                                            repeat=False)
    N = len(data_processes[0].data_paths)
    Nb = int(N / args.batch_size)
    if Nb * args.batch_size < N:
        Nb += 1
    # iterate over dataset in batches
    for bidx in tqdm(range(Nb)):
        targets, clouds_data = data_queue.get()
        loss, dist1, dist2, emd_cost, outputs = args.step(
            args, targets, clouds_data)
        dgens = batch_instance_metrics(args, dist1, dist2)
        for idx in range(targets.shape[0]):
            if hasattr(args, 'classmap'):
                classname = args.classmap[clouds_data[0][idx].split('/')[0]]
            else:
                # Fall back to a single bucket when no class map is available,
                # otherwise `classname` would be undefined here.
                classname = 'all'
            Gerror[classname].append(dgens[idx])
            Gerror_emd[classname].append(emd_cost[idx])
    kill_data_processes(data_queue, data_processes)
    Gm_errors = []
    Gm_errors_emd = []
    outfile = args.odir + '/results_%s_%d' % (db_name, epoch + 1)
    if args.eval:
        outfile = args.odir + '/eval_%s_%d' % (db_name, epoch)
    print("Saving results to %s ..." % (outfile))
    with open(outfile, 'w') as f:
        f.write('#ParametersTotal %d\n' % (args.nparams))
        f.write('#ParametersEncoder %d\n' % (args.enc_params))
        f.write('#ParametersDecoder %d\n' % (args.dec_params))
        for classname in list(Gerror.keys()):
            Gmean_error_emd = np.mean(Gerror_emd[classname])
            Gm_errors_emd.append(Gmean_error_emd)
            Gmean_error = np.mean(Gerror[classname])
            Gm_errors.append(Gmean_error)
            f.write('%s Generator_emd %.6f\n' % (classname, Gmean_error_emd))
            f.write('%s Generator_dist %.6f\n' % (classname, Gmean_error))
        f.write('Generator Class Mean EMD %.6f\n' % (np.mean(Gm_errors_emd)))
        f.write('Generator Class Mean DIST %.6f\n' % (np.mean(Gm_errors)))
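The aggregation above boils down to grouping per-sample errors by class name and averaging, first within each class and then across classes. A tiny self-contained illustration of the same pattern with toy numbers:

import numpy as np
from collections import defaultdict

errors = defaultdict(list)
for classname, err in [('chair', 0.10), ('chair', 0.14), ('table', 0.08)]:
    errors[classname].append(err)

per_class_mean = {c: float(np.mean(v)) for c, v in errors.items()}
class_mean = float(np.mean(list(per_class_mean.values())))
print(per_class_mean, class_mean)  # per-class means (~0.12, 0.08), then their mean (~0.10)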
Example #5
def main():
    """Entry point: build or resume a model, train it, and run validation
    sampling, metrics and (optionally) benchmark submission at the end."""
    args = parse_args()
    args.num_gpus = len(get_available_gpus())
    eval(args.NET + '_setup')(args)
    set_seed(args.seed)
    setup(args)

    # Create model and optimizer
    if args.resume or args.eval or args.benchmark:
        last_epoch, best_epoch, best_val_loss, num_params, \
            enc_params, dec_params = parse_experiment(args.odir)
        i = last_epoch
        if args.eval or args.benchmark:
            i = best_epoch
        args.resume = model_at(args, i)
        model, stats = tf_resume(args, i)
    else:
        check_overwrite(os.path.join(args.odir, 'trainlog.txt'))
        model = eval(args.NET + '_create_model')(args)
        stats = []

    print('Will save to ' + args.odir)
    if not os.path.exists(args.odir):
        os.makedirs(args.odir)
    if not os.path.exists(args.odir + '/models'):
        os.makedirs(args.odir + '/models')
    with open(os.path.join(args.odir, 'cmdline.txt'), 'w') as f:
        f.write(" ".join([
            "'" + a + "'" if (len(a) == 0 or a[0] != '-') else a
            for a in sys.argv
        ]))

    args.model = model
    args.step = eval(args.NET + '_step')

    # Training loop
    epoch = args.start_epoch
    train_data_queue, train_data_processes = data_setup(args,
                                                        'train',
                                                        args.nworkers,
                                                        repeat=True)
    if args.eval == 0:
        for epoch in range(args.start_epoch, args.epochs):
            print('Epoch {}/{} ({}):'.format(epoch + 1, args.epochs,
                                             args.odir))

            loss = train(args, epoch, train_data_queue,
                         train_data_processes)[0]

            if (epoch + 1) % args.test_nth_epoch == 0 or epoch + 1 == args.epochs:
                loss_val = test('val', args)[0]
                print('-> Train Loss: {}, \tVal loss: {}'.format(
                    loss, loss_val))
                stats.append({
                    'epoch': epoch + 1,
                    'loss': loss,
                    'loss_val': loss_val
                })
            else:
                loss_val = 0
                print('-> Train loss: {}'.format(loss))
                stats.append({'epoch': epoch + 1, 'loss': loss})

            if (epoch + 1) % args.save_nth_epoch == 0 or epoch + 1 == args.epochs:
                with open(os.path.join(args.odir, 'trainlog.txt'),
                          'w') as outfile:
                    json.dump(stats, outfile)

                save_model(args, epoch)
            if (epoch + 1) % args.test_nth_epoch == 0 and epoch + 1 < args.epochs:
                split = 'val'
                predictions = samples(split, args, 20)
                cache_pred(predictions, split, args)
                metrics(split, args, epoch)

            if math.isnan(loss): break

        if len(stats) > 0:
            with open(os.path.join(args.odir, 'trainlog.txt'), 'w') as outfile:
                json.dump(stats, outfile)

    kill_data_processes(train_data_queue, train_data_processes)

    split = 'val'
    predictions = samples(split, args, 20)
    cache_pred(predictions, split, args)
    metrics(split, args, epoch)
    if args.benchmark:
        benchmark_results('test', args)
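The cmdline.txt writer in main() quotes every argument that does not start with a dash by hand. If the goal is simply to log a shell-safe copy of the command line, the standard library already covers this; a small sketch (shlex.join needs Python 3.8+, older versions can fall back to shlex.quote):

import shlex
import sys

def format_cmdline(argv=None):
    # Quote each argument so the logged command can be pasted back into a shell.
    argv = sys.argv if argv is None else argv
    if hasattr(shlex, 'join'):        # Python 3.8+
        return shlex.join(argv)
    return " ".join(shlex.quote(a) for a in argv)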
Example #6
def samples(split, args, N):
    """Run the model on `split` and collect roughly N predictions per class
    (or overall, when no class map is set) for later visualization."""
    print("Sampling ...")
    args.model.eval()

    collected = defaultdict(list)
    predictions = {}
    class_samples = defaultdict(int)
    if hasattr(args, 'classmap'):
        for val in args.classmap:
            class_samples[val[0]] = 0
    else:
        count = 0

    data_queue, data_processes = data_setup(args,
                                            split,
                                            num_workers=1,
                                            repeat=False)
    L = len(data_processes[0].data_paths)
    Nb = int(L / args.batch_size)
    if Nb * args.batch_size < L:
        Nb += 1

    # iterate over dataset in batches
    for bidx in tqdm(range(Nb)):
        targets, clouds_data = data_queue.get()
        run_net = False
        for idx in range(targets.shape[0]):
            if hasattr(args, 'classmap'):
                fname = clouds_data[0][idx][:clouds_data[0][idx].rfind('.')]
                synset = fname.split('/')[-2]
                if class_samples[synset] <= N:
                    run_net = True
                    break
            elif count <= N:
                run_net = True
                break
        if run_net:
            loss, dist1, dist2, emd_cost, outputs = args.step(
                args, targets, clouds_data)
            for idx in range(targets.shape[0]):
                if hasattr(args, 'classmap'):
                    fname = clouds_data[0][idx][:clouds_data[0][idx].rfind('.')]
                    synset = fname.split('/')[-2]
                    if class_samples[synset] > N:
                        continue
                    class_samples[synset] += 1
                else:
                    fname = str(bidx)
                    if count > N:
                        break
                    count += 1
                collected[fname].append(
                    (outputs[idx:idx + 1, ...],
                     targets[idx:idx + 1, ...],
                     clouds_data[1][idx:idx + 1, ...]))
    kill_data_processes(data_queue, data_processes)

    for fname, lst in collected.items():
        o_cpu, t_cpu, inp = list(zip(*lst))
        o_cpu = o_cpu[0]
        t_cpu, inp = t_cpu[0], inp[0]
        predictions[fname] = (inp, o_cpu, t_cpu)
    return predictions
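The final loop only regroups each collected (output, target, input) tuple by position and keeps the first entry per file name. A tiny illustration of the zip(*...) regrouping with toy values:

collected = {'chair_001': [('out0', 'tgt0', 'inp0'), ('out1', 'tgt1', 'inp1')]}
predictions = {}
for fname, lst in collected.items():
    outputs, targets, inputs = zip(*lst)   # regroup by position
    predictions[fname] = (inputs[0], outputs[0], targets[0])
print(predictions)  # {'chair_001': ('inp0', 'out0', 'tgt0')}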