def main():
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
    dataloader = dataset.make_dataloader(32, False, False, nr_workers=4)
    meters = GroupMeters()

    for idx, feed_dict in tqdm_gofor(dataloader):
        feed_dict = GView(feed_dict)

        for i, (p, s, gt) in enumerate(
                zip(feed_dict.program_seq, feed_dict.scene, feed_dict.answer)):
            _, pred = execute_program(p, s)

            # Propagate errors raised inside the symbolic executor.
            if pred[0] == 'error':
                raise pred[1]

            if pred[1] != gt:
                # On the first mismatch, dump the offending program and
                # scene, then drop into an interactive shell for debugging.
                print(p)
                print(s)

                from IPython import embed
                embed()
                from sys import exit
                exit()

            meters.update('accuracy', pred[1] == gt)
        get_current_tqdm().set_description(
            meters.format_simple('Exec:', 'val', compressed=True))

    logger.critical(
        meters.format_simple('Symbolic execution test:',
                             'avg',
                             compressed=False))
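
GView wraps the feed dict so that its keys are readable as attributes (feed_dict.program_seq above). A minimal sketch of that idea, purely illustrative rather than jacinle's actual GView:

class AttrView(dict):
    # Dict wrapper exposing keys as attributes, GView-style.
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

fd = AttrView(program_seq=['scene'], answer='yes')
assert fd.program_seq == fd['program_seq']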
Example #2
def main():
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    if isinstance(preds, dict):
        preds = list(preds.values())
    # Disabled branch kept from development; enable it to transpose each
    # prediction record before flattening.
    if False:
        preds = [transpose_scene(s) for s in preds]
    meter = GroupMeters()

    flattened_objs = [o for s in scenes for o in s['objects']]
    flattened_preds = {
        k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
        for k in preds[0]
    }

    for k, concept_preds in flattened_preds.items():
        kk = def_.word2lemma.get(k, k)  # map the concept word to its lemma
        for i, o in tqdm_gofor(flattened_objs,
                               desc='{}(lemma: {})'.format(k, kk),
                               leave=False):
            # A positive score predicts the concept; compare it with the
            # object's ground-truth attribute value.
            correct = (concept_preds[i] > 0) == (kk == o[def_.concept2attribute[kk]])
            meter.update('acc', correct)
            meter.update(f'acc/{k}', correct)
    print(meter.format_simple('Results:', compressed=False))
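
The flattened_preds comprehension above concatenates per-scene prediction arrays into one array per concept, aligned index-for-index with flattened_objs. A standalone sketch of that pattern with dummy data (the values are illustrative only):

import numpy as np

# Two scenes' worth of per-concept scores: 3 objects, then 2 objects.
preds = [
    {'red': [0.9, -0.2, 0.4], 'cube': [-1.0, 0.3, 0.8]},
    {'red': [-0.5, 1.2], 'cube': [0.1, -0.7]},
]

# Concatenating along the object axis makes index i line up with the
# i-th object of the flattened object list across all scenes.
flattened = {
    k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
    for k in preds[0]
}
print(flattened['red'].shape)  # (5,)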
Example #3
def main():
    logger.critical('Loading the word embedding.')
    vocab, word_embeddings = load_word_embedding(args.vse)

    logger.critical('Building up the model.')
    model = CompletionModel(word_embeddings)
    if args.use_gpu:
        model.cuda()
    model.eval()
    # Disable the cudnn benchmark during evaluation.
    cudnn.benchmark = False

    logger.critical('Loading the dataset.')

    dev_dataset = CompletionDataset(
        vocab, pjoin(args.data_dir, args.dev_img),
        pjoin(args.data_dir, args.dev_cap), mode=args.mode)
    test_dataset = CompletionDataset(
        vocab, pjoin(args.data_dir, args.test_img),
        pjoin(args.data_dir, args.test_cap), mode=args.mode)

    logger.critical('Building up the data loader.')
    dev_dataloader = make_dataloader(
        dev_dataset, num_workers=args.data_workers, batch_size=64,
        shuffle=False, drop_last=False, pin_memory=True)
    test_dataloader = make_dataloader(
        test_dataset, num_workers=args.data_workers, batch_size=64,
        shuffle=False, drop_last=False, pin_memory=True)

    for epoch_id in range(1, 11):
        load_weights(model, pjoin(args.load, 'epoch_{}.pth'.format(epoch_id)))

        for loader in [dev_dataloader, test_dataloader]:
            meters = GroupMeters()

            end = time.time()
            with tqdm_pbar(total=len(loader), leave=False) as pbar:
                for i, data in enumerate(loader):
                    feed_dict = mark_volatile(data)

                    if args.use_gpu:
                        feed_dict = async_copy_to(feed_dict, 0)

                    data_time = time.time() - end
                    end = time.time()

                    output_dict = model(feed_dict)
                    output_dict = as_numpy(output_dict)

                    gpu_time = time.time() - end
                    end = time.time()

                    meters.update(
                        {k: float(v) for k, v in output_dict.items()
                         if k.startswith('top')},
                        n=len(feed_dict['image']))
                    meters.update({'time/data': data_time, 'time/gpu': gpu_time})

                    pbar.set_description(format_meters(
                        'sentid={}'.format(i), meters.val, '{}={:.4f}', ', '))
                    pbar.update()

                    end = time.time()

            print(epoch_id, sorted(meters.avg.items()))
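
The repeated end = time.time() bookkeeping above splits every iteration into data-loading time and model time; Examples #4 and #5 reuse the same pattern. A standalone sketch with placeholder work functions:

import time

def load_batch():
    time.sleep(0.01)  # stands in for pulling a batch from the loader

def run_model():
    time.sleep(0.02)  # stands in for the forward pass

end = time.time()
for _ in range(3):
    load_batch()
    data_time = time.time() - end  # time spent waiting on data
    end = time.time()

    run_model()
    step_time = time.time() - end  # time spent in the model
    end = time.time()

    print('data={:.3f}s step={:.3f}s'.format(data_time, step_time))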
Example #4
    def train_epoch(self, data_loader, meters=None):
        if meters is None:
            meters = GroupMeters()

        self._model.train()
        end = time.time()
        for fd in data_loader:
            data_time = time.time() - end
            end = time.time()
            self.train_step(fd, meters=meters)
            step_time = time.time() - end
            end = time.time()
            meters.update({'time/data': data_time, 'time/step': step_time})
        return meters
Example #5
    def validate(self, data_loader, metric, meters=None):
        if meters is None:
            meters = GroupMeters()

        self._model.eval()
        end = time.time()
        for fd in data_loader:
            data_time = time.time() - end
            end = time.time()
            self.validate_step(fd, metric, meters=meters)
            step_time = time.time() - end
            end = time.time()
            meters.update({'time/data': data_time, 'time/step': step_time})

        return meters.avg
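
All five examples drive the same GroupMeters interface: update() with either a name/value pair or a dict (optionally weighted by n), the .val and .avg properties, and format_simple(). The sketch below illustrates that pattern with signatures inferred from the calls above; it is not jacinle's actual implementation:

class SimpleGroupMeters:
    # Tracks a running sum and count per metric so both the latest
    # value (.val) and the running average (.avg) are available.

    def __init__(self):
        self._sum, self._count, self._last = {}, {}, {}

    def update(self, name_or_dict, value=None, n=1):
        updates = (name_or_dict if isinstance(name_or_dict, dict)
                   else {name_or_dict: value})
        for k, v in updates.items():
            self._sum[k] = self._sum.get(k, 0.0) + float(v) * n
            self._count[k] = self._count.get(k, 0) + n
            self._last[k] = float(v)

    @property
    def val(self):
        return dict(self._last)

    @property
    def avg(self):
        return {k: s / self._count[k] for k, s in self._sum.items()}

    def format_simple(self, caption, values='avg', compressed=True):
        stats = self.avg if values == 'avg' else self.val
        sep = ' ' if compressed else '\n  '
        return caption + sep + sep.join(
            '{}={:.4f}'.format(k, v) for k, v in sorted(stats.items()))

meters = SimpleGroupMeters()
meters.update('accuracy', True)
meters.update({'time/data': 0.01, 'time/step': 0.05}, n=4)
print(meters.format_simple('Stats:'))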