def main():
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
    dataloader = dataset.make_dataloader(32, shuffle=False, drop_last=False, nr_workers=4)
    meters = GroupMeters()

    for idx, feed_dict in tqdm_gofor(dataloader):
        feed_dict = GView(feed_dict)

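        # Run the symbolic executor on each (program, scene) pair and compare with the ground-truth answer.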
        for i, (p, s, gt) in enumerate(
                zip(feed_dict.program_seq, feed_dict.scene, feed_dict.answer)):
            _, pred = execute_program(p, s)

            if pred[0] == 'error':
                raise pred[1]

            if pred[1] != gt:
                print(p)
                print(s)

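                # Drop into an interactive shell to inspect the first failing case, then exit.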
                from IPython import embed
                embed()
                from sys import exit
                exit()

            meters.update('accuracy', pred[1] == gt)
        get_current_tqdm().set_description(
            meters.format_simple('Exec:', 'val', compressed=True))

    logger.critical(
        meters.format_simple('Symbolic execution test:',
                             'avg',
                             compressed=False))
Example #2
def main():
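    # Lay out the dump directory: dumps/<dataset_name>/<desc_name>/<expr>, holding checkpoints/, meta/, and vis/.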
    args.dump_dir = ensure_path(
        osp.join('dumps', args.dataset_name, args.desc_name, args.expr))
    args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
    args.meta_dir = ensure_path(osp.join(args.dump_dir, 'meta'))
    args.vis_dir = osp.join(args.dump_dir, 'vis', args.run_name)

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(args, configs, args.data_image_root,
                            args.data_scenes_json, args.data_questions_json)

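    # A data_split <= 1 is a fraction of the dataset; a larger value is an absolute example count.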
    if args.data_split <= 1:
        dataset_split = int(len(dataset) * args.data_split)
    else:
        dataset_split = int(args.data_split)
    train_dataset, validation_dataset = dataset.split_trainval(dataset_split)

    logger.critical('Building the model.')
    model = desc.make_model(args, train_dataset.unwrapped.vocab)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # from jactorch.parallel import UserScatteredJacDataParallel as JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    if args.load:
        from jactorch.io import load_weights
        if load_weights(model, args.load):
            logger.critical(
                'Loaded weights from pretrained model: "{}".'.format(
                    args.load))

    from jacinle.utils.meter import GroupMeters
    meters = GroupMeters()

    if args.embed:
        from IPython import embed
        embed()

    logger.critical('Building the data loader.')
    validation_dataloader = validation_dataset.make_dataloader(
        args.batch_size,
        shuffle=True,
        drop_last=False,
        nr_workers=args.data_workers)

    model.eval()
    validate_epoch(0, model, validation_dataloader, meters)
    logger.critical(
        meters.format_simple('Validation',
                             {k: v for k, v in meters.avg.items() if v != 0},
                             compressed=False))
    return meters
Example #3
 def _mining_epoch(self, mining_epoch_size, mining_dataset_size):
   """Take exam, collect and update positive dataset and negative dataset"""
   pos_data = RandomlyIterDataset()
   neg_data = RandomlyIterDataset()
   self.model.eval()
   meters = GroupMeters()
   with tqdm_pbar(total=mining_epoch_size) as pbar:
     for i in range(mining_epoch_size):
       message, result = self._get_result(i, meters, mode='mining')
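        # Route the example into the positive or negative pool, up to the target size.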
       positive, number, backup = self._extract_info(result)
       dataset = pos_data if positive else neg_data
       if dataset.size < mining_dataset_size:
         dataset.append((number, backup))
       pbar.set_description(message)
       pbar.update()
        # Stop once both the positive and negative datasets are full.
       if pos_data.size >= mining_dataset_size and \
               neg_data.size >= mining_dataset_size:
         break
   logger.info(meters.format_simple('> Mining: ', compressed=False))
   self._inherit_neg_data(neg_data, self.neg_data, meters, mining_dataset_size)
   self.pos_data = pos_data
   self.neg_data = neg_data
   self._dump_meters(meters, 'mining')
   return meters
Example #4
def main():
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    if isinstance(preds, dict):
        preds = list(preds.values())
    if False:
        preds = [transpose_scene(s) for s in preds]

    # flattened_objs = [o for s in scenes for o in s['objects']]
    # flattened_preds = {
    #     k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
    #     for k in preds[0]
    # }
    meter = GroupMeters()

    '''
    for i, scene in tqdm_gofor(scenes, mininterval=0.5):
        for j in range(len(scene['objects'])):
            test(j, scene['objects'], preds[i], meter)
    '''
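    # Score each prediction against the objects of its corresponding scene.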
    for i, pred in tqdm_gofor(preds, mininterval=0.5):
        scene = scenes[i]
        for j in range(len(scene['objects'])):
            test(j, scene['objects'], pred, meter)

    print(meter.format_simple('Results:', compressed=False))
Example #5
def main():
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    if isinstance(preds, dict):
        preds = list(preds.values())
    if False:
        preds = [transpose_scene(s) for s in preds]
    meter = GroupMeters()

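    # Flatten objects across scenes and concatenate per-concept predictions so indices line up.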
    flattened_objs = [o for s in scenes for o in s['objects']]
    flattened_preds = {
        k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
        for k in preds[0]
    }

    for k, concept_preds in flattened_preds.items():
        # `kk` is the lemma form of the concept; avoid shadowing the outer `preds` list.
        kk = def_.word2lemma.get(k, k)
        for i, o in tqdm_gofor(flattened_objs,
                               desc='{}(lemma: {})'.format(k, kk),
                               leave=False):
            correct = (concept_preds[i] > 0) == (kk == o[def_.concept2attribute[kk]])
            meter.update('acc', correct)
            meter.update(f'acc/{k}', correct)
    print(meter.format_simple('Results:', compressed=False))
Example #6
 def _mining_epoch(self, mining_epoch_size, mining_dataset_size):
     """Take exam, collect and update positive dataset and negative dataset"""
     pos_data = RandomlyIterDataset()
     neg_data = RandomlyIterDataset()
     self.model.eval()
     meters_deter = GroupMeters()
     meters_stoch = GroupMeters()
     disable_pbar = False
     if os.getenv("ONCLUSTER") is not None:
         disable_pbar = True
     with tqdm_pbar(total=mining_epoch_size, disable=disable_pbar) as pbar:
         for i in range(mining_epoch_size):
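              # Alternate between deterministic and stochastic mining on even/odd iterations.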
             if i % 2 == 0:
                 message, result = self._get_result(i,
                                                    meters_deter,
                                                    mode='mining-deter')
             else:
                 message, result = self._get_result(i,
                                                    meters_stoch,
                                                    mode='mining-stoch')
             positive, number, backup = self._extract_info(result)
             dataset = pos_data if positive else neg_data
             if dataset.size < mining_dataset_size:
                 dataset.append((number, backup))
             pbar.set_description(message)
             pbar.update()
              # Stop once both the positive and negative datasets are full.
             if pos_data.size >= mining_dataset_size and \
                     neg_data.size >= mining_dataset_size:
                 break
     logger.info(
         meters_deter.format_simple('> Mining (deter): ', compressed=False))
     logger.info(
         meters_stoch.format_simple('> Mining (stoch): ', compressed=False))
     meters = self.best_meters(meters_deter, meters_stoch)
     self._inherit_neg_data(neg_data, self.neg_data, meters,
                            mining_dataset_size)
     self.pos_data = pos_data
     self.neg_data = neg_data
     self._dump_meters(meters_deter, 'mining-deter')
     self._dump_meters(meters_stoch, 'mining-stoch')
     return meters, meters_deter, meters_stoch
Example #7
  def _test_epoch(self, epoch_size):
    meters = GroupMeters()
    self._prepare_dataset(epoch_size, mode='test')

    def test_func(index):
      message, _ = self._get_result(index, meters, mode='test')
      return message

    tqdm_for(epoch_size, test_func)
    logger.info(meters.format_simple('> Evaluation: ', compressed=False))
    self._dump_meters(meters, 'test')
    return meters
Example #8
def main_train(validation_dataset):
    logger.critical('Building the model.')
    model = desc.make_model(args)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # from jactorch.parallel import UserScatteredJacDataParallel as JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    trainer = TrainerEnv(model, None)

    if args.load:
        if trainer.load_weights(args.load):
            logger.critical(
                'Loaded weights from pretrained model: "{}".'.format(
                    args.load))

    # Create the meters unconditionally: they are used below even when no
    # pretrained weights are loaded.
    from jacinle.utils.meter import GroupMeters
    meters = GroupMeters()

    logger.critical('Building the data loader.')
    validation_dataloader = validation_dataset.make_dataloader(
        args.batch_size,
        shuffle=False,
        drop_last=False,
        nr_workers=args.data_workers)

    meters.reset()
    model.eval()

    if not os.path.isdir(args.output_attr_path):
        os.makedirs(args.output_attr_path)
    validate_attribute(model, validation_dataloader, meters, args.setname,
                       logger, args.output_attr_path)
    logger.critical(
        meters.format_simple(args.setname,
                             {k: v for k, v in meters.avg.items() if v != 0},
                             compressed=False))
    return meters
Example #9
    def train(self,
              data_loader,
              nr_epochs,
              verbose=True,
              meters=None,
              early_stop=None,
              print_interval=1):
        if meters is None:
            meters = GroupMeters()

        for epoch in range(1, 1 + nr_epochs):
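            # Reset so the reported statistics cover only the current epoch.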
            meters.reset()
            self.train_epoch(data_loader, meters=meters)
            if verbose and epoch % print_interval == 0:
                caption = 'Epoch: {}:'.format(epoch)
                logger.info(meters.format_simple(caption))
            if early_stop is not None:
                flag = early_stop(self._model)
                if flag:
                    break
Example #10
  def _train_epoch(self, epoch_size):
    model = self.model
    meters = GroupMeters()
    self._prepare_dataset(epoch_size, mode='train')

    def train_func(index):
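      # Build the training example with the model in eval mode, then switch back to train mode for the update step.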
      model.eval()
      feed_dict = self._get_train_data(index, meters)
      model.train()
      message, _ = self._train_step(feed_dict, meters)
      return message

    # Run train_func for epoch_size iterations, with a tqdm progress bar.
    tqdm_for(epoch_size, train_func)
    logger.info(
        meters.format_simple(
            '> Train Epoch {:5d}: '.format(self.current_epoch),
            compressed=False))
    self._dump_meters(meters, 'train')
    return meters
Example #11
File: eval.py Project: lilujunai/VCML
def main():
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    if isinstance(preds, dict):
        preds = list(preds.values())
    if False:
        preds = [transpose_scene(s) for s in preds]
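    # Only the first 1,000 scenes and predictions are evaluated.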
    scenes = scenes[:1000]
    preds = preds[:1000]

    flattened_objs = [o for s in scenes for o in s['objects']]
    flattened_preds = {
        k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
        for k in preds[0]
    }
    meter = GroupMeters()

    for i, obj in tqdm_gofor(flattened_objs, mininterval=0.5):
        test(i, flattened_objs, flattened_preds, meter)

    print(meter.format_simple('Results:', compressed=False))
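
All of the examples above share one pattern: create a GroupMeters, accumulate per-example statistics with update(), and render a report with format_simple(). The sketch below distills that pattern; it is a minimal illustration that uses only the jacinle.utils.meter calls already seen above (update, reset, avg, format_simple), and check_example is a hypothetical stand-in for any per-example correctness test.

from jacinle.utils.meter import GroupMeters


def check_example(example):
    # Hypothetical placeholder: report whether the prediction matches the answer.
    return example['prediction'] == example['answer']


def evaluate(examples):
    meters = GroupMeters()
    meters.reset()
    for example in examples:
        # update() folds the new value into a running average under the given key.
        meters.update('accuracy', check_example(example))
    # meters.avg maps each key to its running average; format_simple renders it.
    print(meters.format_simple('Results:', compressed=False))
    return meters.avg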