Example #1
def validate_epoch(epoch, trainer, val_dataloader, meters):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end; end = time.time()

            output_dict, extra_info = trainer.evaluate(feed_dict)

            # TODO(Jiayuan Mao @ 04/26): compute the monitoring values.
            monitors = as_float(output_dict['monitors'])
            step_time = time.time() - end; end = time.time()

            # TODO(Jiayuan Mao @ 04/23): normalize the loss/other metrics by adding n=xxx if applicable.
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(meters.format_simple(
                'Epoch {} (validation)'.format(epoch),
                {k: v for k, v in meters.val.items() if k.startswith('validation') and k.count('/') <= 2},
                compressed=True
            ), refresh=False)
            pbar.update()

            end = time.time()
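
Usage note (a minimal driver sketch, not from the source; args.epochs is a hypothetical flag, while GroupMeters, logger, meters.avg, and meters.reset follow the other examples on this page):

meters = GroupMeters()
for epoch in range(1, args.epochs + 1):
    # Hypothetical outer loop: validate once per epoch, log the running averages, then reset.
    validate_epoch(epoch, trainer, val_dataloader, meters)
    logger.info(meters.format_simple('Epoch {} (validation)'.format(epoch), meters.avg, compressed=False))
    meters.reset()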
Example #2
def validate_epoch(model, val_dataloader, meters):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            output_dict = model(feed_dict)

            # TODO(Jiayuan Mao @ 04/26): compute the monitoring values.
            monitors = as_float(output_dict['monitors'])
            step_time = time.time() - end
            end = time.time()

            # TODO(Jiayuan Mao @ 04/23): normalize the loss/other metrics by adding n=xxx if applicable.
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(
                meters.format_simple('Test', 'val', compressed=True))
            pbar.update()

            end = time.time()
Example #3
def validate_epoch(epoch, trainer, val_dataloader, meters, meter_prefix='validation'):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end; end = time.time()

            output_dict, extra_info = trainer.evaluate(feed_dict, cast_tensor=False)
            monitors = {meter_prefix + '/' + k: v for k, v in as_float(output_dict['monitors']).items()}
            step_time = time.time() - end; end = time.time()

            n = feed_dict['image'].size(0)
            meters.update(monitors, n=n)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(meters.format_simple(
                'Epoch {} (validation)'.format(epoch),
                {k: v for k, v in meters.val.items() if k.startswith('validation') and k.count('/') <= 2},
                compressed=True
            ))
            pbar.update()

            end = time.time()
Example #4
 def _mining_epoch(self, mining_epoch_size, mining_dataset_size):
   """Take exam, collect and update positive dataset and negative dataset"""
   pos_data = RandomlyIterDataset()
   neg_data = RandomlyIterDataset()
   self.model.eval()
   meters = GroupMeters()
   with tqdm_pbar(total=mining_epoch_size) as pbar:
     for i in range(mining_epoch_size):
       message, result = self._get_result(i, meters, mode='mining')
       positive, number, backup = self._extract_info(result)
       dataset = pos_data if positive else neg_data
       if dataset.size < mining_dataset_size:
         dataset.append((number, backup))
       pbar.set_description(message)
       pbar.update()
       # When both positive and negative dataset are full, break.
       if pos_data.size >= mining_dataset_size and \
               neg_data.size >= mining_dataset_size:
         break
   logger.info(meters.format_simple('> Mining: ', compressed=False))
   self._inherit_neg_data(neg_data, self.neg_data, meters, mining_dataset_size)
   self.pos_data = pos_data
   self.neg_data = neg_data
   self._dump_meters(meters, 'mining')
   return meters
Example #5
    def map(self,
            func,
            iterable,
            chunksize=1,
            sort=True,
            total=None,
            desc='',
            callback=None,
            use_tqdm=True,
            update_interval=0.1,
            update_iters=1,
            **kwargs):

        if total is None and isinstance(iterable, collections.abc.Sized):
            total = len(iterable)
        if use_tqdm:
            pbar = tqdm_pbar(total=total, **kwargs)
            with pbar:
                return super().map(func,
                                   iterable,
                                   chunksize,
                                   sort,
                                   callback=self._wrap_callback(
                                       callback,
                                       pbar,
                                       desc,
                                       update_interval=update_interval,
                                       update_iters=update_iters))
        else:
            return super().map(func,
                               iterable,
                               chunksize,
                               sort,
                               callback=callback)
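
Usage note (a sketch under stated assumptions: TQDMPool is a hypothetical name for the pool subclass that defines the map above, and square is a toy worker function):

def square(x):
    return x * x

pool = TQDMPool(8)  # hypothetical constructor: a process pool with 8 workers
results = pool.map(square, range(1000), desc='squaring')  # range is Sized, so total is inferred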
Example #6
def tqdm_for(total, func):
    """wrapper of the for function with message showing on the progress bar."""
    # Not support break cases for now.
    with tqdm_pbar(total=total) as pbar:
        for i in range(total):
            message = func(i)
            pbar.set_description(message)
            pbar.update()
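
Usage note (a minimal sketch; work_on is a hypothetical callback, and tqdm_for shows whatever string it returns as the bar description):

def work_on(i):
    # ... do the actual work for item i ...
    return 'item {} done'.format(i)

tqdm_for(100, work_on)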
Example #7
def tqdm_for(total, func):
  """wrapper of the for function with message showing on the progress bar."""
  # Not support break cases for now.
  disable = False
  if os.getenv("ONCLUSTER") is not None:
    disable = True
  with tqdm_pbar(total=total, disable=disable) as pbar:
    for i in range(total):
      message = func(i)
      pbar.set_description(message)
      pbar.update()
Example #8
def main():
    logger.critical('Loading the word embedding.')
    vocab, word_embeddings = load_word_embedding(args.vse)

    logger.critical('Building up the model.')
    model = CompletionModel(word_embeddings)
    if args.use_gpu:
        model.cuda()
    # Disable the cudnn benchmark.
    model.eval()
    cudnn.benchmark = False

    logger.critical('Loading the dataset.')

    dev_dataset = CompletionDataset(vocab, pjoin(args.data_dir, args.dev_img), pjoin(args.data_dir, args.dev_cap), mode=args.mode)
    test_dataset = CompletionDataset(vocab, pjoin(args.data_dir, args.test_img), pjoin(args.data_dir, args.test_cap), mode=args.mode)

    logger.critical('Building up the data loader.')
    dev_dataloader = make_dataloader(dev_dataset, num_workers=args.data_workers, batch_size=64, shuffle=False, drop_last=False, pin_memory=True)
    test_dataloader = make_dataloader(test_dataset, num_workers=args.data_workers, batch_size=64, shuffle=False, drop_last=False, pin_memory=True)

    for epoch_id in range(1, 11):
        load_weights(model, pjoin(args.load, 'epoch_{}.pth'.format(epoch_id)))

        for loader in [dev_dataloader, test_dataloader]:
            meters = GroupMeters()

            end = time.time()
            with tqdm_pbar(total=len(loader), leave=False) as pbar:
                for i, data in enumerate(loader):
                    feed_dict = data
                    feed_dict = mark_volatile(feed_dict)

                    if args.use_gpu:
                        feed_dict = async_copy_to(feed_dict, 0)

                    data_time = time.time() - end; end = time.time()

                    output_dict = model(feed_dict)
                    output_dict = as_numpy(output_dict)

                    gpu_time = time.time() - end; end = time.time()

                    meters.update({k: float(v) for k, v in output_dict.items() if k.startswith('top')}, n=len(feed_dict['image']))
                    meters.update({'time/data': data_time, 'time/gpu': gpu_time})

                    pbar.set_description(format_meters('sentid={}'.format(i), meters.val, '{}={:.4f}', ', '))
                    pbar.update()

                    end = time.time()

            print(epoch_id, sorted(meters.avg.items()))
Example #9
def train_epoch(epoch, trainer, train_dataloader, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_dataloader)

    meters.update(epoch=epoch)

    trainer.trigger_event("epoch:before", trainer, epoch)
    train_iter = iter(train_dataloader)

    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)

            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            loss, monitors, output_dict, extra_info = trainer.step(
                feed_dict, cast_tensor=False)
            step_time = time.time() - end
            end = time.time()

            n = feed_dict["image"].size(0)
            meters.update(loss=loss, n=n)
            meters.update(monitors, n=n)
            meters.update({"time/data": data_time, "time/step": step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(
                meters.format_simple(
                    "Epoch {}".format(epoch),
                    {
                        k: v
                        for k, v in meters.val.items()
                        if not k.startswith("validation") and k != "epoch"
                        and k.count("/") <= 1
                    },
                    compressed=True,
                ))
            pbar.update()

            end = time.time()

    trainer.trigger_event("epoch:after", trainer, epoch)
Example #10
def train_epoch(epoch, trainer, train_dataloader, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_dataloader)

    meters.update(epoch=epoch)
    if args.dataset == 'blocks' and epoch == 6:
        keep_only_temporal_concept_learner(trainer, args, configs)

    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)
    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = time.time() - end; end = time.time()
            loss, monitors, output_dict, extra_info = trainer.step(feed_dict, cast_tensor=False)
            step_time = time.time() - end; end = time.time()

            n = len(feed_dict)
            meters.update(loss=loss, n=n)

            for tmp_key, tmp_value in monitors.items():
                if isinstance(tmp_value, list):
                    for sub_idx, sub_value in enumerate(tmp_value):
                        if sub_value[0] == -1:
                            continue
                        meters.update({tmp_key: sub_value[0]}, n=sub_value[1])
                elif tmp_value == -1:
                    continue
                else:
                    meters.update({tmp_key: tmp_value}, n=1)

            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(meters.format_simple(
                'Epoch {}'.format(epoch),
                {k: v for k, v in meters.val.items() if not k.startswith('validation') and k != 'epoch' and k.count('/') <= 1},
                compressed=True
            ))
            pbar.update()

            end = time.time()

    trainer.trigger_event('epoch:after', trainer, epoch)
Example #11
def train_epoch(epoch, trainer, train_dataloader, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_dataloader)

    meters.update(epoch=epoch)

    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)

    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)

            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            loss, monitors, output_dict, extra_info = trainer.step(feed_dict)
            step_time = time.time() - end
            end = time.time()

            # TODO(Jiayuan Mao @ 04/23): normalize the loss/monitors by adding n=xxx if applicable.
            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            # TODO(Jiayuan Mao @ 04/23): customize the logger.
            pbar.set_description(meters.format_simple(
                'Epoch {}'.format(epoch), {
                    k: v
                    for k, v in meters.val.items()
                    if not k.startswith('validation') and k.count('/') <= 1
                },
                compressed=True),
                                 refresh=False)
            pbar.update()

            end = time.time()

    trainer.trigger_event('epoch:after', trainer, epoch)
Example #12
def download(url, dirname, cli=True, filename=None, md5=None):
    """
    Download URL to a directory. Will figure out the filename automatically from URL.
    Will figure out the filename automatically from URL, if not given."""
    # From: https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/utils/fs.py

    if cli:
        from jacinle.cli.keyboard import maybe_mkdir
        maybe_mkdir(dirname)
    else:
        assert osp.isdir(dirname)

    filename = filename or url.split('/')[-1]
    path = os.path.join(dirname, filename)

    def hook(t):
        last_b = [0]

        def inner(b, bsize, tsize=None):
            if tsize is not None:
                t.total = tsize
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b

        return inner

    try:
        with tqdm_pbar(unit='B', unit_scale=True, miniters=1,
                       desc=filename) as pbar:
            path, _ = urllib.request.urlretrieve(url,
                                                 path,
                                                 reporthook=hook(pbar))
        statinfo = os.stat(path)
        size = statinfo.st_size
    except Exception:
        print('Failed to download {}.'.format(url))
        raise
    assert size > 0, "Downloaded an empty file!"
    print('Successfully downloaded ' + filename + " " + fsize_format(size) +
          '.')

    if md5 is not None:
        assert check_integrity(
            path, md5), 'Integrity check for {} failed'.format(path)

    return path
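
Usage note (a sketch with a placeholder URL and MD5, not real values):

# Hypothetical call: download into ./data and verify integrity with the given checksum.
path = download('https://example.com/dataset.tar.gz', './data', md5='0123456789abcdef0123456789abcdef')
print('saved to', path)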
Example #13
def validate_epoch(epoch, trainer, val_dataloader, meters, meter_prefix='validation'):
    if args.testing_flag:
        json_output_list = []
    
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)*args.batch_size) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)
            #pdb.set_trace()
            data_time = time.time() - end; end = time.time()
            output_dict_list, extra_info = trainer.evaluate(feed_dict, cast_tensor=False)
            if args.testing_flag:
                prepare_data_for_testing(output_dict_list, feed_dict, json_output_list)

            step_time = time.time() - end; end = time.time()
            for idx, mon_dict in enumerate(output_dict_list['monitors']):
                monitors = {meter_prefix + '/' + k: v for k, v in as_float(mon_dict).items()}
                # remove padding values
                for tmp_key, tmp_value in monitors.items():
                    if isinstance(tmp_value, list):
                        for sub_idx, sub_value in enumerate(tmp_value):
                            if sub_value[0] == -1:
                                continue
                            meters.update({tmp_key: sub_value[0]}, n=sub_value[1])
                    elif tmp_value == -1:
                        continue
                    else:
                        meters.update({tmp_key: tmp_value}, n=1)
                
                meters.update({'time/data': data_time, 'time/step': step_time})
                if args.use_tb:
                    meters.flush()

                pbar.set_description(meters.format_simple(
                    'Epoch {} (validation)'.format(epoch),
                    {k: v for k, v in meters.val.items() if k.startswith('validation') and k.count('/') <= 2},
                    compressed=True
                ))
                pbar.update()

            end = time.time()
    if args.testing_flag == 1:
        jsondump(args.test_result_path, json_output_list)
Example #14
def validate_epoch(epoch,
                   trainer,
                   val_dataloader,
                   meters,
                   meter_prefix="validation"):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            output_dict, extra_info = trainer.evaluate(feed_dict,
                                                       cast_tensor=False)
            monitors = {
                meter_prefix + "/" + k: v
                for k, v in as_float(output_dict["monitors"]).items()
            }
            step_time = time.time() - end
            end = time.time()

            n = feed_dict["image"].size(0)
            meters.update(monitors, n=n)
            meters.update({"time/data": data_time, "time/step": step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(
                meters.format_simple(
                    "Epoch {} (validation)".format(epoch),
                    {
                        k: v
                        for k, v in meters.val.items()
                        if (k.startswith(meter_prefix)) and k.count("/") <= 2
                    },
                    compressed=True,
                ))
            pbar.update()

            end = time.time()
Example #15
 def _mining_epoch(self, mining_epoch_size, mining_dataset_size):
     """Take exam, collect and update positive dataset and negative dataset"""
     pos_data = RandomlyIterDataset()
     neg_data = RandomlyIterDataset()
     self.model.eval()
     meters_deter = GroupMeters()
     meters_stoch = GroupMeters()
     disable_pbar = False
     if os.getenv("ONCLUSTER") is not None:
         disable_pbar = True
     with tqdm_pbar(total=mining_epoch_size, disable=disable_pbar) as pbar:
         for i in range(mining_epoch_size):
             if i % 2 == 0:
                 message, result = self._get_result(i,
                                                    meters_deter,
                                                    mode='mining-deter')
             else:
                 message, result = self._get_result(i,
                                                    meters_stoch,
                                                    mode='mining-stoch')
             positive, number, backup = self._extract_info(result)
             dataset = pos_data if positive else neg_data
             if dataset.size < mining_dataset_size:
                 dataset.append((number, backup))
             pbar.set_description(message)
             pbar.update()
             # When both positive and negative dataset are full, break.
             if pos_data.size >= mining_dataset_size and \
                     neg_data.size >= mining_dataset_size:
                 break
     logger.info(
         meters_deter.format_simple('> Mining (deter): ', compressed=False))
     logger.info(
         meters_stoch.format_simple('> Mining (stoch): ', compressed=False))
     meters = self.best_meters(meters_deter, meters_stoch)
     self._inherit_neg_data(neg_data, self.neg_data, meters,
                            mining_dataset_size)
     self.pos_data = pos_data
     self.neg_data = neg_data
     self._dump_meters(meters_deter, 'mining-deter')
     self._dump_meters(meters_stoch, 'mining-stoch')
     return meters, meters_deter, meters_stoch
Example #16
 def map(self,
         func,
         iterable,
         chunksize=1,
         sort=True,
         total=None,
         desc='',
         callback=None,
         use_tqdm=True,
         **kwargs):
     if total is None and isinstance(iterable, collections.abc.Sized):
         total = len(iterable)
     if use_tqdm:
         pbar = tqdm_pbar(total=total, **kwargs)
         super().map(func,
                     iterable,
                     chunksize,
                     sort,
                     callback=self._wrap_callback(callback, pbar, desc))
     else:
         super().map(func, iterable, chunksize, sort, callback=callback)
Example #17
def train_epoch(epoch, train_loader, trainer, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_loader)

    meters.reset()
    end = time.time()

    trainer.trigger_event('epoch:before', trainer, epoch)

    train_iter = iter(train_loader)
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)

            if not args.gpu_parallel and args.use_gpu:
                feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end; end = time.time()

            loss, monitors, output_dict, extra_info = trainer.step(feed_dict)
            step_time = time.time() - end; end = time.time()

            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(format_meters('iter={}/{}'.format(epoch, i),
                {k: v for k, v in meters.val.items() if k.startswith('loss') or k.startswith('time')},
                '{}={:.4f}', ', '))
            pbar.update()

            end = time.time()

    trainer.trigger_event('epoch:after', trainer, epoch)
Example #18
def validate_epoch(epoch,
                   model,
                   val_dataloader,
                   meters,
                   meter_prefix='validation'):
    end = time.time()

    visualized = 0
    vis = HTMLTableVisualizer(args.vis_dir, 'NSCL Execution Visualization')
    vis.begin_html()

    try:
        with tqdm_pbar(total=len(val_dataloader)) as pbar:
            for feed_dict in val_dataloader:
                if args.use_gpu:
                    if not args.gpu_parallel:
                        feed_dict = async_copy_to(feed_dict, 0)

                data_time = time.time() - end
                end = time.time()

                output_dict = model(feed_dict)
                monitors = {
                    meter_prefix + '/' + k: v
                    for k, v in as_float(output_dict['monitors']).items()
                }
                step_time = time.time() - end
                end = time.time()

                n = feed_dict['image'].size(0)
                meters.update(monitors, n=n)
                meters.update({'time/data': data_time, 'time/step': step_time})

                feed_dict = GView(as_detached(as_cpu(feed_dict)))
                output_dict = GView(as_detached(as_cpu(output_dict)))

                for i in range(n):
                    with vis.table(
                            'Visualize #{} Metainfo'.format(visualized), [
                                HTMLTableColumnDesc('id', 'QID', 'text',
                                                    {'width': '50px'}),
                                HTMLTableColumnDesc('image', 'Image', 'figure',
                                                    {'width': '400px'}),
                                HTMLTableColumnDesc('qa', 'QA', 'text',
                                                    {'width': '200px'}),
                                HTMLTableColumnDesc('p', 'Program', 'code',
                                                    {'width': '200px'})
                            ]):
                        image_filename = osp.join(args.data_image_root,
                                                  feed_dict.image_filename[i])
                        image = Image.open(image_filename)
                        fig, ax = vis_bboxes(image,
                                             feed_dict.objects_raw[i],
                                             'object',
                                             add_text=False)
                        _ = ax.set_title('object bounding box annotations')
                        QA_string = """
                            <p><b>Q</b>: {}</p>
                            <p><b>A</b>: {}</p>
                        """.format(feed_dict.question_raw[i],
                                   feed_dict.answer[i])
                        P_string = '\n'.join(
                            [repr(x) for x in feed_dict.program_seq[i]])

                        vis.row(id=i, image=fig, qa=QA_string, p=P_string)
                        plt.close()

                    with vis.table(
                            'Visualize #{} Metainfo'.format(visualized), [
                                HTMLTableColumnDesc('id', 'QID', 'text',
                                                    {'width': '50px'}),
                                HTMLTableColumnDesc('image', 'Image', 'figure',
                                                    {'width': '400px'}),
                                HTMLTableColumnDesc('mask', 'Mask', 'figure',
                                                    {'width': '700px'})
                            ]):
                        image_filename = osp.join(args.data_image_root,
                                                  feed_dict.image_filename[i])
                        image = Image.open(image_filename)
                        fig, ax = vis_bboxes(image,
                                             feed_dict.objects_raw[i],
                                             'object',
                                             add_text=False)
                        _ = ax.set_title('object bounding box annotations')
                        if not args.show_mask:
                            montage = fig
                        else:
                            num_slots = output_dict['monet/m'].shape[1]
                            monet_fig = [
                                [
                                    tensor2im(output_dict['monet/m'][i, k])
                                    for k in range(num_slots)
                                ],
                                [
                                    tensor2im(output_dict['monet/x'][i, k])
                                    for k in range(num_slots)
                                ],
                                [
                                    tensor2im(output_dict['monet/xm'][i, k])
                                    for k in range(num_slots)
                                ],
                                [tensor2im(output_dict['monet/x_input'][i])] +
                                [
                                    tensor2im(output_dict['monet/x_tilde'][i])
                                    for k in range(num_slots - 1)
                                ]
                            ]
                            montage = montage_fig(monet_fig)
                        vis.row(id=i, image=fig, mask=montage)
                        plt.close()

                    with vis.table('Visualize #{} Trace'.format(visualized), [
                            HTMLTableColumnDesc('id', 'Step', 'text',
                                                {'width': '50px'}),
                            HTMLTableColumnDesc('image', 'Image', 'figure',
                                                {'width': '600px'}),
                            HTMLTableColumnDesc('p', 'operation', 'text',
                                                {'width': '200px'}),
                            HTMLTableColumnDesc('r', 'result', 'code',
                                                {'width': '200px'})
                    ]):
                        # TODO(Jiayuan Mao @ 11/20): support output_dict.programs.
                        for j, (prog, buf) in enumerate(
                                zip(feed_dict.program_seq[i],
                                    output_dict.buffers[i])):
                            if j != len(feed_dict.program_seq[i]) - 1 and (
                                    buf > 0
                            ).long().sum().item() > 0 and buf.size(
                                    0) == feed_dict.objects_raw[i].shape[0]:
                                this_objects = feed_dict.objects_raw[i][
                                    torch.nonzero(buf > 0)[:, 0].numpy()]
                                fig, ax = vis_bboxes(image,
                                                     this_objects,
                                                     'object',
                                                     add_text=False)
                            else:
                                fig, ax = vis_bboxes(image, [],
                                                     'object',
                                                     add_text=False)
                            vis.row(id=j, image=fig, p=repr(prog), r=repr(buf))
                            plt.close()

                    visualized += 1
                    if visualized > args.nr_visualize:
                        raise StopIteration()

                pbar.set_description(
                    meters.format_simple(
                        'Epoch {} (validation)'.format(epoch), {
                            k: v
                            for k, v in meters.val.items()
                            if k.startswith('validation') and k.count('/') <= 1
                        },
                        compressed=True))
                pbar.update()

                end = time.time()
    except StopIteration:
        pass

    from jacinle.utils.meta import dict_deep_kv
    from jacinle.utils.printing import kvformat
    with vis.table('Info', [
            HTMLTableColumnDesc('name', 'Name', 'code', {}),
            HTMLTableColumnDesc('info', 'KV', 'code', {})
    ]):
        vis.row(name='args', info=kvformat(args.__dict__, max_key_len=32))
        vis.row(name='configs',
                info=kvformat(dict(dict_deep_kv(configs)), max_key_len=32))
    vis.end_html()

    logger.info(
        'Happy Holiday! You can find your result at "http://monday.csail.mit.edu/xiuming'
        + osp.realpath(args.vis_dir) + '".')
Example #19
def validate_attribute(model,
                       val_dataloader,
                       meters,
                       meter_prefix='validation',
                       logger=None,
                       output_attr_path=''):
    end = time.time()
    video_num = len(val_dataloader)
    #pdb.set_trace()
    with tqdm_pbar(total=int(len(val_dataloader) * args.batch_size /
                             128)) as pbar:
        output_dict_list = []
        frame_id_list = []
        for feed_dict_list in val_dataloader:
            #for vid in range(video_num):
            end_frm_flag = False
            #while (not end_frm_flag):
            for idx, feed_dict in enumerate(feed_dict_list):
                scene_idx = feed_dict['meta_ann']['scene_index']
                full_path = os.path.join(
                    output_attr_path,
                    'attribute_' + str(scene_idx).zfill(5) + '.json')
                if os.path.isfile(full_path):
                    print('File exists. %s\n' % (full_path))
                    tmp_dict = jsonload(full_path)
                    if len(tmp_dict) == len(
                            feed_dict['tube_info']['box_seq']['tubes'][0]):
                        continue
                    print('size didn\'t match. %s\n' % (full_path))
                    #pdb.set_trace()
                if args.use_gpu:
                    if not args.gpu_parallel:
                        feed_dict = async_copy_to(feed_dict, 0)
                frm_id = feed_dict['frm_id']
                data_time = time.time() - end
                end = time.time()

                f_scene = model.resnet(feed_dict['img'])
                f_sng = model.scene_graph(f_scene, feed_dict)
                output_dict = parse_scene(feed_dict, f_sng,
                                          model.reasoning.embedding_attribute,
                                          frm_id)
                #pdb.set_trace()
                output_dict_list.append(output_dict)
                frame_id_list.append(frm_id)

                step_time = time.time() - end
                end = time.time()
                if frm_id == len(
                        feed_dict['tube_info']['box_seq']['tubes'][0]) - 1:
                    video_attr_list = []
                    for idx, result_dict in enumerate(output_dict_list):
                        mon_dict = result_dict.pop('monitors')
                        result_dict['frm_id'] = frame_id_list[idx]
                        video_attr_list.append(result_dict)
                        monitors = {
                            meter_prefix + '/' + k: v
                            for k, v in as_float(mon_dict).items()
                        }

                        n = 1
                        meters.update(monitors, n=n)
                        meters.update({
                            'time/data': data_time,
                            'time/step': step_time
                        })

                    jsondump(full_path, video_attr_list)

                    if args.use_tb:
                        meters.flush()

                    pbar.set_description(
                        meters.format_simple('({})'.format(args.setname), {
                            k: v
                            for k, v in meters.val.items()
                            if k.startswith('validation') and k.count('/') <= 2
                        },
                                             compressed=True))
                    pbar.update()

                    end = time.time()
                    output_dict_list = []
                    frame_id_list = []
                    if logger is not None:
                        logger.critical(
                            meters.format_simple(meter_prefix, {
                                k: v
                                for k, v in meters.avg.items() if v != 0
                            },
                                                 compressed=False))