Example #1
def forward(self, *inputs, **kwargs):
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        inputs = async_copy_to(inputs, 0)
        kwargs = async_copy_to(kwargs, 0)
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)
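
A note on this pattern: the forward pass scatters the inputs across `self.device_ids`, replicates the module once per shard, runs the replicas in parallel, and gathers the outputs on `self.output_device`; with a single device it skips replication and just issues an asynchronous copy to GPU 0. A minimal sketch of the same flow using plain PyTorch's `nn.DataParallel` (toy module; the multi-device path assumes at least two visible GPUs):

import torch
import torch.nn as nn

# Toy stand-in module; nn.DataParallel implements the same
# scatter -> replicate -> parallel_apply -> gather flow shown above.
model = nn.DataParallel(nn.Linear(8, 4), device_ids=[0, 1]).cuda()
x = torch.randn(16, 8)   # scattered as rows 0-7 -> GPU 0, rows 8-15 -> GPU 1
y = model(x)             # outputs gathered back on device_ids[0]
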
Example #2
def validate_epoch(epoch, trainer, val_dataloader, meters, meter_prefix='validation'):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end; end = time.time()

            output_dict, extra_info = trainer.evaluate(feed_dict, cast_tensor=False)
            monitors = {meter_prefix + '/' + k: v for k, v in as_float(output_dict['monitors']).items()}
            step_time = time.time() - end; end = time.time()

            n = feed_dict['image'].size(0)
            meters.update(monitors, n=n)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(meters.format_simple(
                'Epoch {} (validation)'.format(epoch),
                {k: v for k, v in meters.val.items() if k.startswith('validation') and k.count('/') <= 2},
                compressed=True
            ))
            pbar.update()

            end = time.time()
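
Examples #2-#4 (and the training loops further below) share one timing idiom: `end` is reset after every phase, so `time/data` measures how long the loop waited on the dataloader and `time/step` measures the evaluate/step call itself. A stripped-down sketch of just that idiom, with no jactorch dependencies (`do_work` is a placeholder):

import time

def timed_loop(dataloader, do_work):
    # Minimal sketch of the time/data vs. time/step bookkeeping used above.
    end = time.time()
    for batch in dataloader:
        data_time = time.time() - end   # time spent waiting on the dataloader
        end = time.time()

        do_work(batch)
        step_time = time.time() - end   # time spent in the model / trainer call
        end = time.time()

        print('data: {:.3f}s, step: {:.3f}s'.format(data_time, step_time))
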
Example #3
def validate_epoch(model, val_dataloader, meters):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            output_dict = model(feed_dict)

            # TODO(Jiayuan Mao @ 04/26): compute the monitoring values.
            monitors = as_float(output_dict['monitors'])
            step_time = time.time() - end
            end = time.time()

            # TODO(Jiayuan Mao @ 04/23): normalize the loss/other metrics by adding n=xxx if applicable.
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(
                meters.format_simple('Test', 'val', compressed=True))
            pbar.update()

            end = time.time()
Example #4
def validate_epoch(epoch, trainer, val_dataloader, meters):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end; end = time.time()

            output_dict, extra_info = trainer.evaluate(feed_dict)

            # TODO(Jiayuan Mao @ 04/26): compute the monitoring values.
            monitors = as_float(output_dict['monitors'])
            step_time = time.time() - end; end = time.time()

            # TODO(Jiayuan Mao @ 04/23): normalize the loss/other metrics by adding n=xxx if applicable.
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(meters.format_simple(
                'Epoch {} (validation)'.format(epoch),
                {k: v for k, v in meters.val.items() if k.startswith('validation') and k.count('/') <= 2},
                compressed=True
            ), refresh=False)
            pbar.update()

            end = time.time()
Example #5
def main_one_shot(prototype_dataset, one_shot_dataset, model, epoch, trainer,
                  meters, batch):
    gdef.attribute_concepts["shape"].append("pepper")
    gdef.attribute_concepts["shape"].append("garlic")
    gdef.attribute_concepts["shape"].append("cheese")
    proto_loader = prototype_dataset.make_dataloader(1,
                                                     False,
                                                     drop_last=True,
                                                     nr_workers=1)
    proto_iterator = iter(proto_loader)
    for feed_dict in proto_iterator:
        feed_dict = async_copy_to(feed_dict, 0)
        model.add_concept(feed_dict)
    model = model.cuda()
    model.eval()
    validate_epoch(
        epoch,
        trainer,
        one_shot_dataset.make_dataloader(batch,
                                         False,
                                         drop_last=True,
                                         nr_workers=4),
        meters,
        meter_prefix="one_shot",
    )
Example #6
def _async_copy(inputs, device_ids):
    nr_devs = len(device_ids)
    assert type(inputs) in (tuple, list)
    assert len(inputs) == nr_devs

    outputs = []
    for i, dev in zip(inputs, device_ids):
        with cuda.device(dev):
            outputs.append(async_copy_to(i, dev))

    return tuple(outputs)
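
`_async_copy` pairs each already-scattered input shard with its device and copies it under that device's context. As a rough sketch of what a recursive `async_copy_to` helper does (an assumption about its behavior, not jactorch's actual implementation), it walks nested containers and issues non-blocking host-to-device copies:

import torch

def async_copy_to_sketch(obj, dev):
    # Recursively move tensors to `dev` with non-blocking copies. Note that
    # non-blocking H2D transfers only overlap with compute when the source
    # CPU tensors live in pinned (page-locked) memory.
    if torch.is_tensor(obj):
        return obj.cuda(dev, non_blocking=True)
    if isinstance(obj, dict):
        return {k: async_copy_to_sketch(v, dev) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(async_copy_to_sketch(v, dev) for v in obj)
    return obj  # non-tensor leaves (ints, strings, ...) pass through unchanged
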
Example #7
def main():
    logger.critical('Loading the word embedding.')
    vocab, word_embeddings = load_word_embedding(args.vse)

    logger.critical('Building up the model.')
    model = CompletionModel(word_embeddings)
    if args.use_gpu:
        model.cuda()
    # Disable the cudnn benchmark.
    model.eval()
    cudnn.benchmark = False

    logger.critical('Loading the dataset.')

    dev_dataset = CompletionDataset(vocab, pjoin(args.data_dir, args.dev_img), pjoin(args.data_dir, args.dev_cap), mode=args.mode)
    test_dataset = CompletionDataset(vocab, pjoin(args.data_dir, args.test_img), pjoin(args.data_dir, args.test_cap), mode=args.mode)

    logger.critical('Building up the data loader.')
    dev_dataloader = make_dataloader(dev_dataset, num_workers=args.data_workers, batch_size=64, shuffle=False, drop_last=False, pin_memory=True)
    test_dataloader = make_dataloader(test_dataset, num_workers=args.data_workers, batch_size=64, shuffle=False, drop_last=False, pin_memory=True)

    for epoch_id in range(1, 11):
        load_weights(model, pjoin(args.load, 'epoch_{}.pth'.format(epoch_id)))

        for loader in [dev_dataloader, test_dataloader]:
            meters = GroupMeters()

            end = time.time()
            with tqdm_pbar(total=len(loader), leave=False) as pbar:
                for i, data in enumerate(loader):
                    feed_dict = data
                    feed_dict = mark_volatile(feed_dict)

                    if args.use_gpu:
                        feed_dict = async_copy_to(feed_dict, 0)

                    data_time = time.time() - end; end = time.time()

                    output_dict = model(feed_dict)
                    output_dict = as_numpy(output_dict)

                    gpu_time = time.time() - end; end = time.time()

                    meters.update({k: float(v) for k, v in output_dict.items() if k.startswith('top')}, n=len(feed_dict['image']))
                    meters.update({'time/data': data_time, 'time/gpu': gpu_time})

                    pbar.set_description(format_meters('sentid={}'.format(i), meters.val, '{}={:.4f}', ', '))
                    pbar.update()

                    end = time.time()

            print(epoch_id, sorted(meters.avg.items()))
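
One caveat on Example #7: `mark_volatile` comes from the pre-0.4 Variable API; in modern PyTorch the equivalent of marking inference inputs volatile is running the forward pass under `torch.no_grad()`, as Example #8 below already does. A minimal sketch (toy model in place of the real one):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)   # stand-in for the real model
model.eval()              # switch off dropout / batch-norm updates
with torch.no_grad():     # modern replacement for the volatile / mark_volatile idiom
    out = model(torch.randn(3, 4))
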
Example #8
def main():
    logger.critical('Loading the dataset.')
    data = io.load(args.caption)
    # Step 1: filter out images.
    images = {c['image_id'] for c in data['annotations']}
    # Step 2: build a reverse mapping for images.
    id2image = {i['id']: i for i in data['images']}
    images = [id2image[i] for i in images]

    import torchvision.transforms as T
    image_transform = T.Compose([
        T.Resize((args.image_size, args.image_size)),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    dataset = COCOImageDataset(images, args.image_root, image_transform)

    logger.critical('Building the model.')

    model = FeatureExtractor()
    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        cudnn.benchmark = True

    model.eval()
    dataloader = dataset.make_dataloader(args.batch_size,
                                         shuffle=False,
                                         drop_last=False,
                                         nr_workers=args.data_workers)
    output_file = io.open_h5(args.output, 'w')
    writer = AsyncWriter(output_file, total_size=len(dataset))

    for feed_dict in tqdm(dataloader,
                          total=len(dataloader),
                          desc='Extracting features'):
        if args.use_gpu:
            feed_dict = async_copy_to(feed_dict, 0)

        with torch.no_grad():
            output_dict = model(feed_dict)

        writer.feed(output_dict)

    writer.join()
    output_file.close()

    io.dump(args.output_images_json, images)
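
A related detail: Example #7 builds its dataloaders with `pin_memory=True`. Pinned (page-locked) host buffers are what let non-blocking copies such as `async_copy_to` overlap the transfer with computation; without pinning, the copy degrades to a synchronous one. A minimal sketch of the pairing (toy dataset; assumes a visible CUDA device 0):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(128, 4))
# pin_memory=True hands out page-locked batches, the prerequisite for
# non_blocking=True H2D copies to actually run asynchronously.
loader = DataLoader(dataset, batch_size=32, pin_memory=True)
for (batch,) in loader:
    batch = batch.cuda(0, non_blocking=True)
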
Example #9
def train_epoch(epoch, trainer, train_dataloader, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_dataloader)

    meters.update(epoch=epoch)
    if args.dataset == 'blocks' and epoch == 6:
        keep_only_temporal_concept_learner(trainer, args, configs)

    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)
    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = time.time() - end; end = time.time()
            loss, monitors, output_dict, extra_info = trainer.step(feed_dict, cast_tensor=False)
            step_time = time.time() - end; end = time.time()

            n = len(feed_dict)
            meters.update(loss=loss, n=n)

            for tmp_key, tmp_value in monitors.items():
                if isinstance(tmp_value, list):
                    for sub_idx, sub_value in enumerate(tmp_value):
                        if sub_value[0] == -1:
                            continue
                        meters.update({tmp_key: sub_value[0]}, n=sub_value[1])
                elif tmp_value == -1:
                    continue
                else:
                    meters.update({tmp_key: tmp_value}, n=1)

            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(meters.format_simple(
                'Epoch {}'.format(epoch),
                {k: v for k, v in meters.val.items() if not k.startswith('validation') and k != 'epoch' and k.count('/') <= 1},
                compressed=True
            ))
            pbar.update()

            end = time.time()

    trainer.trigger_event('epoch:after', trainer, epoch)
Example #10
def train_epoch(epoch, trainer, train_dataloader, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_dataloader)

    meters.update(epoch=epoch)

    trainer.trigger_event("epoch:before", trainer, epoch)
    train_iter = iter(train_dataloader)

    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)

            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            loss, monitors, output_dict, extra_info = trainer.step(
                feed_dict, cast_tensor=False)
            step_time = time.time() - end
            end = time.time()

            n = feed_dict["image"].size(0)
            meters.update(loss=loss, n=n)
            meters.update(monitors, n=n)
            meters.update({"time/data": data_time, "time/step": step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(
                meters.format_simple(
                    "Epoch {}".format(epoch),
                    {
                        k: v
                        for k, v in meters.val.items()
                        if not k.startswith("validation") and k != "epoch"
                        and k.count("/") <= 1
                    },
                    compressed=True,
                ))
            pbar.update()

            end = time.time()

    trainer.trigger_event("epoch:after", trainer, epoch)
Example #11
def _async_copy_stream(inputs, device_ids):
    nr_devs = len(device_ids)
    assert type(inputs) in (tuple, list)
    assert len(inputs) == nr_devs

    outputs = []
    streams = [_get_stream(d) for d in device_ids]
    for i, dev, stream in zip(inputs, device_ids, streams):
        with cuda.device(dev):
            main_stream = cuda.current_stream()
            with cuda.stream(stream):
                outputs.append(async_copy_to(i, dev, main_stream=main_stream))
            main_stream.wait_stream(stream)

    return outputs
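
`_async_copy_stream` goes one step further than `_async_copy`: each copy is issued on a dedicated side stream, and `main_stream.wait_stream(stream)` makes subsequent kernels on the main stream wait for the transfer without blocking the host. A minimal single-tensor sketch of the same pattern in plain PyTorch (assumes a visible CUDA device):

import torch

def copy_on_side_stream(cpu_tensor, dev=0):
    # Sketch: overlapped H2D copy on a side stream, synchronized via wait_stream.
    copy_stream = torch.cuda.Stream(device=dev)
    with torch.cuda.device(dev):
        main_stream = torch.cuda.current_stream()
        with torch.cuda.stream(copy_stream):
            gpu_tensor = cpu_tensor.pin_memory().cuda(dev, non_blocking=True)
        # Later kernels on the main stream wait for the copy to complete.
        main_stream.wait_stream(copy_stream)
        # Tie the tensor's lifetime to the main stream so the caching
        # allocator does not reuse its memory while the copy is in flight.
        gpu_tensor.record_stream(main_stream)
    return gpu_tensor
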
Example #12
def train_epoch(epoch, trainer, train_dataloader, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_dataloader)

    meters.update(epoch=epoch)

    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)

    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)

            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            loss, monitors, output_dict, extra_info = trainer.step(feed_dict)
            step_time = time.time() - end
            end = time.time()

            # TODO(Jiayuan Mao @ 04/23): normalize the loss/monitors by adding n=xxx if applicable.
            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            # TODO(Jiayuan Mao @ 04/23): customize the logger.
            pbar.set_description(meters.format_simple(
                'Epoch {}'.format(epoch), {
                    k: v
                    for k, v in meters.val.items()
                    if not k.startswith('validation') and k.count('/') <= 1
                },
                compressed=True),
                                 refresh=False)
            pbar.update()

            end = time.time()

    trainer.trigger_event('epoch:after', trainer, epoch)
Example #13
def validate_epoch(epoch, trainer, val_dataloader, meters, meter_prefix='validation'):
    if args.testing_flag:
        json_output_list = []
    
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader) * args.batch_size) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end; end = time.time()
            output_dict_list, extra_info = trainer.evaluate(feed_dict, cast_tensor=False)
            if args.testing_flag:
                prepare_data_for_testing(output_dict_list, feed_dict, json_output_list)

            step_time = time.time() - end; end = time.time()
            for idx, mon_dict in enumerate(output_dict_list['monitors']):
                monitors = {meter_prefix + '/' + k: v for k, v in as_float(mon_dict).items()}
                # remove padding values
                for tmp_key, tmp_value in monitors.items():
                    if isinstance(tmp_value, list):
                        for sub_idx, sub_value in enumerate(tmp_value):
                            if sub_value[0] == -1:
                                continue
                            meters.update({tmp_key: sub_value[0]}, n=sub_value[1])
                    elif tmp_value == -1:
                        continue
                    else:
                        meters.update({tmp_key: tmp_value}, n=1)
                
                meters.update({'time/data': data_time, 'time/step': step_time})
                if args.use_tb:
                    meters.flush()

                pbar.set_description(meters.format_simple(
                    'Epoch {} (validation)'.format(epoch),
                    {k: v for k, v in meters.val.items() if k.startswith('validation') and k.count('/') <= 2},
                    compressed=True
                ))
                pbar.update()

            end = time.time()
    if args.testing_flag == 1:
        jsondump(args.test_result_path, json_output_list)
Example #14
def validate_epoch(epoch,
                   trainer,
                   val_dataloader,
                   meters,
                   meter_prefix="validation"):
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if not args.gpu_parallel:
                    feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end
            end = time.time()

            output_dict, extra_info = trainer.evaluate(feed_dict,
                                                       cast_tensor=False)
            monitors = {
                meter_prefix + "/" + k: v
                for k, v in as_float(output_dict["monitors"]).items()
            }
            step_time = time.time() - end
            end = time.time()

            n = feed_dict["image"].size(0)
            meters.update(monitors, n=n)
            meters.update({"time/data": data_time, "time/step": step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(
                meters.format_simple(
                    "Epoch {} (validation)".format(epoch),
                    {
                        k: v
                        for k, v in meters.val.items()
                        if (k.startswith(meter_prefix)) and k.count("/") <= 2
                    },
                    compressed=True,
                ))
            pbar.update()

            end = time.time()
Example #15
def train_epoch(epoch, train_loader, trainer, meters):
    nr_iters = args.iters_per_epoch
    if nr_iters == 0:
        nr_iters = len(train_loader)

    meters.reset()
    end = time.time()

    trainer.trigger_event('epoch:before', trainer, epoch)

    train_iter = iter(train_loader)
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)

            if not args.gpu_parallel and args.use_gpu:
                feed_dict = async_copy_to(feed_dict, 0)

            data_time = time.time() - end; end = time.time()

            loss, monitors, output_dict, extra_info = trainer.step(feed_dict)
            step_time = time.time() - end; end = time.time()

            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})

            if args.use_tb:
                meters.flush()

            pbar.set_description(format_meters('iter={}/{}'.format(epoch, i),
                {k: v for k, v in meters.val.items() if k.startswith('loss') or k.startswith('time')},
                '{}={:.4f}', ', '))
            pbar.update()

            end = time.time()

    trainer.trigger_event('epoch:after', trainer, epoch)
Example #16
def validate_epoch(epoch,
                   model,
                   val_dataloader,
                   meters,
                   meter_prefix='validation'):
    end = time.time()

    visualized = 0
    vis = HTMLTableVisualizer(args.vis_dir, 'NSCL Execution Visualization')
    vis.begin_html()

    try:
        with tqdm_pbar(total=len(val_dataloader)) as pbar:
            for feed_dict in val_dataloader:
                if args.use_gpu:
                    if not args.gpu_parallel:
                        feed_dict = async_copy_to(feed_dict, 0)

                data_time = time.time() - end
                end = time.time()

                output_dict = model(feed_dict)
                monitors = {
                    meter_prefix + '/' + k: v
                    for k, v in as_float(output_dict['monitors']).items()
                }
                step_time = time.time() - end
                end = time.time()

                n = feed_dict['image'].size(0)
                meters.update(monitors, n=n)
                meters.update({'time/data': data_time, 'time/step': step_time})

                feed_dict = GView(as_detached(as_cpu(feed_dict)))
                output_dict = GView(as_detached(as_cpu(output_dict)))

                for i in range(n):
                    with vis.table(
                            'Visualize #{} Metainfo'.format(visualized), [
                                HTMLTableColumnDesc('id', 'QID', 'text',
                                                    {'width': '50px'}),
                                HTMLTableColumnDesc('image', 'Image', 'figure',
                                                    {'width': '400px'}),
                                HTMLTableColumnDesc('qa', 'QA', 'text',
                                                    {'width': '200px'}),
                                HTMLTableColumnDesc('p', 'Program', 'code',
                                                    {'width': '200px'})
                            ]):
                        image_filename = osp.join(args.data_image_root,
                                                  feed_dict.image_filename[i])
                        image = Image.open(image_filename)
                        fig, ax = vis_bboxes(image,
                                             feed_dict.objects_raw[i],
                                             'object',
                                             add_text=False)
                        _ = ax.set_title('object bounding box annotations')
                        QA_string = """
                            <p><b>Q</b>: {}</p>
                            <p><b>A</b>: {}</p>
                        """.format(feed_dict.question_raw[i],
                                   feed_dict.answer[i])
                        P_string = '\n'.join(
                            [repr(x) for x in feed_dict.program_seq[i]])

                        vis.row(id=i, image=fig, qa=QA_string, p=P_string)
                        plt.close()

                    with vis.table(
                            'Visualize #{} Metainfo'.format(visualized), [
                                HTMLTableColumnDesc('id', 'QID', 'text',
                                                    {'width': '50px'}),
                                HTMLTableColumnDesc('image', 'Image', 'figure',
                                                    {'width': '400px'}),
                                HTMLTableColumnDesc('mask', 'Mask', 'figure',
                                                    {'width': '700px'})
                            ]):
                        image_filename = osp.join(args.data_image_root,
                                                  feed_dict.image_filename[i])
                        image = Image.open(image_filename)
                        fig, ax = vis_bboxes(image,
                                             feed_dict.objects_raw[i],
                                             'object',
                                             add_text=False)
                        _ = ax.set_title('object bounding box annotations')
                        if not args.show_mask:
                            montage = fig
                        else:
                            num_slots = output_dict['monet/m'].shape[1]
                            monet_fig = [
                                [
                                    tensor2im(output_dict['monet/m'][i, k])
                                    for k in range(num_slots)
                                ],
                                [
                                    tensor2im(output_dict['monet/x'][i, k])
                                    for k in range(num_slots)
                                ],
                                [
                                    tensor2im(output_dict['monet/xm'][i, k])
                                    for k in range(num_slots)
                                ],
                                [tensor2im(output_dict['monet/x_input'][i])] +
                                [
                                    tensor2im(output_dict['monet/x_tilde'][i])
                                    for k in range(num_slots - 1)
                                ]
                            ]
                            montage = montage_fig(monet_fig)
                        vis.row(id=i, image=fig, mask=montage)
                        plt.close()

                    with vis.table('Visualize #{} Trace'.format(visualized), [
                            HTMLTableColumnDesc('id', 'Step', 'text',
                                                {'width': '50px'}),
                            HTMLTableColumnDesc('image', 'Image', 'figure',
                                                {'width': '600px'}),
                            HTMLTableColumnDesc('p', 'operation', 'text',
                                                {'width': '200px'}),
                            HTMLTableColumnDesc('r', 'result', 'code',
                                                {'width': '200px'})
                    ]):
                        # TODO(Jiayuan Mao @ 11/20): support output_dict.programs.
                        for j, (prog, buf) in enumerate(
                                zip(feed_dict.program_seq[i],
                                    output_dict.buffers[i])):
                            if j != len(feed_dict.program_seq[i]) - 1 and (
                                    buf > 0
                            ).long().sum().item() > 0 and buf.size(
                                    0) == feed_dict.objects_raw[i].shape[0]:
                                this_objects = feed_dict.objects_raw[i][
                                    torch.nonzero(buf > 0)[:, 0].numpy()]
                                fig, ax = vis_bboxes(image,
                                                     this_objects,
                                                     'object',
                                                     add_text=False)
                            else:
                                fig, ax = vis_bboxes(image, [],
                                                     'object',
                                                     add_text=False)
                            vis.row(id=j, image=fig, p=repr(prog), r=repr(buf))
                            plt.close()

                    visualized += 1
                    if visualized > args.nr_visualize:
                        raise StopIteration()

                pbar.set_description(
                    meters.format_simple(
                        'Epoch {} (validation)'.format(epoch), {
                            k: v
                            for k, v in meters.val.items()
                            if k.startswith('validation') and k.count('/') <= 1
                        },
                        compressed=True))
                pbar.update()

                end = time.time()
    except StopIteration:
        pass

    from jacinle.utils.meta import dict_deep_kv
    from jacinle.utils.printing import kvformat
    with vis.table('Info', [
            HTMLTableColumnDesc('name', 'Name', 'code', {}),
            HTMLTableColumnDesc('info', 'KV', 'code', {})
    ]):
        vis.row(name='args', info=kvformat(args.__dict__, max_key_len=32))
        vis.row(name='configs',
                info=kvformat(dict(dict_deep_kv(configs)), max_key_len=32))
    vis.end_html()

    logger.info(
        'Happy Holiday! You can find your result at "http://monday.csail.mit.edu/xiuming'
        + osp.realpath(args.vis_dir) + '".')

def validate_attribute(model,
                       val_dataloader,
                       meters,
                       meter_prefix='validation',
                       logger=None,
                       output_attr_path=''):
    end = time.time()
    video_num = len(val_dataloader)
    with tqdm_pbar(total=int(len(val_dataloader) * args.batch_size /
                             128)) as pbar:
        output_dict_list = []
        frame_id_list = []
        for feed_dict_list in val_dataloader:
            for idx, feed_dict in enumerate(feed_dict_list):
                scene_idx = feed_dict['meta_ann']['scene_index']
                full_path = os.path.join(
                    output_attr_path,
                    'attribute_' + str(scene_idx).zfill(5) + '.json')
                if os.path.isfile(full_path):
                    print('File exists. %s\n' % (full_path))
                    tmp_dict = jsonload(full_path)
                    if len(tmp_dict) == len(
                            feed_dict['tube_info']['box_seq']['tubes'][0]):
                        continue
                    print("size didn't match. %s\n" % full_path)
                if args.use_gpu:
                    if not args.gpu_parallel:
                        feed_dict = async_copy_to(feed_dict, 0)
                frm_id = feed_dict['frm_id']
                data_time = time.time() - end
                end = time.time()

                f_scene = model.resnet(feed_dict['img'])
                f_sng = model.scene_graph(f_scene, feed_dict)
                output_dict = parse_scene(feed_dict, f_sng,
                                          model.reasoning.embedding_attribute,
                                          frm_id)
                output_dict_list.append(output_dict)
                frame_id_list.append(frm_id)

                step_time = time.time() - end
                end = time.time()
                if frm_id == len(
                        feed_dict['tube_info']['box_seq']['tubes'][0]) - 1:
                    video_attr_list = []
                    for idx, result_dict in enumerate(output_dict_list):
                        mon_dict = result_dict.pop('monitors')
                        result_dict['frm_id'] = frame_id_list[idx]
                        video_attr_list.append(result_dict)
                        monitors = {
                            meter_prefix + '/' + k: v
                            for k, v in as_float(mon_dict).items()
                        }

                        n = 1
                        meters.update(monitors, n=n)
                        meters.update({
                            'time/data': data_time,
                            'time/step': step_time
                        })

                    jsondump(full_path, video_attr_list)

                    if args.use_tb:
                        meters.flush()

                    pbar.set_description(
                        meters.format_simple('({})'.format(args.setname), {
                            k: v
                            for k, v in meters.val.items()
                            if k.startswith('validation') and k.count('/') <= 2
                        },
                                             compressed=True))
                    pbar.update()

                    end = time.time()
                    output_dict_list = []
                    frame_id_list = []
                    if logger is not None:
                        logger.critical(
                            meters.format_simple(meter_prefix, {
                                k: v
                                for k, v in meters.avg.items() if v != 0
                            },
                                                 compressed=False))