# Exemplo n.º 1
# 0
def main():
    """Evaluate a model on the validation split and return the meters.

    Relies on module-level globals set up elsewhere in this file:
    ``args``, ``configs``, ``desc``, ``logger``, plus the helpers
    ``ensure_path`` / ``initialize_dataset`` / ``get_dataset_builder`` /
    ``validate_epoch`` and (optionally) ``jactorch.io.load_weights``.
    """
    # Prepare the dump / checkpoint / meta / visualization directories.
    args.dump_dir = ensure_path(
        osp.join('dumps', args.dataset_name, args.desc_name, args.expr))
    args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
    args.meta_dir = ensure_path(osp.join(args.dump_dir, 'meta'))
    args.vis_dir = osp.join(args.dump_dir, 'vis', args.run_name)

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(args, configs, args.data_image_root,
                            args.data_scenes_json, args.data_questions_json)

    # args.data_split <= 1 is interpreted as a fraction of the dataset;
    # larger values are taken as an absolute number of training samples.
    dataset_split = int(len(dataset) *
                        args.data_split) if args.data_split <= 1 else int(
                            args.data_split)
    train_dataset, validation_dataset = dataset.split_trainval(dataset_split)

    logger.critical('Building the model.')
    model = desc.make_model(args, train_dataset.unwrapped.vocab)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # from jactorch.parallel import UserScatteredJacDataParallel as JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    if args.load:
        from jactorch.io import load_weights
        if load_weights(model, args.load):
            logger.critical(
                'Loaded weights from pretrained model: "{}".'.format(
                    args.load))

    from jacinle.utils.meter import GroupMeters
    meters = GroupMeters()

    # Optionally drop into an interactive shell for debugging.
    if args.embed:
        from IPython import embed
        embed()

    logger.critical('Building the data loader.')
    # NOTE(review): shuffle=True on a validation loader is unusual (the other
    # evaluation paths in this file use shuffle=False) — confirm intentional.
    validation_dataloader = validation_dataset.make_dataloader(
        args.batch_size,
        shuffle=True,
        drop_last=False,
        nr_workers=args.data_workers)

    model.eval()
    validate_epoch(0, model, validation_dataloader, meters)
    # Only report meters with non-zero averages.
    logger.critical(
        meters.format_simple('Validation',
                             {k: v
                              for k, v in meters.avg.items() if v != 0},
                             compressed=False))
    return meters
def main_train(validation_dataset):
    """Run attribute validation on ``validation_dataset`` and return meters.

    Despite the name, this function only evaluates: it builds the model,
    optionally loads pretrained weights into a ``TrainerEnv``, and runs
    ``validate_attribute`` over the validation loader, dumping results to
    ``args.output_attr_path``. Relies on module-level globals: ``args``,
    ``desc``, ``logger``, ``TrainerEnv``, ``validate_attribute``, ``cudnn``.
    """
    logger.critical('Building the model.')
    model = desc.make_model(args)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # from jactorch.parallel import UserScatteredJacDataParallel as JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    # No optimizer: this trainer is only used for weight loading.
    trainer = TrainerEnv(model, None)

    if args.load:
        if trainer.load_weights(args.load):
            logger.critical(
                'Loaded weights from pretrained model: "{}".'.format(
                    args.load))

    # BUG FIX: `meters` was previously created inside the `if args.load:`
    # block, raising a NameError at `meters.reset()` below whenever no
    # pretrained weights were specified.
    from jacinle.utils.meter import GroupMeters
    meters = GroupMeters()

    logger.critical('Building the data loader.')
    validation_dataloader = validation_dataset.make_dataloader(
        args.batch_size,
        shuffle=False,
        drop_last=False,
        nr_workers=args.data_workers)

    meters.reset()
    model.eval()

    # Ensure the attribute-output directory exists before writing into it.
    os.makedirs(args.output_attr_path, exist_ok=True)
    validate_attribute(model, validation_dataloader, meters, args.setname,
                       logger, args.output_attr_path)
    # Only report meters with non-zero averages.
    logger.critical(
        meters.format_simple(args.setname,
                             {k: v
                              for k, v in meters.avg.items() if v != 0},
                             compressed=False))
    return meters
# Exemplo n.º 3
# 0
def main():
    """Demonstrate user-scattered multi-GPU data loading and gathering."""
    from jactorch.data.dataloader import JacDataLoader, JacDataLoaderMultiGPUWrapper
    from jactorch.data.collate import VarLengthCollateV3
    from jactorch.parallel import JacDataParallel
    from jactorch.train.env import TrainerEnv, default_reduce_func

    # Build a variable-length-aware loader, then wrap it for multiple GPUs.
    dataset = MyDataset()
    collate = VarLengthCollateV3({
        'x': 'concat',
        'y': 'concat'
    })
    base_loader = JacDataLoader(dataset,
                                batch_size=8,
                                collate_fn=collate,
                                shuffle=True,
                                drop_last=True,
                                num_workers=0)
    dataloader = JacDataLoaderMultiGPUWrapper(base_loader, args.gpus)

    # user_scattered=True: the wrapper above already splits batches per GPU.
    model = JacDataParallel(MyModel(),
                            user_scattered=True,
                            dict_gather_layout={
                                'z': 'concat',
                                'devices': 'skip'
                            })
    model.cuda()
    optimizer = optim.SGD(model.parameters(), 1e-4)

    env = TrainerEnv(model, optimizer)

    def custom_reduce_func(k, v):
        # Reduction only affects the loss and the monitors.
        if '_max' in k:
            return v.max()
        if '_sum' in k:
            return v.sum()
        return default_reduce_func(k, v)

    feed_dict = next(iter(dataloader))
    loss, monitors, outputs, _ = env.step(feed_dict,
                                          reduce_func=custom_reduce_func)

    # feed_dict is a List[Dict] with one entry per GPU; each dict holds the
    # keys x, y, x_length, y_length. The tensors x and y are concatenated
    # along the batch dimension, while {x,y}_length are int tensors of size
    # [batch_size] recording per-item lengths.
    jacinle.stprint(feed_dict)
    # outputs gathers results from all GPUs according to dict_gather_layout:
    # 'concat' concatenates tensors across GPUs (adding an auxiliary int64
    # z_length tensor of size [nr_devs] with each device's dim0 size);
    # 'skip' leaves the raw per-device values untouched for manual handling.
    jacinle.stprint(outputs)
    jacinle.stprint(monitors)
# Exemplo n.º 4
# 0
def main():
    """Extract CNN features for every image referenced by a caption file.

    Loads COCO-style caption annotations, collects the unique images they
    reference, runs ``FeatureExtractor`` over them, streams the outputs into
    an HDF5 file via ``AsyncWriter``, and dumps the image list as JSON.
    Relies on module-level globals: ``args``, ``logger``, ``io``,
    ``COCOImageDataset``, ``FeatureExtractor``, ``AsyncWriter``,
    ``async_copy_to``, ``cudnn``, ``torch``, ``tqdm``.
    """
    logger.critical('Loading the dataset.')
    data = io.load(args.caption)
    # Step 1: filter out images.
    images = {c['image_id'] for c in data['annotations']}
    # Step 2: build a reverse mapping for images.
    id2image = {i['id']: i for i in data['images']}
    images = [id2image[i] for i in images]

    import torchvision.transforms as T
    # Resize to a fixed square size, then apply the canonical ImageNet
    # normalization statistics.
    image_transform = T.Compose([
        T.Resize((args.image_size, args.image_size)),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    dataset = COCOImageDataset(images, args.image_root, image_transform)

    logger.critical('Building the model.')

    model = FeatureExtractor()
    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # Input size is fixed by the Resize above, so cudnn autotuning is safe.
        cudnn.benchmark = True

    model.eval()
    dataloader = dataset.make_dataloader(args.batch_size,
                                         shuffle=False,
                                         drop_last=False,
                                         nr_workers=args.data_workers)
    output_file = io.open_h5(args.output, 'w')
    writer = AsyncWriter(output_file, total_size=len(dataset))

    for feed_dict in tqdm(dataloader,
                          total=len(dataloader),
                          desc='Extracting features'):
        if args.use_gpu:
            feed_dict = async_copy_to(feed_dict, 0)

        # Inference only; no gradients needed.
        with torch.no_grad():
            output_dict = model(feed_dict)

        writer.feed(output_dict)

    # Wait for all pending writes before closing the HDF5 file.
    writer.join()
    output_file.close()

    io.dump(args.output_images_json, images)
# Exemplo n.º 5
# 0
def main_train(train_dataset, validation_dataset, extra_dataset=None):
    """Train the model with an optional curriculum; evaluate if requested.

    Relies on module-level globals: ``args``, ``configs``, ``desc``,
    ``logger``, ``TrainerEnv``, ``train_epoch`` / ``validate_epoch``,
    ``cudnn`` and ``osp``. Returns the meters when ``args.evaluate`` is set.
    """
    logger.critical('Building the model.')
    model = desc.make_model(args, train_dataset.unwrapped.vocab)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # from jactorch.parallel import UserScatteredJacDataParallel as JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW

        # BUG FIX: the original draft contained a malformed comprehension
        # (a duplicated condition fragment), imported the non-existent
        # `torch.optim.RMSProp` (the class is spelled `RMSprop`), tested
        # tensor membership with `in` (which compares tensors element-wise),
        # and never bound the `optimizer` name required below.
        monet_parameters = [
            p for name, p in model.named_parameters()
            if p.requires_grad and 'monet' in name
        ]
        monet_ids = {id(p) for p in monet_parameters}
        nscl_parameters = [
            p for p in model.parameters()
            if p.requires_grad and id(p) not in monet_ids
        ]
        # NOTE(review): the draft apparently intended RMSprop for the MoNet
        # branch. A single AdamW with two parameter groups is used here so
        # that one optimizer object drives AccumGrad / TrainerEnv below —
        # confirm whether a composite optimizer is actually required.
        optimizer = AdamW(
            [{'params': nscl_parameters}, {'params': monet_parameters}],
            args.lr, weight_decay=configs.train.weight_decay)

    if args.acc_grad > 1:
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int(args.iters_per_epoch / args.acc_grad)))

    trainer = TrainerEnv(model, optimizer)

    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        if trainer.load_weights(args.load):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))

    if args.use_tb and not args.debug:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()

    if not args.debug:
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))

    if args.clip_grad:
        logger.info('Registering the clip_grad hook: {}.'.format(args.clip_grad))

        def clip_grad(self, loss):
            # Clip after backward, before the optimizer step.
            from torch.nn.utils import clip_grad_norm_
            clip_grad_norm_(self.model.parameters(), max_norm=args.clip_grad)
        trainer.register_event('backward:after', clip_grad)

    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)

    if args.embed:
        from IPython import embed; embed()

    logger.critical('Building the data loader.')
    validation_dataloader = validation_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)
    if extra_dataset is not None:
        extra_dataloader = extra_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)

    if args.evaluate:
        meters.reset()
        model.eval()
        validate_epoch(0, trainer, validation_dataloader, meters)
        if extra_dataset is not None:
            validate_epoch(0, trainer, extra_dataloader, meters, meter_prefix='validation_extra')
        logger.critical(meters.format_simple('Validation', {k: v for k, v in meters.avg.items() if v != 0}, compressed=False))
        return meters

    # Curriculum schedule: (start_epoch, max_scene_size, max_program_size).
    # The final sentinel entry terminates the search below.
    curriculum_strategy = [
        (0, 3, 4),
        (5, 3, 6),
        (10, 3, 8),
        (15, 4, 8),
        (25, 4, 12),
        (35, 5, 12),
        (45, 6, 12),
        (55, 7, 16),
        (65, 8, 20),
        (75, 9, 22),
        (90, 10, 25),
        (1e9, None, None)
    ]

    # trainer.register_event('backward:after', backward_check_nan)

    for epoch in range(args.start_epoch + 1, args.epochs + 1):
        meters.reset()

        model.train()

        this_train_dataset = train_dataset
        if args.curriculum != 'off':
            # Find the curriculum bracket containing the current epoch.
            for si, s in enumerate(curriculum_strategy):
                if curriculum_strategy[si][0] < epoch <= curriculum_strategy[si + 1][0]:
                    max_scene_size, max_program_size = s[1:]
                    if args.curriculum in ('scene', 'all'):
                        this_train_dataset = this_train_dataset.filter_scene_size(max_scene_size)
                    if args.curriculum in ('program', 'all'):
                        this_train_dataset = this_train_dataset.filter_program_size_raw(max_program_size)
                    logger.critical('Building the data loader. Curriculum = {}/{}, length = {}.'.format(*s[1:], len(this_train_dataset)))
                    break

        train_dataloader = this_train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=args.data_workers)

        for enum_id in range(args.enums_per_epoch):
            train_epoch(epoch, trainer, train_dataloader, meters)

        if epoch % args.validation_interval == 0:
            model.eval()
            validate_epoch(epoch, trainer, validation_dataloader, meters)

        if not args.debug:
            meters.dump(args.meter_file)

        logger.critical(meters.format_simple(
            'Epoch = {}'.format(epoch),
            {k: v for k, v in meters.avg.items() if epoch % args.validation_interval == 0 or not k.startswith('validation')},
            compressed=False
        ))

        if epoch % args.save_interval == 0 and not args.debug:
            fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
            trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))

        # Decay the learning rate for the last 40% of training.
        if epoch > int(args.epochs * 0.6):
            trainer.set_learning_rate(args.lr * 0.1)
# Exemplo n.º 6
# 0
def main():
    """Template training entry point: set up dirs, build model, train.

    Relies on module-level globals: ``args``, ``configs``, ``desc``,
    ``logger``, ``parser``, ``mldash``, ``TrainerEnv``, ``train_epoch`` /
    ``validate_epoch``, ``set_output_file``, ``dump_metainfo``, ``cudnn``,
    ``torch`` and ``osp``. The dataset-loading TODOs below are unfilled, so
    the dataloader construction will fail until they are implemented.
    """
    # directories
    if not args.debug:
        args.dump_dir = ensure_path(osp.join('dumps', args.series_name, args.desc_name, args.run_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.vis_dir = ensure_path(osp.join(args.dump_dir, 'visualizations'))
        args.meta_file = osp.join(args.dump_dir, 'metainfo.json')
        args.log_file = osp.join(args.dump_dir, 'log.log')
        args.meter_file = osp.join(args.dump_dir, 'meter.json')

        # Initialize the tensorboard.
        if args.use_tb:
            args.tb_dir = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
        else:
            args.tb_dir = None

    if not args.debug:
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

    # Tensorboard is disabled in debug and evaluation modes.
    if args.debug and args.use_tb:
        logger.warning('Disabling the tensorboard in the debug mode.')
        args.use_tb = False
    if args.evaluate and args.use_tb:
        logger.warning('Disabling the tensorboard in the evaluation mode.')
        args.use_tb = False

    # TODO(Jiayuan Mao @ 04/23): load the dataset.
    logger.critical('Loading the dataset.')
    train_dataset = None
    validation_dataset = None
    # configs.validate_dataset_compatibility(train_dataset)

    # TODO(Jiayuan Mao @ 04/23): build the model.
    logger.critical('Building the model.')
    model = desc.make_model(args)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # Set user_scattered because we will add a multi GPU wrapper to the dataloader. See below.
            model = JacDataParallel(model, device_ids=args.gpus, user_scattered=True).cuda()
        # TODO(Jiayuan Mao @ 04/23): disable the cudnn benchmark.
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW
        # TODO(Jiayuan Mao @ 04/23): set the default optimizer.
        trainable_parameters = filter(lambda x: x.requires_grad, model.parameters())
        optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)

    if args.acc_grad > 1:
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int(args.iters_per_epoch / args.acc_grad)))

    trainer = TrainerEnv(model, optimizer)

    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        if trainer.load_weights(args.load):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))

    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()

    if not args.debug:
        # NOTE(review): the metainfo file was already written above; this
        # second write is redundant — confirm whether it is intentional
        # (e.g. to capture args mutated since the first write).
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))

        logger.critical('Initializing MLDash.')
        mldash.init(
            desc_name=args.series_name + '/' + args.desc_name,
            expr_name=args.expr,
            run_name=args.run_name,
            args=args,
            highlight_args=parser,
            configs=configs,
        )
        mldash.update(metainfo_file=args.meta_file, log_file=args.log_file, meter_file=args.meter_file, tb_dir=args.tb_dir)

    if args.embed:
        from IPython import embed; embed()

    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)

    # TODO(Jiayuan Mao @ 04/23): make the data loader.
    logger.critical('Building the data loader.')
    train_dataloader = train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=args.data_workers)
    validation_dataloader = validation_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)

    if args.use_gpu and args.gpu_parallel:
        # Wrap the loaders so batches are pre-scattered across GPUs
        # (matches user_scattered=True on the model above).
        from jactorch.data.dataloader import JacDataLoaderMultiGPUWrapper
        train_dataloader = JacDataLoaderMultiGPUWrapper(train_dataloader, args.gpus)
        validation_dataloader = JacDataLoaderMultiGPUWrapper(validation_dataloader, args.gpus)

    if args.evaluate:
        epoch = 0

        model.eval()
        validate_epoch(epoch, trainer, validation_dataloader, meters)

        if not args.debug:
            meters.dump(args.meter_file)

        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return

    for epoch in range(args.start_epoch + 1, args.epochs + 1):
        meters.reset()

        model.train()
        train_epoch(epoch, trainer, train_dataloader, meters)

        if args.validation_interval > 0 and epoch % args.validation_interval == 0:
            model.eval()
            with torch.no_grad():
                validate_epoch(epoch, trainer, validation_dataloader, meters)

        if not args.debug:
            meters.dump(args.meter_file)

        # TODO(Jiayuan Mao @ 02/15): config the MLDash.
        if not args.debug:
            # Track the best (min) losses and best (max) accuracies.
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for key, value in meters.items():
                if key.startswith('loss') or key.startswith('validation/loss'):
                    mldash.log_metric_min(key, value.avg)
            for key, value in meters.items():
                if key.startswith('acc') or key.startswith('validation/acc'):
                    mldash.log_metric_max(key, value.avg)

        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))

        if not args.debug:
            if epoch % args.save_interval == 0:
                fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
                trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))
# Exemplo n.º 7
# 0
def main_train(
    train_dataset,
    validation_dataset,
    test_dataset=None,
    prototype_dataset=None,
    one_shot_dataset=None,
):
    """Train with an optional curriculum, then run test and one-shot eval.

    ``test_dataset``, when given, is a mapping from split name to dataset.
    Relies on module-level globals: ``args``, ``configs``, ``desc``,
    ``logger``, ``TrainerEnv``, ``train_epoch`` / ``validate_epoch`` /
    ``main_one_shot``, ``cudnn`` and ``osp``.
    """
    logger.critical("Building the model.")
    model = desc.make_model(args, train_dataset.unwrapped.vocab)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel

            # from jactorch.parallel import UserScatteredJacDataParallel as JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    if hasattr(desc, "make_optimizer"):
        logger.critical("Building customized optimizer.")
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW

        trainable_parameters = filter(lambda x: x.requires_grad,
                                      model.parameters())
        optimizer = AdamW(trainable_parameters,
                          args.lr,
                          weight_decay=configs.train.weight_decay)

    if args.acc_grad > 1:
        from jactorch.optim import AccumGrad

        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning(
            "Use accumulated grad={:d}, effective iterations per epoch={:d}.".
            format(args.acc_grad, int(args.iters_per_epoch / args.acc_grad)))

    trainer = TrainerEnv(model, optimizer)

    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra["epoch"]
            logger.critical("Resume from epoch {}.".format(args.start_epoch))
    elif args.load:
        if trainer.load_weights(args.load):
            logger.critical(
                'Loaded weights from pretrained model: "{}".'.format(
                    args.load))

    if args.use_tb and not args.debug:
        from jactorch.train.tb import TBLogger, TBGroupMeters

        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(
            args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters

        meters = GroupMeters()

    if not args.debug:
        logger.critical('Writing meter logs to file: "{}".'.format(
            args.meter_file))

    if args.clip_grad:
        logger.info("Registering the clip_grad hook: {}.".format(
            args.clip_grad))

        def clip_grad(self, loss):
            # Clip after backward, before the optimizer step.
            from torch.nn.utils import clip_grad_norm_

            clip_grad_norm_(self.model.parameters(), max_norm=args.clip_grad)

        trainer.register_event("backward:after", clip_grad)

    if hasattr(desc, "customize_trainer"):
        desc.customize_trainer(trainer)

    if args.embed:
        from IPython import embed

        embed()

    logger.critical("Building the data loader.")
    validation_dataloader = validation_dataset.make_dataloader(
        args.batch_size,
        shuffle=False,
        drop_last=False,
        nr_workers=args.data_workers)

    # BUG FIX: `test_dataloader` was referenced after the training loop, but
    # its construction had been commented out, raising a NameError whenever
    # `test_dataset` was provided. The dict construction is restored from the
    # commented-out draft. The early `main_one_shot(...); return meters` that
    # unconditionally skipped training whenever `test_dataset` was given is
    # now gated on `args.evaluate`, matching the draft's evaluate branch —
    # NOTE(review): confirm this matches the intended workflow.
    test_dataloader = None
    if test_dataset is not None:
        test_dataloader = {
            name: test_dataset[name].make_dataloader(
                args.batch_size,
                shuffle=False,
                drop_last=False,
                nr_workers=args.data_workers,
            )
            for name in test_dataset
        }

    if args.evaluate:
        meters.reset()
        model.eval()
        validate_epoch(args.start_epoch, trainer, validation_dataloader,
                       meters)
        if test_dataloader is not None:
            for name in test_dataloader:
                validate_epoch(
                    args.start_epoch,
                    trainer,
                    test_dataloader[name],
                    meters,
                    meter_prefix=name,
                )
        logger.critical(
            meters.format_simple(
                "Validation",
                {k: v for k, v in meters.avg.items() if v != 0},
                compressed=False,
            ))
        main_one_shot(
            prototype_dataset,
            one_shot_dataset,
            model,
            args.start_epoch,
            trainer,
            meters,
            args.batch_size,
        )
        if not args.debug:
            meters.dump(args.meter_file)

        return meters

    # Curriculum schedule: (start_epoch, max_scene_size, max_program_size).
    # The final sentinel entry terminates the search below.
    curriculum_strategy = [
        (0, 3, 4),
        (5, 3, 6),
        (10, 3, 8),
        (15, 4, 8),
        (25, 4, 12),
        (35, 5, 12),
        (45, 6, 12),
        (55, 7, 16),
        (65, 8, 20),
        (75, 9, 22),
        (90, 10, 25),
        (1e9, None, None),
    ]

    # trainer.register_event('backward:after', backward_check_nan)

    for epoch in range(args.start_epoch + 1, args.epochs + 1):
        meters.reset()

        model.train()

        this_train_dataset = train_dataset
        if args.curriculum != "off":
            # Find the curriculum bracket containing the current epoch.
            for si, s in enumerate(curriculum_strategy):
                if curriculum_strategy[si][0] < epoch <= curriculum_strategy[
                        si + 1][0]:
                    max_scene_size, max_program_size = s[1:]
                    if args.curriculum in ("scene", "all"):
                        this_train_dataset = this_train_dataset.filter_scene_size(
                            max_scene_size)
                    if args.curriculum in ("program", "all"):
                        this_train_dataset = this_train_dataset.filter_program_size_raw(
                            max_program_size)
                    logger.critical(
                        "Building the data loader. Curriculum = {}/{}, length = {}."
                        .format(*s[1:], len(this_train_dataset)))
                    break

        train_dataloader = this_train_dataset.make_dataloader(
            args.batch_size,
            shuffle=True,
            drop_last=True,
            nr_workers=args.data_workers)

        for enum_id in range(args.enums_per_epoch):
            train_epoch(epoch, trainer, train_dataloader, meters)

        if epoch % args.validation_interval == 0:
            model.eval()
            validate_epoch(epoch, trainer, validation_dataloader, meters)

        if not args.debug:
            meters.dump(args.meter_file)

        logger.critical(
            meters.format_simple(
                "Epoch = {}".format(epoch),
                {
                    k: v
                    for k, v in meters.avg.items()
                    if epoch % args.validation_interval == 0
                    or not (k.startswith("validation") or k.startswith("test"))
                },
                compressed=False,
            ))

        if epoch % args.save_interval == 0 and not args.debug:
            fname = osp.join(args.ckpt_dir, "epoch_{}.pth".format(epoch))
            trainer.save_checkpoint(
                fname, dict(epoch=epoch, meta_file=args.meta_file))

        # Decay the learning rate for the last 40% of training.
        if epoch > int(args.epochs * 0.6):
            trainer.set_learning_rate(args.lr * 0.1)

    # Final evaluation on all test splits, then one-shot evaluation.
    if test_dataloader is not None:
        model.eval()
        for name in test_dataloader:
            validate_epoch(
                epoch,
                trainer,
                test_dataloader[name],
                meters,
                meter_prefix=name,
            )
        if not args.debug:
            meters.dump(args.meter_file)
    main_one_shot(
        prototype_dataset,
        one_shot_dataset,
        model,
        epoch,
        trainer,
        meters,
        args.batch_size,
    )
# Exemplo n.º 8
# 0
def main():
    """Template evaluation entry point: set up dirs, build model, validate.

    NOTE(review): ``validation_dataset`` is still a TODO placeholder (None),
    so the ``make_dataloader`` call below will fail until dataset loading is
    filled in. Relies on module-level globals: ``args``, ``configs``,
    ``desc``, ``logger``, ``set_output_file``, ``dump_metainfo``,
    ``load_weights``, ``validate_epoch``, ``cudnn`` and ``osp``.
    """
    # directories
    if not args.debug:
        args.dump_dir = ensure_path(
            osp.join('dumps', args.series_name, args.desc_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, 'meta'))
        args.meta_file = osp.join(args.meta_dir, args.run_name + '.json')
        args.log_file = osp.join(args.meta_dir, args.run_name + '.log')
        args.meter_file = osp.join(args.meta_dir,
                                   args.run_name + '.meter.json')

    if not args.debug:
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(
            args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
    else:
        if args.use_tb:
            # BUG FIX: the message has no placeholder, so the stray
            # `.format(args.meta_file)` argument was dead (and misleading).
            logger.warning('Disabling the tensorboard in the debug mode.')
            args.use_tb = False

    # TODO(Jiayuan Mao @ 04/23): load the dataset.
    logger.critical('Loading the dataset.')
    validation_dataset = None
    # configs.validate_dataset_compatibility(train_dataset)

    # TODO(Jiayuan Mao @ 04/23): build the model.
    logger.critical('Building the model.')
    model = desc.make_model(args)

    if args.use_gpu:
        model.cuda()
        # Use the customized data parallel if applicable.
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            # from jactorch.parallel import UserScatteredJacDataParallel as JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus).cuda()
        # TODO(Jiayuan Mao @ 04/23): disable the cudnn benchmark.
        # Disable the cudnn benchmark.
        cudnn.benchmark = False

    if load_weights(model, args.load):
        logger.critical('Loaded weights from pretrained model: "{}".'.format(
            args.load))

    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(
            args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()

    if not args.debug:
        logger.critical('Writing meter logs to file: "{}".'.format(
            args.meter_file))

    if args.embed:
        from IPython import embed
        embed()

    # TODO(Jiayuan Mao @ 04/23): make the data loader.
    logger.critical('Building the data loader.')
    validation_dataloader = validation_dataset.make_dataloader(
        args.batch_size,
        shuffle=False,
        drop_last=False,
        nr_workers=args.data_workers)

    model.eval()
    validate_epoch(model, validation_dataloader, meters)

    if not args.debug:
        meters.dump(args.meter_file)

    logger.critical(meters.format_simple('Test', compressed=False))