Example 1
def build_recognizer(cfg, device):
    world_size = du.get_world_size()

    model = registry.RECOGNIZER[cfg.MODEL.RECOGNIZER.TYPE](cfg)

    if cfg.MODEL.NORM.SYNC_BN and world_size > 1:
        logger.info("start sync BN on the process group of {}".format(
            du.LOCAL_RANK_GROUP))
        convert_sync_bn(model, du.LOCAL_PROCESS_GROUP)
    preloaded = cfg.MODEL.RECOGNIZER.PRELOADED
    if preloaded != "":
        logger.info(f'load pretrained: {preloaded}')
        check_pointer = CheckPointer(model)
        check_pointer.load(preloaded, map_location=device)
        logger.info("finish loading model weights")
    if cfg.MODEL.CONV.ADD_BLOCKS is not None:
        assert isinstance(cfg.MODEL.CONV.ADD_BLOCKS, tuple)
        for add_block in cfg.MODEL.CONV.ADD_BLOCKS:
            # Insert the requested structural re-parameterization blocks in place.
            if add_block == 'RepVGGBlock':
                insert_repvgg_block(model)
            elif add_block == 'ACBlock':
                insert_acblock(model)

    model = model.to(device=device)
    if du.get_world_size() > 1:
        model = DDP(model,
                    device_ids=[device],
                    output_device=device,
                    find_unused_parameters=True)

    return model
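A minimal usage sketch, assuming a YACS-style cfg object; get_cfg_defaults is a hypothetical config factory, not an API confirmed by the example:

import torch

# Hypothetical driver; only build_recognizer comes from the example above.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cfg = get_cfg_defaults()  # hypothetical config factory
model = build_recognizer(cfg, device)
model.train()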
Example 2
def build_dataloader(cfg, dataset, is_train=True):
    world_size = du.get_world_size()
    num_gpus = cfg.NUM_GPUS
    rank = du.get_rank()
    if is_train:
        batch_size = cfg.DATALOADER.TRAIN_BATCH_SIZE

        if num_gpus > 1:
            sampler = DistributedSampler(dataset,
                                         num_replicas=world_size,
                                         rank=rank,
                                         shuffle=True)
        else:
            sampler = RandomSampler(dataset)
    else:
        batch_size = cfg.DATALOADER.TEST_BATCH_SIZE

        if num_gpus > 1:
            sampler = DistributedSampler(dataset,
                                         num_replicas=world_size,
                                         rank=rank,
                                         shuffle=False)
        else:
            sampler = SequentialSampler(dataset)

    data_loader = DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        sampler=sampler,
        batch_size=batch_size,
        drop_last=False,
        # [When to set pin_memory to true?](https://discuss.pytorch.org/t/when-to-set-pin-memory-to-true/19723)
        pin_memory=True)

    return data_loader
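One practical caveat with this loader: DistributedSampler seeds its shuffle from the epoch number, so the training loop should call set_epoch() before each epoch, otherwise every epoch replays the same order. A minimal sketch; the loop variables are illustrative:

from torch.utils.data.distributed import DistributedSampler

# Illustrative training loop; set_epoch() re-seeds the per-epoch shuffle.
for epoch in range(num_epochs):
    if isinstance(data_loader.sampler, DistributedSampler):
        data_loader.sampler.set_epoch(epoch)
    for images, targets in data_loader:
        pass  # forward / backward / optimizer step here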
Example 3
def setup_logging(output_dir=None):
    """
    Sets up the logging for multiple processes. Only enable the logging for the
    master process, and suppress logging for the non-master processes.
    """

    if du.is_master_proc():
        # Enable logging for the master process.
        logging.root.handlers = []
    else:
        # Suppress logging for non-master processes.
        _suppress_print()

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    plain_formatter = logging.Formatter(
        "[%(asctime)s][%(levelname)s] %(filename)s: %(lineno)3d: %(message)s",
        datefmt="%m/%d %H:%M:%S",
    )

    if du.is_master_proc():
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(plain_formatter)
        logger.addHandler(ch)

    if output_dir is not None and du.is_master_proc(du.get_world_size()):
        # Only the global master process writes the log file.
        filename = os.path.join(output_dir, "stdout.log")
        fh = logging.FileHandler(filename)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)
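For context, a sketch of the call site; the output directory is a placeholder:

# Illustrative call site; './outputs' is a placeholder path.
setup_logging(output_dir='./outputs')
logger = logging.getLogger(__name__)
# Emitted to stdout (and stdout.log) only on the master process.
logger.info("logging initialized")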
Example 4
def build_sampler(cfg, dataset):
    world_size = du.get_world_size()
    num_gpus = cfg.NUM_GPUS
    rank = du.get_rank()
    if num_gpus > 1:
        shuffle = cfg.DATALOADER.RANDOM_SAMPLE
        sampler = DistributedSampler(dataset,
                                     num_replicas=world_size,
                                     rank=rank,
                                     shuffle=shuffle)
    elif cfg.DATALOADER.RANDOM_SAMPLE:
        sampler = RandomSampler(dataset)
    else:
        sampler = SequentialSampler(dataset)

    return sampler
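Unlike the dataloader builders above, build_sampler returns a bare sampler and leaves DataLoader construction to the caller. A minimal sketch, with the cfg keys assumed from the surrounding examples:

from torch.utils.data import DataLoader

# Sketch only; the cfg keys are borrowed from the other examples.
sampler = build_sampler(cfg, dataset)
data_loader = DataLoader(dataset,
                         sampler=sampler,
                         batch_size=cfg.DATALOADER.TRAIN_BATCH_SIZE,
                         num_workers=cfg.DATALOADER.NUM_WORKERS,
                         pin_memory=True)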
Example 5
def build_dataloader(cfg, is_train=True):
    transform = build_transform(cfg, is_train=is_train)
    dataset = build_dataset(cfg, transform=transform, is_train=is_train)

    world_size = du.get_world_size()
    num_gpus = cfg.NUM_GPUS
    rank = du.get_rank()
    if is_train:
        batch_size = cfg.DATALOADER.TRAIN_BATCH_SIZE
        drop_last = True

        if num_gpus > 1:
            sampler = DistributedSampler(dataset,
                                         num_replicas=world_size,
                                         rank=rank,
                                         shuffle=True)
        else:
            sampler = RandomSampler(dataset)
    else:
        batch_size = cfg.DATALOADER.TEST_BATCH_SIZE
        drop_last = False

        if num_gpus > 1:
            sampler = DistributedSampler(dataset,
                                         num_replicas=world_size,
                                         rank=rank,
                                         shuffle=False)
        else:
            sampler = SequentialSampler(dataset)

    data_loader = DataLoader(dataset,
                             num_workers=cfg.DATALOADER.NUM_WORKERS,
                             sampler=sampler,
                             batch_size=batch_size,
                             drop_last=drop_last,
                             pin_memory=True)

    return data_loader
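This variant differs from Example 2 in two ways: it builds the transform and dataset internally, and it sets drop_last=True for training, so every batch, and hence every DDP replica, sees the full batch size. A hedged driver sketch; only build_dataloader is taken from the example:

# Hypothetical driver; transforms and dataset are built inside the function.
train_loader = build_dataloader(cfg, is_train=True)
test_loader = build_dataloader(cfg, is_train=False)
print(f'train batches: {len(train_loader)}, test batches: {len(test_loader)}')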