Example #1
def get_model(cfg, num_classes, device, logger):
    if 'decouple' in cfg.NAME:
        if cfg.TRAIN_STAGE == 1:
            model = Network1(cfg, mode="train", num_classes=num_classes)
        else:
            model = Network(cfg,
                            mode="train",
                            num_classes=int(sum(num_classes) / 100) * 100)
    else:
        if isinstance(num_classes, list) and not cfg.MULTI_BRANCH:
            model = Network1(cfg, mode="train", num_classes=num_classes)
        elif isinstance(num_classes, list) and cfg.MULTI_BRANCH:
            model = Network2(cfg, mode="train", num_classes=num_classes)
        else:
            model = Network(cfg, mode="train", num_classes=num_classes)

    if cfg.BACKBONE.FREEZE:
        model.freeze_backbone()
        logger.info("Backbone has been frozen")

    if cfg.CPU_MODE:
        model = model.to(device)
    else:
        model = torch.nn.DataParallel(model).cuda()

    return model
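
For context, a minimal, hypothetical call site for get_model might look like the sketch below. The config fields (NAME, TRAIN_STAGE, MULTI_BRANCH, BACKBONE.FREEZE, CPU_MODE) mirror the attributes the function reads above; the SimpleNamespace config, logger name, and class count are illustrative assumptions, not the project's actual setup.

# Hypothetical usage sketch for get_model (not taken from the BBN project).
import logging
import types

import torch

cfg = types.SimpleNamespace(
    NAME="baseline",                               # no 'decouple' -> plain Network branch
    TRAIN_STAGE=1,
    MULTI_BRANCH=False,
    BACKBONE=types.SimpleNamespace(FREEZE=False),
    CPU_MODE=not torch.cuda.is_available(),
)
logger = logging.getLogger("train")
device = torch.device("cpu" if cfg.CPU_MODE else "cuda")

# num_classes is an int here, so the final else branch builds a single Network.
model = get_model(cfg, num_classes=100, device=device, logger=logger)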
Example #2
File: utils.py Project: zymale/BBN
def get_model(cfg, num_classes, device, logger):
    model = Network(cfg, mode="train", num_classes=num_classes)

    if cfg.BACKBONE.FREEZE:
        model.freeze_backbone()
        logger.info("Backbone has been frozen")

    if cfg.CPU_MODE:
        model = model.to(device)
    else:
        model = torch.nn.DataParallel(model).cuda()

    return model
Example #3
File: valid.py Project: zp1018/BBN
if __name__ == "__main__":
    args = parse_args()
    update_config(cfg, args)

    test_set = eval(cfg.DATASET.DATASET)("valid", cfg)
    num_classes = test_set.get_num_classes()
    device = torch.device("cpu" if cfg.CPU_MODE else "cuda")
    model = Network(cfg, mode="test", num_classes=num_classes)

    model_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, "models")
    model_file = cfg.TEST.MODEL_FILE
    if "/" in model_file:
        model_path = model_file
    else:
        model_path = os.path.join(model_dir, model_file)
    model.load_model(model_path)

    if cfg.CPU_MODE:
        model = model.to(device)
    else:
        model = torch.nn.DataParallel(model).cuda()

    testLoader = DataLoader(
        test_set,
        batch_size=cfg.TEST.BATCH_SIZE,
        shuffle=False,
        num_workers=cfg.TEST.NUM_WORKERS,
        pin_memory=cfg.PIN_MEMORY,
    )
    valid_model(testLoader, model, cfg, device, num_classes)
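
The test set above is built with eval(cfg.DATASET.DATASET), which resolves the dataset class from its configured name. A safer equivalent is an explicit lookup in the module that defines the dataset classes; the sketch below assumes a module named "dataset" holds them, which is an assumption about the project layout rather than a confirmed detail.

# Hypothetical alternative to eval(cfg.DATASET.DATASET): resolve the configured
# class name via getattr on the dataset module instead of eval.
import importlib

def build_dataset(name, split, cfg, module="dataset"):
    dataset_module = importlib.import_module(module)   # placeholder module name
    try:
        dataset_cls = getattr(dataset_module, name)
    except AttributeError:
        raise ValueError("Unknown dataset: {}".format(name))
    return dataset_cls(split, cfg)

test_set = build_dataset(cfg.DATASET.DATASET, "valid", cfg)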
Example #4

# Train/validation split with SubsetRandomSampler (excerpt; `dataset`,
# `dataset_size`, `indices`, `validation_split`, `num_epoch`, and the
# `Network` class are assumed to be defined earlier in the original script).
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, SubsetRandomSampler

split = int(np.floor(validation_split * dataset_size))
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)

train_loader = torch.utils.data.DataLoader(dataset,
                                           batch_size=32,
                                           sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(dataset,
                                           batch_size=32,
                                           sampler=valid_sampler)
model = Network()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()

train_iter = iter(train_loader)
for e in range(num_epoch):
    try:
        inputs, _ = next(train_iter)
    except StopIteration:
        train_iter = iter(train_loader)
        inputs, _ = next(train_iter)

    #inputs = inputs.to(device)
    lr_batch = torch.zeros(32, 3, 8, 8, dtype=torch.float)
    hr_batch = torch.zeros(32, 3, 32, 32, dtype=torch.float)
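
The excerpt ends with empty low-resolution (8x8) and high-resolution (32x32) batches. One plausible way to populate them, assuming the loader yields 32x32 images that serve as the high-resolution targets, is to downsample the loaded batch; this continuation is an assumption, not part of the original script.

# Hypothetical continuation: treat the loaded images as HR targets and
# downsample them to produce the paired LR inputs.
import torch.nn.functional as F

hr_batch = inputs.to(device)
lr_batch = F.interpolate(hr_batch, size=(8, 8), mode="bilinear", align_corners=False)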