val_dataloader = data.DataLoader(
    dataset=val_dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=False
)
dataloaders = {'train': train_dataloader, 'val': val_dataloader}


model = VideoModel(output_stride=args.os, pretrained=True, cfg=CFG)
# load pretrained models
if os.path.exists(args.checkpoint):
    print('Loading state dict from: {0}'.format(args.checkpoint))
    model = load_model(model=model, model_file=args.checkpoint)  # copied from a reference implementation, so it should be correct
else:
    raise ValueError("Cannot find model file at {}".format(args.checkpoint))


model = nn.DataParallel(model)  # data parallelism across GPUs; can suffer from load imbalance
model.to(device)    # move the model to the GPU device
# print(model)

# ------- 1. define loss function --------

bce_loss = nn.BCELoss(reduction='mean')  # reduction='mean' replaces the deprecated size_average argument
ssim_loss = pytorch_ssim.SSIM(window_size=11, size_average=True)
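# These two terms are typically combined into a single hybrid objective
# (as in BASNet), turning the SSIM similarity into a loss via 1 - SSIM.
# A minimal sketch, assuming pred and target are probability maps in [0, 1]:
def hybrid_loss(pred, target):
    return bce_loss(pred, target) + (1 - ssim_loss(pred, target))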
Example No. 2
train_dataloader = data.DataLoader(
    dataset=train_dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=True,
    drop_last=True
)
val_dataloader = data.DataLoader(
    dataset=val_dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=False
)
dataloaders = {'train': train_dataloader, 'val': val_dataloader}

model = VideoModel(output_stride=args.os)
# load pretrained models
if os.path.exists(args.checkpoint):
    print('Loading state dict from: {0}'.format(args.checkpoint))
    if args.start_epoch == 0:
        model = load_model(model=model, model_file=args.checkpoint, is_restore=False)
    else:
        model = load_model(model=model, model_file=args.checkpoint, is_restore=True)
else:
    raise ValueError("Cannot find model file at {}".format(args.checkpoint))

model = nn.DataParallel(model)
model.to(device)

criterion = nn.BCEWithLogitsLoss()
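# BCEWithLogitsLoss folds the sigmoid into the loss for numerical stability,
# so the model is expected to output raw logits here (unlike the first example
# above, where nn.BCELoss is applied to probability maps).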

optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.module.parameters()), lr=args.lr)
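# filter(...) skips parameters frozen with requires_grad=False, and
# model.module reaches through the nn.DataParallel wrapper to the real model.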

if not os.path.exists(args.save_folder):
    os.makedirs(args.save_folder)
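# A minimal sketch of how the pieces above fit together for one training epoch
# (assumed: the loader yields (frames, labels) batches and the model returns a
# single logits tensor matching the label shape; the actual loop is not shown
# in this excerpt):
model.train()
for frames, labels in dataloaders['train']:
    frames, labels = frames.to(device), labels.to(device)
    optimizer.zero_grad()
    logits = model(frames)            # raw logits for BCEWithLogitsLoss
    loss = criterion(logits, labels)
    loss.backward()
    optimizer.step()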
Example No. 3
train_dataloader = data.DataLoader(dataset=train_dataset,
                                   batch_size=args.batch_size,
                                   num_workers=args.num_workers,
                                   shuffle=True,
                                   drop_last=True)
val_dataloader = data.DataLoader(dataset=val_dataset,
                                 batch_size=args.batch_size,
                                 num_workers=args.num_workers,
                                 shuffle=False)
dataloaders = {'train': train_dataloader, 'val': val_dataloader}

pseudo_label_generator = FGPLG(args=args, output_stride=args.os)
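# FGPLG is the pseudo-label generator; it owns a flownet submodule (loaded
# separately below), presumably to estimate optical flow between frames.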

if os.path.exists(args.checkpoint):
    print('Loading state dict from: {0}'.format(args.checkpoint))
    if args.start_epoch == 0:
        pseudo_label_generator = load_model(model=pseudo_label_generator,
                                            model_file=args.checkpoint,
                                            is_restore=False)
        if os.path.exists(args.flownet_checkpoint):
            pseudo_label_generator.flownet = load_model(
                model=pseudo_label_generator.flownet,
                model_file=args.flownet_checkpoint,
                is_restore=True)
        else:
            raise ValueError(
                "Cannot find pretrained flownet model file at {}".format(
                    args.flownet_checkpoint))
    else:
        pseudo_label_generator = load_model(model=pseudo_label_generator,
                                            model_file=args.checkpoint,
                                            is_restore=True)
else: