Example #1
train_dataloader = data.DataLoader(
    dataset=train_dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=True,
    drop_last=True
)
val_dataloader = data.DataLoader(
    dataset=val_dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=False
)
dataloaders = {'train': train_dataloader, 'val': val_dataloader}

model = VideoModel(output_stride=args.os)
# load pretrained models
if os.path.exists(args.checkpoint):
    print('Loading state dict from: {0}'.format(args.checkpoint))
    if args.start_epoch == 0:
        model = load_model(model=model, model_file=args.checkpoint, is_restore=False)
    else:
        model = load_model(model=model, model_file=args.checkpoint, is_restore=True)
else:
    raise ValueError("Cannot find model file at {}".format(args.checkpoint))

model = nn.DataParallel(model)
model.to(device)

criterion = nn.BCEWithLogitsLoss()
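The first example stops right after building the loss. Below is a minimal sketch of how the dataloaders dict and criterion above are typically consumed; the SGD optimizer, the args.epochs count, and the (inputs, labels) batch structure are assumptions for illustration, not part of the original snippet.

# Sketch only: assumed training/validation loop for the objects built above.
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
for epoch in range(args.start_epoch, args.epochs):            # args.epochs is assumed
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()
        else:
            model.eval()
        running_loss = 0.0
        for inputs, labels in dataloaders[phase]:             # assumed batch structure
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            with torch.set_grad_enabled(phase == 'train'):
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
            running_loss += loss.item() * inputs.size(0)
        print('[{}] epoch {} loss: {:.4f}'.format(
            phase, epoch, running_loss / len(dataloaders[phase].dataset)))
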
Example #2

train_dataloader = data.DataLoader(
    dataset=train_dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=True,
    drop_last=True
)
val_dataloader = data.DataLoader(
    dataset=val_dataset,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=False
)
dataloaders = {'train': train_dataloader, 'val': val_dataloader}


model = VideoModel(output_stride=args.os, pretrained=True, cfg=CFG)
# load pretrained models
if os.path.exists(args.checkpoint):
    print('Loading state dict from: {0}'.format(args.checkpoint))
    logger = get_logger()
    if args.start_epoch == 0:
        # load_backbone(model, args.checkpoint, logger)
        model = load_model(model=model, model_file=args.checkpoint)   # someone else wrote it this way, so it must be right
    else:
        # load_backbone(model, args.checkpoint, logger)
        model = load_model(model=model, model_file=args.checkpoint)
else:
    raise ValueError("Cannot find model file at {}".format(args.checkpoint))


model = nn.DataParallel(model)  # data parallelism, but it can suffer from load imbalance across GPUs
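
As the comment above notes, nn.DataParallel replicates the model on every GPU but gathers outputs on the default device, which tends to overload GPU 0. The usual remedy is DistributedDataParallel; the sketch below assumes a single node launched with torchrun (the LOCAL_RANK environment variable and the NCCL backend follow from that assumption) and is not part of the original example.

# Sketch only: swapping DataParallel for DistributedDataParallel.
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

dist.init_process_group(backend='nccl')
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
model = DDP(model.cuda(local_rank), device_ids=[local_rank])
# Each DataLoader would also need a DistributedSampler so every rank sees its own shard:
# train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
# train_dataloader = data.DataLoader(train_dataset, batch_size=args.batch_size,
#                                    num_workers=args.num_workers, sampler=train_sampler,
#                                    drop_last=True)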
Example #3
    transforms=data_transforms['test'],
    read_clip=True,
    random_reverse_clip=False,
    label_interval=1,
    frame_between_label_num=0,
    clip_len=args.clip_len
)

dataloader = data.DataLoader(
    dataset=dataset,
    batch_size=1,  # only a batch of 1 video clip is supported
    num_workers=args.num_workers,
    shuffle=False
)

model = VideoModel(output_stride=args.os, cfg=CFG)

# load pretrained models
if os.path.exists(args.checkpoint):
    print('Loading state dict from: {0}'.format(args.checkpoint))
    model = load_model(model=model, model_file=args.checkpoint, is_restore=True)
else:
    raise ValueError("Cannot find model file at {}".format(args.checkpoint))

model.to(device)


def inference():    # __call__ method: lets a class instance be used like a function
    model.eval()
    print("Begin inference on {} {}.".format(args.dataset, args.split))
    running_mae = 0.0
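    # The example is truncated here; the lines below are a sketch (not from the
    # original snippet) of how the MAE accumulation typically continues. The
    # (frames, labels) batch structure and the sigmoid over the model output
    # are assumptions.
    with torch.no_grad():
        for frames, labels in dataloader:
            frames, labels = frames.to(device), labels.to(device)
            preds = torch.sigmoid(model(frames))  # assumed: the model returns logits
            running_mae += torch.abs(preds - labels).mean().item()
    print('MAE on {} {}: {:.4f}'.format(args.dataset, args.split, running_mae / len(dataloader)))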
Example #4
    # inputs = torch.randn(1, 3, 448, 448)
    # inputs = inputs.cuda()
    if device == 'cuda':
        model = model.cuda()
        # inputs = inputs.cuda()

    model.eval()

    time_spent = []
    for idx in range(100):
        start_time = time.time()
        with torch.no_grad():
            _ = model(inputs)

        if device == 'cuda':
            torch.cuda.synchronize()  # wait for cuda to finish (cuda is asynchronous!)
        if idx > 10:  # skip the first iterations as warm-up
            time_spent.append(time.time() - start_time)
    avg_time = np.mean(time_spent)  # seconds per forward pass
    print('Avg execution time (s): %.4f, FPS: %d' % (avg_time, 1 // avg_time * 4))  # the x4 presumably reflects the frames per clip
    return 1 // avg_time


if __name__ == "__main__":

    torch.backends.cudnn.benchmark = True

    from libs.networks import VideoModel, ImageModel
    model = VideoModel(output_stride=16, pretrained=True, cfg=CFG)

    computeTime(model)
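
time.time() measures wall-clock time around asynchronous CUDA launches, so the synchronize call above is essential before reading the clock; an alternative is CUDA events, which time the GPU work directly. The helper below is an illustration only (its name, the n_iters/warmup parameters, and the expectation that inputs is a ready-made clip tensor are made up) and is not part of the original example.

# Sketch only: timing the forward pass with CUDA events instead of time.time().
def compute_time_cuda_events(model, inputs, n_iters=100, warmup=10):
    model.eval()
    starter = torch.cuda.Event(enable_timing=True)
    ender = torch.cuda.Event(enable_timing=True)
    timings = []
    with torch.no_grad():
        for i in range(n_iters):
            starter.record()
            _ = model(inputs)
            ender.record()
            torch.cuda.synchronize()                 # wait for the recorded events to complete
            if i >= warmup:                          # drop warm-up iterations
                timings.append(starter.elapsed_time(ender))  # elapsed time in milliseconds
    return sum(timings) / len(timings)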