Example #1
import os
import time

import torch

from model import YOWO  # assumption: YOWO is importable from the repo's model module


def load_model(opt, pretrained_path):
    seed = int(time.time())
    use_cuda = True
    gpus = '0'
    torch.manual_seed(seed)
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed)

    # Create model
    model = YOWO(opt)
    model = model.cuda()
    # model = nn.DataParallel(model, device_ids=None)  # in multi-gpu case
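    # 'seen' counts the number of training samples processed so far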
    model.seen = 0

    checkpoint = torch.load(pretrained_path)
    epoch = checkpoint['epoch']
    fscore = checkpoint['fscore']
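    # strict=False tolerates missing/unexpected keys in the checkpoint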
    model.load_state_dict(checkpoint['state_dict'], strict=False)

    return model, epoch, fscore
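
A minimal usage sketch for load_model(); the opts module import and the
checkpoint path below are assumptions, not part of the example above:

from opts import parse_opts  # assumption: the repo's option parser

opt = parse_opts()
model, epoch, fscore = load_model(opt, 'backup/yowo_checkpoint.pth')
model.eval()  # switch to inference mode before running predictions
print('resumed from epoch {}, fscore {}'.format(epoch, fscore))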
Example #2
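# Training-script fragment: config values such as backupdir, seed, use_cuda,
# gpus, gpu_ids, ngpus, batch_size, learning_rate, momentum, decay and
# num_workers are assumed to have been parsed from cfg/opts earlier on.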
iou_thresh    = 0.5

if not os.path.exists(backupdir):
    os.mkdir(backupdir)
    
torch.manual_seed(seed)
if use_cuda:
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    torch.cuda.manual_seed(seed)

# Create model
model = YOWO(opt)

model       = model.cuda()
model       = nn.DataParallel(model, device_ids=gpu_ids) # in multi-gpu case
model.seen  = 0

logging("============================ starting =============================")
print(model)
logging(f"# of GPUs: {ngpus}, batch_size: {batch_size}")

parameters = get_fine_tuning_parameters(model, opt)
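# per-sample scaling: the learning rate is divided by the batch size and the
# weight decay is multiplied by it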
optimizer = optim.SGD(parameters, lr=learning_rate/batch_size, momentum=momentum, dampening=0, weight_decay=decay*batch_size)

kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}

# Load resume path if necessary
if opt.resume_path:
    logging("===================================================================")
    if '.pth' in opt.resume_path:
        chkpt = opt.resume_path
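
The resume branch above stops after selecting the checkpoint file. A minimal
sketch of a typical continuation, assuming the checkpoint stores the same keys
used by the loading code further below ('epoch', 'fscore', 'state_dict',
'optimizer'):

checkpoint = torch.load(chkpt)
begin_epoch = checkpoint['epoch'] + 1
best_fscore = checkpoint['fscore']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logging("resumed at epoch {} with fscore {}".format(begin_epoch, best_fscore))
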
anchors = [float(i) for i in anchors]
num_anchors = 5

# Step 1: Set path for model and test dataset
model_path = r'F:\NUS\Courses\ISY5004\Practice Module\backup'
model_name = 'yowo_tennis_8f_best.pth'
Data_path = ''


# Step 2: load the model
opt = parse_opts()
model = YOWO(opt)

model       = model.cuda()
model       = nn.DataParallel(model, device_ids=None) # in multi-gpu case
model.seen  = 0
print(model)

print("===================================================================")
print('loading model {}'.format(os.path.join(model_path, model_name)))
checkpoint = torch.load(os.path.join(model_path, model_name))
begin_epoch = checkpoint['epoch'] + 1
best_fscore = checkpoint['fscore']
model.load_state_dict(checkpoint['state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer'])
# model.seen = checkpoint['epoch'] * nsamples
print("Loaded model fscore: ", checkpoint['fscore'])

# Step 3: Load test data
init_width = 224
init_height = 224
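
A minimal sketch of how the 224x224 test resolution above could be applied to
incoming frames; the clip loading itself depends on the repo's dataset class
and is omitted here, and the (1, C, T, H, W) clip layout is an assumption
based on how 3D-CNN backbones are usually fed:

import torch
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize((init_height, init_width)),  # torchvision expects (H, W)
    transforms.ToTensor(),
])

def clip_to_tensor(frames):
    # frames: list of PIL.Image frames belonging to one clip
    # returns a (1, C, T, H, W) tensor ready to be passed to the model
    return torch.stack([preprocess(f) for f in frames], dim=1).unsqueeze(0)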