Example 1
# `net` (the detector), `use_gpu` and `learning_rate` are defined earlier in the full script
import torch
from torchvision import models

# initialize the backbone from ImageNet-pretrained VGG16-BN
# (this line is assumed; the excerpt starts mid-block, cf. Example 2 below)
vgg = models.vgg16_bn(pretrained=True)
new_state_dict = vgg.state_dict()
dd = net.state_dict()
for k in new_state_dict.keys():
    print(k)
    if k in dd.keys() and k.startswith('features'):
        print('yes')  # shared 'features' key: copy the pretrained weight
        dd[k] = new_state_dict[k]
net.load_state_dict(dd)
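
# An equivalent, more compact way to do the same partial copy (a sketch using only
# the `vgg` and `net` objects above): filter the pretrained state dict down to the
# shared 'features' keys and merge it into the detector's own state dict.
pretrained = vgg.state_dict()
own = net.state_dict()
backbone = {k: v for k, v in pretrained.items()
            if k.startswith('features') and k in own}
own.update(backbone)
net.load_state_dict(own)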

if use_gpu:
    print('this computer has %d GPU(s); the current device index is %s' %
          (torch.cuda.device_count(), torch.cuda.current_device()))
    net.cuda()

# --------------------- loss function ---------------------
criterion = yoloLoss(7, 2, 5, 0.5)  # S=7, B=2, coord=5, noobj=0.5

# --------------------- optimizer ----------------------

# per-parameter-group learning rates: backbone ('features') weights get their own
# group (the multiplier is 1 here, so both groups currently use the same rate)
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=5e-4)
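
Because the backbone multiplier above is 1, both parameter groups end up training at the same rate; the per-group mechanism only matters once the rates actually differ. Below is a minimal sketch of the same optimizer with a genuinely reduced backbone rate (the 0.1 factor is an illustrative assumption, not a value from the script):

import torch

backbone_factor = 0.1  # hypothetical: train the pretrained backbone 10x slower than the head
param_groups = [
    {'params': [p for k, p in net.named_parameters() if k.startswith('features')],
     'lr': learning_rate * backbone_factor},
    {'params': [p for k, p in net.named_parameters() if not k.startswith('features')],
     'lr': learning_rate},
]
optimizer = torch.optim.SGD(param_groups, lr=learning_rate,
                            momentum=0.9, weight_decay=5e-4)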
Example 2
    else:
        # the matching `if` branch (an alternative backbone setup) is not shown in this
        # excerpt; this branch copies ImageNet-pretrained VGG16-BN 'features' weights
        # into the detector, leaving the detection head untouched
        vgg = models.vgg16_bn(pretrained=True)
        new_state_dict = vgg.state_dict()
        dd = net.state_dict()
        for k in new_state_dict.keys():
            if k in dd.keys() and k.startswith('features'):
                dd[k] = new_state_dict[k]
        net.load_state_dict(dd)

if use_gpu:
    print('this computer has %d GPU(s); the current device index is %s' %
          (torch.cuda.device_count(), torch.cuda.current_device()))
    net.cuda()

# --------------------- loss function ---------------------
criterion = yoloLoss(7, 2, 5, 0.5)  # S=7, B=2, coord=5, noobj=0.5

# --------------------- optimizer ----------------------

# per-parameter-group learning rates (the backbone multiplier is 1, so both rates are equal here)
params = []
params_dict = dict(net.named_parameters())
for key, value in params_dict.items():
    if key.startswith('features'):
        params += [{'params': [value], 'lr': learning_rate * 1}]
    else:
        params += [{'params': [value], 'lr': learning_rate}]
optimizer = torch.optim.SGD(params,
                            lr=learning_rate,
                            momentum=0.9,
                            weight_decay=5e-4)
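
Neither example shows how `criterion` and `optimizer` are then used. The sketch below is one plausible training pass, assuming a `train_loader` that yields `(images, target)` batches in the encoding `yoloLoss` expects and a `criterion(pred, target)` call signature (both are assumptions, not taken from the excerpts):

net.train()
for images, target in train_loader:  # hypothetical DataLoader over the training set
    if use_gpu:
        images, target = images.cuda(), target.cuda()

    pred = net(images)               # grid predictions, e.g. S x S x (B*5 + C) for S=7, B=2
    loss = criterion(pred, target)   # assumed signature: (prediction, target)

    optimizer.zero_grad()            # clear gradients from the previous step
    loss.backward()                  # backpropagate the YOLO loss
    optimizer.step()                 # SGD update using the per-group learning rates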