Example #1
File: debug.py  Project: Bohao-Lee/CME
# Imports needed by this excerpt (m, weightfile, imgpath, label, and
# image2torch are defined earlier in debug.py).
import torch.optim as optim
from PIL import Image
from torch.autograd import Variable

region_loss = m.loss
m.load_weights(weightfile)

print('--- bn weight ---')
print(m.models[0][1].weight)
print('--- bn bias ---')
print(m.models[0][1].bias)
print('--- bn running_mean ---')
print(m.models[0][1].running_mean)
print('--- bn running_var ---')
print(m.models[0][1].running_var)

m.train()
m = m.cuda()

optimizer = optim.SGD(m.parameters(), lr=1e-2, momentum=0.9, weight_decay=0.1)

img = Image.open(imgpath)
img = image2torch(img)
img = Variable(img.cuda())

target = Variable(label)

print('----- img ---------------------')
print(img.data.storage()[0:100])
print('----- target  -----------------')
print(target.data.storage()[0:100])

optimizer.zero_grad()
output = m(img)
print('----- output ------------------')
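The excerpt ends at the output banner. A minimal sketch of the usual continuation, assuming region_loss is a criterion-style module taking (output, target) (not shown in the original excerpt):

print(output.data.storage()[0:100])
loss = region_loss(output, target)   # region/YOLO loss on the single image
print('----- loss --------------------')
print(loss)
loss.backward()                      # backprop through the debug forward pass
optimizer.step()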
Example #2
                                          **kwargs)  # tail of a call whose beginning is cut off in this excerpt

if use_cuda:
    if ngpus > 1:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

# Disable weight decay for batch-norm and bias parameters; everything else
# decays by `decay`, scaled by batch_size to match the per-batch LR below.
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
    if key.find('.bn') >= 0 or key.find('.bias') >= 0:
        params += [{'params': [value], 'weight_decay': 0.0}]
    else:
        params += [{'params': [value], 'weight_decay': decay * batch_size}]
# Pass the per-group list so the weight_decay overrides above take effect;
# the constructor's weight_decay is only a default for groups without one.
optimizer = optim.SGD(params,
                      lr=learning_rate / batch_size,
                      momentum=momentum,
                      dampening=0,
                      weight_decay=decay * batch_size)
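To confirm the grouping took effect, the optimizer's parameter groups can be inspected directly; a quick check (not part of the original file):

for group in optimizer.param_groups:
    # bn/bias groups should report 0.0; the rest decay * batch_size
    print(len(group['params']), group['weight_decay'])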


def adjust_learning_rate(optimizer, batch):
    """Scale the learning rate by the configured `scales` factors once the
    batch count reaches each threshold in `steps`."""
    lr = learning_rate
    for i in range(len(steps)):
        scale = scales[i] if i < len(scales) else 1
        if batch >= steps[i]:
            lr = lr * scale
            if batch == steps[i]:
                break
        else:
            # steps are ascending, so stop at the first threshold not yet reached
            break
    # Apply the new rate to every group; divide by batch_size to match the
    # per-batch scaling used when the optimizer was created.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr / batch_size
    return lr
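The function is meant to be called once per batch with a running batch counter; a minimal usage sketch, assuming a hypothetical `processed_batches` counter, a `train_loader`, and a placeholder `criterion` loss defined elsewhere:

processed_batches = 0
for data, target in train_loader:        # train_loader assumed defined
    lr = adjust_learning_rate(optimizer, processed_batches)
    processed_batches += 1
    optimizer.zero_grad()
    output = model(data)
    loss = criterion(output, target)     # criterion is a placeholder loss
    loss.backward()
    optimizer.step()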