import torch
from torch import optim

def weights_init(m):
    # Custom initialization applied to every submodule:
    # conv weights ~ N(0, 0.02), BatchNorm weights ~ N(1, 0.02), BatchNorm biases = 0.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

from crnn.models.crnn import CRNN
from config import ocrModel, LSTMFLAG, GPU
# alphabetChinese (the Chinese character set) is assumed to be available,
# e.g. imported from crnn.keys in the chineseocr project.

model = CRNN(32, 1, len(alphabetChinese) + 1, 256, 1, lstmFlag=LSTMFLAG)
model.apply(weights_init)

preWeightDict = torch.load(ocrModel, map_location=lambda storage, loc: storage)  ## load the project's pretrained weights
modelWeightDict = model.state_dict()
for k, v in preWeightDict.items():
    name = k.replace('module.', '')  # remove the `module.` prefix added by DataParallel
    if 'rnn.1.embedding' not in name:  ## skip the final (embedding) layer so it can be retrained
        modelWeightDict[name] = v

model.load_state_dict(modelWeightDict)

## optimizer
from crnn.util import strLabelConverter
lr = 0.1
optimizer = optim.Adadelta(model.parameters(), lr=lr)
converter = strLabelConverter(''.join(alphabetChinese))
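The converter maps label strings to the integer sequences that CTC training expects and back. A minimal usage sketch, assuming the encode/decode interface of the upstream crnn.pytorch strLabelConverter (encode returns a flattened target tensor plus per-sample lengths; decode reverses it):

# Hypothetical usage; `converter` is the strLabelConverter built above.
texts = ['中国', '深度学习']                   # ground-truth label strings (no repeated adjacent characters)
targets, lengths = converter.encode(texts)     # flattened int targets + per-sample lengths (assumed API)
decoded = converter.decode(targets, lengths)   # maps indices back to strings, collapsing CTC blanks/repeats
print(decoded)                                 # expected to round-trip to the original texts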
            # ... tail of the validation loop: count correct predictions and report accuracy
                n_correct += 1
        # print('n_correct: ', n_correct)
    accuracy = n_correct / float(max_iter)
    print('val_accuracy: ', accuracy)
    return accuracy

print('lstm: ', LSTMFLAG)
model = CRNN(32, 1, len(alphabetChinese) + 1, 256, 1, lstmFlag=LSTMFLAG)
# only apply random initialization when training from scratch
# model.apply(weights_init)

print('load weights: ', ocrModel)
preWeightDict = torch.load(ocrModel, map_location=lambda storage, loc: storage)  ## load the project's pretrained weights
modelWeightDict = model.state_dict()
for k, v in preWeightDict.items():
    name = k.replace('module.', '')  # remove the `module.` prefix added by DataParallel
    if 'rnn.1.embedding' not in name:  ## skip the final (embedding) layer so it can be retrained
        modelWeightDict[name] = v

model.load_state_dict(modelWeightDict)
print('model has been loaded')
# print(model)

# if the final head is dense, use SGD; if it is an LSTM, use Adadelta
# lr = 0.1
# optimizer = optim.Adadelta(model.parameters(), lr=0.001)
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.6)
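The section stops before the training loop itself. For orientation, here is a minimal sketch of one CTC training step under stated assumptions: torch.nn.CTCLoss with blank index 0, a (T, N, num_classes) output from the CRNN that is not yet log-normalized, the `converter`/`optimizer` defined above, and a hypothetical batch of (images, texts). It is a sketch, not the project's exact training code.

import torch
import torch.nn.functional as F

criterion = torch.nn.CTCLoss(blank=0, zero_infinity=True)  # assumes index 0 is the CTC blank

def train_step(model, images, texts):
    # images: (N, 1, 32, W) grayscale crops; texts: list of N label strings
    targets, target_lengths = converter.encode(texts)   # assumed strLabelConverter API
    preds = model(images)                                # (T, N, num_classes) raw scores (assumed)
    log_probs = F.log_softmax(preds, dim=2)              # CTCLoss expects log-probabilities
    input_lengths = torch.full((images.size(0),), log_probs.size(0), dtype=torch.long)
    loss = criterion(log_probs, targets, input_lengths, target_lengths)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()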