testloss_to_file,
                  trainAcc_to_file,
                  testAcc_to_file,
                  Parameters,
                  model_name,
                  train_mode,
                  dataset,
                  plot=False)
#########    error-2
lr_stage2 = 1e-4  # alternative: max(1e-5, 10 * optimizer.param_groups[-1]['lr'])

print('STAGE2')
# Alternative with per-module parameter groups, analogous to stage 1:
# optimizer = torch.optim.Adam([{'params': model.features.parameters()},
#                               {'params': model.compressed_features.parameters()},
#                               {'params': model.frozen_features.parameters(), 'lr': lr_stage2},
#                               {'params': model.classifier.parameters(), 'lr': lr_stage2}],
#                              lr=lr_stage2, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0005)
optimizer = torch.optim.Adam(model.parameters(),
                             lr=lr_stage2,
                             betas=(0.9, 0.999),
                             eps=1e-08,
                             weight_decay=0.0005)
scheduler = ReduceLROnPlateau(optimizer,
                              'max',
                              verbose=True,
                              patience=5,
                              eps=1e-9)
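# Illustrative sketch (not from the original script): a 'max'-mode
# ReduceLROnPlateau is stepped on a metric that should increase, so the
# stage-2 loop presumably feeds it the test accuracy once per epoch.
# `train_one_epoch`, `evaluate`, and `test_acc` are hypothetical names.
#
#     for epoch in range(num_epochs):
#         train_one_epoch(model, optimizer, Trainloader)
#         test_acc = evaluate(model, Testloader)
#         scheduler.step(test_acc)  # cuts the LR after `patience` flat epochs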
trainAcc_to_file, testAcc_to_file, trainloss_to_file, testloss_to_file, Parameters = Train_stage2(
    model,
    optimizer,
    Trainloader,
    Testloader,
    epochs=None,
    # The remaining keyword arguments are truncated in the source; those
    # below mirror the other Train_stage1/Train_stage2 calls in this file.
    Train_mode=train_mode,
    Model_name=model_name,
    Dataset=dataset,
    scheduler=scheduler)
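# Sketch of the loss Train_stage2 is assumed to optimize: standard
# knowledge distillation (Hinton et al., 2015), matching the teacher's
# softened logits. All names below are illustrative, not taken from
# Train_stage2 itself.
#
#     import torch.nn.functional as F
#     T = 4.0  # softmax temperature
#     soft_teacher = F.softmax(teacher_logits / T, dim=1)
#     log_soft_student = F.log_softmax(student_logits / T, dim=1)
#     kd_loss = F.kl_div(log_soft_student, soft_teacher,
#                        reduction='batchmean') * T * T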
######### Example 2
model = torch.nn.DataParallel(model)  # wrap model in DataParallel
if ensembleModel is not None:
    ensembleModel = torch.nn.DataParallel(ensembleModel)
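# Note (added for clarity): nn.DataParallel registers the wrapped network
# as the submodule `module`, so named layers must be reached through it,
# e.g. model.module.features rather than model.features. The fitnet
# optimizer below is written accordingly.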
    
print('model=' + model_name)
print('big model=' + big_model_name)
lr_stage1 = 1e-4
if train_mode.lower() == 'fitnet':
    optimizer = torch.optim.Adam(
        [{'params': model.module.features.parameters()},
         {'params': model.module.compressed_features.parameters(), 'lr': lr_stage1},
         {'params': model.module.frozen_features.parameters(), 'lr': lr_stage1},
         {'params': model.module.regressor.parameters(), 'lr': lr_stage1},
         {'params': model.module.classifier.parameters(), 'lr': lr_stage1}],
        lr=lr_stage1, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0005)
else:
    optimizer = torch.optim.Adam(model.parameters(), lr=lr_stage1,
                                 betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0005)
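# Quick sanity check (illustrative addition): Adam keeps one dict per
# parameter group, so the per-group learning rates configured above can be
# inspected directly.
for i, group in enumerate(optimizer.param_groups):
    print('param group %d: lr=%g' % (i, group['lr']))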
scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True, patience=5,
                              eps=1e-8, threshold=1e-20)
print('STAGE1')
trainloss_to_file, testloss_to_file, Parameters = Train_stage1(
    model,
    optimizer,
    Trainloader,
    Testloader,
    epochs=None,
    Train_mode=train_mode,
    Model_name=model_name,
    Dataset=dataset,
    scheduler=scheduler,
    big_model_name=big_model_name,
    ensembleModel=ensembleModel)
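# Sketch of what Train_stage1 presumably optimizes in 'fitnet' mode: the
# hint-based stage of FitNets (Romero et al., 2015), where a regressor on
# top of the student's guided layer is trained to match the teacher's hint
# features under an MSE loss. Names below are illustrative.
#
#     import torch.nn.functional as F
#     hints = teacher_hint_layer(images).detach()   # teacher target, no grad
#     guided = model.module.regressor(model.module.features(images))
#     hint_loss = F.mse_loss(guided, hints)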
    
#########################################################
##### EVAL
trainAcc_to_file, testAcc_to_file, _, _, _ = Train_stage2(
    model, optimizer, Trainloader, Testloader, epochs=0, Train_mode=train_mode,
    # remaining kwargs are truncated in the source; assumed to mirror Train_stage1:
    Model_name=model_name, Dataset=dataset, scheduler=scheduler)