Example #1
if Trained_model_path != "": # Optionally load pretrained weights (guard reconstructed to match the indented line below; an assumption)
    Net.load_state_dict(torch.load(Trained_model_path))
Net=Net.cuda() # Move model to GPU
#optimizer=torch.optim.SGD(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay,momentum=0.5)
optimizer=torch.optim.Adam(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay) # Create Adam optimizer
#--------------------------- Create log files for saving loss during training----------------------------------------------------------------------------------------------------------
if not os.path.exists(TrainedModelWeightDir): os.makedirs(TrainedModelWeightDir) # Create folder for trained weight
f = open(TrainLossTxtFile, "w+")# Training loss log file
f.write("Iteration\tloss\t Learning Rate=")
f.close()
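# A minimal sketch (an assumption, not shown in this snippet) of how the loss
# would later be appended to this log inside the training loop:
# with open(TrainLossTxtFile, "a") as f:
#     f.write("\n" + str(itr) + "\t" + str(AVGLoss))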
#..............Start Training loop: Main Training....................................................................
AVGLoss=-1 # Running average loss (-1 marks that no loss has been accumulated yet)
print("Start Training")
for itr in range(860001,MAX_ITERATION): # Main training loop (the 860001 start suggests resuming a previous run)
    Reader.ClassBalance = np.random.rand()<0.5 # Randomly switch class-balanced sampling on and off per batch
    Imgs, GTMask, PredMask, IOU, IsThing = Reader.LoadBatch() # Load a training batch from the reader
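    # Assumed batch shapes, inferred from the usage below (not stated in the source):
    # Imgs [batch, H, W, 3] images, GTMask / PredMask [batch, H, W] binary masks,
    # IOU [batch] GT IOU score per predicted segment, IsThing [batch] thing/stuff flags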
    # Debug visualization (disabled): overlay the predicted and GT masks on the image
    # for oo in range(PredMask.shape[0]):
    #     print(IOU[oo])
    #     Imgs[oo,:,:,0] *= 1 - PredMask[oo,:,:]
    #     Imgs[oo, :, :, 1] *= 1 - GTMask[oo,:,:]
    #     misc.imshow(Imgs[oo])


    Prob, Lb=Net.forward(Images=Imgs,InMask=PredMask) # Forward pass: per-pixel class probabilities (Prob) and predicted labels (Lb)
    Net.zero_grad()
    OneHotLabels = ConvertLabelToOneHotEncoding.LabelConvert(GTMask,2)  # Convert GT label map to a one-hot encoded PyTorch tensor (2 classes)
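    # ConvertLabelToOneHotEncoding is an external helper; a hypothetical sketch of
    # what LabelConvert is assumed to do for a [batch, H, W] label map:
    # def LabelConvert(LabelMap, NumClasses):
    #     Labels = torch.from_numpy(LabelMap.astype(np.int64)).cuda()
    #     OneHot = torch.nn.functional.one_hot(Labels, NumClasses)  # [B, H, W, C]
    #     return OneHot.permute(0, 3, 1, 2).float()                 # [B, C, H, W]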

#    Loss = -torch.mean((OneHotLabels * torch.log(Prob + 0.0000001)))  # Calculate loss between prediction and ground truth label
    TorchGtIOU = torch.from_numpy(IOU.astype(np.float32)).cuda() # Per-image GT IOU as a tensor (requires_grad=False by default; torch.autograd.Variable is deprecated)
    Loss = -torch.mean(torch.mean(torch.mean(torch.mean(OneHotLabels * torch.log(Prob + 0.0000001), dim=1), dim=1), dim=1) * TorchGtIOU)*3 # Per-image cross entropy weighted by the segment's GT IOU, scaled by 3
    Loss.backward() # Backpropagate loss
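    # Equivalent, more compact form of the loss above (a sketch, same math: the
    # three nested means reduce [B, C, H, W] to one cross-entropy value per image):
    # PerImageCE = -(OneHotLabels * torch.log(Prob + 1e-7)).mean(dim=(1, 2, 3))
    # Loss = (PerImageCE * TorchGtIOU).mean() * 3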