import copy
import datetime
import os
import sys
import time

import numpy as np
import scipy.misc  # note: scipy.misc.imsave was removed in SciPy 1.2
import torch
import tqdm
from torch.autograd import Variable  # no-op wrapper on PyTorch >= 0.4, kept for compatibility

import lovasz_losses as L  # Lovasz-Softmax helpers (Berman et al.); assumed module name


def val(epoch, args, criterion, val_loader, test_loader, filename=None):
    # Assumes module-level globals: fcn_model, use_gpu, gpu_used, n_class,
    # pixel_acc, early_stopping.
    fcn_model.eval()
    total_ious = []
    pixel_accs = []
    iteration = 0
    val_loss = 0
    count = 0
    for batch_idx, (data, target) in tqdm.tqdm(
            enumerate(val_loader), total=len(val_loader),
            desc='Valid iteration=%d' % iteration, ncols=80, leave=False):
        if use_gpu:
            inputs = Variable(data.cuda(gpu_used))
        else:
            inputs = Variable(data)
        output = fcn_model(inputs)
        if args.loss == "CE":
            val_loss += criterion(output, target.cuda(gpu_used)).item()
        else:
            val_loss += L.lovasz_softmax(output, target.cuda(gpu_used), classes=[1]).item()
        count += 1
        output = output.data.cpu().numpy()
        N, c, h, w = output.shape
        # Per-pixel argmax over the class dimension.
        pred = output.transpose(0, 2, 3, 1).reshape(-1, n_class).argmax(axis=1).reshape(N, h, w)
        target = target.cpu().numpy().reshape(N, h, w)
        for p, t in zip(pred, target):
            total_ious.append(L.iou_binary(p, t))
            pixel_accs.append(pixel_acc(p, t))
        iteration += 1
    val_loss /= count
    pixel_accs = np.array(pixel_accs).mean()
    print("epoch: {}, pix_acc: {}, IoU: {}, val_loss: {}".format(
        epoch, pixel_accs, np.mean(total_ious), val_loss))
    if args.file:
        with open(filename, "a") as csv_file:
            csv_file.write("{},{},{}\n".format(pixel_accs, np.mean(total_ious), val_loss))
    early_stopping(np.mean(total_ious))
    if early_stopping.early_stop:
        print("Early stopping")
        test_set(test_loader, filename)
        sys.exit()
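# Both val() variants and test_set() below reference `early_stopping` and
# `pixel_acc` without defining them. Minimal sketches follow, assuming a
# patience-based monitor on validation IoU (higher is better); the class, its
# parameters, and the pixel_acc implementation are reconstructions, not the
# original code.
class EarlyStopping:
    def __init__(self, patience=10, delta=0.0):
        self.patience = patience
        self.delta = delta
        self.best_score = None
        self.counter = 0
        self.early_stop = False

    def __call__(self, score):
        # Reset the counter on improvement; otherwise count toward patience.
        if self.best_score is None or score > self.best_score + self.delta:
            self.best_score = score
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True


early_stopping = EarlyStopping(patience=10)


def pixel_acc(pred, target):
    # Fraction of pixels where the predicted label matches the ground truth
    # (pred and target arrive here as H x W numpy arrays).
    return float((pred == target).sum()) / target.size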
def criterion(inputs, target):
    # Combined loss for a model with main ('out') and auxiliary ('aux') heads,
    # e.g. a DeepLab-style network. Label value 128 marks ignored pixels.
    sigmoid = torch.sigmoid(inputs['out'])
    sigmoid_aux = torch.sigmoid(inputs['aux'])
    preds = (inputs['out'].data > 0).long()  # logit > 0 is probability > 0.5
    loss = L.lovasz_softmax(sigmoid, target, classes=[1], ignore=128)
    loss_aux = L.lovasz_softmax(sigmoid_aux, target, classes=[1], ignore=128)
    iou = L.iou_binary(preds, target, ignore=128, per_image=True)
    # Weight the auxiliary head at 0.5, the usual auxiliary-loss convention.
    return loss + 0.5 * loss_aux, iou
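# Usage sketch for criterion() above, assuming the model's forward pass
# returns a dict with 'out' and 'aux' logits of shape [N, 1, H, W] and
# integer labels of shape [N, H, W]. The random tensors are stand-ins for a
# real forward pass; nothing below comes from the original code.
if __name__ == '__main__':
    outputs = {'out': torch.randn(2, 1, 64, 64, requires_grad=True),
               'aux': torch.randn(2, 1, 64, 64, requires_grad=True)}
    labels = torch.randint(0, 2, (2, 64, 64))
    loss, iou = criterion(outputs, labels)
    loss.backward()  # gradients flow through both heads via the sigmoids
    print(loss.item(), iou)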
def test_set(test_loader, filename):
    print("TEST SET EVALUATION")
    fcn_model.eval()
    total_ious = []
    pixel_accs = []
    for batch_idx, (data, target) in tqdm.tqdm(
            enumerate(test_loader), total=len(test_loader),
            desc='Test iteration', ncols=80, leave=False):
        if use_gpu:
            inputs = Variable(data.cuda())
        else:
            inputs = Variable(data)
        output = fcn_model(inputs)
        output = output.data.cpu().numpy()
        N, c, h, w = output.shape
        pred = output.transpose(0, 2, 3, 1).reshape(-1, n_class).argmax(axis=1).reshape(N, h, w)
        target = target.cpu().numpy().reshape(N, h, w)
        if batch_idx % 100 == 0:
            # Dump a prediction/ground-truth pair every 100 batches for inspection.
            now = datetime.datetime.now()
            stamp = "{}_{}_{}-{}".format(now.day, now.hour, now.minute, batch_idx)
            scipy.misc.imsave("predictions/" + stamp + "-prediction.jpg", pred.squeeze())
            scipy.misc.imsave("predictions/" + stamp + "-truth.jpg", target.squeeze())
        for p, t in zip(pred, target):
            total_ious.append(L.iou_binary(p, t))
            pixel_accs.append(pixel_acc(p, t))
    pixel_accs = np.array(pixel_accs).mean()
    print("pix_acc: {}, IoU: {}, file: {}".format(pixel_accs, np.mean(total_ious), filename))
    with open("test_fcn_combined_results.csv", "a") as csv_file:
        csv_file.write(str(np.mean(total_ious)) + "\n")
def val(fcn_model, epoch, args, criterion, val_loader, filename=None, filename1=None):
    # Variant of val() that takes the model explicitly and logs total training
    # time on early stop. Assumes globals: use_gpu, n_class, pixel_acc,
    # early_stopping, start_time.
    fcn_model.eval()
    total_ious = []
    pixel_accs = []
    iteration = 0
    val_loss = 0
    count = 0
    for batch_idx, (data, target) in enumerate(val_loader):
        if use_gpu:
            inputs = Variable(data.cuda(3))  # hard-coded GPU id 3
        else:
            inputs = Variable(data)
        output = fcn_model(inputs)
        if args.loss == "CE":
            val_loss += criterion(output, target.cuda(3)).item()
        else:
            val_loss += L.lovasz_softmax(output, target.cuda(3), classes=[1]).item()
        count += 1
        output = output.data.cpu().numpy()
        N, c, h, w = output.shape
        pred = output.transpose(0, 2, 3, 1).reshape(-1, n_class).argmax(axis=1).reshape(N, h, w)
        target = target.cpu().numpy().reshape(N, h, w)
        if (batch_idx + 1) % 700 == 0:
            # Periodically dump a prediction/ground-truth pair for inspection.
            now = datetime.datetime.now()
            stamp = "{}_{}_{}-{}-{}".format(now.day, now.hour, now.minute, epoch, batch_idx)
            scipy.misc.imsave("predictions/" + stamp + "-prediction.jpg", pred.squeeze())
            scipy.misc.imsave("predictions/" + stamp + "-truth.jpg", target.squeeze())
        for p, t in zip(pred, target):
            total_ious.append(L.iou_binary(p, t))
            pixel_accs.append(pixel_acc(p, t))
        iteration += 1
    val_loss /= count
    pixel_accs = np.array(pixel_accs).mean()
    print("epoch: {}, pix_acc: {}, IoU: {}, val_loss: {}".format(
        epoch, pixel_accs, np.mean(total_ious), val_loss))
    early_stopping(np.mean(total_ious))
    if early_stopping.early_stop:
        print("Early stopping")
        total_time = time.time() - start_time
        print(total_time)
        with open(filename1, "a") as timings_file:
            timings_file.write(str(total_time) + " / " + str(epoch) + "\n")
        sys.exit()
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    # Assumes globals: dataloaders, dataset_sizes, device, path.
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        iters = len(dataloaders['train'])
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # set model to training mode
            else:
                model.eval()   # set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for i, (inputs, labels) in enumerate(tqdm.tqdm(dataloaders[phase])):
                inputs = inputs.to(device)
                labels = labels.to(device)
                if phase == 'train':
                    # Fractional epoch for CosineAnnealingWarmRestarts-style schedulers.
                    scheduler.step(epoch + i / iters)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    output = model(inputs)
                    pred = (torch.sigmoid(output) > .5).long()
                    loss = criterion(output, labels, ignore=2)
                    # backward + optimize only in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += L.iou_binary(
                    pred, labels, ignore=2, per_image=False) * inputs.size(0)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]
            print('{} Overall Loss: {:.4f} IoU: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            with open(os.path.join(path, 'handEpoch.txt'), 'a') as log:
                log.write('{} Hand Overall Loss: {:.4f} No Hand IoU: {:.4f}\n\n'.format(
                    phase, epoch_loss, epoch_acc))
            # deep copy the model when validation IoU improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val IoU: {:.4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
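# Usage sketch for train_model() above. Assumptions: `dataloaders`,
# `dataset_sizes`, `device`, and `path` exist as module-level globals (the
# loop reads them directly); the model returns raw binary logits rather than
# a dict; and L.lovasz_hinge is passed as the criterion, since train_model
# calls criterion(output, labels, ignore=2) on logits. The fractional
# scheduler.step(epoch + i / iters) call matches
# torch.optim.lr_scheduler.CosineAnnealingWarmRestarts; the optimizer
# hyperparameters below are illustrative, not from the original code.
optimizer = torch.optim.SGD(fcn_model.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10)
best_model = train_model(fcn_model.to(device), L.lovasz_hinge, optimizer,
                         scheduler, num_epochs=25)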