def train(model, optimizer, criterions, trainloader, epoch, scheduler, data):
    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global steps

    model.train()

    for i, (images, sbd_labels, lip_labels) in enumerate(trainloader):
        sbd_valid_pixel = float((sbd_labels.data != criterions[0].ignore_index).long().sum())
        lip_valid_pixel = float((lip_labels.data != criterions[1].ignore_index).long().sum())

        images = images.to(device)
        sbd_labels = sbd_labels.to(device)
        lip_labels = lip_labels.to(device)

        # Joint forward pass: task=2 returns predictions from both heads.
        sbd_outputs, lip_outputs = model(images, task=2)

        # SBD head: loss summed over pixels, normalized by the number of valid (non-ignored) pixels.
        sbd_loss = criterions[0](sbd_outputs, sbd_labels)
        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
            [sbd_outputs], sbd_labels, data.n_classes[0])
        classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
        classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
        classwise_predpixels = torch.FloatTensor([classwise_predpixels])
        totalclasswise_pixel_acc[0] += classwise_pixel_acc.sum(0).data.numpy()
        totalclasswise_gtpixels[0] += classwise_gtpixels.sum(0).data.numpy()
        totalclasswise_predpixels[0] += classwise_predpixels.sum(0).data.numpy()

        sbd_total_loss = sbd_loss.sum() / float(sbd_valid_pixel)
        # retain_graph=True keeps the shared encoder's graph alive for the LIP backward pass below.
        sbd_total_loss.backward(retain_graph=True)

        # LIP head: same treatment.
        lip_loss = criterions[1](lip_outputs, lip_labels)
        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
            [lip_outputs], lip_labels, data.n_classes[1])
        classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
        classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
        classwise_predpixels = torch.FloatTensor([classwise_predpixels])
        totalclasswise_pixel_acc[1] += classwise_pixel_acc.sum(0).data.numpy()
        totalclasswise_gtpixels[1] += classwise_gtpixels.sum(0).data.numpy()
        totalclasswise_predpixels[1] += classwise_predpixels.sum(0).data.numpy()

        lip_total_loss = lip_loss.sum() / float(lip_valid_pixel)
        lip_total_loss.backward()

        l_avg[0] += sbd_loss.sum().data.cpu().numpy()
        steps[0] += sbd_valid_pixel
        l_avg[1] += lip_loss.sum().data.cpu().numpy()
        steps[1] += lip_valid_pixel

        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()

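# `prediction_stat` is used throughout this file but defined elsewhere in the repository.
# The function below is a hedged sketch of the contract the loops above appear to assume
# (not the repository's actual implementation): given a list of output tensors, the labels,
# and the class count, it returns three per-class lists of pixel counts, assuming the
# ignore index lies outside [0, n_classes).
def prediction_stat_sketch(outputs_list, labels, n_classes):
    """Hypothetical stand-in for prediction_stat: per-class pixel statistics."""
    classwise_pixel_acc = [0] * n_classes   # correctly classified pixels per class
    classwise_gtpixels = [0] * n_classes    # ground-truth pixels per class
    classwise_predpixels = [0] * n_classes  # predicted pixels per class
    for outputs in outputs_list:
        preds = outputs.data.max(1)[1]  # (N, H, W) hard predictions
        for c in range(n_classes):
            gt_mask = labels.data == c
            pred_mask = preds == c
            classwise_pixel_acc[c] += int((gt_mask & pred_mask).long().sum())
            classwise_gtpixels[c] += int(gt_mask.long().sum())
            classwise_predpixels[c] += int(pred_mask.long().sum())
    return classwise_pixel_acc, classwise_gtpixels, classwise_predpixels
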
def val(model, criterions, valloader, epoch, data):
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test

    model.eval()

    for i, (images, sbd_labels, lip_labels) in enumerate(valloader):
        sbd_valid_pixel = float((sbd_labels.data != criterions[0].ignore_index).long().sum())
        lip_valid_pixel = float((lip_labels.data != criterions[1].ignore_index).long().sum())

        images = images.to(device)
        sbd_labels = sbd_labels.to(device)
        lip_labels = lip_labels.to(device)

        with torch.no_grad():
            sbd_outputs, lip_outputs = model(images, task=2)
            sbd_loss = criterions[0](sbd_outputs, sbd_labels)
            lip_loss = criterions[1](lip_outputs, lip_labels)

            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                [sbd_outputs], sbd_labels, data.n_classes[0])
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
            classwise_predpixels = torch.FloatTensor([classwise_predpixels])
            totalclasswise_pixel_acc_test[0] += classwise_pixel_acc.sum(0).data.numpy()
            totalclasswise_gtpixels_test[0] += classwise_gtpixels.sum(0).data.numpy()
            totalclasswise_predpixels_test[0] += classwise_predpixels.sum(0).data.numpy()

            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                [lip_outputs], lip_labels, data.n_classes[1])
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
            classwise_predpixels = torch.FloatTensor([classwise_predpixels])
            totalclasswise_pixel_acc_test[1] += classwise_pixel_acc.sum(0).data.numpy()
            totalclasswise_gtpixels_test[1] += classwise_gtpixels.sum(0).data.numpy()
            totalclasswise_predpixels_test[1] += classwise_predpixels.sum(0).data.numpy()

            l_avg_test[0] += sbd_loss.sum().data.cpu().numpy()
            steps_test[0] += sbd_valid_pixel
            l_avg_test[1] += lip_loss.sum().data.cpu().numpy()
            steps_test[1] += lip_valid_pixel

def val(model, criterion, valloader, epoch, data):
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test

    model.eval()

    for i, (images, labels) in enumerate(valloader):
        images = images.to(device)
        labels = labels.to(device)

        with torch.no_grad():
            outputs = model(images)
            loss = criterion(outputs, labels)

            total_valid_pixel = float((labels.data != criterion.ignore_index).long().sum())
            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                [outputs], labels, data.n_classes)
            classwise_pixel_acc = torch.FloatTensor(classwise_pixel_acc)
            classwise_gtpixels = torch.FloatTensor(classwise_gtpixels)
            classwise_predpixels = torch.FloatTensor(classwise_predpixels)

            l_avg_test += loss.sum().data.cpu().numpy()
            steps_test += total_valid_pixel
            totalclasswise_pixel_acc_test += classwise_pixel_acc.numpy()
            totalclasswise_gtpixels_test += classwise_gtpixels.numpy()
            totalclasswise_predpixels_test += classwise_predpixels.numpy()

def train(model, optimizer, criterion, trainloader, epoch, scheduler, data):
    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global steps

    model.train()

    for i, (images, labels) in enumerate(trainloader):
        images = images.to(device)
        labels = labels.to(device)

        outputs = model(images)
        loss = criterion(outputs, labels)

        total_valid_pixel = float((labels.data != criterion.ignore_index).long().sum())
        total_loss = loss.sum() / float(total_valid_pixel)
        total_loss.backward()

        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
            [outputs], labels, data.n_classes)
        classwise_pixel_acc = torch.FloatTensor(classwise_pixel_acc)
        classwise_gtpixels = torch.FloatTensor(classwise_gtpixels)
        classwise_predpixels = torch.FloatTensor(classwise_predpixels)

        l_avg += loss.sum().data.cpu().numpy()
        steps += total_valid_pixel
        totalclasswise_pixel_acc += classwise_pixel_acc.numpy()
        totalclasswise_gtpixels += classwise_gtpixels.numpy()
        totalclasswise_predpixels += classwise_predpixels.numpy()

        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()

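# Hedged sketch (an assumption, not taken from this repository): the single-task globals used
# by train()/val() above are presumably reset to shapes like these before each epoch, which is
# what makes the `+= classwise_*.numpy()` updates well defined as per-class accumulators.
def init_accumulators_sketch(n_classes):
    """Hypothetical helper illustrating the assumed shapes of the global accumulators."""
    return {
        'l_avg': 0.0,                                   # summed loss over all valid pixels
        'steps': 0.0,                                   # number of valid pixels seen
        'totalclasswise_pixel_acc': np.zeros(n_classes),
        'totalclasswise_gtpixels': np.zeros(n_classes),
        'totalclasswise_predpixels': np.zeros(n_classes),
    }
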
def val(model, criterions, valloader, epoch, data):
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test

    model.eval()

    for i, (images, labels, task) in enumerate(valloader):
        sbd_index = task == 0
        lip_index = task == 1

        sbd_images = images[sbd_index]
        lip_images = images[lip_index]
        sbd_labels = labels[sbd_index]
        lip_labels = labels[lip_index]

        num_sbd = sbd_images.size(0)
        num_lip = lip_images.size(0)

        sbd_valid_pixel = float((sbd_labels.data != criterions[0].ignore_index).long().sum())
        lip_valid_pixel = float((lip_labels.data != criterions[1].ignore_index).long().sum())

        sbd_images = sbd_images.to(device)
        lip_images = lip_images.to(device)
        sbd_labels = sbd_labels.to(device)
        lip_labels = lip_labels.to(device)

        with torch.no_grad():
            if num_sbd > 0:
                sbd_outputs = model(sbd_images, 0)
                loss0 = criterions[0](sbd_outputs, sbd_labels)

                classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                    [sbd_outputs], sbd_labels, data.n_classes[0])
                classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
                classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
                classwise_predpixels = torch.FloatTensor([classwise_predpixels])

                l_avg_test[0] += loss0.sum().data.cpu().numpy()
                steps_test[0] += sbd_valid_pixel
                totalclasswise_pixel_acc_test[0] += classwise_pixel_acc.sum(0).data.numpy()
                totalclasswise_gtpixels_test[0] += classwise_gtpixels.sum(0).data.numpy()
                totalclasswise_predpixels_test[0] += classwise_predpixels.sum(0).data.numpy()

            if num_lip > 0:
                lip_outputs = model(lip_images, 1)
                loss1 = criterions[1](lip_outputs, lip_labels)

                classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                    [lip_outputs], lip_labels, data.n_classes[1])
                classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
                classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
                classwise_predpixels = torch.FloatTensor([classwise_predpixels])

                l_avg_test[1] += loss1.sum().data.cpu().numpy()
                steps_test[1] += lip_valid_pixel
                totalclasswise_pixel_acc_test[1] += classwise_pixel_acc.sum(0).data.numpy()
                totalclasswise_gtpixels_test[1] += classwise_gtpixels.sum(0).data.numpy()
                totalclasswise_predpixels_test[1] += classwise_predpixels.sum(0).data.numpy()

def train(model, optimizers, criterions, trainloader, epoch, schedulers, data, counter_sizes):
    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global steps
    global counters, bug_counter

    model.train()

    if args.freeze:
        model.apply(set_bn_eval)

    for i, (images, labels, task) in enumerate(trainloader):
        # Split the mixed batch by task id (0 = SBD, 1 = LIP).
        sbd_index = task == 0
        lip_index = task == 1

        sbd_images = images[sbd_index]
        lip_images = images[lip_index]
        sbd_labels = labels[sbd_index]
        lip_labels = labels[lip_index]

        num_sbd = sbd_images.size(0)
        num_lip = lip_images.size(0)

        sbd_valid_pixel = float((sbd_labels.data != criterions[0].ignore_index).long().sum())
        lip_valid_pixel = float((lip_labels.data != criterions[1].ignore_index).long().sum())

        sbd_images = sbd_images.to(device)
        lip_images = lip_images.to(device)
        sbd_labels = sbd_labels.to(device)
        lip_labels = lip_labels.to(device)

        # Counters track how many images each sub-network has seen since its last update:
        # counters[0] -> shared CNN, counters[1] -> SBD head, counters[2] -> LIP head.
        counters[0] += images.size(0)
        counters[1] += num_sbd
        counters[2] += num_lip

        if num_sbd > 0:
            sbd_outputs = model(sbd_images, 0)
            loss0 = criterions[0](sbd_outputs, sbd_labels)
            total_loss0 = loss0.sum() / sbd_valid_pixel
            total_loss0 = total_loss0 / float(args.iter_size)
            total_loss0.backward()

            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                [sbd_outputs], sbd_labels, data.n_classes[0])
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
            classwise_predpixels = torch.FloatTensor([classwise_predpixels])

            l_avg[0] += loss0.sum().data.cpu().numpy()
            steps[0] += sbd_valid_pixel
            totalclasswise_pixel_acc[0] += classwise_pixel_acc.sum(0).data.numpy()
            totalclasswise_gtpixels[0] += classwise_gtpixels.sum(0).data.numpy()
            totalclasswise_predpixels[0] += classwise_predpixels.sum(0).data.numpy()

        if num_lip > 0:
            lip_outputs = model(lip_images, 1)
            loss1 = criterions[1](lip_outputs, lip_labels)
            total_loss1 = loss1.sum() / lip_valid_pixel
            total_loss1 = total_loss1 / float(args.iter_size)
            total_loss1.backward()

            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                [lip_outputs], lip_labels, data.n_classes[1])
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
            classwise_predpixels = torch.FloatTensor([classwise_predpixels])

            l_avg[1] += loss1.sum().data.cpu().numpy()
            steps[1] += lip_valid_pixel
            totalclasswise_pixel_acc[1] += classwise_pixel_acc.sum(0).data.numpy()
            totalclasswise_gtpixels[1] += classwise_gtpixels.sum(0).data.numpy()
            totalclasswise_predpixels[1] += classwise_predpixels.sum(0).data.numpy()

        # Update each sub-network once it has accumulated gradients from enough images.
        for j in range(3):
            if counters[j] >= counter_sizes[j]:
                optimizers[j].step()
                optimizers[j].zero_grad()
                schedulers[j].step()
                counters[j] -= counter_sizes[j]

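# `set_bn_eval` is referenced above (under args.freeze) but not defined in this file.
# Below is a minimal sketch of the conventional implementation, assuming the intent is to
# freeze BatchNorm running statistics while the rest of the model keeps training. The name
# is suffixed with _sketch because it is an assumption, not the repository's code.
def set_bn_eval_sketch(module):
    """Hypothetical stand-in: put every BatchNorm layer into eval mode via model.apply(...)."""
    import torch.nn as nn  # local import so the sketch is self-contained
    if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
        module.eval()
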
def val(model, criterion, valloader, epoch, data):
    print('=' * 10, 'Validate step', '=' * 10, '\n')
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test

    model.eval()

    for i, (images, labels) in enumerate(valloader):
        images = images.to(device)
        labels = labels.to(device)

        with torch.no_grad():
            outputs = model(images, labels)
            loss = criterion(outputs, labels)

            total_valid_pixel = torch.sum(labels.data != criterion.ignore_index)
            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
                [outputs], labels, data.n_classes)

            total_valid_pixel = torch.FloatTensor([total_valid_pixel]).to(device)
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc]).to(device)
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels]).to(device)
            classwise_predpixels = torch.FloatTensor([classwise_predpixels]).to(device)

            total_valid_pixel = float(total_valid_pixel.sum(0).data.cpu().numpy())
            l_avg_test += loss.sum().data.cpu().numpy()
            steps_test += total_valid_pixel
            totalclasswise_pixel_acc_test += classwise_pixel_acc.sum(0).data.cpu().numpy()
            totalclasswise_gtpixels_test += classwise_gtpixels.sum(0).data.cpu().numpy()
            totalclasswise_predpixels_test += classwise_predpixels.sum(0).data.cpu().numpy()

        if (i + 1) % 200 == 0:
            pickle.dump(
                images[0].cpu().numpy(),
                open(os.path.join(ROOT_ADDRESS, "results_parts/saved_val_images/" +
                                  str(epoch) + "_" + str(i) + "_input.p"), "wb"))
            pickle.dump(
                np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
                open(os.path.join(ROOT_ADDRESS, "results_parts/saved_val_images/" +
                                  str(epoch) + "_" + str(i) + "_output.p"), "wb"))
            pickle.dump(
                np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
                open(os.path.join(ROOT_ADDRESS, "results_parts/saved_val_images/" +
                                  str(epoch) + "_" + str(i) + "_target.p"), "wb"))

def train(model, optimizer, criterion, trainloader, epoch, scheduler, data):
    print('=' * 10, 'Train step', '=' * 10, '\n')
    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global steps

    model.train()

    if args.freeze:
        model.apply(set_bn_eval)

    for i, (images, labels) in enumerate(trainloader):
        images = images.to(device)
        labels = labels.to(device)
        # assert images.size()[2:] == labels.size()[1:]
        # print('Inputs size =', images.size())
        # print('Labels size =', labels.size())

        # Start of a gradient-accumulation window: clear accumulated gradients.
        if i % args.iter_size == 0:
            optimizer.zero_grad()

        outputs = model(images, labels)
        # assert outputs.size()[2:] == labels.size()[1:]
        # assert outputs.size(1) == data.n_classes
        # print('Outputs size =', outputs.size())

        loss = criterion(outputs, labels)
        total_valid_pixel = torch.sum(labels.data != criterion.ignore_index)
        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat(
            [outputs], labels, data.n_classes)

        total_valid_pixel = torch.FloatTensor([total_valid_pixel]).to(device)
        classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc]).to(device)
        classwise_gtpixels = torch.FloatTensor([classwise_gtpixels]).to(device)
        classwise_predpixels = torch.FloatTensor([classwise_predpixels]).to(device)

        total_valid_pixel = float(total_valid_pixel.sum(0).data.cpu().numpy())

        # Normalize by valid pixels and by iter_size so accumulated gradients average correctly.
        total_loss = loss.sum() / float(total_valid_pixel)
        total_loss = total_loss / float(args.iter_size)
        total_loss.backward()

        # End of the accumulation window: apply the accumulated update.
        if (i + 1) % args.iter_size == 0:
            optimizer.step()

        l_avg += loss.sum().data.cpu().numpy()
        steps += total_valid_pixel
        totalclasswise_pixel_acc += classwise_pixel_acc.sum(0).data.cpu().numpy()
        totalclasswise_gtpixels += classwise_gtpixels.sum(0).data.cpu().numpy()
        totalclasswise_predpixels += classwise_predpixels.sum(0).data.cpu().numpy()

        if (i + 1) % args.epoch_log_size == 0:
            print("Epoch [%d/%d] Loss: %.4f" % (epoch + 1, args.epochs, loss.sum().item()))

        if (i + 1) % args.iter_size == 0:
            scheduler.step()

        if (i + 1) % args.log_size == 0:
            pickle.dump(
                images[0].cpu().numpy(),
                open(os.path.join(ROOT_ADDRESS, "results_parts/saved_train_images/" +
                                  str(epoch) + "_" + str(i) + "_input.p"), "wb"))
            pickle.dump(
                np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
                open(os.path.join(ROOT_ADDRESS, "results_parts/saved_train_images/" +
                                  str(epoch) + "_" + str(i) + "_output.p"), "wb"))
            pickle.dump(
                np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
                open(os.path.join(ROOT_ADDRESS, "results_parts/saved_train_images/" +
                                  str(epoch) + "_" + str(i) + "_target.p"), "wb"))

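# Hedged sketch (not part of the original file) of how the accumulated globals can be turned
# into epoch-level numbers once an epoch finishes. The formulas are standard: per-class recall
# is correct pixels over ground-truth pixels, and per-class IoU is correct pixels over the
# union of ground-truth and predicted pixels. Argument names mirror the globals above.
def summarize_epoch_sketch(l_avg, steps, pixel_acc, gtpixels, predpixels, eps=1e-10):
    """Hypothetical helper: mean per-pixel loss, per-class recall, and per-class IoU."""
    mean_loss = l_avg / max(steps, 1.0)
    recall = pixel_acc / (gtpixels + eps)
    iou = pixel_acc / (gtpixels + predpixels - pixel_acc + eps)
    return mean_loss, recall, iou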