Example 1
def valmodel(model, valloader, epoch):
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test

    model.eval()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    for i, (imgs_test, lbls_test) in enumerate(valloader):
        imgs_testV = imgs_test.to(device)
        lbls_testV = lbls_test.to(device)

        # Disable autograd during validation; this replaces the
        # `volatile=True` flag, which was removed in PyTorch 0.4.
        with torch.no_grad():
            outputs, losses, classwise_pixel_acc, classwise_gtpixels, classwise_predpixels, total_valid_pixel = \
                model(imgs_testV, lbls_testV)

        total_valid_pixel = float(total_valid_pixel.sum(0).cpu().numpy())

        l_avg_test += losses.sum().cpu().numpy()
        steps_test += total_valid_pixel
        totalclasswise_pixel_acc_test += classwise_pixel_acc.sum(0).cpu().numpy()
        totalclasswise_gtpixels_test += classwise_gtpixels.sum(0).cpu().numpy()
        totalclasswise_predpixels_test += classwise_predpixels.sum(0).cpu().numpy()

        if (i + 1) % 50 == 0:
            prefix = "results/saved_val_images/" + str(epoch) + "_" + str(i)

            # Save the input image, the predicted segmentation map, and the
            # ground truth; `with` guarantees the file handles are closed.
            with open(prefix + "_input.p", "wb") as f:
                pickle.dump(imgs_test[0].numpy(), f)

            with open(prefix + "_output.p", "wb") as f:
                pickle.dump(
                    np.transpose(
                        data.decode_segmap(outputs[0].cpu().numpy().argmax(0)),
                        [2, 0, 1]), f)

            with open(prefix + "_target.p", "wb") as f:
                pickle.dump(
                    np.transpose(data.decode_segmap(lbls_test[0].numpy()),
                                 [2, 0, 1]), f)
Example 2
def val(model, criterion, valloader, epoch, data):
    print('=' * 10, 'Validate step', '=' * 10, '\n')

    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test

    model.eval()

    for i, (images, labels) in enumerate(valloader):
        images = images.to(device)
        labels = labels.to(device)

        with torch.no_grad():
            outputs = model(images, labels)

            loss = criterion(outputs, labels)
            total_valid_pixel = torch.sum(labels != criterion.ignore_index)
            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = \
                prediction_stat([outputs], labels, data.n_classes)

            total_valid_pixel = torch.FloatTensor([total_valid_pixel]).to(device)
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc]).to(device)
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels]).to(device)
            classwise_predpixels = torch.FloatTensor([classwise_predpixels]).to(device)

            total_valid_pixel = float(total_valid_pixel.sum(0).cpu().numpy())

            l_avg_test += loss.sum().cpu().numpy()
            steps_test += total_valid_pixel
            totalclasswise_pixel_acc_test += classwise_pixel_acc.sum(0).cpu().numpy()
            totalclasswise_gtpixels_test += classwise_gtpixels.sum(0).cpu().numpy()
            totalclasswise_predpixels_test += classwise_predpixels.sum(0).cpu().numpy()

            if (i + 1) % 200 == 0:
                prefix = os.path.join(
                    ROOT_ADDRESS, "results_parts/saved_val_images/" +
                    str(epoch) + "_" + str(i))

                # Save the input image, the predicted segmentation map, and
                # the ground truth; `with` closes the file handles.
                with open(prefix + "_input.p", "wb") as f:
                    pickle.dump(images[0].cpu().numpy(), f)

                with open(prefix + "_output.p", "wb") as f:
                    pickle.dump(
                        np.transpose(
                            data.decode_segmap(
                                outputs[0].cpu().numpy().argmax(0)),
                            [2, 0, 1]), f)

                with open(prefix + "_target.p", "wb") as f:
                    pickle.dump(
                        np.transpose(
                            data.decode_segmap(labels[0].cpu().numpy()),
                            [2, 0, 1]), f)
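prediction_stat is imported from elsewhere in the repository. A plausible minimal implementation, assuming it returns per-class lists of correctly classified pixels, ground-truth pixels, and predicted pixels, which is how the caller consumes them:

import torch

def prediction_stat(outputs_list, labels, n_classes):
    # Hard predictions from the (N, C, H, W) logits of the first output.
    preds = outputs_list[0].argmax(dim=1)
    correct, gt, predicted = [], [], []
    for c in range(n_classes):
        gt_mask = labels == c
        pred_mask = preds == c
        correct.append(torch.sum(gt_mask & pred_mask).item())
        gt.append(torch.sum(gt_mask).item())
        predicted.append(torch.sum(pred_mask).item())
    return correct, gt, predicted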
Example 3
def trainmodel(model, optimizer, trainloader, epoch, scheduler, data):
    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global steps

    model.train()
    if args.freeze:
        model.apply(set_bn_eval)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    for i, (images, labels) in enumerate(trainloader):
        # `Variable` is deprecated since PyTorch 0.4; plain tensors carry
        # autograd state, so a device transfer is all that is needed.
        imagesV = images.to(device)
        labelsV = labels.to(device)

        if i % args.iter_size == 0:
            optimizer.zero_grad()

        outputs, losses, classwise_pixel_acc, classwise_gtpixels, classwise_predpixels, total_valid_pixel = \
            model(imagesV, labelsV)

        total_valid_pixel = float(total_valid_pixel.sum(0).detach().cpu().numpy())

        totalloss = losses.sum()

        # The criterion sums rather than averages (size_average=False), so
        # normalize by the number of valid pixels ...
        totalloss = totalloss / float(total_valid_pixel)

        # ... and by iter_size, so gradients accumulated over several forward
        # passes average out to one effective batch.
        totalloss = totalloss / float(args.iter_size)

        totalloss.backward()

        if (i + 1) % args.iter_size == 0:
            optimizer.step()

        l_avg += losses.sum().detach().cpu().numpy()
        steps += total_valid_pixel
        totalclasswise_pixel_acc += classwise_pixel_acc.sum(0).detach().cpu().numpy()
        totalclasswise_gtpixels += classwise_gtpixels.sum(0).detach().cpu().numpy()
        totalclasswise_predpixels += classwise_predpixels.sum(0).detach().cpu().numpy()

        print("Epoch [%d/%d] Loss: %.4f" %
              (epoch + 1, args.n_epoch, losses.sum().data[0]))

        if (i + 1) % args.iter_size == 0:
            scheduler.step()

        if (i + 1) % args.log_size == 0:
            prefix = "results/saved_train_images/" + str(epoch) + "_" + str(i)

            # Save the input image, the predicted segmentation map, and the
            # ground truth; `with` closes the file handles.
            with open(prefix + "_input.p", "wb") as f:
                pickle.dump(images[0].numpy(), f)

            with open(prefix + "_output.p", "wb") as f:
                pickle.dump(
                    np.transpose(
                        data.decode_segmap(
                            outputs[0].detach().cpu().numpy().argmax(0)),
                        [2, 0, 1]), f)

            with open(prefix + "_target.p", "wb") as f:
                pickle.dump(
                    np.transpose(data.decode_segmap(labels[0].numpy()),
                                 [2, 0, 1]), f)
Example 4
def train(model, optimizer, criterion, trainloader, epoch, scheduler, data):
    print('=' * 10, 'Train step', '=' * 10, '\n')

    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global steps

    model.train()

    if args.freeze:
        model.apply(set_bn_eval)

    for i, (images, labels) in enumerate(trainloader):
        images = images.to(device)
        labels = labels.to(device)
        # assert images.size()[2:] == labels.size()[1:]
        # print('Inputs size =', images.size())
        # print('Labels size =', labels.size())

        if i % args.iter_size == 0:
            optimizer.zero_grad()

        outputs = model(images, labels)
        # assert outputs.size()[2:] == labels.size()[1:]
        # assert outputs.size(1) == data.n_classes
        # print('Outputs size =', outputs.size())

        loss = criterion(outputs, labels)

        total_valid_pixel = torch.sum(labels != criterion.ignore_index)
        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = \
            prediction_stat([outputs], labels, data.n_classes)

        total_valid_pixel = torch.FloatTensor([total_valid_pixel]).to(device)
        classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc]).to(device)
        classwise_gtpixels = torch.FloatTensor([classwise_gtpixels]).to(device)
        classwise_predpixels = torch.FloatTensor([classwise_predpixels]).to(device)

        total_valid_pixel = float(total_valid_pixel.sum(0).cpu().numpy())

        # Normalize the summed loss by the valid-pixel count (the criterion
        # uses sum reduction) and by iter_size for gradient accumulation.
        total_loss = loss.sum()
        total_loss = total_loss / float(total_valid_pixel)
        total_loss = total_loss / float(args.iter_size)
        total_loss.backward()

        # Step only after iter_size accumulation passes, matching the
        # zero_grad schedule above; stepping on every window-opening batch
        # would discard the gradients accumulated in between.
        if (i + 1) % args.iter_size == 0:
            optimizer.step()

        l_avg += loss.sum().detach().cpu().numpy()
        steps += total_valid_pixel
        totalclasswise_pixel_acc += classwise_pixel_acc.sum(0).cpu().numpy()
        totalclasswise_gtpixels += classwise_gtpixels.sum(0).cpu().numpy()
        totalclasswise_predpixels += classwise_predpixels.sum(0).cpu().numpy()

        if (i + 1) % args.epoch_log_size == 0:
            print("Epoch [%d/%d] Loss: %.4f" %
                  (epoch + 1, args.epochs, loss.sum().item()))

        if (i + 1) % args.iter_size == 0:
            scheduler.step()

        if (i + 1) % args.log_size == 0:
            prefix = os.path.join(
                ROOT_ADDRESS, "results_parts/saved_train_images/" +
                str(epoch) + "_" + str(i))

            # Save the input image, the predicted segmentation map, and the
            # ground truth; `with` closes the file handles.
            with open(prefix + "_input.p", "wb") as f:
                pickle.dump(images[0].cpu().numpy(), f)

            with open(prefix + "_output.p", "wb") as f:
                pickle.dump(
                    np.transpose(
                        data.decode_segmap(
                            outputs[0].detach().cpu().numpy().argmax(0)),
                        [2, 0, 1]), f)

            with open(prefix + "_target.p", "wb") as f:
                pickle.dump(
                    np.transpose(
                        data.decode_segmap(labels[0].cpu().numpy()),
                        [2, 0, 1]), f)
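set_bn_eval is referenced via model.apply(set_bn_eval) in both training functions but is not defined in the excerpt. A plausible body, assuming its job under args.freeze is to keep BatchNorm layers from updating their running statistics while the rest of the network trains:

import torch

def set_bn_eval(m):
    # Called once per submodule by model.apply(); switch every BatchNorm
    # layer to eval mode so its running mean/variance stay frozen.
    if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
        m.eval()

For context, train and val are presumably driven by an epoch loop along these lines (a sketch; the actual entry point is not part of the excerpt, and model, loaders, criterion, optimizer, scheduler, args, and data come from the surrounding script):

for epoch in range(args.epochs):
    train(model, optimizer, criterion, trainloader, epoch, scheduler, data)
    val(model, criterion, valloader, epoch, data)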