Code example #1
def test_losses_zero(verbose=False):

    gt = np.random.randint(20, size=[4, 512, 512])
    hot = to_onehot(gt, num_classes=20)
    hot = hot.transpose(1, 0, 2, 3)
    hot = 20 * hot

    gt_var = Variable(torch.Tensor(gt)).long()
    hot_var = Variable(torch.Tensor(hot))

    res1 = loss.cross_entropy2d(hot_var, gt_var)
    if verbose:
        logging.info('Res1: {}'.format(res1))
    assert (res1.data.numpy() < 1e-7)

    loss2 = loss.CrossEntropyLoss2d()
    res2 = loss2(hot_var, gt_var)
    if verbose:
        logging.info('Res2: {}'.format(res2))
    # assert(res2 == 0)

    loss3 = loss.CrossEntropyLoss2dTranspose()
    res3 = loss3(hot_var, gt_var)
    if verbose:
        logging.info('Res3: {}'.format(res3))
    assert (res3.data.numpy() < 1e-7)
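The `loss.cross_entropy2d` function exercised by these tests is not shown on this page. As a point of reference, here is a minimal sketch of the common log-softmax + NLL formulation such a function usually follows (modern PyTorch API; the project's actual implementation may differ):

import torch
import torch.nn.functional as F

def cross_entropy2d(input, target, weight=None, size_average=True):
    # input: (N, C, H, W) raw scores; target: (N, H, W) integer class labels
    c = input.size(1)
    log_p = F.log_softmax(input, dim=1)
    log_p = log_p.permute(0, 2, 3, 1).reshape(-1, c)   # flatten to (N*H*W, C)
    target = target.reshape(-1)                        # flatten to (N*H*W,)
    return F.nll_loss(log_p, target, weight=weight,
                      reduction='mean' if size_average else 'sum')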
Code example #2
def train_onePic(train_img_path, pths_path, batch_size, lr, num_workers,
                 epoch_iter, interval):
    # file_num = 1056  # tentative
    trainset = onePic_dataset(train_img_path)
    train_loader = data.DataLoader(trainset, batch_size=batch_size, \
                                   shuffle=True, num_workers=num_workers, drop_last=False)
    #criterion = cross_entropy2d()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #device = torch.device("cpu")
    #model = segNet_onePic()
    #model = segNet_mobileNet()
    model = segNet_fcn()
    data_parallel = False
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[epoch_iter // 2],
                                         gamma=0.1)
    #criterion = BinaryDiceLoss()

    for epoch in range(epoch_iter):
        model.train()

        epoch_loss = 0
        epoch_time = time.time()
        for i, (img, mask) in enumerate(train_loader):
            start_time = time.time()
            img, mask = img.to(device), mask.to(device)

            output = model(img)
            loss = cross_entropy2d(output, mask)
            #loss = get_dice_loss(output, mask)
            #loss = criterion(output, mask)
            #loss /= len(img)
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # MultiStepLR milestones are given in epochs; if the loader yields more
            # than one batch per epoch, step the scheduler at the end of the epoch
            # loop instead of here.
            scheduler.step()

            print('Epoch is [{}/{}], time consumption is {:.8f}, batch loss is {:.8f}'.format(\
                epoch+1, epoch_iter, time.time()-start_time, loss.item()))

        print('epoch_loss is {:.8f}, epoch_time is {:.8f}'.format(
            epoch_loss / len(train_loader),  # mean batch loss over the epoch
            time.time() - epoch_time))
        print(time.asctime(time.localtime(time.time())))
        print('=' * 50)

        if (epoch + 1) % interval == 0:
            state_dict = model.module.state_dict(
            ) if data_parallel else model.state_dict()
            torch.save(
                state_dict,
                os.path.join(pths_path,
                             'model_epoch_{}.pth'.format(epoch + 1)))
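Example #2 leaves a Dice-loss alternative commented out (`BinaryDiceLoss` / `get_dice_loss`), neither of which is defined on this page. A minimal sketch of a soft binary Dice loss, assuming a single-channel logit output and a binary mask (the project's version may differ):

import torch

def get_dice_loss(output, mask, eps=1e-6):
    # output: (N, 1, H, W) raw logits; mask: (N, 1, H, W) binary ground truth
    prob = torch.sigmoid(output)
    inter = (prob * mask).sum(dim=(1, 2, 3))
    union = prob.sum(dim=(1, 2, 3)) + mask.sum(dim=(1, 2, 3))
    dice = (2 * inter + eps) / (union + eps)
    return 1 - dice.mean()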
Code example #3
File: train.py    Project: nexus-kgp/adsemseg
def train(epochs):

    # Setup Dataloader

    data_loader = pascalVOCLoader
    data_path = "/media/sangeet/Stuff/DC Shares/Datasets/VOCdevkit/VOC2012/"
    loader = data_loader(data_path, is_transform=True, img_size=(256, 256))
    n_classes = loader.n_classes
    trainloader = data.DataLoader(loader,
                                  batch_size=1,
                                  num_workers=4,
                                  shuffle=True)

    # segmentor.cuda()

    # Setup optimizer for segmentor and discriminator
    # optimizer = torch.optim.SGD(segmentor.parameters(), lr=1e-5, momentum=0.99, weight_decay=5e-4)

    for epoch in range(epochs):
        for i, (images, labels) in enumerate(trainloader):

            if use_gpu:
                images = Variable(images.cuda())
                labels = Variable(labels.cuda())
            else:
                images = Variable(images)
                labels = Variable(labels)
            # import pudb; pu.db  # leftover debugger breakpoint; enable only when debugging

            fake_out = segmentor(images)

            discriminator.zero_grad()
            segmentor.zero_grad()

            d_fake_out = discriminator(fake_out)
            fake_err = d_loss(d_fake_out, zeros)
            fake_err.backward(retain_graph=True)
            fake_loss_d.append(fake_err[0].clone().cpu().data.numpy()[0])

            d_real_out = discriminator(labels.float())
            real_err = d_loss(d_real_out, ones)
            real_err.backward()
            real_loss_d.append(real_err[0].clone().cpu().data.numpy()[0])
            d_optim.step()

            g_err = cross_entropy2d(fake_out,
                                    labels) + 0.65 * (d_loss(d_fake_out, ones))
            g_err.backward()
            real_loss_gen.append(g_err[0].clone().cpu().data.numpy()[0])
            g_optim.step()
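Several names in example #3 (and in example #6 below) come from the enclosing module and are not shown: the segmentor and discriminator networks, the BCE-style `d_loss`, the `zeros`/`ones` target tensors, the two optimizers, and the loss-history lists. A hedged sketch of how that setup typically looks (shapes and learning rates are assumptions; `segmentor` and `discriminator` are the examples' own models):

import torch
import torch.nn as nn

# segmentor and discriminator are the networks defined elsewhere in the project
d_loss = nn.BCELoss()                 # adversarial criterion for the discriminator
zeros = torch.zeros(1)                # "fake" target; shape must match the discriminator output
ones = torch.ones(1)                  # "real" target
d_optim = torch.optim.Adam(discriminator.parameters(), lr=2e-4)  # hypothetical lr
g_optim = torch.optim.Adam(segmentor.parameters(), lr=2e-4)      # hypothetical lr
fake_loss_d, real_loss_d, real_loss_gen = [], [], []             # per-step loss histories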
Code example #4
    def run_WrappedVOCSBDSegmentation5i_network():
        from torch.utils import data
        from model.head.pgn import PGN
        import torch.nn.functional as F
        from loss import cross_entropy2d
        from optimizer import get_optimizer

        batch_size = 4
        epoch = 1

        train_set = WrappedVOCSBDSegmentation5i(
            root=roots_path,
            fold=1,
            # remember to run both train and test set
            split='test',
            rebuild_mask=False,
            img_size=224)
        train_loader = data.DataLoader(train_set,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=8)

        model = PGN()

        optim = get_optimizer()(model.parameters(),
                                lr=0.0025,
                                momentum=0.9,
                                dampening=0,
                                weight_decay=0,
                                nesterov=False)

        for e in range(epoch):
            for i_iter, batch in enumerate(train_loader):  # avoid shadowing torch.utils.data
                Is, Ys, Iq, Yq, sample_class, _, _ = batch
                Ys, Yq = Ys.unsqueeze(1).float(), Yq.unsqueeze(1).float()

                pred = model(Is, Ys, Iq)

                pred = F.interpolate(pred,
                                     size=Yq.size()[-2:],
                                     mode='bilinear',
                                     align_corners=True)

                loss = cross_entropy2d(pred, Yq.long())
                optim.zero_grad()
                loss.backward()
                optim.step()
                print(loss.item(), sample_class)
Code example #5
    def run_WrappedVOCSBDSegmentation5_network():
        from torch.utils import data
        from model.head.amp import AMP
        import torch.nn.functional as F
        from loss import cross_entropy2d
        from optimizer import get_optimizer

        batch_size = 4
        epoch = 1

        train_set = WrappedVOCSBDSegmentation5(root=roots_path,
                                               fold=1,
                                               split='train',
                                               rebuild_mask=False,
                                               img_size=224)
        train_loader = data.DataLoader(train_set,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=8)

        model = AMP(maximum_num_classes=21)

        optim = get_optimizer()(model.parameters(),
                                lr=0.0025,
                                momentum=0.9,
                                dampening=0,
                                weight_decay=0,
                                nesterov=False)

        for e in range(epoch):
            for i_iter, batch in enumerate(train_loader):  # avoid shadowing torch.utils.data
                I, Y = batch
                Y = Y.unsqueeze(1).float()

                pred = model(I, phase='train')
                pred = F.interpolate(pred,
                                     size=Y.size()[-2:],
                                     mode='bilinear',
                                     align_corners=True)
                loss = cross_entropy2d(pred, Y.long())
                optim.zero_grad()
                loss.backward()
                optim.step()
                print(loss.item())
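Examples #4 and #5 obtain their optimizer class through `get_optimizer()` from an `optimizer` module that is not shown. Given the keyword arguments used (momentum, dampening, nesterov), it most likely resolves to `torch.optim.SGD`; a minimal sketch under that assumption:

import torch

def get_optimizer(name='sgd'):
    # map a config string to an optimizer class; SGD matches the call sites above
    return {'sgd': torch.optim.SGD, 'adam': torch.optim.Adam}[name]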
Code example #6
def train(n_epoch=1, n_co=10):
    for epoch in range(n_epoch):
        count = 0
        for batch in train_loader:  # rename so the batch no longer shadows the epoch index
            if use_gpu:
                sample_image = Variable(batch['image'].float().cuda())
                sample_image = local_response_norm(sample_image, 3)

                label = Variable(batch['region'].float().cuda())
            else:
                sample_image = Variable(batch['image'].float())
                sample_image = local_response_norm(sample_image, 3)

                label = Variable(batch['region'].float())

            fake_out = segmentor(sample_image)

            disc.zero_grad()
            segmentor.zero_grad()

            d_fake_out = disc(fake_out, sample_image)

            fake_err = d_loss(d_fake_out, zeros)
            fake_err.backward(retain_graph=True)
            fake_loss_d.append(fake_err[0].clone().cpu().data.numpy()[0])

            d_real_out = disc(label, sample_image)
            real_err = d_loss(d_real_out, ones)
            real_err.backward()
            real_loss_d.append(real_err[0].clone().cpu().data.numpy()[0])

            d_optim.step()

            g_err = cross_entropy2d(fake_out,
                                    label) + 0.65 * (d_loss(d_fake_out, ones))
            g_err.backward()
            real_loss_gen.append(g_err[0].clone().cpu().data.numpy()[0])

            g_optim.step()
            count += 1
            print("done")
            if count == n_co:
                break
Code example #7
def test_losses_equal(verbose=False):
    gt = np.random.randint(20, size=[4, 512, 512])
    gt_var = Variable(torch.Tensor(gt)).long()
    pred = torch.rand(4, 20, 512, 512)
    pred_var = Variable(torch.Tensor(pred))

    loss2 = loss.CrossEntropyLoss2d()
    loss3 = loss.CrossEntropyLoss2dTranspose()

    res1 = loss.cross_entropy2d(pred_var, gt_var)  # NOQA
    res2 = loss2(pred_var, gt_var)
    res3 = loss3(pred_var, gt_var)

    assert ((res1 == res2).data.numpy())
    assert ((res2 == res3).data.numpy())

    if verbose:
        logging.info("Random Data.")
        logging.info('Res1: {}'.format(res1))
        logging.info('Res2: {}'.format(res2))
        logging.info('Res3: {}'.format(res3))
Code example #8
def train(args):

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path,
                         is_transform=True,
                         img_size=(args.img_rows, args.img_cols))
    n_classes = loader.n_classes
    trainloader = data.DataLoader(loader,
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  shuffle=True)
    model_path = '/home/models/'
    # Setup visdom for visualization
    # vis = visdom.Visdom()
    #
    # loss_window = vis.line(X=torch.zeros((1,)).cpu(),
    #                        Y=torch.zeros((1)).cpu(),
    #                        opts=dict(xlabel='minibatches',
    #                                  ylabel='Loss',
    #                                  title='Training Loss',
    #                                  legend=['Loss']))

    # Setup Model
    model = get_model(args.arch, n_classes)

    if torch.cuda.is_available():
        model.cuda()
        model = torch.nn.DataParallel(model,
                                      device_ids=range(
                                          torch.cuda.device_count()))
        # test_image, test_segmap = loader[0]
        # test_image = Variable(test_image.unsqueeze(0).cuda())
    # else:
    #     test_image, test_segmap = loader[0]
    #     test_image = Variable(test_image.unsqueeze(0))

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.l_rate,
                                momentum=0.99,
                                weight_decay=5e-4)

    for epoch in range(args.n_epoch):
        loss_sum = 0
        for i, (images, labels) in enumerate(trainloader):
            if torch.cuda.is_available():
                images = Variable(images.cuda())
                labels = Variable(labels.cuda())
            else:
                images = Variable(images)
                labels = Variable(labels)

            cur_iter = len(trainloader) * epoch + i  # avoid shadowing the builtin iter
            poly_lr_scheduler(optimizer, args.l_rate, cur_iter)

            optimizer.zero_grad()
            outputs = model(images)

            loss = cross_entropy2d(outputs, labels)

            loss.backward()
            optimizer.step()

            # vis.line(
            #     X=torch.ones((1, 1)).cpu() * i,
            #     Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
            #     win=loss_window,
            #     update='append')

            loss_sum = loss_sum + loss.data[0]
            if (i + 1) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" %
                      (epoch + 1, args.n_epoch, loss.data[0]))
        mean_loss = loss_sum / len(trainloader)
        print("Epoch [%d/%d] mean_Loss: %.4f" %
              (epoch + 1, args.n_epoch, mean_loss))

        # test_output = model(test_image)
        # predicted = loader.decode_segmap(test_output[0].cpu().data.numpy().argmax(0))
        # target = loader.decode_segmap(test_segmap.numpy())

        # vis.image(test_image[0].cpu().data.numpy(), opts=dict(title='Input' + str(epoch)))
        # vis.image(np.transpose(target, [2,0,1]), opts=dict(title='GT' + str(epoch)))
        # vis.image(np.transpose(predicted, [2,0,1]), opts=dict(title='Predicted' + str(epoch)))

        # torch.save(model,  model_path+"{}_{}_{}_{}.pkl".format(args.arch, args.dataset, args.feature_scale, epoch))
    torch.save(
        model, model_path +
        "{}_{}_{}_{}_{}_v.pkl".format(args.arch, args.dataset, args.batch_size,
                                      args.l_rate, args.n_epoch))
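`poly_lr_scheduler` in example #8 is not defined on this page. A sketch of the typical polynomial-decay helper found in pytorch-semseg-style code bases (the default `max_iter` and `power` values are assumptions; the project's version may differ):

def poly_lr_scheduler(optimizer, init_lr, cur_iter, lr_decay_iter=1,
                      max_iter=30000, power=0.9):
    # polynomially decay the learning rate of every parameter group
    if cur_iter % lr_decay_iter or cur_iter > max_iter:
        return
    lr = init_lr * (1 - cur_iter / max_iter) ** power
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr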
Code example #9
    def train_epoch(self):
        self.logging.info("length trainloader: %s", len(self.train_loader))
        self.logging.info("current_lr is : %s",
                          self.optim_fusion.param_groups[0]['lr'])
        for batch_idx, (img, mask, depth,
                        bins) in enumerate(tqdm(self.train_loader)):
            ########## for debug
            # if batch_idx % 10==0 and batch_idx>10:
            #     self.save_test(iteration)
            iteration = batch_idx + self.epoch * len(self.train_loader)

            if self.iteration != 0 and (iteration - 1) != self.iteration:
                continue  # for resuming
            self.iteration = iteration

            if self.cuda:
                img, mask, depth, bins = img.cuda(), mask.cuda(), depth.cuda(
                ), bins.cuda()
                img, mask, depth, bins = Variable(img), Variable(
                    mask), Variable(depth), bins.cuda()
            # print(img.size())
            n, c, h, w = img.size()  # batch_size, channels, height, width
            depth = depth.view(n, 1, h, w).repeat(1, c, 1, 1)

            self.optim_depth.zero_grad()
            self.optim_rgb.zero_grad()
            self.optim_fusion.zero_grad()

            global running_loss_final, iou_final, aux_final

            d0, d1, d2, d3, d4 = self.model_depth(depth)
            h1, h2, h3, h4, h5 = self.model_rgb(img, bins, gumbel=True)
            predict_mask = self.model_fusion(h1, h2, h3, h4, h5, d0, d1, d2,
                                             d3, d4)

            ce_loss = cross_entropy2d(predict_mask, mask, size_average=False)
            iou_loss = torch.zeros(1)
            aux_ce_loss = torch.zeros(1)
            # iou_loss = iou(predict_mask, mask,size_average=False ) * 0.2
            # iou_loss = self.dice(predict_mask, mask)
            loss = ce_loss  #+  iou_loss + aux_ce_loss

            running_loss_final += ce_loss.item()
            iou_final += iou_loss.item()
            aux_final += aux_ce_loss.item()

            if iteration % self.sshow == (self.sshow - 1):
                self.logging.info(
                    '\n [%3d, %6d,   RGB-D Net ce_loss: %.3f aux_loss: %.3f  iou_loss: %.3f]'
                    % (self.epoch + 1, iteration + 1, running_loss_final /
                       (n * self.sshow), aux_final /
                       (n * self.sshow), iou_final / (n * self.sshow)))

                self.writer.add_scalar('train/iou_loss',
                                       iou_final / (n * self.sshow),
                                       iteration + 1)
                self.writer.add_scalar('train/aux_loss',
                                       aux_final / (n * self.sshow),
                                       iteration + 1)

                self.writer.add_scalar('train/lr',
                                       self.optim_fusion.param_groups[0]['lr'],
                                       iteration + 1)
                self.writer.add_scalar('train/iter_ce_loss',
                                       running_loss_final / (n * self.sshow),
                                       iteration + 1)

                self.writer.add_scalar('train/epoch_ce_loss',
                                       running_loss_final / (n * self.sshow),
                                       self.epoch + 1)
                running_loss_final = 0.0
                iou_final = 0.0
                aux_final = 0.0

            loss.backward()
            self.optim_depth.step()
            self.optim_rgb.step()
            self.optim_fusion.step()

            if iteration <= 200000:
                if iteration % self.snapshot == (self.snapshot - 1):
                    self.save_test(iteration)
            else:
                if iteration % 10000 == (10000 - 1):
                    self.save_test(iteration)
Code example #10
File: train.py    Project: vietdoan/Enet_Pytorch
def train(args):
    data_path = get_data_path(args.dataset)
    data_loader = get_loader(args.dataset)
    label_scale = 1
    if (args.model == 'encoder'):
        label_scale = 8
    loader = data_loader(data_path, is_transform=True, label_scale=label_scale)
    n_classes = loader.n_classes
    trainloader = data.DataLoader(loader, batch_size=args.batch_size)
    another_loader = data_loader(data_path,
                                 split='val',
                                 is_transform=True,
                                 label_scale=label_scale)
    valloader = data.DataLoader(another_loader, batch_size=args.batch_size)
    # Setup Model
    if (args.model == 'encoder'):
        model = Encoder(n_classes, train=True)
    else:
        encoder_weight = torch.load('enet_encoder_camvid_299.pkl')
        del encoder_weight['classifier.bias']
        del encoder_weight['classifier.weight']
        model = Enet(n_classes)
        model.encoder.load_state_dict(encoder_weight)

    # compute weight for cross_entropy2d
    norm_hist = hist / hist.sum()
    weight = 1 / np.log(norm_hist + 1.02)
    # unlabeled data is not used.
    weight[-1] = 0
    weight = torch.FloatTensor(weight)

    if torch.cuda.is_available():
        model.cuda(0)
        weight = weight.cuda(0)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr_rate,
                                 weight_decay=args.w_decay)
    scheduler = StepLR(optimizer, step_size=300, gamma=args.lr_decay)
    pooling_stack = None
    for epoch in range(args.epochs):
        scheduler.step()
        model.train()
        loss_list = []
        file = open(
            '../another_' + args.model + '/{}_{}.txt'.format('enet', epoch),
            'w')
        for i, (images, labels) in enumerate(trainloader):
            if torch.cuda.is_available():
                images = Variable(images.cuda(0))
                labels = Variable(labels.cuda(0))
            else:
                images = Variable(images)
                labels = Variable(labels)
            optimizer.zero_grad()
            if (args.model == 'encoder'):
                outputs, pooling_stack = model(images)
            else:
                outputs = model(images)
            loss = cross_entropy2d(outputs, labels, weight=weight)
            loss_list.append(loss.data[0])
            loss.backward()
            optimizer.step()

        file.write(str(np.average(loss_list)))

        model.eval()
        gts, preds = [], []
        for i, (images, labels) in enumerate(valloader):
            if torch.cuda.is_available():
                images = Variable(images.cuda(0))
                labels = Variable(labels.cuda(0))
            else:
                images = Variable(images)
                labels = Variable(labels)
            if (args.model == 'encoder'):
                outputs, pooling_stack = model(images)
            else:
                outputs = model(images)
            pred = outputs.data.max(1)[1].cpu().numpy()
            gt = labels.data.cpu().numpy()
            for gt_, pred_ in zip(gt, pred):
                gts.append(gt_)
                preds.append(pred_)
        score, class_iou = scores(gts, preds, n_class=n_classes)
        for k, v in score.items():
            file.write('{} {}\n'.format(k, v))

        for i in range(n_classes):
            file.write('{} {}\n'.format(i, class_iou[i]))
        torch.save(
            model.state_dict(), "../another_" + args.model +
            "/{}_{}_{}.pkl".format('enet_' + args.model, args.dataset, epoch))
        file.close()
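Example #10 weights `cross_entropy2d` with `1 / log(norm_hist + 1.02)`, the ENet-style class weighting, but the per-class pixel histogram `hist` comes from the enclosing scope. A hedged sketch of one way such a histogram could be built from the training labels (the function name and loop are assumptions):

import numpy as np

def compute_class_hist(loader, n_classes):
    hist = np.zeros(n_classes, dtype=np.float64)
    for _, labels in loader:
        labels = labels.numpy().ravel()
        valid = (labels >= 0) & (labels < n_classes)   # skip ignore indices
        hist += np.bincount(labels[valid], minlength=n_classes)
    return hist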
Code example #11
def train(model, Para=False):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    NUM_CLASSES = 2
    num_epochs = 62
    if Para:
        model = torch.nn.DataParallel(model,
                                      device_ids=range(
                                          torch.cuda.device_count()))
    savedir = "./log_save"
    weight = torch.ones(NUM_CLASSES)
    loader = get_loader(
        "/home/user/ICNet-master/evaluation/list/train_640.txt")
    #if cuda:
    # criterion = CrossEntropyLoss2d(weight).cuda()
    #else:
    # criterion = CrossEntropyLoss2d(weight)
    automated_log_path = savedir + "/automated_log.txt"
    if not os.path.exists(automated_log_path):  # don't add the header line if the file already exists
        with open(automated_log_path, "a") as myfile:
            myfile.write("Epoch\t\tTrain-loss")
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.01,
                                momentum=0.9,
                                weight_decay=0.0005)
    start_epoch = 1
    loss_fn = cross_entropy2d
    for epoch in range(start_epoch, num_epochs):
        print("----- TRAINING - EPOCH", epoch, "-----")
        usedLr = 0
        epoch_loss = []
        time_train = []
        for param_group in optimizer.param_groups:
            print("LEARNING RATE: ", param_group['lr'])
            usedLr = float(param_group['lr'])
        model.train()
        count = 1
        for step, (images, label1, label4, label24) in enumerate(loader):
            start_time = time.time()
            images = images.to(device)
            label1 = label1.to(device)
            label4 = label4.to(device)
            label24 = label24.to(device)
            sub4, sub24, sub124 = model(images)  #sub4,sub24,sub124

            loss1 = cross_entropy2d(sub4, label4) * 0.16
            loss2 = cross_entropy2d(sub24, label24) * 0.4
            loss3 = cross_entropy2d(sub124, label1) * 1.0
            loss = loss1 + loss2 + loss3
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.item())
            time_train.append(time.time() - start_time)
            if step % 50 == 0:
                average = sum(epoch_loss) / len(epoch_loss)
                print(
                    'loss: {} (epoch: {}, step: {})'.format(
                        average, epoch, step), "// Avg time/img: %.4f s" %
                    (sum(time_train) / len(time_train) / 30))
        if epoch % 10 == 0:
            torch.save(
                model.state_dict(),
                '{}_{}.pth'.format(os.path.join(savedir, "icnet"), str(epoch)))

        #save log
        average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
        with open(automated_log_path, "a") as myfile:
            myfile.write("\n%d\t\t%.4f" % (epoch, average_epoch_loss_train))

    return model