# Assumed imports for these snippets; SegNet, DiscriminativeLoss, get_dataloader,
# batch_step, one_step, and the opt namespace are project-local and not shown.
import os

import cv2
import numpy as np
import torch
from PIL import Image
from skimage import segmentation
from torch.optim import SGD


def test(img_path, data_size='single'):

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the image as a float tensor in NCHW layout, scaled to [0, 1]
    im = Image.open(img_path)
    im = np.array(im, dtype=np.float32) / 255
    image = np.transpose(im, (2, 0, 1))  # HWC -> CHW
    data = torch.from_numpy(image).unsqueeze(0).to(device)

    model = SegNet(opt, data.shape[1])
    if opt.model_path:
        model.load_state_dict(torch.load(opt.model_path, map_location=device))
    model = model.to(device)
    model.eval()

    with torch.no_grad():
        feats, output = model(data)
    output = output[0].permute(1, 2, 0).contiguous().view(-1, opt.nClass)
    feats = feats[0].permute(1, 2, 0).contiguous().view(-1, opt.nChannel)
    _, pred_clusters = torch.max(output, 1)
    pred_clusters = pred_clusters.cpu().numpy()

    # Post-processing: rank clusters by pixel count so the most frequent
    # cluster maps to color index 0 and the rest follow in ascending frequency
    counts = {}
    for i in pred_clusters:
        counts[i] = counts.get(i, 0) + 1
    sorts = sorted(counts.items(), key=lambda x: x[1])
    cache = {sorts[-1][0]: 0}
    for n, (num, _) in enumerate(sorts[:-1], start=1):
        cache[num] = n

    # Fixed palette: the dark color goes to the dominant cluster; this assumes
    # at most 7 distinct clusters remain in the prediction
    label_colors = [[10, 10, 10], [0, 0, 255], [0, 255, 0], [255, 0, 0],
                    [255, 255, 0], [0, 255, 255], [255, 0, 255]]

    im_target_rgb = np.array([label_colors[cache[c]] for c in pred_clusters])
    im_target_rgb = im_target_rgb.reshape(im.shape).astype(np.uint8)

    # Derive the output filename from the input path (drops the directory,
    # keeps the base name)
    path = ".".join(img_path.split('/')[1].split('.')[:2])
    if data_size == 'single':
        cv2.imwrite("outputs_single/{}_out.png".format(path), im_target_rgb)
    elif data_size == 'all':
        cv2.imwrite("outputs_all/{}_out.png".format(path), im_target_rgb)
def train(data_size='all'):

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Model, losses, and optimizer
    model = SegNet(opt, 3)
    model = model.to(device)
    model.train()
    criterion = torch.nn.CrossEntropyLoss()
    criterion_d = DiscriminativeLoss()
    optimizer = SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)

    if data_size == 'all':
        dataloader = get_dataloader(opt.paths, opt, device)
        model = batch_step(opt, optimizer, model, dataloader, criterion,
                           criterion_d, device)
        torch.save(model.state_dict(), 'model_all.pth')
    else:
        # Single-image mode: load the image and precompute SLIC superpixels
        im = Image.open(opt.img_path)
        im = np.array(im, dtype=np.float32) / 255
        image = np.transpose(im, (2, 0, 1))
        data = torch.from_numpy(image).unsqueeze(0).to(device)

        labels = segmentation.slic(im,
                                   compactness=opt.compactness,
                                   n_segments=opt.num_superpixels)
        labels = labels.reshape(-1)
        label_nums = np.unique(labels)
        label_indices = [
            np.where(labels == label_nums[i])[0]
            for i in range(len(label_nums))
        ]

        model = one_step(opt, optimizer, model, data, label_indices, criterion,
                         criterion_d, device)
        torch.save(model.state_dict(), 'model_single.pth')
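
A sketch of the intended end-to-end flow, assuming opt.img_path and the other
opt fields read above are already populated:

def _demo_pipeline():
    # Hypothetical driver: fit the model to one image, then segment it.
    train(data_size='single')             # writes model_single.pth
    opt.model_path = 'model_single.pth'
    test(opt.img_path, data_size='single')
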
Example #3

# Additional imports assumed by this snippet; ngpu, use_gpu, device, data_dir,
# and batch_size are module-level globals that are not shown here.
import torch.nn as nn
import torchvision
from random import randint
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
def train_autoencoder(epoch_plus):
    writer = SummaryWriter(log_dir='./runs_autoencoder_2')
    num_epochs = 400 - epoch_plus
    lr = 0.001
    beta1 = 0.9
    beta2 = 0.999
    weight_decay = 0.001

    # model = autoencoder(nchannels=3, width=172, height=600)
    model = SegNet(3)
    if ngpu > 1:
        model = nn.DataParallel(model)
    if use_gpu:
        model = model.to(device, non_blocking=True)
    if epoch_plus > 0:
        model.load_state_dict(
            torch.load('./autoencoder_models_2/autoencoder_{}.pth'.format(
                epoch_plus), map_location=device))
    criterion = nn.MSELoss(reduction='sum')
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(beta1, beta2),
                                 weight_decay=weight_decay)

    for epoch in range(num_epochs):
        # Pick one rotation angle per epoch; RandomRotation((d, d)) applies a
        # fixed rotation of exactly d degrees to every sample in that epoch
        degree = randint(-180, 180)

        transforms = torchvision.transforms.Compose([
            torchvision.transforms.CenterCrop((172, 200)),
            torchvision.transforms.Resize((172, 200)),
            torchvision.transforms.RandomRotation((degree, degree)),
            torchvision.transforms.ToTensor()
        ])

        dataloader = get_dataloader(data_dir,
                                    train=True,
                                    transform=transforms,
                                    batch_size=batch_size)

        model.train()
        epoch_losses = AverageMeter()

        with tqdm(total=(1000 - 1000 % batch_size)) as _tqdm:  # progress total assumes a 1000-sample dataset
            _tqdm.set_description('epoch: {}/{}'.format(
                epoch + 1 + epoch_plus, num_epochs + epoch_plus))
            for data in dataloader:
                gt, text = data
                if use_gpu:
                    gt, text = gt.to(device, non_blocking=True), text.to(
                        device, non_blocking=True)

                predicted = model(text)

                # loss = criterion_bce(predicted, gt) + criterion_dice(predicted, gt)
                # Target is the residual (gt - text): extracted text in white,
                # everything else in black
                loss = criterion(predicted, gt - text)
                epoch_losses.update(loss.item(), len(gt))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                _tqdm.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                _tqdm.update(len(gt))

        save_path = './autoencoder_models_2'
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        # Log the last batch of the epoch: the residual target and the
        # reconstructed text overlay
        gt_text = gt - text
        predicted_mask = text + predicted

        torch.save(
            model.state_dict(),
            os.path.join(save_path,
                         'autoencoder_{}.pth'.format(epoch + 1 + epoch_plus)))
        writer.add_scalar('Loss', epoch_losses.avg, epoch + 1 + epoch_plus)
        writer.add_image('text/text_image_{}'.format(epoch + 1 + epoch_plus),
                         text[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image('gt/gt_image_{}'.format(epoch + 1 + epoch_plus),
                         gt[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image('gt_text/gt_image_{}'.format(epoch + 1 + epoch_plus),
                         gt_text[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image(
            'predicted/predicted_image_{}'.format(epoch + 1 + epoch_plus),
            predicted_mask[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image(
            'predicted_text/predicted_image_{}'.format(epoch + 1 + epoch_plus),
            predicted[0].squeeze(), epoch + 1 + epoch_plus)

    writer.close()
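
A hedged usage sketch: epoch_plus both shortens the schedule and selects which
checkpoint to resume from, so a fresh run and a resumed run look like this
(paths follow the save logic above):

def _demo_autoencoder():
    # Hypothetical driver for train_autoencoder().
    train_autoencoder(0)      # fresh run: epochs 1..400
    # train_autoencoder(150)  # resume: loads autoencoder_150.pth, runs epochs 151..400
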
Example #4

# Fragment of a training routine. The enclosing function signature below is an
# assumption added so the fragment parses; LEARNING_RATE, MOMENTUM, model,
# criterion, train_dataloader, device, and logger come from the original,
# unshown context.
def train_model(args, model, criterion, train_dataloader, device, logger):
    # start from checkpoint
    if args.checkpoint:
        model.load_state_dict(torch.load(args.checkpoint, map_location=device))

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=LEARNING_RATE,
                                momentum=MOMENTUM)

    # training
    is_better = True
    prev_loss = float('inf')
    epoch_loss = AverageMeter()
    logger.info(args)

    model.train()

    for epoch in range(args.epochs):
        t_start = time.time()

        for index, (image, mask) in enumerate(train_dataloader):
            batches_done = len(train_dataloader) * epoch + index

            input_tensor = image.to(device)
            target_tensor = mask.to(device)
            output = model(input_tensor)

            optimizer.zero_grad()
            loss = criterion(output, target_tensor)
            loss.backward()
            optimizer.step()
            # Assumed: feed the AverageMeter declared above (the original
            # fragment is cut off before the meter is used)
            epoch_loss.update(loss.item(), image.size(0))
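
The fragment initializes is_better, prev_loss, and t_start but is cut off
before using them. A plausible epoch-end continuation, purely an assumption,
compares the running loss against the previous epoch and checkpoints on
improvement:

        # Hypothetical epoch-end bookkeeping (not part of the original fragment)
        is_better = epoch_loss.avg < prev_loss
        if is_better:
            prev_loss = epoch_loss.avg
            torch.save(model.state_dict(), 'model_best.pth')
        logger.info('Epoch {}: loss {:.6f} ({:.2f}s)'.format(
            epoch + 1, epoch_loss.avg, time.time() - t_start))
        epoch_loss = AverageMeter()  # start a fresh meter for the next epoch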