Example #1
class Visualizer(object):
    def __init__(self, input_topic, output_topic, resize_width, resize_height,
                 model_path, force_cpu):
        self.bridge = CvBridge()

        self.graph = UNet([3, resize_width, resize_height], 3)
        self.graph.load_state_dict(torch.load(model_path))
        # Fall back to CPU when forced or when CUDA is unavailable
        self.force_cpu = force_cpu or not torch.cuda.is_available()

        self.resize_width, self.resize_height = resize_width, resize_height

        if not self.force_cpu:
            self.graph.cuda()
        self.graph.eval()
        self.to_tensor = transforms.Compose([transforms.ToTensor()])

        self.publisher = rospy.Publisher(output_topic, ImMsg, queue_size=1)
        self.raw_subscriber = rospy.Subscriber(input_topic,
                                               CompressedImage,
                                               self.image_cb,
                                               queue_size=1,
                                               buff_size=10**8)

    def convert_to_tensor(self, image):
        np_arr = np.frombuffer(image.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        image_np = cv2.resize(image_np,
                              dsize=(self.resize_width, self.resize_height))
        img_to_tensor = PIL.Image.fromarray(image_np)
        img_tensor = self.to_tensor(img_to_tensor)

        if not self.force_cpu:
            return Variable(img_tensor.unsqueeze(0)).cuda()
        else:
            return Variable(img_tensor.unsqueeze(0))

    def image_cb(self, image):
        img_tensor = self.convert_to_tensor(image)

        # Inference
        output = self.graph(img_tensor)
        output_data = output.cpu().data.numpy()[0][0]

        # Convert from 32fc1 (0 - 1) to 8uc1 (0 - 255)
        cv_output = np.uint8(255 * output_data)
        cv_output = cv2.applyColorMap(cv_output, cv2.COLORMAP_JET)

        # Convert to ROS message to publish
        msg_out = self.bridge.cv2_to_imgmsg(cv_output, 'bgr8')
        msg_out.header.stamp = image.header.stamp
        self.publisher.publish(msg_out)
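
A minimal usage sketch for the node above (not part of the original example); the topic names, image size, and model path are hypothetical placeholders:

if __name__ == '__main__':
    rospy.init_node('unet_visualizer')
    Visualizer(input_topic='/camera/image_raw/compressed',  # hypothetical topic
               output_topic='/unet/segmentation',           # hypothetical topic
               resize_width=320, resize_height=240,
               model_path='unet.pth',                       # hypothetical path
               force_cpu=False)
    rospy.spin()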
Example #2
def load_finetuned_model(args, baseline_model):
    """

    :param args:
    :param baseline_model:
    :return:
    """
    # augment_net = Net(0, 0.0, 32, 3, 0.0, num_classes=32**2 * 3, do_res=True)
    augment_net = UNet(in_channels=3, n_classes=3, depth=1, wf=2, padding=True, batch_norm=False,
                       do_noise_channel=True,
                       up_mode='upsample', use_identity_residual=True)  # TODO(PV): Initialize UNet properly
    # TODO (JON): DEPTH 1 WORKED WELL.  Changed upconv to upsample.  Use a wf of 2.

    # This network outputs scalar weights to be applied element-wise to the per-example losses
    from models.simple_models import CNN, Net
    imsize, in_channel, num_classes = 32, 3, 10
    reweighting_net = Net(0, 0.0, imsize, in_channel, 0.0, num_classes=1)
    #resnet_cifar.resnet20(num_classes=1)

    if args.load_finetune_checkpoint:
        checkpoint = torch.load(args.load_finetune_checkpoint)
        baseline_model.load_state_dict(checkpoint['elementary_model_state_dict'])
        augment_net.load_state_dict(checkpoint['augment_model_state_dict'])
        try:
            reweighting_net.load_state_dict(checkpoint['reweighting_model_state_dict'])
        except KeyError:
            pass

    augment_net, reweighting_net, baseline_model = augment_net.cuda(), reweighting_net.cuda(), baseline_model.cuda()
    augment_net.train(), reweighting_net.train(), baseline_model.train()
    return augment_net, reweighting_net, baseline_model
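
A hedged usage sketch: only args.load_finetune_checkpoint is read from args, and the baseline model below is a stand-in (the snippet also assumes a CUDA device, since the function calls .cuda() unconditionally):

from argparse import Namespace
import torch.nn as nn

args = Namespace(load_finetune_checkpoint='')  # empty path skips checkpoint loading
baseline = nn.Linear(32 * 32 * 3, 10)          # stand-in elementary model
augment_net, reweighting_net, baseline = load_finetuned_model(args, baseline)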
Example #3
def main():
    """Create the model and start the evaluation process."""

    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = UNet(3, n_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.cuda(gpu0)
    model.eval()  # inference mode

    testloader = data.DataLoader(REFUGE(False,
                                        domain='REFUGE_TEST',
                                        is_transform=True),
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(460, 460),
                             mode='bilinear',
                             align_corners=True)
    else:
        interp = nn.Upsample(size=(460, 460), mode='bilinear')

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, _, _, name = batch
        if args.model == 'Unet':
            _, _, _, _, output2 = model(
                Variable(image, volatile=True).cuda(gpu0))

            output = interp(output2).cpu().data.numpy()

        for idx, one_name in enumerate(name):
            pred = output[idx]
            pred = pred.transpose(1, 2, 0)
            pred = np.asarray(np.argmax(pred, axis=2), dtype=np.uint8)
            output_col = colorize_mask(pred)

            if is_polar:

                # plt.imshow(output_col)
                # plt.show()

                # map class ids {0, 1, 2} to gray levels {0, 128, 255}
                output_col = np.array(output_col)
                output_col[output_col == 1] = 128
                output_col[output_col == 2] = 255

                # plt.imshow(output_col)
                # plt.show()

                output_col = cv2.linearPolar(
                    rotate(output_col, 90),
                    (args.ROI_size / 2, args.ROI_size / 2), args.ROI_size / 2,
                    cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP)

                # plt.imshow(output_col)
                # plt.show()

                output_col = np.array(output_col * 255, dtype=np.uint8)
                output_col[output_col > 200] = 210
                output_col[output_col == 0] = 255
                output_col[output_col == 210] = 0
                output_col[(output_col > 0) & (output_col < 255)] = 128

                output_col = Image.fromarray(output_col)

                # plt.imshow(output_col)
                # plt.show()

            one_name = one_name.split('/')[-1]
            if len(one_name.split('_')) > 0:
                one_name = one_name[:-4]
            #pred.save('%s/%s.bmp' % (args.save, one_name))
            output_col = output_col.convert('L')

            print(output_col.size)
            output_col.save('%s/%s.bmp' % (args.save, one_name.split('.')[0]))
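
colorize_mask is a project helper not shown here; a minimal sketch of the usual palette-based implementation, consistent with the index remapping above (the colours are assumptions):

def colorize_mask(mask):
    """2-D array of class ids -> palettised ('P' mode) PIL image."""
    palette = [0, 0, 0,        # class 0 (assumed colours)
               128, 128, 128,  # class 1
               255, 255, 255]  # class 2
    out = Image.fromarray(np.asarray(mask, dtype=np.uint8)).convert('P')
    out.putpalette(palette + [0] * (768 - len(palette)))
    return out

Because np.array() on a 'P'-mode image returns the palette indices, the is_polar branch above can remap the result with output_col == 1 and output_col == 2.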
Example #4
                                  shuffle=False,
                                  num_workers=4)


    if args.model == "unet":
        model = UNet(input_channels=NUM_INPUT_CHANNELS,
                     output_channels=NUM_OUTPUT_CHANNELS)
    elif args.model == "segnet":
        model = SegNet(input_channels=NUM_INPUT_CHANNELS,
                       output_channels=NUM_OUTPUT_CHANNELS)
    else:
        model = PSPNet(layers=50, bins=(1, 2, 3, 6), dropout=0.1, classes=NUM_OUTPUT_CHANNELS, use_ppm=True, pretrained=True)

    class_weights = 1.0/train_dataset.get_class_probability()
    criterion = torch.nn.CrossEntropyLoss(weight=class_weights)

    if CUDA:
        model = model.cuda(GPU_ID)

        class_weights = class_weights.cuda(GPU_ID)
        criterion = criterion.cuda(GPU_ID)


    if args.checkpoint:
        model.load_state_dict(torch.load(args.checkpoint))


    optimizer = torch.optim.Adam(model.parameters(),
                                     lr=LEARNING_RATE)

    train()
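
get_class_probability lives on the dataset class; a plausible minimal version (an assumption, not the original) returns per-class pixel frequencies, so its reciprocal up-weights rare classes:

def get_class_probability(self):
    counts = torch.zeros(NUM_OUTPUT_CHANNELS)
    for _, mask in self:  # iterate (image, mask) pairs; mask holds class indices
        counts += torch.bincount(mask.flatten().long(),
                                 minlength=NUM_OUTPUT_CHANNELS).float()
    return counts / counts.sum()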
Example #5
class Noise2Noise(object):
    """Implementation of Noise2Noise from Lehtinen et al. (2018)."""

    def __init__(self, params, trainable):
        """Initializes model."""

        self.p = params
        self.trainable = trainable
        self._compile()  # initialize the model


    def _compile(self):
        """
        Compiles model (architecture, loss function, optimizers, etc.).
        初始化 网络、损失函数、优化器等
        """

        print('Noise2Noise: Learning Image Restoration without Clean Data (Lehtinen et al., 2018)')

        # Model (3x3=9 channels for Monte Carlo since it uses 3 HDR buffers)
        if self.p.noise_type == 'mc':
            self.is_mc = True
            self.model = UNet(in_channels=9)
        else:
            self.is_mc = False
            self.model = UNet(in_channels=3)

        # Set optimizer and loss, if in training mode
        if self.trainable:
            self.optim = Adam(self.model.parameters(),
                              lr=self.p.learning_rate,
                              betas=self.p.adam[:2],
                              eps=self.p.adam[2])

            # Learning rate adjustment
            self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim,
                patience=self.p.nb_epochs/4, factor=0.5, verbose=True)

            # Loss function
            if self.p.loss == 'hdr':
                assert self.is_mc, 'Using HDR loss on non Monte Carlo images'
                self.loss = HDRLoss()
            elif self.p.loss == 'l2':
                self.loss = nn.MSELoss()
            else:
                self.loss = nn.L1Loss()

        # CUDA support
        self.use_cuda = torch.cuda.is_available() and self.p.cuda
        if self.use_cuda:
            self.model = self.model.cuda()
            if self.trainable:
                self.loss = self.loss.cuda()


    def _print_params(self):
        """Formats parameters to print when training."""

        print('Training parameters: ')
        self.p.cuda = self.use_cuda
        param_dict = vars(self.p)
        pretty = lambda x: x.replace('_', ' ').capitalize()
        print('\n'.join('  {} = {}'.format(pretty(k), str(v)) for k, v in param_dict.items()))
        print()


    def save_model(self, epoch, stats, first=False):
        """Saves model to files; can be overwritten at every epoch to save disk space."""

        # Create directory for model checkpoints, if nonexistent
        if first:
            if self.p.clean_targets:
                ckpt_dir_name = f'{datetime.now():{self.p.noise_type}-clean-%H%M}'
            else:
                ckpt_dir_name = f'{datetime.now():{self.p.noise_type}-%H%M}'
            if self.p.ckpt_overwrite:
                if self.p.clean_targets:
                    ckpt_dir_name = f'{self.p.noise_type}-clean'
                else:
                    ckpt_dir_name = self.p.noise_type

            self.ckpt_dir = os.path.join(self.p.ckpt_save_path, ckpt_dir_name)
            if not os.path.isdir(self.p.ckpt_save_path):
                os.mkdir(self.p.ckpt_save_path)
            if not os.path.isdir(self.ckpt_dir):
                os.mkdir(self.ckpt_dir)

        # Save checkpoint dictionary
        if self.p.ckpt_overwrite:
            fname_unet = '{}/n2n-{}.pt'.format(self.ckpt_dir, self.p.noise_type)
        else:
            valid_loss = stats['valid_loss'][epoch]
            fname_unet = '{}/n2n-epoch{}-{:>1.5f}.pt'.format(self.ckpt_dir, epoch + 1, valid_loss)
        print('Saving checkpoint to: {}\n'.format(fname_unet))
        torch.save(self.model.state_dict(), fname_unet)

        # Save stats to JSON
        fname_dict = '{}/n2n-stats.json'.format(self.ckpt_dir)
        with open(fname_dict, 'w') as fp:
            json.dump(stats, fp, indent=2)


    def load_model(self, ckpt_fname):
        """Loads model from checkpoint file."""

        print('Loading checkpoint from: {}'.format(ckpt_fname))
        if self.use_cuda:
            self.model.load_state_dict(torch.load(ckpt_fname))
        else:
            self.model.load_state_dict(torch.load(ckpt_fname, map_location='cpu'))


    def _on_epoch_end(self, stats, train_loss, epoch, epoch_start, valid_loader):
        """Tracks and saves starts after each epoch."""

        # Evaluate model on validation set
        print('\rTesting model on validation set... ', end='')
        epoch_time = time_elapsed_since(epoch_start)[0]
        valid_loss, valid_time, valid_psnr = self.eval(valid_loader)
        show_on_epoch_end(epoch_time, valid_time, valid_loss, valid_psnr)

        # Decrease learning rate if plateau
        self.scheduler.step(valid_loss)

        # Save checkpoint
        stats['train_loss'].append(train_loss)
        stats['valid_loss'].append(valid_loss)
        stats['valid_psnr'].append(valid_psnr)
        self.save_model(epoch, stats, epoch == 0)


    def test(self, test_loader, show=1):
        """Evaluates denoiser on test set."""

        self.model.train(False)

        source_imgs = []
        denoised_imgs = []
        clean_imgs = []

        # Create directory for denoised images
        denoised_dir = os.path.dirname(self.p.data)
        save_path = os.path.join(denoised_dir, 'denoised')
        if not os.path.isdir(save_path):
            os.mkdir(save_path)

        for batch_idx, (source, target) in enumerate(test_loader):
            # Only do first <show> images
            if show == 0 or batch_idx >= show:
                break

            source_imgs.append(source)
            clean_imgs.append(target)

            if self.use_cuda:
                source = source.cuda()

            # Denoise
            denoised_img = self.model(source).detach()
            denoised_imgs.append(denoised_img)

        # Squeeze tensors
        source_imgs = [t.squeeze(0) for t in source_imgs]
        denoised_imgs = [t.squeeze(0) for t in denoised_imgs]
        clean_imgs = [t.squeeze(0) for t in clean_imgs]

        # Create montage and save images
        print('Saving images and montages to: {}'.format(save_path))
        for i in range(len(source_imgs)):
            img_name = test_loader.dataset.imgs[i]
            create_montage(img_name, self.p.noise_type, save_path, source_imgs[i], denoised_imgs[i], clean_imgs[i], show)


    def eval(self, valid_loader):
        """Evaluates denoiser on validation set."""

        self.model.train(False)

        valid_start = datetime.now()
        loss_meter = AvgMeter()
        psnr_meter = AvgMeter()

        for batch_idx, (source, target) in enumerate(valid_loader):
            if self.use_cuda:
                source = source.cuda()
                target = target.cuda()

            # Denoise
            source_denoised = self.model(source)

            # Update loss
            loss = self.loss(source_denoised, target)
            loss_meter.update(loss.item())

            # Compute PSNR
            if self.is_mc:
                source_denoised = reinhard_tonemap(source_denoised)
            # TODO: Find a way to keep the PSNR computation on the GPU
            source_denoised = source_denoised.cpu()
            target = target.cpu()
            for i in range(source_denoised.size(0)):  # handles uneven final batch
                psnr_meter.update(psnr(source_denoised[i], target[i]).item())

        valid_loss = loss_meter.avg
        valid_time = time_elapsed_since(valid_start)[0]
        psnr_avg = psnr_meter.avg

        return valid_loss, valid_time, psnr_avg


    def train(self, train_loader, valid_loader):
        """Trains denoiser on training set."""

        self.model.train(True)

        self._print_params()
        num_batches = len(train_loader)
        assert num_batches % self.p.report_interval == 0, 'Report interval must divide total number of batches'

        # Dictionaries of tracked stats
        stats = {'noise_type': self.p.noise_type,
                 'noise_param': self.p.noise_param,
                 'train_loss': [],
                 'valid_loss': [],
                 'valid_psnr': []}

        # Main training loop
        train_start = datetime.now()
        for epoch in range(self.p.nb_epochs):
            print('EPOCH {:d} / {:d}'.format(epoch + 1, self.p.nb_epochs))

            # Some stats trackers
            epoch_start = datetime.now()
            train_loss_meter = AvgMeter()
            loss_meter = AvgMeter()
            time_meter = AvgMeter()

            # Minibatch SGD
            for batch_idx, (source, target) in enumerate(train_loader):
                batch_start = datetime.now()
                progress_bar(batch_idx, num_batches, self.p.report_interval, loss_meter.val)

                if self.use_cuda:
                    source = source.cuda()
                    target = target.cuda()

                # Denoise image
                source_denoised = self.model(source)

                loss = self.loss(source_denoised, target)
                loss_meter.update(loss.item())

                # Zero gradients, perform a backward pass, and update the weights
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()

                # Report/update statistics
                time_meter.update(time_elapsed_since(batch_start)[1])
                if (batch_idx + 1) % self.p.report_interval == 0 and batch_idx:
                    show_on_report(batch_idx, num_batches, loss_meter.avg, time_meter.avg)
                    train_loss_meter.update(loss_meter.avg)
                    loss_meter.reset()
                    time_meter.reset()

            # Epoch end, save and reset tracker
            self._on_epoch_end(stats, train_loss_meter.avg, epoch, epoch_start, valid_loader)
            train_loss_meter.reset()

        train_elapsed = time_elapsed_since(train_start)[0]
        print('Training done! Total elapsed time: {}\n'.format(train_elapsed))
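
AvgMeter, time_elapsed_since, psnr, and the show_* functions are project utilities; a minimal AvgMeter consistent with how it is used above (val / avg / update / reset) might look like:

class AvgMeter(object):
    """Tracks the most recent value and the running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count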
Example #6
                                         remove_alpha=True,
                                         one_hot_mask=2)
test_dataset = TestFromFolder(os.path.join(all_datasets,
                                           'stage1_test/loc.csv'),
                              transform=T.ToTensor(),
                              remove_alpha=True)
"""
-----------------
----- Model -----
-----------------
"""

s = UNet(3, 2)
t = UNet(3, 3)
if use_gpu:
    s.cuda()
    t.cuda()

# lr = 0.001 seems to work WITHOUT PRETRAINING
s_optim = optim.Adam(s.parameters(), lr=0.1)
t_optim = optim.Adam(t.parameters(), lr=0.1)
s_scheduler = torch.optim.lr_scheduler.StepLR(s_optim, step_size=10)
t_scheduler = torch.optim.lr_scheduler.StepLR(t_optim, step_size=10)

gan = GANv2(s=s,
            s_optim=s_optim,
            s_loss=CrossEntropyLoss2d().cuda(),
            s_scheduler=s_scheduler,
            g=t,
            g_optim=t_optim)
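
CrossEntropyLoss2d is imported from elsewhere in this project; since nn.CrossEntropyLoss already accepts (N, C, H, W) logits against (N, H, W) targets, a minimal stand-in (an assumption, not the project's class) is:

import torch.nn as nn

class CrossEntropyLoss2d(nn.Module):
    def __init__(self, weight=None):
        super(CrossEntropyLoss2d, self).__init__()
        self.loss = nn.CrossEntropyLoss(weight=weight)

    def forward(self, outputs, targets):
        # outputs: (N, C, H, W) logits; targets: (N, H, W) class indices
        return self.loss(outputs, targets)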
Example #7
        model = UNet(n_channels=3, n_classes=1, height=height, width=width)
    else:
        height, width = 224, 224
        model = eval(args.model, {'torch': torch, 'torchvision': torchvision})

    if 'mobilenet_v2' in args.model:
        model = torch.nn.Sequential(model.features,
                                    torch.nn.AdaptiveAvgPool2d((1, 1)),
                                    torch.nn.Flatten(start_dim=1),
                                    model.classifier[0], model.classifier[1])
    if args.check_diff:
        disable_dropout(model)

    if args.check_runtime:
        graph = Graph.create(model, input_shape=(3, height, width))
        model.cuda()
        solvert = -1

        if len(args.solution_file) > 0:
            solver_info, solution = load_solution(args.solution_file)
        else:
            input_ = torch.randn((bs, 3, height, width)).cuda()
            solver_info = SolverInfo(bs=bs, model_name=model_name, mode=mode)
            solver_info.extract(
                graph, input_,
                *list(model.state_dict(keep_vars=True).values()))
            solver_model = Model(solver_info, budget, args.solver,
                                 args.ablation)
            t0 = time()
            solution = solver_model.solve()
            solvert = time() - t0
Example #8
                          pin_memory=True)
val_loader = DataLoader(scan_dataset_val,
                        batch_size=1,
                        shuffle=False,
                        num_workers=5,
                        pin_memory=True)
test_loader = DataLoader(scan_dataset_test,
                         batch_size=1,
                         shuffle=False,
                         num_workers=5,
                         pin_memory=True)

pseudo = UNet(1, 1)
net = UNet(1, 1, domain_specific=True)

pseudo.cuda()
net.cuda()

# list optimisers here...
# single optimiser variant 1

optimiser_ps = optim.Adam(pseudo.parameters(), lr=learning_rate)
optimiser_net = optim.Adam(net.parameters(), lr=learning_rate)

print('Project name ', project_name)
print('Learning rate ', learning_rate)
print('Epochs ', epochs)

train_loss = []
train_loss_rec = []
train_loss_seg = []
Example #9
def main():
    """Create the model and start the training."""
    args = get_arguments()

    cudnn.enabled = True
    n_discriminators = 5

    # create teacher & student
    student_net = UNet(3, n_classes=args.num_classes)
    teacher_net = UNet(3, n_classes=args.num_classes)
    student_params = list(student_net.parameters())

    # teacher doesn't need gradients as it's just an EMA of the student
    teacher_params = list(teacher_net.parameters())
    for param in teacher_params:
        param.requires_grad = False

    student_net.train()
    student_net.cuda(args.gpu)
    teacher_net.train()
    teacher_net.cuda(args.gpu)

    cudnn.benchmark = True
    unsup_weights = [
        args.unsup_weight5, args.unsup_weight6, args.unsup_weight7,
        args.unsup_weight8, args.unsup_weight9
    ]
    lambda_adv_tgts = [
        args.lambda_adv_tgt5, args.lambda_adv_tgt6, args.lambda_adv_tgt7,
        args.lambda_adv_tgt8, args.lambda_adv_tgt9
    ]

    # create a list of discriminators
    discriminators = []
    for dis_idx in range(n_discriminators):
        discriminators.append(FCDiscriminator(num_classes=args.num_classes))
        discriminators[dis_idx].train()
        discriminators[dis_idx].cuda(args.gpu)

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    max_iters = args.num_steps * args.iter_size * args.batch_size
    src_set = REFUGE(True,
                     domain='REFUGE_SRC',
                     is_transform=True,
                     augmentations=aug_student,
                     aug_for_target=aug_teacher,
                     max_iters=max_iters)
    src_loader = data.DataLoader(src_set,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    src_loader_iter = enumerate(src_loader)
    tgt_set = REFUGE(True,
                     domain='REFUGE_DST',
                     is_transform=True,
                     augmentations=aug_student,
                     aug_for_target=aug_teacher,
                     max_iters=max_iters)
    tgt_loader = data.DataLoader(tgt_set,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    tgt_loader_iter = enumerate(tgt_loader)
    student_optimizer = optim.SGD(student_params,
                                  lr=args.learning_rate,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
    teacher_optimizer = optim_weight_ema.WeightEMA(teacher_params,
                                                   student_params,
                                                   alpha=args.teacher_alpha)

    d_optimizers = []
    for idx in range(n_discriminators):
        optimizer = optim.Adam(discriminators[idx].parameters(),
                               lr=args.learning_rate_D,
                               betas=(0.9, 0.99))
        d_optimizers.append(optimizer)

    calc_bce_loss = torch.nn.BCEWithLogitsLoss()

    # labels for adversarial training
    source_label, tgt_label = 0, 1
    for i_iter in range(args.num_steps):

        total_seg_loss = 0
        seg_loss_vals = [0] * n_discriminators
        adv_tgt_loss_vals = [0] * n_discriminators
        d_loss_vals = [0] * n_discriminators
        unsup_loss_vals = [0] * n_discriminators

        for d_optimizer in d_optimizers:
            d_optimizer.zero_grad()
            adjust_learning_rate_D(d_optimizer, i_iter, args)

        student_optimizer.zero_grad()
        adjust_learning_rate(student_optimizer, i_iter, args)

        for sub_i in range(args.iter_size):

            # ******** Optimize source network with segmentation loss ********
            # As we don't change the discriminators, their parameters are fixed
            for discriminator in discriminators:
                for param in discriminator.parameters():
                    param.requires_grad = False

            _, src_batch = next(src_loader_iter)
            _, _, src_images, src_labels, _ = src_batch
            src_images = Variable(src_images).cuda(args.gpu)

            # calculate the segmentation losses
            sup_preds = list(student_net(src_images))
            seg_losses, total_seg_loss = [], 0
            for idx, sup_pred in enumerate(sup_preds):
                sup_interp_pred = sup_pred
                # you also can use dice loss like: dice_loss(src_labels, sup_interp_pred)
                seg_loss = Weighted_Jaccard_loss(src_labels, sup_interp_pred,
                                                 args.class_weights, args.gpu)
                seg_losses.append(seg_loss)
                total_seg_loss += seg_loss * unsup_weights[idx]
                seg_loss_vals[idx] += seg_loss.item() / args.iter_size

            _, tgt_batch = next(tgt_loader_iter)
            tgt_images0, tgt_lbl0, tgt_images1, tgt_lbl1, _ = tgt_batch
            tgt_images0 = Variable(tgt_images0).cuda(args.gpu)
            tgt_images1 = Variable(tgt_images1).cuda(args.gpu)

            # calculate ensemble losses
            stu_unsup_preds = list(student_net(tgt_images1))
            tea_unsup_preds = teacher_net(tgt_images0)
            total_mse_loss = 0
            for idx in range(n_discriminators):
                stu_unsup_probs = F.softmax(stu_unsup_preds[idx], dim=-1)
                tea_unsup_probs = F.softmax(tea_unsup_preds[idx], dim=-1)

                unsup_loss = calc_mse_loss(stu_unsup_probs, tea_unsup_probs,
                                           args.batch_size)
                unsup_loss_vals[idx] += unsup_loss.item() / args.iter_size
                total_mse_loss += unsup_loss * unsup_weights[idx]

            total_mse_loss = total_mse_loss / args.iter_size

            # As the requires_grad is set to False in the discriminator, the
            # gradients are only accumulated in the generator, the target
            # student network is optimized to make the outputs of target domain
            # images close to the outputs of source domain images
            stu_unsup_preds = list(student_net(tgt_images0))
            d_outs, total_adv_loss = [], 0
            for idx in range(n_discriminators):
                stu_unsup_interp_pred = stu_unsup_preds[idx]
                d_outs.append(discriminators[idx](stu_unsup_interp_pred))
                label_size = d_outs[idx].data.size()
                labels = torch.FloatTensor(label_size).fill_(source_label)
                labels = Variable(labels).cuda(args.gpu)
                adv_tgt_loss = calc_bce_loss(d_outs[idx], labels)

                total_adv_loss += lambda_adv_tgts[idx] * adv_tgt_loss
                adv_tgt_loss_vals[idx] += adv_tgt_loss.item() / args.iter_size

            total_adv_loss = total_adv_loss / args.iter_size

            # With requires_grad set back to True in the discriminators, the
            # gradients now accumulate only in them; the discriminators are
            # optimized to classify source vs. target predictions correctly
            d_losses = []
            for idx in range(n_discriminators):
                discriminator = discriminators[idx]
                for param in discriminator.parameters():
                    param.requires_grad = True

                sup_preds[idx] = sup_preds[idx].detach()
                d_outs[idx] = discriminators[idx](sup_preds[idx])

                label_size = d_outs[idx].data.size()
                labels = torch.FloatTensor(label_size).fill_(source_label)
                labels = Variable(labels).cuda(args.gpu)

                d_losses.append(calc_bce_loss(d_outs[idx], labels))
                d_losses[idx] = d_losses[idx] / args.iter_size / 2
                d_losses[idx].backward()
                d_loss_vals[idx] += d_losses[idx].item()

            for idx in range(n_discriminators):
                stu_unsup_preds[idx] = stu_unsup_preds[idx].detach()
                d_outs[idx] = discriminators[idx](stu_unsup_preds[idx])

                label_size = d_outs[idx].data.size()
                labels = torch.FloatTensor(label_size).fill_(tgt_label)
                labels = Variable(labels).cuda(args.gpu)

                d_losses[idx] = calc_bce_loss(d_outs[idx], labels)
                d_losses[idx] = d_losses[idx] / args.iter_size / 2
                d_losses[idx].backward()
                d_loss_vals[idx] += d_losses[idx].item()

        for d_optimizer in d_optimizers:
            d_optimizer.step()

        total_loss = total_seg_loss + total_adv_loss + total_mse_loss
        total_loss.backward()
        student_optimizer.step()
        teacher_optimizer.step()

        log_str = 'iter = {0:7d}/{1:7d}'.format(i_iter, args.num_steps)
        log_str += ', total_seg_loss = {0:.3f} '.format(total_seg_loss)
        templ = 'seg_losses = [' + ', '.join(['%.2f'] * len(seg_loss_vals))
        log_str += templ % tuple(seg_loss_vals) + '] '
        templ = 'ens_losses = [' + ', '.join(['%.5f'] * len(unsup_loss_vals))
        log_str += templ % tuple(unsup_loss_vals) + '] '
        templ = 'adv_losses = [' + ', '.join(['%.2f'] * len(adv_tgt_loss_vals))
        log_str += templ % tuple(adv_tgt_loss_vals) + '] '
        templ = 'd_losses = [' + ', '.join(['%.2f'] * len(d_loss_vals))
        log_str += templ % tuple(d_loss_vals) + '] '

        print(log_str)
        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            filename = 'UNet' + str(
                args.num_steps_stop) + '_v18_weightedclass.pth'
            torch.save(teacher_net.cpu().state_dict(),
                       os.path.join(args.snapshot_dir, filename))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            filename = 'UNet' + str(i_iter) + '_v18_weightedclass.pth'
            torch.save(teacher_net.cpu().state_dict(),
                       os.path.join(args.snapshot_dir, filename))
            teacher_net.cuda(args.gpu)
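
optim_weight_ema.WeightEMA is project-specific; a minimal sketch of the mean-teacher update it presumably performs on step() (an assumption based on how it is constructed and stepped above):

class WeightEMA(object):
    """teacher = alpha * teacher + (1 - alpha) * student, applied per parameter."""

    def __init__(self, teacher_params, student_params, alpha=0.99):
        self.teacher_params = list(teacher_params)
        self.student_params = list(student_params)
        self.alpha = alpha

    def step(self):
        for t, s in zip(self.teacher_params, self.student_params):
            t.data.mul_(self.alpha).add_(s.data, alpha=1 - self.alpha)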
Example #10
import torch
import torch.optim as optim
from numpy import asarray
from PIL import Image
from models.utils import z, imshow, image_to_tensor, tensor_to_image, crop_image
from models.configs import superresolutionSettings

img_path = "data/superresolution/snail.jpg"
img = Image.open(img_path)
imshow(asarray(img))

img = crop_image(img)

x = image_to_tensor(img)

net = UNet(superresolutionSettings)

if torch.cuda.is_available():
    net = net.cuda()

mse = torch.nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.01)

# Num of iters for training
num_iters = 2000

# Num of iters when to save image
save_frequency = 100

z0 = z(shape=(img.height, img.width), channels=32)

for i in range(num_iters):
    optimizer.zero_grad()
    output = net(z0)
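
z comes from models.utils; a plausible minimal version (an assumption: a deep-image-prior style fixed noise input) is:

def z(shape, channels=32):
    height, width = shape
    noise = torch.rand(1, channels, height, width) * 0.1  # small uniform noise
    return noise.cuda() if torch.cuda.is_available() else noise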
Example #11
args = parser.parse_args()

dataset_train = MaterialsDataset("PBR_dataset_256/", test=False)
dataset_test = MaterialsDataset("PBR_dataset_256/", test=True)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=16, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=16, shuffle=True)

if not torch.cuda.is_available():
    print("Warning cuda not found, falling back on CPU!")
    args.cuda = False

G = UNet(n_channels=3, n_output=3)
D = Discriminator()

if args.cuda:
    G.cuda()
    D.cuda()

lr=0.001

# run forever
for epoch in range(10000):
    lr *= 0.9995
    print("Epoch {}, lr: {}".format(epoch,lr) )
    G_solver = torch.optim.Adam(G.parameters(), lr=lr)
    D_solver = torch.optim.Adam(D.parameters(), lr=lr)

    train_loss = list()
    tic = time()
    # Train Epoch
    for batch_idx, batch_item in enumerate(train_loader):
Example #12
    if args.model == 'unet':
        model = UNet(3, n_classes=args.num_classes, first_conv_channels=args.unet_channels, batch_norm=unet_batch_norm, dropout_rate=unet_dropout_rate)
    else:
        raise Exception('Unknown model')
    if load_model:
        model_path = os.path.join(args.output_dir, 'checkpoint', args.initial_checkpoint)
        print_to_log('loading models from file', args.initial_checkpoint, log_file)
        model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
        # model.load_state_dict(torch.load(model_path))
        # torch.load(model_path)
        print_to_log('models loaded!', '', log_file)

    print("Running models: " + args.model)
    cuda = torch.cuda.is_available() and args.use_gpu
    if cuda:
        model = model.cuda()
    else:
        model = model.cpu()
    print_to_log('gpu', cuda, log_file)
    log_file.close()

    if not predict:
        print("training on train set")
        train_set = SemanticSegmentationDataset(args.train_dir,
                                                os.path.join('image_sets/', args.train_set),
                                                image_size, mode='train')
        val_set = SemanticSegmentationDataset(args.validation_dir,
                                              os.path.join('image_sets/', args.valid_set),
                                              image_size, mode='valid')

        loss = torch.nn.CrossEntropyLoss()
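
print_to_log is a small project helper; a minimal version consistent with the calls above (an assumption) prints the message and mirrors it to the open log file:

def print_to_log(message, value, log_file):
    line = '{} {}'.format(message, value)
    print(line)
    log_file.write(line + '\n')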
Example #13
                           batch_size=1,
                           shuffle=False,
                           num_workers=5,
                           pin_memory=True)
test_b_loader_gold = DataLoader(test_b_dataset_gold,
                                batch_size=1,
                                shuffle=False,
                                num_workers=5,
                                pin_memory=True)

pseudo = UNet(1, 1)
refine_net = UNet(1, 1)
# UNet(1, 1, norm_layer=nn.BatchNorm2d, affine=True)
net = UNet(1, 1, domain_specific=True)

pseudo.cuda()
refine_net.cuda()
net.cuda()

# list optimisers here...
# single optimiser variant 1

optimiser_ps = optim.Adam(pseudo.parameters(), lr=learning_rate)
optimiser_ref = optim.Adam(refine_net.parameters(), lr=learning_rate * 10)
optimiser_net = optim.Adam(net.parameters(), lr=learning_rate)

scheduler_ps = None  # StepLR(optimiser_ps, step_size=1, gamma=0.5)
scheduler_net = None  # StepLR(optimiser_net, step_size=5, gamma=0.8)

print('Project name ', project_name)
print('Learning rate ', learning_rate)
Example #14
                      '--load',
                      dest='load',
                      default=False,
                      help='load file model')

    (options, args) = parser.parse_args()

    if options.model == 1:
        net = UNet(3, 1)

        if options.load:
            net.load_state_dict(torch.load(options.load))
            print('Model loaded from {}'.format(options.load))

        if options.gpu:
            net.cuda()
            cudnn.benchmark = True

        try:
            train_net(net,
                      options.epochs,
                      options.batchsize,
                      options.lr,
                      gpu=options.gpu)
        except KeyboardInterrupt:
            torch.save(net.state_dict(), 'INTERRUPTED.pth')
            print('Saved interrupt')
            try:
                sys.exit(0)
            except SystemExit:
                os._exit(0)
Example #15
                        batch_size=1,
                        shuffle=False,
                        num_workers=5,
                        pin_memory=True)
test_loader = DataLoader(test_dataset,
                         batch_size=1,
                         shuffle=False,
                         num_workers=5,
                         pin_memory=True)

refine_net = UNet(1,
                  1,
                  norm_layer=nn.BatchNorm2d,
                  affine=True,
                  track_running_stats=True)
refine_net.cuda()

# list optimisers here...
# single optimiser variant 1

optimiser_ref = optim.Adam(refine_net.parameters(), lr=learning_rate)

print('Project name ', project_name)

train_dices = []
train_losses = []
val_dices = []
val_losses = []

for i in range(epochs):
    train_dice, train_loss = train_segmentation_net(refine_net,
Example #16
                            num_workers=5,
                            pin_memory=True)
val_b_loader = DataLoader(val_b_dataset,
                          batch_size=1,
                          shuffle=False,
                          num_workers=5,
                          pin_memory=True)
test_b_loader = DataLoader(test_b_dataset,
                           batch_size=1,
                           shuffle=False,
                           num_workers=5,
                           pin_memory=True)

# net and optimizer
ds_unet = UNet(1, 1, domain_specific=True)
ds_unet.cuda()
labeller = UNet(1, 1)
# import weights here...
labeller_path = './results/unet_sobel_eadan_in/net'
labeller.load_state_dict(
    torch.load(labeller_path,
               map_location=lambda storage, loc: storage))
labeller.cuda()

optimiser = optim.Adam(ds_unet.parameters(), lr=learning_rate)

print('Project name ', project_name)

train_dices = []
train_losses = []
val_a_dices = []
Example #17
    all_datasets, 'stage1_train_merged/loc.csv'),
                                         transform=T.ToTensor(),
                                         remove_alpha=True)
test_dataset = TestFromFolder(os.path.join(all_datasets,
                                           'stage1_test/loc.csv'),
                              transform=T.ToTensor(),
                              remove_alpha=True)
"""
-----------------
----- Model -----
-----------------
"""

generator = UNet(3, 1)
discriminator = Discriminator(4, 1)
generator.cuda()
discriminator.cuda()
# lr = 0.001 seems to work WITHOUT PRETRAINING
g_optim = optim.Adam(generator.parameters(), lr=0.001)
d_optim = optim.Adam(discriminator.parameters(), lr=0.001)
#g_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(g_optim, factor=0.1, verbose=True, patience=5)
#d_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(d_optim, factor=0.1, verbose=True, patience=5)

gan = GAN(
    g=generator,
    d=discriminator,
    g_optim=g_optim,
    d_optim=d_optim,
    g_loss=nn.MSELoss().cuda(),
    d_loss=nn.MSELoss().cuda(),
    #g_scheduler=g_scheduler, d_scheduler=d_scheduler
)
Example #18
        model = UNet(input_channels=NUM_INPUT_CHANNELS,
                     output_channels=NUM_OUTPUT_CHANNELS)
    elif args.model == "segnet":
        model = SegNet(input_channels=NUM_INPUT_CHANNELS,
                       output_channels=NUM_OUTPUT_CHANNELS)
    else:
        model = PSPNet(
            layers=50,
            bins=(1, 2, 3, 6),
            dropout=0.1,
            classes=NUM_OUTPUT_CHANNELS,
            use_ppm=True,
            pretrained=True,
        )

    # class_weights = 1.0 / train_dataset.get_class_probability()
    # criterion = torch.nn.CrossEntropyLoss(weight=class_weights)
    criterion = torch.nn.CrossEntropyLoss()

    if CUDA:
        model = model.cuda(device=GPU_ID)

        # class_weights = class_weights.cuda(GPU_ID)
        criterion = criterion.cuda(device=GPU_ID)

    if args.checkpoint:
        model.load_state_dict(torch.load(args.checkpoint))

    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

    train()
Example #19
def train_eval_model(opts):
    # parse model configuration
    num_epochs = opts["num_epochs"]
    train_batch_size = opts["train_batch_size"]
    val_batch_size = opts["eval_batch_size"]
    dataset_type = opts["dataset_type"]

    opti_mode = opts["optimizer"]
    loss_criterion = opts["loss_criterion"]
    lr = opts["lr"]
    lr_decay = opts["lr_decay"]
    wd = opts["weight_decay"]

    gpus = opts["gpu_list"].split(',')
    os.environ['CUDA_VISIBLE_DEVICES'] = opts["gpu_list"]
    train_dir = opts["log_dir"]

    train_data_dir = opts["train_data_dir"]
    eval_data_dir = opts["eval_data_dir"]

    pretrained = opts["pretrained_model"]
    resume = opts["resume"]
    display_iter = opts["display_iter"]
    save_epoch = opts["save_every_epoch"]
    show = opts["vis"]

    # backup train configs
    log_file = os.path.join(train_dir, "log_file.txt")
    os.makedirs(train_dir, exist_ok=True)
    model_dir = os.path.join(train_dir, "code_backup")
    os.makedirs(model_dir, exist_ok=True)
    if resume is None and os.path.exists(log_file): os.remove(log_file)
    shutil.copy("./models/unet.py", os.path.join(model_dir, "unet.py"))
    shutil.copy("./trainer_unet.py", os.path.join(model_dir,
                                                  "trainer_unet.py"))
    shutil.copy("./datasets/dataset.py", os.path.join(model_dir, "dataset.py"))

    ckt_dir = os.path.join(train_dir, "checkpoints")
    os.makedirs(ckt_dir, exist_ok=True)

    # format printing configs
    print("*" * 50)
    table_key = []
    table_value = []
    n = 0
    for key, value in opts.items():
        table_key.append(key)
        table_value.append(str(value))
        n += 1
    print_table([table_key, ["="] * n, table_value])

    # format gpu list
    gpu_list = [int(str_id) for str_id in gpus]

    # dataloader
    print("==> Create dataloader")
    dataloaders_dict = {
        "train":
        er_data_loader(train_data_dir,
                       train_batch_size,
                       dataset_type,
                       is_train=True),
        "eval":
        er_data_loader(eval_data_dir,
                       val_batch_size,
                       dataset_type,
                       is_train=False)
    }

    # define parameters of two networks
    print("==> Create network")
    num_channels = 1
    num_classes = 1
    model = UNet(num_channels, num_classes)
    init_weights(model)

    # loss layer
    criterion = create_criterion(criterion=loss_criterion)

    best_acc = 0.0
    start_epoch = 0

    # load pretrained model
    if pretrained is not None and os.path.isfile(pretrained):
        print("==> Train from model '{}'".format(pretrained))
        checkpoint_gan = torch.load(pretrained)
        model.load_state_dict(checkpoint_gan['model_state_dict'])
        print("==> Loaded checkpoint '{}')".format(pretrained))
        for param in model.parameters():
            param.requires_grad = False

    # resume training
    elif resume is not None and os.path.isfile(resume):
        print("==> Resume from checkpoint '{}'".format(resume))
        checkpoint = torch.load(resume)
        start_epoch = checkpoint['epoch'] + 1
        best_acc = checkpoint['best_acc']
        model_dict = model.state_dict()
        pretrained_dict = {
            k: v
            for k, v in checkpoint['model_state_dict'].items()
            if k in model_dict and v.size() == model_dict[k].size()
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)  # load the merged state dict
        print("==> Loaded checkpoint '{}' (epoch {})".format(
            resume, checkpoint['epoch'] + 1))

    # train from scratch
    else:
        print("==> Train from initial or random state.")

    # define multiple-GPU mode
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.cuda()
    model = nn.DataParallel(model)

    # print learnable parameters
    print("==> List learnable parameters")
    for name, param in model.named_parameters():
        if param.requires_grad:
            print("\t{}, size {}".format(name, param.size()))
    params_to_update = [{'params': model.parameters()}]

    # define optimizer
    print("==> Create optimizer")
    optimizer = create_optimizer(params_to_update,
                                 opti_mode,
                                 lr=lr,
                                 momentum=0.9,
                                 wd=wd)
    if resume is not None and os.path.isfile(resume):
        optimizer.load_state_dict(checkpoint['optimizer'])

    # start training
    since = time.time()

    # Each epoch has a training and validation phase
    print("==> Start training")
    total_steps = 0

    for epoch in range(start_epoch, num_epochs):

        print('-' * 50)
        print("==> Epoch {}/{}".format(epoch + 1, num_epochs))

        total_steps = train_one_epoch(epoch, total_steps,
                                      dataloaders_dict['train'], model, device,
                                      criterion, optimizer, lr, lr_decay,
                                      display_iter, log_file, show)

        epoch_acc, epoch_iou, epoch_f1 = eval_one_epoch(
            epoch, dataloaders_dict['eval'], model, device, log_file)

        if best_acc < epoch_acc and epoch >= 5:
            best_acc = epoch_acc
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.module.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_acc': best_acc
                }, os.path.join(ckt_dir, "best.pth"))

        if (epoch + 1) % save_epoch == 0 and (epoch + 1) >= 20:
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.module.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_iou': epoch_iou
                },
                os.path.join(ckt_dir,
                             "checkpoints_" + str(epoch + 1) + ".pth"))

    time_elapsed = time.time() - since
    time_message = 'Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60)
    print(time_message)
    with open(log_file, "a+") as fid:
        fid.write('%s\n' % time_message)
    print('==> Best val Acc: {:4f}'.format(best_acc))
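
create_criterion and create_optimizer are project factories; a minimal create_optimizer matching the call above (an assumption about the supported modes) might be:

def create_optimizer(params, opti_mode, lr=1e-3, momentum=0.9, wd=0.0):
    if opti_mode == 'sgd':
        return torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=wd)
    elif opti_mode == 'adam':
        return torch.optim.Adam(params, lr=lr, weight_decay=wd)
    raise ValueError('Unknown optimizer: {}'.format(opti_mode))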
Example #20
def train(frame_num,
          layer_nums,
          input_channels,
          output_channels,
          discriminator_num_filters,
          bn=False,
          pretrain=False,
          generator_pretrain_path=None,
          discriminator_pretrain_path=None):
    generator = UNet(n_channels=input_channels,
                     layer_nums=layer_nums,
                     output_channel=output_channels,
                     bn=bn)
    discriminator = PixelDiscriminator(output_channels,
                                       discriminator_num_filters,
                                       use_norm=False)

    generator = generator.cuda()
    discriminator = discriminator.cuda()

    flow_network = Network()
    flow_network.load_state_dict(torch.load(lite_flow_model_path))
    flow_network.cuda().eval()

    adversarial_loss = Adversarial_Loss().cuda()
    discriminate_loss = Discriminate_Loss().cuda()
    gd_loss = Gradient_Loss(alpha, num_channels).cuda()
    op_loss = Flow_Loss().cuda()
    int_loss = Intensity_Loss(l_num).cuda()
    step = 0

    if not pretrain:
        generator.apply(weights_init_normal)
        discriminator.apply(weights_init_normal)
    else:
        assert (generator_pretrain_path is not None
                and discriminator_pretrain_path is not None)
        generator.load_state_dict(torch.load(generator_pretrain_path))
        discriminator.load_state_dict(torch.load(discriminator_pretrain_path))
        step = int(generator_pretrain_path.split('-')[-1])
        print('pretrained model loaded!')

    print('initializing the model with Generator-Unet {} layers, '
          'PixelDiscriminator with filters {}'.format(
              layer_nums, discriminator_num_filters))

    optimizer_G = torch.optim.Adam(generator.parameters(), lr=g_lr)
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=d_lr)

    writer = SummaryWriter(writer_path)

    dataset = img_dataset.ano_pred_Dataset(training_data_folder, frame_num)
    dataset_loader = DataLoader(dataset=dataset,
                                batch_size=batch_size,
                                shuffle=True,
                                num_workers=1,
                                drop_last=True)

    test_dataset = img_dataset.ano_pred_Dataset(testing_data_folder, frame_num)
    test_dataloader = DataLoader(dataset=test_dataset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=1,
                                 drop_last=True)

    for epoch in range(epochs):
        for (input, _), (test_input, _) in zip(dataset_loader,
                                               test_dataloader):
            # generator = generator.train()
            # discriminator = discriminator.train()

            target = input[:, -1, :, :, :].cuda()

            input = input[:, :-1, ]
            input_last = input[:, -1, ].cuda()
            input = input.view(input.shape[0], -1, input.shape[-2],
                               input.shape[-1]).cuda()

            test_target = test_input[:, -1, ].cuda()
            test_input = test_input[:, :-1].view(test_input.shape[0], -1,
                                                 test_input.shape[-2],
                                                 test_input.shape[-1]).cuda()

            #------- update optim_G --------------

            G_output = generator(input)

            pred_flow_esti_tensor = torch.cat([input_last, G_output], 1)
            gt_flow_esti_tensor = torch.cat([input_last, target], 1)

            flow_gt = batch_estimate(gt_flow_esti_tensor, flow_network)
            flow_pred = batch_estimate(pred_flow_esti_tensor, flow_network)

            g_adv_loss = adversarial_loss(discriminator(G_output))
            g_op_loss = op_loss(flow_pred, flow_gt)
            g_int_loss = int_loss(G_output, target)
            g_gd_loss = gd_loss(G_output, target)

            g_loss = lam_adv * g_adv_loss + lam_gd * g_gd_loss + lam_op * g_op_loss + lam_int * g_int_loss

            optimizer_G.zero_grad()

            g_loss.backward()
            optimizer_G.step()

            train_psnr = psnr_error(G_output, target)

            #----------- update optim_D -------
            optimizer_D.zero_grad()

            d_loss = discriminate_loss(discriminator(target),
                                       discriminator(G_output.detach()))
            #d_loss.requires_grad=True

            d_loss.backward()
            optimizer_D.step()

            #----------- cal psnr --------------
            test_generator = generator.eval()
            test_output = test_generator(test_input)
            test_psnr = psnr_error(test_output, test_target).cuda()

            if step % 10 == 0:
                print("[{}/{}]: g_loss: {} d_loss {}".format(
                    step, epoch, g_loss, d_loss))
                print('\t gd_loss {}, op_loss {}, int_loss {} ,'.format(
                    g_gd_loss, g_op_loss, g_int_loss))
                print('\t train psnr {}, test_psnr {}'.format(
                    train_psnr, test_psnr))

                writer.add_scalar('psnr/train_psnr',
                                  train_psnr,
                                  global_step=step)
                writer.add_scalar('psnr/test_psnr',
                                  test_psnr,
                                  global_step=step)

                writer.add_scalar('total_loss/g_loss',
                                  g_loss,
                                  global_step=step)
                writer.add_scalar('total_loss/d_loss',
                                  d_loss,
                                  global_step=step)
                writer.add_scalar('g_loss/adv_loss',
                                  g_adv_loss,
                                  global_step=step)
                writer.add_scalar('g_loss/op_loss',
                                  g_op_loss,
                                  global_step=step)
                writer.add_scalar('g_loss/int_loss',
                                  g_int_loss,
                                  global_step=step)
                writer.add_scalar('g_loss/gd_loss',
                                  g_gd_loss,
                                  global_step=step)

                writer.add_image('image/train_target',
                                 target[0],
                                 global_step=step)
                writer.add_image('image/train_output',
                                 G_output[0],
                                 global_step=step)
                writer.add_image('image/test_target',
                                 test_target[0],
                                 global_step=step)
                writer.add_image('image/test_output',
                                 test_output[0],
                                 global_step=step)

            step += 1

            if step % 500 == 0:
                utils.saver(generator.state_dict(),
                            model_generator_save_path,
                            step,
                            max_to_save=10)
                utils.saver(discriminator.state_dict(),
                            model_discriminator_save_path,
                            step,
                            max_to_save=10)
                if step >= 2000:
                    print('==== begin evaluate the model of {} ===='.format(
                        model_generator_save_path + '-' + str(step)))

                    auc = evaluate(frame_num=5,
                                   layer_nums=4,
                                   input_channels=12,
                                   output_channels=3,
                                   model_path=model_generator_save_path + '-' +
                                   str(step),
                                   evaluate_name='compute_auc')
                    writer.add_scalar('results/auc', auc, global_step=step)
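
psnr_error is a utility from this project; assuming frames normalised to [-1, 1] (an assumption about the value range), a minimal version is:

def psnr_error(gen_frames, gt_frames, max_val=2.0):
    mse = torch.mean((gen_frames - gt_frames) ** 2)
    return 10 * torch.log10(max_val ** 2 / mse)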
Example #21
scan_dataset, _ = random_split(train_scan_dataset, [num_train, len(train_scan_dataset) - num_train])
scan_dataset_test, scan_dataset_val, _ = random_split(test_scan_dataset, 
                                                      [num_val, num_test, len(test_scan_dataset) - (num_val + num_test)])

train_loader = DataLoader(scan_dataset, batch_size=batch_size, num_workers=5, pin_memory=True)
val_loader = DataLoader(scan_dataset_val, batch_size=1, shuffle=False, num_workers=5, pin_memory=True)
test_loader = DataLoader(scan_dataset_test, batch_size=1, shuffle=False, num_workers=5, pin_memory=True)


pseudo = UNet(1, 1)
# this one works best!
net = UNetDense(1, 1, norm_layer=nn.BatchNorm2d, track_running_stats=False, affine=True)
# net = UNetWavelet(1, 1)

pseudo.cuda()
net.cuda()

# list optimisers here...
# single optimiser variant 1

optimiser_ps = optim.Adam(pseudo.parameters(), lr=learning_rate)
optimiser_net = optim.Adam(net.parameters(), lr=learning_rate)

print('Project name ', project_name)
print('Learning rate ', learning_rate)
print('Epochs ', epochs)

train_loss = []
train_loss_rec = []
train_loss_seg = []
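
The segmentation losses tracked here (and the train_dices lists in the earlier examples) usually come from a Dice metric; a minimal soft Dice score for binary masks (an assumption, not the project's exact helper) is:

def dice_score(pred, target, eps=1e-6):
    # pred, target: (N, 1, H, W) tensors with values in [0, 1]
    inter = (pred * target).sum(dim=(1, 2, 3))
    union = pred.sum(dim=(1, 2, 3)) + target.sum(dim=(1, 2, 3))
    return ((2 * inter + eps) / (union + eps)).mean()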