Example No. 1
    def interpolate_holes(self, mask2d, kernel=[3, 3, 3]):
        '''
        Fill in the holes, for instance, saturated pixels.
        
        Args:
            mask2d: holes are zeros. Mask is the same for all projections.
        '''

        misc.progress_bar(0)
        for ii, block in enumerate(self._parent.data):

            # Compute the filler: the inverse of the smoothed mask, used to
            # renormalize the smoothed data (normalized convolution). The mask
            # is 2D, so only the in-slice components of the kernel apply:
            tmp = ndimage.filters.gaussian_filter(numpy.float32(mask2d),
                                                  sigma=[kernel[0], kernel[2]])
            tmp[tmp > 0] **= -1

            # Apply filler (mask2d is assumed boolean; holes are False):
            block = block * mask2d[:, None, :]
            block += ndimage.filters.gaussian_filter(
                block, sigma=kernel) * tmp[:, None, :] * (~mask2d[:, None, :])

            self._parent.data[ii] = block

            # Show progress:
            misc.progress_bar((ii + 1) / self._parent.data.block_number)

        self._parent.meta.history.add_record(
            'process.interpolate_holes(mask2d, kernel)', kernel)
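Note: two progress-bar conventions appear in these examples: misc.progress_bar(fraction) takes a completion fraction in [0, 1], while the bare progress_bar(current, total, msg) takes a batch index, a total count and an optional message. Neither implementation is shown here; the following is a minimal sketch that accepts both calling styles (an assumption for illustration, not the actual misc module):

import sys

def progress_bar(current, total=None, msg=None):
    # Minimal sketch, not the real helper: accepts either a 0..1 fraction,
    # or (batch_index, total, message) as in the examples below.
    fraction = current if total is None else (current + 1) / total
    fraction = min(max(fraction, 0.0), 1.0)
    filled = int(40 * fraction)
    line = '[%s%s] %3d%%' % ('=' * filled, ' ' * (40 - filled), fraction * 100)
    if msg:
        line += ' ' + msg
    sys.stdout.write('\r' + line + ('\n' if fraction >= 1 else ''))
    sys.stdout.flush()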
Example No. 2
    def test(self, k=0):
        print("test k = {}:".format(k))
        self.model.eval()
        test_loss = 0
        test_correct = 0
        total = 0

        with torch.no_grad():
            for batch_num, (data, target) in enumerate(self.test_loader):
                data, target = data.to(self.device), target.to(self.device)
                self.model.fill_k(k)
                output = self.model(data)
                loss = self.criterion(output, target)
                test_loss += loss.item()
                prediction = torch.max(output, 1)
                total += target.size(0)
                test_correct += np.sum(
                    prediction[1].cpu().numpy() == target.cpu().numpy())

                progress_bar(
                    batch_num, len(self.test_loader),
                    'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
                    (test_loss / (batch_num + 1), 100. * test_correct / total,
                     test_correct, total))

        return test_loss, test_correct / total
Example No. 3
    def test(self):
        print("test:")
        self.model.eval()
        test_loss = 0
        test_correct = 0
        total = 0
        start = time.time()
        with torch.no_grad():
            for batch_num, (data, target) in enumerate(self.test_loader):
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)
                test_loss += loss.item()
                prediction = torch.max(output, 1)
                total += target.size(0)
                test_correct += np.sum(
                    prediction[1].cpu().numpy() == target.cpu().numpy())

                progress_bar(
                    batch_num, len(self.test_loader),
                    'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
                    (test_loss / (batch_num + 1), 100. * test_correct / total,
                     test_correct, total))
        end = time.time()
        time_used = end - start

        return test_loss, test_correct / total, time_used
Example No. 4
    def test_set5_img(self):
        """
        Get PSNR values for the Set5 test images and write them to the TensorBoard logs.
        """
        self.model.eval()
        avg_psnr = 0
        for batch_num, (data, target) in enumerate(self.set5_img_loader):
            target = target.numpy()
            target = target[:, :, 6:target.shape[2] - 6, 6:target.shape[3] - 6]
            # target = torch.from_numpy(target)
            if self.GPU:
                data, target = data.cuda(), torch.from_numpy(target).cuda()
            else:
                data, target = data, torch.from_numpy(target)

            prediction = self.model(data)
            prediction = prediction.data.cpu().numpy()
            prediction = prediction[:, :, 6:prediction.shape[2] - 6,
                                    6:prediction.shape[3] - 6]
            if self.GPU:
                prediction = torch.from_numpy(prediction).cuda()
            else:
                prediction = torch.from_numpy(prediction)
            mse = self.criterion(prediction, target)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(self.set5_img_loader),
                         'PSNR: %.4fdB' % (avg_psnr / (batch_num + 1)))

        self.info['PSNR for Set5'] = avg_psnr / len(self.set5_img_loader)
Example No. 5
    def train(self):
        """
        The main training function.
        """
        self.model.train()
        train_loss = 0
        for batch_num, (data, target) in enumerate(self.train_loader):
            if self.GPU:
                data, target = data.cuda(), target.cuda()

            # if self.graph: # plot the network
            #     graph = make_dot(self.model(data))
            #     graph.view()
            #     self.graph = False

            self.optimizer.zero_grad()
            model_out = self.model(data)
            loss = self.criterion(model_out, target)
            train_loss += loss.item()
            loss.backward()
            self.optimizer.step()
            progress_bar(batch_num, len(self.train_loader),
                         'Loss: %.5f' % (train_loss / (batch_num + 1)))

        self.info['loss'] = train_loss / len(self.train_loader)
Example No. 6
    def train(self):
        print("train:")
        self.model.train()
        train_loss = 0
        train_correct = 0
        total = 0


        for batch_num, (data, target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            #writer.add_scalar('Scalar/loss', loss.item(), iter)
            prediction = torch.max(output, 1)  # second param "1" represents the dimension to be reduced
            '''
            print(data,data.shape)
            print(target,target.shape)
            print(output,output.shape)
            print(prediction[0],prediction[0].shape)
            print(prediction[1],prediction[1].shape)
            print('--------------------------------------------------------------------------------------')
            '''
            total += target.size(0)

            # count how many predictions in this batch were correct
            train_correct += np.sum(prediction[1].cpu().numpy() == target.cpu().numpy())

            progress_bar(batch_num, len(self.train_loader), 'Loss: %.4f | Acc: %.3f%% (%d/%d)'
                         % (train_loss / (batch_num + 1), 100. * train_correct / total, train_correct, total))

        return train_loss, train_correct / total
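Since train() and test() each return a (loss, accuracy) pair, a typical driver loop looks like the sketch below (hypothetical: the surrounding solver instance and epoch count are placeholders, not part of the original source):

best_acc = 0.0
for epoch in range(1, 51):
    train_loss, train_acc = trainer.train()
    test_loss, test_acc = trainer.test()
    if test_acc > best_acc:
        best_acc = test_acc  # a natural place to checkpoint the model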
Example No. 7
    def test(self):
        print("test:")
        self.model.eval()
        total_loss = 0
        correct = 0
        total = 0

        with torch.no_grad():
            for batch_num, (data, target) in enumerate(self.test_loader):
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)
                self.writer.add_scalar("Test/Batch Loss", loss.item(),
                                       self.get_batch_plot_idx())
                total_loss += loss.item()
                prediction = torch.max(output, 1)
                total += target.size(0)

                correct += torch.sum((prediction[1] == target).float()).item()

                if self.args.progress_bar:
                    progress_bar(
                        batch_num, len(self.test_loader),
                        'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
                        (total_loss / (batch_num + 1), 100. * correct / total,
                         correct, total))

        return total_loss, correct / total
Example No. 8
def train(args, model, optimizer, train_loader, info, print=print):
    print('Train ' + time.ctime())
    model.train()
    if info['epoch'] in args.decreasing_lr:
        optimizer.param_groups[0]['lr'] *= 0.1
    msg = None
    for batch_idx, (data, target) in enumerate(train_loader):
        indx_target = target.clone()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)

        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()

        if batch_idx % args.log_interval == 0 and batch_idx > 0:
            pred = output.data.max(1)[1]  # index of the max log-probability
            correct = pred.cpu().eq(indx_target).sum().item()
            acc = correct * 1.0 / len(data)
            msg = 'Loss:{:.3f}/ACC:{:.3f}/lr:{:.5f}'.format(
                loss.item(), acc, optimizer.param_groups[0]['lr'])
            # print('Elapsed: {} Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
            # 	misc.format_time(time.time() - info['t_begin']), info['epoch'], batch_idx * len(data), len(train_loader),
            # 	loss.data[0], acc, optimizer.param_groups[0]['lr']))
        progress_bar(batch_idx, len(train_loader), msg)
Example No. 9
    def salt_pepper(self, kernel=[3, 1, 3]):
        '''
        Get rid of nasty speckles (salt & pepper noise).
        '''

        prnt = self._parent

        self._parent.message('Suppressing salt & pepper noise...')

        misc.progress_bar(0)
        for ii, block in enumerate(prnt.data):
            # Make a smooth version of the data and look for outliers:
            smooth = ndimage.filters.median_filter(block, kernel)
            mask = block / smooth
            mask = (numpy.abs(mask) > 2) | (numpy.abs(mask) < 0.5)

            block[mask] = smooth[mask]
            prnt.data[ii] = block

            misc.progress_bar((ii + 1) / self._parent.data.block_number)

        self._parent.message('Salt and pepper filter is applied.')

        self._parent.meta.history.add_record('process.salt_pepper(kernel)',
                                             kernel)
Example No. 10
def test(epoch):
    global best_eval
    net.eval()
    # evaluator = Evaluator_Miou(2)
    evaluator_dice = Evaluator_dice()
    evaluator_F1 = Evaluator_F1()
    with torch.no_grad():
        for idx, sample_name in enumerate(os.listdir(testing_root)):
            volume_path = os.path.join(testing_root, sample_name, 'data.nii.gz')
            gt_path = os.path.join(testing_root, sample_name, 'label.nii.gz')
            itk_CT = sitk.ReadImage(volume_path)
            itk_gt = sitk.ReadImage(gt_path)
            torch_CT = itk_transfor(itk_CT)
            torch_gt = itk_transfor(itk_gt)
            torch_CT = norm_filter(torch_CT)
            sub_batch_len = torch_CT.shape[0]
            gt_volume = np.array(torch_gt, dtype=np.uint8)
            for sub_batch_idx in range(sub_batch_len):
                img = torch_CT[sub_batch_idx, :, :]
                img = img.expand(torch.Size((1, 3, *img.shape)))
                img_var = Variable(img).cuda()
                outputs = net(img_var)
                prediction = np.array(outputs.data.squeeze().cpu())
                # prediction = crf_refine(np.array(img.permute(0,2,3,1)).squeeze(0).astype(np.uint8), prediction.astype(np.uint8))

                # if flag.save_pred:
                #     check_mkdir(os.path.join(ckpt_path, exp_name, 'prediction'))
                #     Image.fromarray(prediction).save(
                #         os.path.join(ckpt_path, exp_name, 'prediction', img_name))
                pred = Image.fromarray(prediction).convert('1')
                pred = np.array(pred, dtype=np.uint8).reshape((1, *pred.size))
                if sub_batch_idx == 0:
                    pred_volume = pred
                else:
                    pred_volume = np.concatenate((pred_volume, pred), axis=0)

            evaluator_dice.add_batch(pred_volume, gt_volume)
            evaluator_F1.add_batch(pred_volume, gt_volume)

            current_dice = evaluator_dice.get_dice()
            progress_bar(idx, len(os.listdir(testing_root)), 'Dice: %.4f' % current_dice)
        # Miou = evaluator.Frequency_Weighted_Intersection_over_Union()
        dice = evaluator_dice.get_dice()
        F1 = evaluator_F1.get_F1()
        print('Mean dice is %.4f | Mean F1 is %.4f'%(dice, F1))

        # Save checkpoint.
        if dice > best_eval and not flag.test:
            print('Saving..')
            state = {
                'net': net.state_dict(),
                'eval': dice,
                'epoch': epoch,
                'eval_type': 'dice'
            }
            checkpoint_path = os.path.join(ckpt_path, exp_name)
            if not os.path.isdir(checkpoint_path):
                os.mkdir(checkpoint_path)
            torch.save(state, os.path.join(checkpoint_path, 'model.pth'))
            best_eval = dice
Example No. 11
    def test_set14_img(self):
        '''
        Get PSNR values for the Set14 test images and write them to the TensorBoard logs.
        :return:
        '''
        self.model.eval()
        avg_psnr = 0
        for batch_num, (data, target) in enumerate(self.set14_img_loader):
            target = target.numpy()
            target = target[:, :, 6:target.shape[2] - 6, 6:target.shape[3] - 6]
            # target = Variable(torch.from_numpy(target))
            if self.GPU:
                data, target = Variable(data).cuda(), Variable(
                    torch.from_numpy(target)).cuda()
            else:
                data, target = Variable(data), Variable(
                    torch.from_numpy(target))

            prediction = self.model(data)
            prediction = prediction.data.cpu().numpy()
            # prediction = prediction[:, :, 6:prediction.shape[2] - 6, 6:prediction.shape[3] - 6]
            if self.GPU:
                prediction = Variable(torch.from_numpy(prediction)).cuda()
            else:
                prediction = Variable(torch.from_numpy(prediction))
            mse = self.criterion(prediction, target)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(self.set14_img_loader),
                         'PSNR: %.4fdB' % (avg_psnr / (batch_num + 1)))

        self.info['PSNR for Set14'] = avg_psnr / len(self.set14_img_loader)
Example No. 12
    def train(self):
        """
        data: [torch.cuda.FloatTensor], 4 batches: [64, 64, 64, 8]
        """
        self.model.train()
        train_loss = 0
        for batch_num, (data, target) in enumerate(self.training_loader):
            data = self.img_preprocess(data)  # resize input image size
            data, target = data.to(self.device), target.to(self.device)
            target_d, output = self.model(data)

            # loss1
            loss_1 = 0
            for d in range(self.num_recursions):
                loss_1 += (self.criterion(target_d[d], target) / self.num_recursions)

            # loss2
            loss_2 = self.criterion(output, target)

            # regularization
            reg_term = 0
            for theta in self.model.parameters():
                reg_term += torch.mean(torch.sum(theta ** 2))

            # total loss
            loss = self.loss_alpha * loss_1 + (1 - self.loss_alpha) * loss_2 + self.loss_beta * reg_term

            # clear gradients accumulated on the previous batch before backprop:
            self.optimizer.zero_grad()
            loss.backward()

            train_loss += loss.item()
            self.optimizer.step()
            progress_bar(batch_num, len(self.training_loader), 'Loss: %.4f' % (train_loss / (batch_num + 1)))

        print("    Average Loss: {:.4f}".format(train_loss / len(self.training_loader)))
Example No. 13
def train():
    print("train:")
    Net.train()
    train_loss = 0
    train_correct = 0
    total = 0

    for batch_num, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = Net(data)
        loss = loss_function(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        prediction = torch.max(output, 1)  # the second argument is the dimension to reduce
        total += target.size(0)

        # count how many predictions in this batch were correct
        train_correct += np.sum(
            prediction[1].cpu().numpy() == target.cpu().numpy())

        progress_bar(
            batch_num, len(train_loader), 'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
            (train_loss / (batch_num + 1), 100. * train_correct / total,
             train_correct, total))

    return train_loss, train_correct / total
Example No. 14
    def train(self):
        '''
        The main training function.
        :return:
        '''
        self.model.train()
        train_loss = 0
        for batch_num, (data, target) in enumerate(self.train_loader):
            if self.GPU:
                data, target = Variable(data).cuda(), Variable(target).cuda()
            else:
                data, target = Variable(data), Variable(target)

            # if self.graph: # plot the network
            #     graph = make_dot(self.model(data))
            #     graph.view()
            #     self.graph = False

            self.optimizer.zero_grad()
            loss = self.criterion(self.model(data), target)
            train_loss += loss.item()
            loss.backward()
            self.optimizer.step()
            progress_bar(batch_num, len(self.train_loader),
                         'Loss: %.5f' % (train_loss / (batch_num + 1)))

        self.info['loss'] = train_loss / len(self.train_loader)
Example No. 15
def _train(epoch):
    print('\nTrain Epoch: %d' % epoch)
    net.train()
    criterion.train()
    train_loss = 0

    scheduler_net.step()

    for batch_idx, (inputs, targets) in enumerate(train_loader):

        targets = targets.long()

        inputs, targets = inputs.to(device), targets.to(device)

        optimizer_net.zero_grad()
        outputs = net(inputs)
        if not use_ce:
            targets = one_hot_encoding(logits=outputs, targets=targets)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer_net.step()

        train_loss += loss.item()

        targets_ = label_binarize(targets.cpu().numpy(),
                                  classes=list(range(10)))

        roc = roc_auc_score(targets_, outputs.detach().cpu().numpy())
        progress_bar(
            batch_idx, len(train_loader),
            'Loss: %.3f, ROC: %.3f' % (train_loss / (batch_idx + 1), roc))
Example No. 16
    def profile_grad(self):
        print("train:")
        self.model.train()
        train_loss = 0
        train_correct = 0
        total = 0

        for batch_num, (data, target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(self.device)
            target = torch.randint(0, 10, target.shape, device=self.device)  # random labels, kept on the same device
            self.optimizer.zero_grad()
            output = self.model(data)
            # Note: state_dict() returns live references; wrap it in
            # copy.deepcopy() if a true before/after snapshot is needed.
            before = self.model.state_dict()
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            after = self.model.state_dict()
            train_loss += loss.item()
            prediction = torch.max(output, 1)  # the second argument is the dimension to reduce
            total += target.size(0)

            # count how many predictions in this batch were correct
            train_correct += np.sum(
                prediction[1].cpu().numpy() == target.cpu().numpy())

            progress_bar(
                batch_num, len(self.train_loader),
                'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
                (train_loss / (batch_num + 1), 100. * train_correct / total,
                 train_correct, total))
        return train_loss, train_correct / total
Example No. 17
def write_raw(path, name, data, dim = 1, dtype = None):
    """
    Write a tiff stack.
    
    Args:
        path (str): destination path
        name (str): first part of the files name
        data (numpy.array): data to write
        dim (int): dimension along which array is separated into images
        dtype (type): force this data type
    """
    # Make path if does not exist:
    if not os.path.exists(path):
        os.makedirs(path)
    
    # Write files stack:    
    file_num = data.shape[dim]
    
    for ii in range(file_num):
        
        path_name = os.path.join(path, name + '_%06u.tiff'%ii)
        
        # Extract one slice from the big array
        img = misc.anyslice(data, ii, dim)
          
        # Cast data to another type if needed
        if dtype is not None:
            img = misc.cast2type(img, dtype)
        
        # Write it!!!
        imageio.imwrite(path_name, img)
        
        misc.progress_bar((ii+1) / file_num)
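A hypothetical call (the output path and array are placeholders, and the exact dtype handling depends on misc.cast2type) that writes a float32 volume as a stack of uint16 tiffs, one image per slice along the first dimension:

import numpy

volume = numpy.random.rand(128, 256, 256).astype('float32')
write_raw('/tmp/out', 'slice', volume, dim=0, dtype='uint16')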
Example No. 18
def test():
    print("test:")
    Net.eval()
    test_loss = 0
    test_correct = 0
    total = 0

    with torch.no_grad():
        for batch_num, (data, target) in enumerate(test_loader):
            data, target = data.to(device), target.to(device)
            output = Net(data)
            loss = loss_function(output, target)
            test_loss += loss.item()
            prediction = torch.max(output, 1)
            total += target.size(0)
            test_correct += np.sum(
                prediction[1].cpu().numpy() == target.cpu().numpy())

            progress_bar(
                batch_num, len(test_loader),
                'Loss: %.4f | Acc: %.3f%% (%d/%d)' %
                (test_loss / (batch_num + 1), 100. * test_correct / total,
                 test_correct, total))

    return test_loss, test_correct / total
Example No. 19
def SIRT_CPU(projections,
             volume,
             geometry,
             iterations=10,
             relaxation=1,
             options={
                 'poisson_weight': False,
                 'l2_update': True,
                 'preview': False,
                 'bounds': None
             },
             psf=None):
    """
    SIRT on CPU with or without detector PSF
    """
    # Make sure array is contiguous (if not memmap):
    if not isinstance(projections, numpy.memmap):
        projections = numpy.ascontiguousarray(projections)

    # Note: we do NOT use the quick-and-dirty scaling coefficient below;
    # proper weights are computed instead:
    #m = (geometry['src2obj'] + geometry['det2obj']) / geometry['src2obj']
    #prj_weight = 2 / (projections.shape[1] * geometry['det_pixel'] ** 4 * max(volume.shape) / m)

    # Create the forward and the backward weights
    fwd_weights = np.zeros_like(projections)
    forwardproject(fwd_weights,
                   np.ones_like(volume, dtype=np.float32),
                   geometry,
                   psf=psf)
    tol = 1e-6
    fwd_weights[fwd_weights < tol] = np.inf
    fwd_weights = 1 / fwd_weights

    bwd_weights = np.zeros_like(volume, dtype=np.float32)
    backproject(np.ones_like(projections, dtype=np.float32),
                bwd_weights,
                geometry,
                psf=psf)
    bwd_weights[bwd_weights < tol] = np.inf
    bwd_weights = relaxation / bwd_weights

    #cur_proj = np.zeros_like(projections,dtype=np.float32)

    print('Doing SIRT with CPU/GPU iterations...')

    misc.progress_bar(0)

    for ii in range(iterations):
        #vol_update = bwd_weights.copy()
        vol_update = np.zeros_like(volume)
        cur_proj = projections.copy()
        forwardproject(cur_proj, -volume, geometry, operation='+')
        cur_proj *= fwd_weights

        backproject(cur_proj, vol_update, geometry, operation='+')
        volume += bwd_weights * vol_update
        misc.progress_bar((ii + 1) / iterations)
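SIRT_CPU updates the volume in place, so a hypothetical call looks like this (projections and geometry come from the surrounding library and are placeholders here):

volume = numpy.zeros((256, 256, 256), dtype=numpy.float32)
SIRT_CPU(projections, volume, geometry, iterations=20, relaxation=1)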
Example No. 20
    def equivalent_thickness(self, energy, spectrum, compound, density):
        '''
        Convert intensity values to equivalent thickness.
        '''
        prnt = self._parent

        # Assuming that we have log data!
        #if not 'process.log(air_intensity, bounds)' in self._parent.meta.history.keys:
        #    self._parent.error('Logarithm was not found in history of the projection stack. Apply log first!')

        print('Generating the transfer function.')

        # Attenuation of 1 mm:
        mu = simulate.spectra.linear_attenuation(energy,
                                                 compound,
                                                 density,
                                                 thickness=0.1)
        width = self._parent.data.slice_shape[1]

        # Make thickness range that is sufficient for interpolation:
        thickness_min = 0
        thickness_max = width * self._parent.meta.geometry.img_pixel[1]

        print('Assuming thickness range:', [thickness_min, thickness_max])
        thickness = numpy.linspace(thickness_min, thickness_max, 1000)

        exp_matrix = numpy.exp(-numpy.outer(thickness, mu))
        synth_counts = exp_matrix.dot(spectrum)

        synth_counts = -numpy.log(synth_counts)

        plt.figure()
        plt.plot(thickness, synth_counts, 'r-', lw=4, alpha=.8)
        plt.axis('tight')
        plt.title('Thickness (mm) vs. absorption length.')
        plt.show()

        print('Calibration attenuation range:',
              [synth_counts[0], synth_counts[-1]])
        print('Data attenuation range:',
              [self._parent.analyse.min(),
               self._parent.analyse.max()])

        print('Applying transfer function.')

        for ii, block in enumerate(self._parent.data):
            block = numpy.array(numpy.interp(block, synth_counts,
                                             thickness * density),
                                dtype='float32')

            prnt.data[ii] = block

            misc.progress_bar((ii + 1) / self._parent.data.block_number)

        self._parent.meta.history.add_record(
            'process.equivalent_thickness(energy, spectrum, compound, density)',
            [energy, spectrum, compound, density])
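The synthetic counts above follow the Beer-Lambert law summed over the source spectrum, and the transfer function is its negative logarithm:

    I(t) = sum_E S(E) * exp(-mu(E) * t),    p(t) = -ln I(t)

The per-voxel data are then mapped through the inverse p -> t * density by linear interpolation (the numpy.interp call).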
Example No. 21
    def train(self):
        """Train generator and discriminator."""
        total_step = len(self.data_loader)
        for epoch in range(self.num_epochs):
            print("===> Epoch [%d/%d]" % (epoch + 1, self.num_epochs))
            for i, images in enumerate(self.data_loader):

                # ===================== Train D ===================== #
                images = images.to(self.device)
                noise = torch.randn(images.size(0),
                                    self.z_dim,
                                    device=self.device)

                # Train D to recognize real images as real.
                outputs = self.discriminator(images)
                real_loss = func.binary_cross_entropy(
                    outputs, torch.ones(images.size(0), device=self.device))

                # Train D to recognize fake images as fake.
                fake_images = self.generator(noise.view(-1, self.z_dim, 1, 1))
                outputs = self.discriminator(fake_images)
                fake_loss = func.binary_cross_entropy(
                    outputs, torch.zeros(images.size(0), device=self.device))

                # Backpropagation + optimize
                self.reset_grad()
                d_loss = real_loss + fake_loss
                d_loss.backward()
                self.d_optimizer.step()

                # ===================== Train G =====================#
                noise = torch.randn(images.size(0),
                                    self.z_dim,
                                    device=self.device)

                # Train G so that D recognizes G(z) as real.
                fake_images = self.generator(noise.view(-1, self.z_dim, 1, 1))
                outputs = self.discriminator(fake_images)
                g_loss = func.binary_cross_entropy(
                    outputs, torch.ones(images.size(0), device=self.device))

                # Backpropagation + optimize
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # print the log info via progress bar
                progress_bar(
                    i, total_step,
                    'd_real_loss: %.4f | d_fake_loss: %.4f | g_loss: %.4f' %
                    (real_loss.item(), fake_loss.item(), g_loss.item()))

                # save the sampled images
                self.save_fakes(step=i, epoch=epoch)

            # save the model parameters for each epoch
            self.save_model(epoch=epoch)
Example No. 22
File: solver.py | Project: icpm/GANs
    def train(self):
        """Train generator and discriminator."""
        total_step = len(self.data_loader)
        k = 0  # control how much emphasis is put on L(G(z_D)) during gradient descent.

        for epoch in range(self.num_epochs):
            print("===> Epoch [%d/%d]" % (epoch + 1, self.num_epochs))
            for i, images in enumerate(self.data_loader):

                # ===================== Train D ===================== #
                images = images.to(self.device)
                noise = torch.randn(images.size(0), self.z_dim, device=self.device)

                # Train D to recognize real images as real.
                d_outputs = self.discriminator(images)
                # compute L(x)
                real_loss = torch.mean(torch.abs(d_outputs - images))

                # Train D to recognize fake images as fake.
                fake_images = self.generator(noise.view(-1, self.z_dim, 1, 1))
                fake_images = fake_images.detach()
                fake_output = self.discriminator(fake_images)
                # compute L(G(z_D))
                fake_loss = torch.mean(torch.abs(fake_output - fake_images))

                # Backpropagation + optimize
                self.reset_grad()
                # compute L_D
                d_loss = real_loss - k * fake_loss
                d_loss.backward()
                self.d_optimizer.step()

                # ===================== Train G =====================#

                # Train G so that D recognizes G(z) as real.
                fake_images = self.generator(noise.view(-1, self.z_dim, 1, 1))
                g_outputs = self.discriminator(fake_images)
                # compute L_G
                g_loss = torch.mean(torch.abs(g_outputs - fake_images))

                # Backpropagation + optimize
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # compute k_t
                balance = (self.gamma * real_loss - fake_loss).item()
                k = min(max(k + self.lr_k * balance, 0), 1)

                # print the log info via progress bar
                progress_bar(i, total_step, 'd_real_loss: %.4f | d_fake_loss: %.4f | g_loss: %.4f' % (real_loss.item(), fake_loss.item(), g_loss.item()))

                # save the sampled images
                self.save_fakes(step=i, epoch=epoch)

            # save the model parameters for each epoch
            self.save_model(epoch=epoch)
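The k update above is the BEGAN balancing rule, with lambda_k = self.lr_k and gamma = self.gamma:

    k_{t+1} = clip_[0,1](k_t + lambda_k * (gamma * L(x) - L(G(z))))

where L(.) is the discriminator's autoencoder reconstruction loss.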
Example No. 23
    def stich_tiles(self, projection_data):
        """
        Stitch several tiles together to produce new projection data. The result is written into projection_data.
        """

        #print('Stitching tiles...')

        # Total data shape:
        slice_shape, centre = self._stitch_shape()
        data_length = self.projections[0].data.length
        data_shape = [slice_shape[0], data_length, slice_shape[1]]

        projection_data.data.init_total(data_shape)

        # Assuming all data has the same amount of blocks and projections
        bn = self.projections[0].data.block_number

        # Make sure that the block length of the stitched data is the same as the input:
        projection_data.data.change_block_length(
            self.projections[0].data.block_shape[1])

        print('New block length is ', self.projections[0].data.block_shape[1])
        print('New block sizeGB is ', projection_data.data.sizeGB)

        for ii in range(bn):
            # Blocks from different datasets:
            blocks = []

            # geometries
            geometries = []

            # Loop over different projection stacks:
            for proj in self.projections:

                blocks.append(proj.data[ii])
                geometries.append(proj.meta.geometry)

            pixel = self.projections[0].meta.geometry.det_pixel

            # Produce one big block:
            big_block = self._stitch_block(blocks, pixel)
            projection_data.data[ii] = big_block

            misc.progress_bar((ii + 1) / bn)

        # Create a projection geometry:
        slice_shape = projection_data.data.slice_shape

        print('New slice shape is:', slice_shape)

        big_geom = flex_geometry.mean(geometries)

        # This is important: the new geometry should know its parent!
        big_geom._parent = projection_data

        projection_data.meta.geometry = big_geom
Example No. 24
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss_record, bce_loss_record, dice_loss_record = AvgMeter(), AvgMeter(), AvgMeter()
    for batch_idx, data in enumerate(train_loader):
        if epoch == args['lr_step']:
            optimizer.param_groups[0]['lr'] = 2 * args['lr'] / args['lr_decay']
            optimizer.param_groups[1]['lr'] = args['lr'] / args['lr_decay']

        inputs, labels, counter = data
        batch_size = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()
        counter = Variable(counter).cuda()
        optimizer.zero_grad()
        outputs, outputs_counter = net(inputs)
        # outputs = net(inputs)

        # BCE loss and dice loss can be used
        criterion_bce = nn.BCELoss()
        criterion_dice = Dice_loss()
        # if not isinstance(fnd_out, list):
        loss_bce = criterion_bce(outputs, labels) + criterion_bce(
            outputs_counter, counter)
        loss_dice = criterion_dice(outputs, labels) + criterion_dice(
            outputs_counter, counter)
        # loss_bce = criterion_bce(outputs, labels)
        # loss_dice = criterion_dice(outputs, labels)
        # else:
        #     loss_bce = criterion_bce(outputs, labels)
        #     loss_dice = criterion_dice(outputs, labels)
        #     for fnd_mask, fpd_mask in zip(fnd_out, fpd_out):
        #         loss_bce += criterion_bce(fnd_mask, fnd) + criterion_bce(fpd_mask, fpd)

        # else:
        #     loss_bce_each = [None] * len(outputs)
        #     loss_dice_each = [None] * len(outputs)
        #     for idx in range(len(outputs)):
        #         loss_bce_each[idx] = criterion_bce(outputs[idx], labels)
        #         loss_dice_each[idx] = criterion_dice(outputs[idx], labels)
        #     loss_bce = sum(loss_bce_each)
        #     loss_dice = sum(loss_dice_each)
        coeff = min(loss_dice.item() / loss_bce.item(), 1)
        # coeff = 1
        loss = coeff * loss_bce + loss_dice
        # loss = loss_bce + loss_dice
        loss.backward()
        optimizer.step()
        train_loss_record.update(loss.item(), batch_size)
        bce_loss_record.update(loss_bce.item(), batch_size)
        dice_loss_record.update(loss_dice.item(), batch_size)
        log = 'iter: %d | [bce loss: %.5f], [dice loss: %.5f],[Total loss: %.5f], [lr: %.8f]' % \
              (epoch, bce_loss_record.avg, dice_loss_record.avg, train_loss_record.avg, optimizer.param_groups[1]['lr'])
        progress_bar(batch_idx, len(train_loader), log)
Example No. 25
def get_category_info(data_set):
	from misc import progress_bar
	category_elems = {}
	for i in range(len(data_set)):
		image, category = data_set[i]
		if category not in category_elems:
			category_elems[category] = []
		category_elems[category].append(i)
		progress_bar(i, len(data_set))
	return category_elems
Example No. 26
def run(args, model, data_loader, info, optimizer=None, print=print):
    is_train = optimizer is not None
    print('%s %d - %s' %
          (is_train and 'Train' or 'Eval', info['epoch'], time.ctime()))
    if is_train:
        model.train()
    else:
        model.eval()
    if is_train and info['epoch'] in args.decreasing_lr:
        optimizer.param_groups[0]['lr'] *= args.lr_decreasing_rate
    msg = None
    total_loss = 0
    total_correct = 0
    data_count = 0

    for batch_idx, (data, target) in enumerate(data_loader):
        data_count += data.size(0)
        indx_target = target.clone()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)

        if is_train:
            optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        if is_train:
            loss.backward()
            optimizer.step()

        total_loss += loss.item()
        pred = output.data.max(1)[1]  # index of the max log-probability
        total_correct += pred.cpu().eq(indx_target).sum().item()

        if (not is_train) or (batch_idx % args.log_interval == 0
                              and batch_idx > 0):
            acc = 100. * total_correct / data_count
            msg = 'Loss:{:.3f},Acc:{}/{}({:.3f}%)'.format(
                total_loss / data_count, total_correct, data_count, acc)
            if is_train:
                msg += ',lr:{:.5f}'.format(optimizer.param_groups[0]['lr'])
        progress_bar(batch_idx, len(data_loader), msg)

    if (not is_train) and acc > info['best_acc']:
        new_file = os.path.join(args.logdir,
                                'best-{}.pth'.format(info['epoch']))
        misc.save_model(model,
                        new_file,
                        old_file=info['old_file'],
                        verbose=True)
        info['best_acc'] = acc
        info['old_file'] = new_file
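Passing an optimizer selects training mode; omitting it runs evaluation. A hypothetical driver (args, model, the loaders and the optimizer are placeholders):

info = {'epoch': 0, 'best_acc': 0, 'old_file': None}
for epoch in range(args.epochs):
    info['epoch'] = epoch
    run(args, model, train_loader, info, optimizer=optimizer)
    run(args, model, test_loader, info)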
Example No. 27
def read_raw(path, name, skip = 1, sample = 1, x_roi = [], y_roi = [], dtype = 'float32', disk_map = None):
    """
    Read tiff files stack and return numpy array.
    
    Args:
        path (str): path to the files location
        name (str): common part of the files name
        skip (int): read every so many files
        sample (int): sampling factor in x/y direction
        x_roi ([x0, x1]): horizontal range
        y_roi ([y0, y1]): vertical range
        dtype (str or numpy.dtype): data type to return
        disk_map (bool): if true, return a disk mapped array to save RAM
        
    Returns:
        numpy.array : 3D array with the first dimension representing the image index
        
    """    
    # Retrieve files, sorted by name
    files = _get_files_sorted_(path, name)
    
    # Read the first file:
    image = _read_tiff_(files[0], sample, x_roi, y_roi)
    sz = numpy.shape(image)
    
    file_n = len(files) // skip
    
    # Create a mapped array if needed:
    if disk_map:
        data = numpy.memmap(disk_map, dtype=dtype, mode='w+', shape=(file_n, sz[0], sz[1]))
    else:
        data = numpy.zeros((file_n, sz[0], sz[1]), dtype=dtype)
    
    # Read all files:
    for ii in range(file_n):
        
        filename = files[ii*skip]
        try:
            a = _read_tiff_(filename, sample, x_roi, y_roi)
        except Exception:
            print('WARNING! FILE IS CORRUPTED. CREATING A BLANK IMAGE: ', filename)
            a = numpy.zeros(data.shape[1:], dtype = numpy.float32)
            
        if a.ndim > 2:
          a = a.mean(2)
          
        data[ii, :, :] = a

        misc.progress_bar((ii + 1) / file_n)

    print('%u files were loaded.' % file_n)

    return data    
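A hypothetical call (path and name are placeholders) that reads every second tiff at 2x downsampling, cropped horizontally to pixels 100..900:

data = read_raw('/tmp/scan', 'proj', skip=2, sample=2, x_roi=[100, 900])
print(data.shape)  # (number of files // 2, height, width) after cropping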
Example No. 28
    def train(self):
        """Train generator and discriminator."""
        total_step = len(self.data_loader)
        for epoch in range(self.num_epochs):
            print("===> Epoch [%d/%d]" % (epoch + 1, self.num_epochs))
            for i, images in enumerate(self.data_loader):

                # ===================== Train D ===================== #
                images = images.to(self.device)
                batch_size = images.size(0)
                noise = torch.randn(batch_size, self.z_dim, device=self.device)

                # Train D to recognize real images as real.
                outputs = self.discriminator(images)
                real_loss = self.least_square_loss(
                    outputs, 1
                )  # L2 loss instead of Binary cross entropy loss (this is optional for stable training)

                # Train D to recognize fake images as fake (detach so this
                # step does not backpropagate into G).
                fake_images = self.generator(noise).detach()
                outputs = self.discriminator(fake_images)
                fake_loss = self.least_square_loss(outputs, 0)

                # Backpropagation + optimize
                self.reset_grad()
                d_loss = real_loss + fake_loss
                d_loss.backward()
                self.d_optimizer.step()

                # ===================== Train G =====================#
                noise = torch.randn(batch_size, self.z_dim, device=self.device)

                # Train G so that D recognizes G(z) as real.
                fake_images = self.generator(noise)
                outputs = self.discriminator(fake_images)
                g_loss = self.least_square_loss(outputs, 1)

                # Backpropagation + optimize
                self.reset_grad()
                g_loss.backward()
                self.g_optimizer.step()

                # print the log info via progress bar
                progress_bar(
                    i, total_step,
                    'd_real_loss: %.4f | d_fake_loss: %.4f | g_loss: %.4f' %
                    (real_loss.item(), fake_loss.item(), g_loss.item()))

                # save the sampled images
                self.save_fakes(step=i, epoch=epoch)

            # save the model parameters for each epoch
            self.save_model(epoch=epoch)
Example No. 29
    def train(self):
        """
        data: [torch.cuda.FloatTensor], 4 batches: [64, 64, 64, 8]
        """
        # models setup
        self.netG.train()
        self.netD.train()
        g_train_loss = 0
        d_train_loss = 0
        for batch_num, (data, target) in enumerate(self.training_loader):
            # setup noise
            real_label = self.to_variable(
                torch.ones(data.size(0), data.size(1)))
            fake_label = self.to_variable(
                torch.zeros(data.size(0), data.size(1)))
            if self.GPU_IN_USE:
                data, target = Variable(data).cuda(), Variable(target).cuda()

            # Train Discriminator
            self.optimizerD.zero_grad()
            d_real = self.netD(target)
            d_real_loss = self.criterionD(d_real, real_label)

            d_fake = self.netD(self.netG(data).detach())  # detach: the D step should not backprop into G
            d_fake_loss = self.criterionD(d_fake, fake_label)
            d_total = d_real_loss + d_fake_loss
            d_train_loss += d_total.item()
            d_total.backward()
            self.optimizerD.step()

            # Train generator
            self.optimizerG.zero_grad()
            g_real = self.netG(data)
            g_fake = self.netD(g_real)
            gan_loss = self.criterionD(g_fake, real_label)
            mse_loss = self.criterionG(g_real, target)

            g_total = mse_loss + 1e-3 * gan_loss
            g_train_loss += g_total.item()
            g_total.backward()
            self.optimizerG.step()

            progress_bar(
                batch_num, len(self.training_loader),
                'G_Loss: %.4f | D_Loss: %.4f' %
                (g_train_loss / (batch_num + 1), d_train_loss /
                 (batch_num + 1)))

        print("    Average G_Loss: {:.4f}".format(g_train_loss /
                                                  len(self.training_loader)))
Example No. 30
def FDK(projections, volume, geometry):
    """
    FDK
    """
    # Make sure array is contiguous (if not memmap):
    if not isinstance(projections, numpy.memmap):
        projections = numpy.ascontiguousarray(projections)

    misc.progress_bar(0)

    # Yeeey!
    backproject(projections, volume, geometry, 'FDK_CUDA')

    misc.progress_bar(1)