Example #1
    def get_data(self):
        """
        Return one batch of data.

        template_image - (127, 127, 3)
        detection_image - (255, 255, 3)
        gt_objectness - (17, 17, 5, 2)
        gt_regression - (17, 17, 5, 4)
        """
        template_image = normalize(self.ret['template_cropped_resized'])
        detection_image = normalize(self.ret['detection_cropped_resized'])
        pos_neg_diff = self.ret['pos_neg_diff']
        gt_objectness = pos_neg_diff[:, :2].reshape(self.score_size,
                                                    self.score_size, -1, 2)
        gt_regression = pos_neg_diff[:, 2:].reshape(self.score_size,
                                                    self.score_size, -1, 4)

        return template_image, detection_image, (gt_objectness, gt_regression)
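A minimal sketch of how this batch might be consumed; `dataset` is an assumed instance of the class this method belongs to, and the asserted shapes simply restate the docstring above.

# Hypothetical consumer of get_data(); `dataset` is an assumption,
# not part of the original code.
template_image, detection_image, (gt_objectness, gt_regression) = dataset.get_data()
assert template_image.shape == (127, 127, 3)
assert detection_image.shape == (255, 255, 3)
assert gt_objectness.shape == (17, 17, 5, 2)   # per-anchor objectness targets
assert gt_regression.shape == (17, 17, 5, 4)   # per-anchor regression targets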
Example #2
import os
import time

import util


def train(GAN, data_batch, epochs, run_dir, gen_lr, disc_lr, gen_train_freq,
          disc_train_freq, logger):
    """
    --- GAN Training Loop -- 

    For every epoch: 
        - go through each batch in dataset 
        - run DCGAN's train_step() function
    - Every 15 epochs: 
        - save checkpoint 

    @param GAN: <<GAN Object>> a GAN object that implements the base.py GAN model 
    @param data_batch: <<TensorFlow Data Iterator>> iterator containing real images for discriminator training
    @param epochs : <<int>> number of training epochs
    @param run_dir : <<str>> path to save all training run data 
    @param gen_lr: <<float>> learning rate for ADAM optimizer (generator)
    @param disc_lr: <<float>> learning rate for discriminator Adam optimizer 
    @param logger:?
    @param gen_train_freq: generator training frequency per epoch
    @param disc_train_freq: discriminator training frequency per epoch
    """

    for epoch in range(epochs):

        # Create directory to save run images
        epoch_save_path = os.path.join(run_dir, 'epoch_{}'.format(epoch))
        os.makedirs(epoch_save_path, exist_ok=True)

        start = time.time()

        for i in range(len(data_batch)):

            # Extract image tensor and disregard labels
            real_data, _ = data_batch.next()

            # Normalize images
            real_data = util.normalize(real_data)

            # Train GAN
            generated = GAN.train_step(real_data, gen_lr, disc_lr,
                                       gen_train_freq, disc_train_freq, epoch,
                                       logger)

            # Save images to epoch dir
            util.save_image_batch(generated, epoch_save_path)

            print("Batch {} completed".format(i))

        print('Time for epoch {} is {} sec'.format(epoch + 1,
                                                   time.time() - start))
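A hypothetical invocation of train(); DCGAN, make_data_iterator, and metrics_logger are assumed stand-ins for whatever the project actually provides, not names from the source.

# Hypothetical invocation; every name below is a placeholder.
gan = DCGAN()                        # must implement base.py's train_step()
data_iter = make_data_iterator()     # yields (images, labels), supports len()
train(gan, data_iter, epochs=50, run_dir='./runs/exp1',
      gen_lr=2e-4, disc_lr=2e-4, gen_train_freq=1, disc_train_freq=1,
      logger=metrics_logger)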
Example #3
import math
import time

import util


def train_discrim(GAN, disc_lr, data_batch, epochs, logger, batch_size):

    start = time.time()

    # Hold out the last 5 batches for validation
    for i in range(len(data_batch) - 5):

        imgs, _ = data_batch.next()
        imgs = util.normalize(imgs)

        # Exponential learning-rate decay schedule: lr = initial_lr * 0.25**i
        initial_lr = 0.01
        disc_lr = initial_lr * math.pow(0.25, i)
        logger.log_metric('Disc Learning Rate', disc_lr, step=i)

        for epoch in range(epochs):

            start = time.time()
            GAN.discTrain(disc_lr, imgs, epoch, logger)
            print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

        print("Batch {} completed".format(i))

    # Check discriminator validation loss on the held-out batches
    for j in range(5):

        imgs, _ = data_batch.next()
        imgs = util.normalize(imgs)

        preds = GAN.discriminator(imgs)
        pred_loss = loss(preds)  # loss() is assumed to be defined elsewhere in the module
        print("valid loss at iteration {}".format(j))
        print(pred_loss)
        logger.log_metric('Validation Loss', pred_loss, step=j)
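Note how aggressive the decay schedule above is; a quick check of its first few values (illustrative arithmetic only):

# lr = 0.01 * 0.25**i collapses to near zero within a few batches:
import math
for i in range(4):
    print(i, 0.01 * math.pow(0.25, i))
# 0 0.01
# 1 0.0025
# 2 0.000625
# 3 0.00015625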
Example #4
import logging
import os

import numpy as np
import pydicom
import SimpleITK as sitk
import torch


def test_single(weight_path, file_path, save_dir, do_postprocess=False, anchor_params=None):
    pid = os.path.basename(file_path)

    logging.info('Starting processing, dicom file path %s, output dir %s' % (file_path, save_dir))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Load image data
    standard_spacing = np.array([2.5, 0.976562, 0.976562])
    raw_img, origin, spacing = load_dicom_image(file_path)

    logging.info('Spacing %s', str(spacing))
    logging.info('Shape %s', str(raw_img.shape))

    # Read one DICOM file from the series to get the metadata
    dicom_file_single_path = None
    for dirpath, dirnames, filenames in os.walk(file_path):
        if filenames:
            dicom_file_single_path = os.path.join(dirpath, filenames[0])
            break
    ds = pydicom.dcmread(dicom_file_single_path, force=True)

    # Preprocess raw image
    logging.info('Preprocessing...')
    processed_image = raw_img.copy()
    npy_mask = preprocess_image(sitk.GetImageFromArray(raw_img))
    processed_image[npy_mask == 0] = -1024

    # Crop only brain region to reduce image size
    # Only choose certain number of slices
    D_raw, H_raw, W_raw = raw_img.shape
    z_start = max(0, D_raw - config['test_max_size'][0])
    z_end = D_raw
    _, yy, xx = np.where(processed_image > -1024)
    y_start = yy.min()
    y_end = yy.max()
    x_start = xx.min()
    x_end = xx.max()

    # Ensure only certain XY resolution
    y_center, x_center = H_raw // 2, W_raw // 2
    if y_end - y_start > config['test_max_size'][1]:
        y_start = max(0, y_center - config['test_max_size'][1] // 2)
        y_end = min(H_raw, y_center + config['test_max_size'][1] // 2)

    if x_end - x_start > config['test_max_size'][2]:
        x_start = max(0, x_center - config['test_max_size'][2] // 2)
        x_end = min(W_raw, x_center + config['test_max_size'][2] // 2)

    processed_image = processed_image[z_start:z_end, y_start:y_end, x_start:x_end]
    logging.info('Image input size: %s' % (str(processed_image.shape)))

    # Data to torch tensor
    D, H, W = processed_image.shape
    input = processed_image.copy()
    input = pad2factor(input)
    input = input[np.newaxis, ...].astype(np.float32)
    input = normalize(input)
    input = torch.from_numpy(input).float()
    input = input.unsqueeze(0).cuda()

    # Load net
    logging.info('Predicting...')
    net = Net(config).cuda()
    if not weight_path:
        logging.error('No model weight file specified')
        return

    if os.path.isfile(weight_path):
        # Single model prediction
        logging.info('Single model prediction mode')
        logging.info('Loading model from %s' % weight_path)
        checkpoint = torch.load(weight_path)
        net.load_state_dict(checkpoint['state_dict'])

        pred_mask = predict(net, input)
    else:
        # Multiple model predictions, using majority ensemble
        logging.info('Multi models prediction mode')
        input_d, input_h, input_w = input.shape[2:]
        pred_mask = np.zeros((len(config['roi_names']),
                              input_d, input_h, input_w))
        ckpts = os.listdir(weight_path)
        n_consensus = (len(ckpts) + 1) // 2
        logging.info('Number of consensus %d' % (n_consensus))

        for ckpt in ckpts:
            one_weight_path = os.path.join(weight_path, ckpt)
            logging.info('Loading model from %s' % one_weight_path)
            checkpoint = torch.load(one_weight_path)
            net.load_state_dict(checkpoint['state_dict'])

            pred_mask += predict(net, input)

        pred_mask = (pred_mask >= n_consensus).astype(np.uint8)

    pred_mask = pred_mask[:, :D, :H, :W]

    # Post process
    if do_postprocess:
        raise NotImplementedError
    
    # Convert back into raw image size
    raw_pred_mask = np.zeros((len(config['roi_names']), D_raw, H_raw, W_raw), dtype=np.uint8)
    raw_pred_mask[:, z_start:z_end, y_start:y_end, x_start:x_end] = pred_mask

    normalized_img = (normalize(raw_img) + 1) / 2
    raw_pred_contours = merge_contours(get_contours_from_masks(raw_pred_mask))
    pred_img = draw_pred(normalized_img, raw_pred_contours)

    np.save(os.path.join(save_dir, '%s_raw_mask.npy' % (pid)), raw_pred_mask)
    np.save(os.path.join(save_dir, '%s_raw_contours.npy' % (pid)), raw_pred_contours)

    png_save_dir = os.path.join(save_dir, '%s' % (pid))
    if not os.path.exists(png_save_dir):
        os.makedirs(png_save_dir)
    logging.info('Saving predicted pngs to %s' % (png_save_dir))
    generate_image_pngs(raw_img, raw_pred_mask, png_save_dir)
    # generate_image_anim(pred_img, save_path=os.path.join(save_dir, '%s.mp4' % (pid)))

    logging.info('Finished')
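A hypothetical invocation; both paths below are placeholders. Passing a directory of checkpoints as weight_path triggers the majority-vote ensemble branch instead of single-model prediction.

# Hypothetical invocation; paths are placeholders, not from the source.
test_single(
    weight_path='./checkpoints/model.ckpt',  # a directory here enables the ensemble branch
    file_path='/data/dicom/PATIENT_001',
    save_dir='./predictions',
    do_postprocess=False,                    # post-processing is not implemented
)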
Example #5
    def preprocessing(self, template_img, detection_img, template_gt_center, detection_gt_center, id):
        template_origin_height, template_origin_width, _ = template_img.shape
        detection_origin_height, detection_origin_width, _ = detection_img.shape

        template_target_cx, template_target_cy, template_target_w, template_target_h = template_gt_center
        detection_target_cx, detection_target_cy, detection_target_w, detection_target_h = detection_gt_center

        p = (template_target_w + template_target_h) // 2
        template_square_size = int(np.sqrt((template_target_w + p) * (template_target_h + p)))
        detection_square_size = int(template_square_size * 2)

        template_target_left = template_target_cx - template_square_size // 2
        template_target_top = template_target_cy - template_square_size // 2
        template_target_right = template_target_cx + template_square_size // 2
        template_target_bottom = template_target_cy + template_square_size // 2

        random_shift_x = random.randint(0, detection_square_size // 2)
        random_shift_y = random.randint(0, detection_square_size // 2)

        # np.clip(a, a_min, a_max): clip the randomly shifted center into the image bounds
        detection_target_cx_random = np.clip(
            random.randint(detection_target_cx - random_shift_x, detection_target_cx + random_shift_x),
            0, detection_origin_width - 1)
        detection_target_cy_random = np.clip(
            random.randint(detection_target_cy - random_shift_y, detection_target_cy + random_shift_y),
            0, detection_origin_height - 1)

        detection_target_left = detection_target_cx_random - detection_square_size // 2
        detection_target_top = detection_target_cy_random - detection_square_size // 2
        detection_target_right = detection_target_cx_random + detection_square_size // 2
        detection_target_bottom = detection_target_cy_random + detection_square_size // 2

        # calculate template padding
        template_left_padding = -template_target_left if template_target_left < 0 else 0
        template_top_padding = -template_target_top if template_target_top < 0 else 0
        template_right_padding = template_target_right - template_origin_width \
            if template_target_right > template_origin_width else 0
        template_bottom_padding = template_target_bottom - template_origin_height \
            if template_target_bottom > template_origin_height else 0

        new_template_width = template_left_padding + template_origin_width + template_right_padding
        new_template_height = template_top_padding + template_origin_height + template_bottom_padding

        # calculate detection padding
        detection_left_padding = -detection_target_left if detection_target_left < 0 else 0
        detection_top_padding = -detection_target_top if detection_target_top < 0 else 0
        detection_right_padding = detection_target_right - detection_origin_width \
            if detection_target_right > detection_origin_width else 0
        detection_bottom_padding = detection_target_bottom - detection_origin_height \
            if detection_target_bottom > detection_origin_height else 0

        new_detection_width = detection_left_padding + detection_origin_width + detection_right_padding
        new_detection_height = detection_top_padding + detection_origin_height + detection_bottom_padding

        if any([detection_left_padding, detection_top_padding, detection_right_padding, detection_bottom_padding]):
            img_mean = tuple(map(int, detection_img.mean(axis=(0, 1))))
            detection_with_padding = np.zeros((new_detection_height, new_detection_width, 3), np.uint8)
            detection_with_padding[detection_top_padding:detection_top_padding + detection_origin_height, detection_left_padding:detection_left_padding + detection_origin_width, :] = detection_img
            if detection_top_padding:
                detection_with_padding[0:detection_top_padding, detection_left_padding:detection_left_padding + detection_origin_width, :] = img_mean
            if detection_bottom_padding:
                detection_with_padding[detection_origin_height + detection_top_padding:, detection_left_padding:detection_left_padding + detection_origin_width, :] = img_mean
            if detection_left_padding:
                detection_with_padding[:, 0:detection_left_padding, :] = img_mean
            if detection_right_padding:
                detection_with_padding[:, detection_origin_width + detection_left_padding:, :] = img_mean
            new_detection_img_padding = detection_with_padding
        else:
            new_detection_img_padding = detection_img

        if any([template_left_padding, template_top_padding, template_right_padding, template_bottom_padding]):
            img_mean = tuple(map(int, template_img.mean(axis=(0, 1))))
            template_with_padding = np.zeros((new_template_height, new_template_width, 3), np.uint8)
            template_with_padding[template_top_padding:template_top_padding + template_origin_height, template_left_padding:template_left_padding + template_origin_width, :] = template_img
            if template_top_padding:
                template_with_padding[0:template_top_padding, template_left_padding:template_left_padding + template_origin_width, :] = img_mean
            if template_bottom_padding:
                template_with_padding[template_origin_height + template_top_padding:, template_left_padding:template_left_padding + template_origin_width, :] = img_mean
            if template_left_padding:
                template_with_padding[:, 0:template_left_padding, :] = img_mean
            if template_right_padding:
                template_with_padding[:, template_origin_width + template_left_padding:, :] = img_mean
            new_template_img_padding = template_with_padding
        else:
            new_template_img_padding = template_img

        # crop
        tl = int(template_target_cx + template_left_padding - template_square_size // 2)
        tt = int(template_target_cy + template_top_padding - template_square_size // 2)
        template_cropped = new_template_img_padding[tt:tt+template_square_size, tl:tl+template_square_size, :]

        dl = int(detection_target_cx_random + detection_left_padding - detection_square_size // 2)
        dt = int(detection_target_cy_random + detection_top_padding - detection_square_size // 2)
        detection_cropped = new_detection_img_padding[dt:dt+detection_square_size, dl:dl+detection_square_size, :]

        detection_tlcords_of_padding_image = (
            detection_target_cx_random - detection_square_size // 2 + detection_left_padding,
            detection_target_cy_random - detection_square_size // 2 + detection_top_padding
        )
        detection_rbcords_of_padding_image = (
            detection_target_cx_random + detection_square_size // 2 + detection_left_padding,
            detection_target_cy_random + detection_square_size // 2 + detection_top_padding
        )
        # resize
        template_cropped_resized = cv2.resize(template_cropped, (127, 127))
        detection_cropped_resized = cv2.resize(detection_cropped, (255, 255))
        detection_cropped_resized_ratio = round(255. / detection_square_size, 2)

        target_tlcords_of_padding_image = np.array(
            [detection_target_cx + detection_left_padding - detection_target_w // 2,
             detection_target_cy + detection_top_padding - detection_target_h // 2])
        target_rbcords_of_padding_image = np.array(
            [detection_target_cx + detection_left_padding + detection_target_w // 2,
             detection_target_cy + detection_top_padding + detection_target_h // 2])

        x11, y11 = detection_tlcords_of_padding_image
        x12, y12 = detection_rbcords_of_padding_image
        x21, y21 = target_tlcords_of_padding_image
        x22, y22 = target_rbcords_of_padding_image

        # Calculate target's relative coordinates with respect to the padded detection image
        x1_of_d, y1_of_d, x3_of_d, y3_of_d = x21 - x11, y21 - y11, x22 - x11, y22 - y11
        x1 = np.clip(x1_of_d, 0, x12 - x11)
        y1 = np.clip(y1_of_d, 0, y12 - y11)
        x2 = np.clip(x3_of_d, 0, x12 - x11)
        y2 = np.clip(y3_of_d, 0, y12 - y11)

        cords_in_cropped_detection = np.array([x1, y1, x2, y2])
        cords_in_cropped_resized_detection = (cords_in_cropped_detection * detection_cropped_resized_ratio).astype(np.int32)

        x1, y1, x2, y2 = cords_in_cropped_resized_detection
        cx, cy, w, h = (x1 + x2) // 2, (y1 + y2) // 2, x2 - x1, y2 - y1
        target_in_resized_detection_xywh = np.array([cx, cy, w, h])

        gt_box_in_detection = target_in_resized_detection_xywh
        pos, neg = self.anchor.pos_neg_anchor(gt_box_in_detection)
        diff = self.anchor.diff_anchor_gt(gt_box_in_detection)
        pos, neg, diff = pos.reshape((-1, 1)), neg.reshape((-1, 1)), diff.reshape((-1, 4))
        class_target = np.zeros((self.anchor.gen_anchors().shape[0], 2)).astype(np.float32)

        # positive anchor
        pos_index = np.where(pos == 1)[0]
        pos_num = len(pos_index)
        if pos_num > 0:
            class_target[pos_index] = [1., 0.]

        # negative anchor
        neg_index = np.where(neg == 1)[0]
        class_target[neg_index] = [0., 1.]
        neg_num = len(neg_index)
        # draw pos and neg anchor box
        debug_img = detection_cropped_resized.copy()
        # cv2.rectangle(debug_img, (x1, y1), (x2, y2), (0, 255, 0), 1)

        for i in range(pos_num):
            index = pos_index[i]
            cx, cy, w, h = self.anchor.get_anchors()[index]
            x1, y1, x2, y2 = int(cx - w / 2), int(cy - h / 2), int(cx + w / 2), int(cy + h / 2)
            cv2.rectangle(debug_img, (x1, y1), (x2, y2), (255, 0, 0), 1)

        for i in range(neg_num):
            index = neg_index[i]
            cx, cy, w, h = self.anchor.get_anchors()[index]
            x1, y1, x2, y2 = int(cx - w / 2), int(cy - h / 2), int(cx + w / 2), int(cy + h / 2)
            cv2.rectangle(debug_img, (x1, y1), (x2, y2), (0, 255, 0), 1)
        save_path = '/home/seok/data/pos_neg_anchor_{}.jpg'.format(id)
        save_image(debug_img, save_path)

        pos_neg_diff = np.hstack((class_target, diff))
        template_image = normalize(template_cropped_resized)
        detection_image = normalize(detection_cropped_resized)

        tem_img = template_cropped_resized.copy()
        de_img = detection_cropped_resized.copy()

        xx = center_to_corner(gt_box_in_detection)
        de_img = cv2.rectangle(de_img, (xx[0], xx[1]), (xx[2], xx[3]), (255, 0, 0), 3)

        save_path = './template.jpg'
        save_image(tem_img, save_path)

        save_path = '/home/seok/data/detection_{}.jpg'.format(id)
        save_image(de_img, save_path)

        gt_objectness = pos_neg_diff[:, :2].reshape(self.score_size, self.score_size, -1)
        gt_regression = pos_neg_diff[:, 2:].reshape(self.score_size, self.score_size, -1)
        return template_image, detection_image, gt_objectness, gt_regression
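The square crop sizes computed near the top of this method follow the SiamFC-style context padding, p = (w + h)/2 and s = sqrt((w + p)(h + p)); a standalone check of the arithmetic with an assumed 100x80 target box:

import numpy as np

# Illustrative values only; the 100x80 target box is an assumption.
w, h = 100, 80
p = (w + h) // 2                                         # context margin: 90
template_square_size = int(np.sqrt((w + p) * (h + p)))   # int(sqrt(190 * 170)) = 179
detection_square_size = template_square_size * 2         # 358
print(template_square_size, detection_square_size)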
Example #6
    def __getitem__(self, idx):
        if self.mode in ['train', 'val']:
            filename = self.filenames[idx]
            # mask = self.masks[idx].astype(np.float32)
            # m_weight = self.m_weight[idx]
            # mask: [num_class, D, H, W]
            mask = self.load_mask(filename)
            mask = mask.astype(np.float32)

            # imgs: original CT, [D, H, W]
            # Add one more channel dimension, [1, D, H, W]
            imgs, _ = nrrd.read(
                os.path.join(self.data_dir, '%s_clean.nrrd' % (filename)))
            imgs = self.truncate_image(imgs)
            imgs = imgs[np.newaxis, ...].astype(np.float32)

            # # You will need to do this augmentation if using MICCAI15 data
            # if self.mode in ['train'] and np.random.randint(2, size=1).item():
            #     imgs = np.flip(imgs, 3).copy()
            #     mask = np.flip(mask, 3).copy()
            #     new_mask = mask.copy()
            #     new_mask[13] = mask[14] # optical nerve l
            #     new_mask[14] = mask[13] # optical nerve r
            #     new_mask[16] = mask[17] # parotid r
            #     new_mask[17] = mask[16] # parotid l
            #     new_mask[18] = mask[19] # SMG r
            #     new_mask[19] = mask[18] # SMG l
            #     mask = new_mask

            # Crop the CT image, according to
            # 1) the center of the imgs,
            # 2) limit D, H, W, with a maximum size specified by train_max_crop_size
            #
            # TODO: Delete do_scale, do_rotate. The elastic_transform_all has take all these into account
            input, masks = self.crop(imgs, mask, do_jitter=True)

            # Normalize the input
            input = normalize(input)

            # In training mode, and if do_elastic, then 50% of the time perform affine and elastic
            # transform to the input image
            if (self.mode == 'train' and self.config['do_elastic']
                    and np.random.randint(2, size=1).item()):
                input, masks = elastic_transform_all(input, masks)

            # Mask to bounding box, the last column of bboxes is the class
            bboxes, truth_masks = masks2bboxes_masks(
                masks, border=self.config['bbox_border'])
            truth_masks = np.array(truth_masks).astype(np.uint8)
            bboxes = np.array(bboxes)

            # This should never happen; if it does, log the offending sample
            if not len(bboxes):
                print(filename, input.shape)

            # class label for each bounding box
            truth_labels = bboxes[:, -1]

            # [z, y, x, d, h, w] for each bounding box
            truth_bboxes = bboxes[:, :-1]

            return [
                torch.from_numpy(input).float(), truth_bboxes, truth_labels,
                truth_masks, masks
            ]

        elif self.mode in ['eval']:
            filename = self.filenames[idx]

            # Load OAR masks
            mask = self.load_mask(filename)

            # Load original CT image
            original_img, _ = nrrd.read(
                os.path.join(self.data_dir, '%s_clean.nrrd' % (filename)))
            imgs = original_img.copy()

            # pad the CT image, so that it can fit the downsampling
            imgs = pad2factor(imgs)
            imgs = imgs[np.newaxis, ...].astype(np.float32)

            input = normalize(imgs)
            original_img = (normalize(original_img) + 1) / 2

            # Mask to bounding box, the last column of bboxes is the class
            bboxes, truth_masks = masks2bboxes_masks(
                mask, border=self.config['bbox_border'])
            truth_masks = np.array(truth_masks).astype(np.uint8)
            bboxes = np.array(bboxes)
            truth_labels = bboxes[:, -1]
            truth_bboxes = bboxes[:, :-1]

            return [
                torch.from_numpy(input).float(), truth_bboxes, truth_labels,
                truth_masks, mask, original_img
            ]
        elif self.mode in ['test']:
            filename = self.filenames[idx]
            original_img = np.load(
                os.path.join(self.data_dir, '%s_clean.npy' % (filename)))

            imgs = original_img.copy()
            imgs = imgs[np.newaxis, ...].astype(np.float32)
            imgs = pad2factor(imgs)

            input = normalize(imgs)

            return [torch.from_numpy(input).float(), original_img]
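A brief sketch of unpacking one sample in 'train'/'val' mode; `dataset` is an assumed instance of this class, and since each item mixes a tensor with variable-length numpy arrays, a custom collate_fn would be needed to batch it with a PyTorch DataLoader.

# Hypothetical usage; `dataset` is an assumed instance in 'train' mode.
input, truth_bboxes, truth_labels, truth_masks, masks = dataset[0]
print(input.shape)          # [1, D, H, W] cropped, normalized CT
print(truth_bboxes.shape)   # [num_boxes, 6]: z, y, x, d, h, w per box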
import json
import os

import numpy as np
import torch


def test_pixel(model, test_loader_pixel, device, vlog, elog, image_size, batch_size, log_var_std):
    model.eval()

    test_loss = []
    kl_loss = []
    rec_loss = []

    pixel_class = []
    pixel_rec_err = []
    pixel_grad_all = []
    pixel_grad_kl = []
    pixel_grad_rec = []
    pixel_combi_err = []

    with torch.no_grad():
        for i, data in enumerate(test_loader_pixel):
            inpt = data["data"][0].float().to(device)
            seg = data["seg"].float()[0, :, 0]
            seg_flat = seg.flatten() > 0.5
            pixel_class += seg_flat.tolist()

            recon_batch, mu, logstd = model(inpt)

            loss, kl, rec = loss_function(recon_batch, inpt, mu, logstd, log_var_std, sum_samplewise=False)
            rec = rec.detach().cpu()
            pixel_rec_err += rec.flatten().tolist()

            def __err_fn_all(x, loss_idx=0):  # loss_idx 0: elbo, 1: kl part, 2: rec part
                outpt = model(x)
                recon_batch, mu, logstd = outpt
                loss = loss_function(recon_batch, x, mu, logstd, log_var_std)
                return torch.mean(loss[loss_idx])

            with torch.enable_grad():
                loss_grad_all = get_inpt_grad(model=model, inpt=inpt, err_fn=lambda x: __err_fn_all(x, 0),
                                              ).detach().cpu()
                loss_grad_kl = get_inpt_grad(model=model, inpt=inpt, err_fn=lambda x: __err_fn_all(x, 1),
                                             ).detach().cpu()
                loss_grad_rec = get_inpt_grad(model=model, inpt=inpt, err_fn=lambda x: __err_fn_all(x, 2),
                                              ).detach().cpu()

            pixel_grad_all += smooth_tensor(loss_grad_all).flatten().tolist()
            pixel_grad_kl += smooth_tensor(loss_grad_kl).flatten().tolist()
            pixel_grad_rec += smooth_tensor(loss_grad_rec).flatten().tolist()

            pixel_combi_err += (smooth_tensor(normalize(loss_grad_kl)) * rec).flatten().tolist()

    kl_normalized = np.asarray(pixel_grad_kl)
    kl_normalized = (kl_normalized - np.min(kl_normalized)) / (np.max(kl_normalized) - np.min(kl_normalized))
    rec_normalized = np.asarray(pixel_rec_err)
    rec_normalized = (rec_normalized - np.min(rec_normalized)) / (np.max(rec_normalized) - np.min(rec_normalized))
    combi_add = kl_normalized + rec_normalized

    rec_err_roc, rec_err_pr = elog.get_classification_metrics(pixel_rec_err, pixel_class)[0]
    grad_all_roc, grad_all_pr = elog.get_classification_metrics(pixel_grad_all, pixel_class)[0]
    grad_kl_roc, grad_kl_pr = elog.get_classification_metrics(pixel_grad_kl, pixel_class)[0]
    grad_rec_roc, grad_rec_pr = elog.get_classification_metrics(pixel_grad_rec, pixel_class)[0]
    pixel_combi_roc, pixel_combi_pr = elog.get_classification_metrics(pixel_combi_err, pixel_class)[0]
    add_combi_roc, add_combi_pr = elog.get_classification_metrics(combi_add, pixel_class)[0]

    rec_err_dice, reconst_thres = find_best_val(pixel_rec_err, pixel_class, calc_hard_dice, max_steps=8,
                                                val_range=(0, np.max(pixel_rec_err)))
    grad_kl_dice, grad_kl_thres = find_best_val(pixel_grad_kl, pixel_class, calc_hard_dice, max_steps=8,
                                                val_range=(0, np.max(pixel_grad_kl)))
    pixel_combi_dice, pixel_combi_thres = find_best_val(pixel_combi_err, pixel_class, calc_hard_dice, max_steps=8,
                                                        val_range=(0, np.max(pixel_combi_err)))
    add_combi_dice, _ = find_best_val(combi_add, pixel_class, calc_hard_dice, max_steps=8,
                                      val_range=(0, np.max(combi_add)))

    with open(os.path.join(elog.result_dir, "pixel.json"), "a+") as file_:
        json.dump({
            "rec_err_roc": rec_err_roc, "rec_err_pr": rec_err_pr,
            "grad_all_roc": grad_all_roc, "grad_all_pr": grad_all_pr,
            "grad_kl_roc": grad_kl_roc, "grad_kl_pr": grad_kl_pr,
            "grad_rec_roc": grad_rec_roc, "grad_rec_pr": grad_rec_pr,
            "pixel_combi_roc": pixel_combi_roc, "pixel_combi_pr": pixel_combi_pr,
            "rec_err_dice": rec_err_dice, "grad_kl_dice": grad_kl_dice, "pixel_combi_dice":
                pixel_combi_dice,

        }, file_, indent=4)
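The two min-max normalizations above repeat the same pattern; a small helper could factor it out. This is a sketch, not part of the original code:

import numpy as np

# Sketch only: one helper for the repeated min-max scaling.
def minmax(x):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.min()) / (x.max() - x.min())

combi_add = minmax(pixel_grad_kl) + minmax(pixel_rec_err)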