Code example #1
def make_submission(imgid_to_pred) -> pd.DataFrame:
    print('make submission')
    test_df_path = '../input/sample_submission.csv'
    labels = ['Fish', 'Flower', 'Gravel', 'Sugar']
    df_sub = pd.read_csv(test_df_path)
    df_sub = df_sub.drop(columns=['EncodedPixels'])

    ret_imgids = list()
    ret_rles = list()

    # Encode pred to rle for each channel
    for img_id, pred in tqdm(imgid_to_pred.items()):
        for i, label in enumerate(labels):
            img_id_tmp = f'{img_id}_{label}'
            ret_imgids.append(img_id_tmp)

            # Convert pred to RLE
            pred_tmp = pred[i]
            if pred_tmp.sum() < 1:
                ret_rles.append('')
            else:
                ret_rles.append(mask2rle(pred_tmp))

    # Image_Label, EncodedPixels
    df_tmp = pd.DataFrame({
        'Image_Label': ret_imgids,
        'EncodedPixels': ret_rles
    })
    df = df_sub.merge(df_tmp, on=['Image_Label'], how='left')
    return df
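
Every snippet on this page relies on a mask2rle helper that the examples themselves never define. The sketch below is a minimal, assumed implementation following the usual Kaggle convention (column-major pixel order, 1-indexed run starts written as "start length" pairs); the helper actually used by each project may differ in signature or convention.

import numpy as np

def mask2rle(img):
    """Encode a binary mask of shape (H, W) as a run-length string."""
    pixels = img.T.flatten()                     # column-major pixel order
    pixels = np.concatenate([[0], pixels, [0]])  # pad so mask edges become transitions
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]                      # turn run ends into run lengths
    return ' '.join(str(x) for x in runs)

An all-zero mask encodes to an empty string, which is why the examples append '' when a prediction has no positive pixels.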
Code example #2
    def make_submission(self, imgid_to_pred) -> pd.DataFrame:
        df_sub = pd.read_csv(self.test_df_path)
        df_sub = df_sub.drop(columns=['EncodedPixels'])

        ret_imgids = list()
        ret_rles = list()

        # Encode pred to rle for each channel
        for img_id, pred in tqdm(imgid_to_pred.items()):
            for i, label in enumerate(self.labels):
                img_id_tmp = f'{img_id}_{label}'
                ret_imgids.append(img_id_tmp)

                # Convert pred to RLE
                pred_tmp = pred[i]
                if pred_tmp.sum() < 1:
                    ret_rles.append('')
                else:
                    ret_rles.append(mask2rle(pred_tmp))

        # Image_Label, EncodedPixels
        df_tmp = pd.DataFrame({
            'Image_Label': ret_imgids,
            'EncodedPixels': ret_rles
        })
        df = df_sub.merge(df_tmp, on=['Image_Label'], how='left')
        return df
Code example #3
    def search_parameter(self):
        """Use Bayesian optimization to determine the threshold for each label."""
        # evaluate the network
        self.eval_net()

        # store the parameters
        self.dicPara = {}

        def cal_dice(thres_seg, size_seg, thres_after=0.2, thres_oth=None, size_oth=None):
            # thres_oth/size_oth are accepted so the optimizer can pass them for
            # output modes 1 and 2, but this simplified post-processing ignores them
            dice = 0.0
            for pred, true_rle in zip(preds, trues):
                # post process
                true = rle2mask(true_rle, self.args.width, self.args.height)
                pred = post_process_segment(pred, thres_seg, size_seg, thres_after)
                dice += dice_metric(true, pred)
            return dice / len(preds)

        preds, trues, others = [], [], []
        category = self.args.spec_cat

        # get the prediction and save it
        with torch.no_grad():
            for data in tqdm(self.dataloader):
                images, labels = data[0], data[1]
                images = images.permute(0,3,1,2).to(self.device)

                # flip and predict
                output_masks, output_labels = self.predict_flip_batch(images)
                # store the prediction
                for output_mask, label_true in zip(output_masks, labels):

                    true_mask = label_true[:,:,0].detach().numpy().astype(int)
                    trues.append(mask2rle(true_mask))
                    preds.append(output_mask[:,:,0])

            
            # use Bayesian optimization to determine the thresholds
            if self.args.output == 0:
                pbounds = {'thres_seg': (0.5, 0.7), 'size_seg': (500, 3000), 'thres_after': (0.1, 0.7)}
            elif self.args.output == 1:
                pbounds = {'thres_seg': (0.5, 0.7), 'size_seg': (500, 3000), 'thres_oth': (0.1, 0.7), 'size_oth': (500, 6000)}
            elif self.args.output == 2:
                pbounds = {'thres_seg': (0.5, 0.7), 'size_seg': (500, 3000), 'thres_oth': (0.1, 0.7), 'size_oth': (500, 6000)}
            optimizer = BayesianOptimization(f=cal_dice, pbounds=pbounds, random_state=1)
            # adjust the Bayesian optimization budget
            if self.args.test_run or self.args.epoch < 5:
                optimizer.maximize(init_points=5, n_iter=1)
            else:
                optimizer.maximize(init_points=100, n_iter=10)

            params = optimizer.max['params']
            self.dicPara['thres_seg'] = params['thres_seg']
            self.dicPara['size_seg'] = params['size_seg']
            # 'thres_after' is only searched when output == 0; fall back to the cal_dice default otherwise
            self.dicPara['thres_after'] = params.get('thres_after', 0.2)
        return
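
Example #3 also calls rle2mask and dice_metric, which the snippet does not include. The sketches below are assumptions that match the column-major convention used above; the real signatures vary between projects (example #12, for instance, passes a shape keyword instead of separate width and height arguments).

import numpy as np

def rle2mask(rle, width, height):
    """Decode a run-length string into a binary mask of shape (height, width)."""
    mask = np.zeros(width * height, dtype=np.uint8)
    if isinstance(rle, str) and rle.strip() and rle != '-1':
        s = rle.split()
        starts = np.asarray(s[0::2], dtype=int) - 1   # RLE run starts are 1-indexed
        lengths = np.asarray(s[1::2], dtype=int)
        for start, length in zip(starts, lengths):
            mask[start:start + length] = 1
    return mask.reshape(width, height).T              # back to (height, width), column-major

def dice_metric(true, pred, eps=1e-7):
    """Dice coefficient between two binary masks; returns 1.0 when both are empty."""
    true = np.asarray(true, dtype=bool)
    pred = np.asarray(pred, dtype=bool)
    if not true.any() and not pred.any():
        return 1.0
    intersection = np.logical_and(true, pred).sum()
    return 2.0 * intersection / (true.sum() + pred.sum() + eps)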
Code example #4
File: ensemble.py Project: chicm/clouds
def ensemble(args):
    #class_params = {0: (0.5, 25000), 1: (0.7, 15000), 2: (0.4, 25000), 3: (0.6, 10000)}
    models = create_models(args)
    class_params = find_class_params(args, models)
    #exit(0)

    test_loader = get_test_loader(args.encoder_types.split(',')[0], args.batch_size)
    probs, _ = predict_loader(models, test_loader)

    encoded_pixels, encoded_pixels_no_minsize = [], []
    image_id = 0
    for img_out in tqdm(probs):
        #runner_out = runner.predict_batch({"features": test_batch[0].cuda()})['logits']
        #for i, batch in enumerate(runner_out):
        for probability in img_out:
            
            #probability = probability.cpu().detach().numpy()
            if probability.shape != (350, 525):
                probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
            predict, num_predict = post_process(probability, class_params[image_id % 4][0], class_params[image_id % 4][1])
            if num_predict == 0:
                encoded_pixels.append('')
            else:
                r = mask2rle(predict)
                encoded_pixels.append(r)

            predict2, num_predict2 = post_process(probability, class_params[image_id % 4][0], 0)
            if num_predict2 == 0:
                encoded_pixels_no_minsize.append('')
            else:
                r2 = mask2rle(predict2)
                encoded_pixels_no_minsize.append(r2)

            image_id += 1

    sub = pd.read_csv(os.path.join(settings.DATA_DIR, 'sample_submission.csv'))

    sub['EncodedPixels'] = encoded_pixels
    sub.to_csv(args.out, columns=['Image_Label', 'EncodedPixels'], index=False)

    sub['EncodedPixels'] = encoded_pixels_no_minsize
    sub.to_csv(args.out+'_no_minsize', columns=['Image_Label', 'EncodedPixels'], index=False)
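
The post_process helper used in this ensemble (and again in examples #10, #14 and #15) is not shown either. A typical sketch is given below, assuming it only thresholds the probability map and drops connected components smaller than min_size; the real implementations differ in detail (example #14 also passes an output shape, example #16 adds CRF and dilation options).

import cv2
import numpy as np

def post_process(probability, threshold, min_size):
    """Threshold a probability map and keep only components larger than min_size.

    Returns the cleaned binary mask and the number of surviving components.
    """
    mask = (probability > threshold).astype(np.uint8)
    num_component, component = cv2.connectedComponents(mask)
    predictions = np.zeros(probability.shape, dtype=np.float32)
    num = 0
    for c in range(1, num_component):  # label 0 is the background
        p = (component == c)
        if p.sum() > min_size:
            predictions[p] = 1
            num += 1
    return predictions, num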
Code example #5
def generate_segmentation(cam, weights, test_df, th=0.3):
    test_df.loc[:,'pred_rle_fish'],  test_df.loc[:,'pred_rle_flower'], test_df.loc[:,'pred_rle_gravel'], test_df.loc[:,'pred_rle_sugar'] = "", "", "", ""
    test_df.loc[:,'pred_vec_fish'],  test_df.loc[:,'pred_vec_flower'], test_df.loc[:,'pred_vec_gravel'], test_df.loc[:,'pred_vec_sugar'] = 0, 0, 0, 0

    for i, idx in enumerate(test_df.index.values):
        img_path = IMG_PATH + idx + '.jpg'
        # calculate 4 masks
        for k, label in enumerate(LABELS):
            output, pred = get_rle_probs(cam, weights, img_path, k)
            test_df.loc[idx, "pred_rle_" + label] = mask2rle((output > th).astype(int))
            test_df.loc[idx, "pred_vec_" + label] = pred
    return test_df
Code example #6
File: test.py Project: Alf162/severstal
def make_predict(model):
    model.eval()
    predict = []
    for data in test_loader:
        data = data.cuda()
        output = model(data)
        output = output.cpu().detach().numpy() * (-1)
        img = np.copy(abs(output[0]))
        # binarize at 1.2x the mean activation
        mn = np.mean(img) * 1.2
        img[img <= mn] = 0
        img[img > mn] = 1
        # resize back to the original Severstal resolution and encode as RLE
        img = cv2.resize(img[0], (1600, 256))
        predict.append(mask2rle(img))
    return predict
Code example #7
def predict(**kwargs):
    model_vals = kwargs['model_vals']
    model = load_model('model.pth')
    model.eval()
    predict = []
    test_loader = model_vals.xcom_pull(key='test_loader',
                                       task_ids='prepare_data')
    for data in test_loader:
        data = data.cuda()
        output = model(data)
        output = output.cpu().detach().numpy() * (-1)
        img = np.copy(abs(output[0]))
        mn = np.mean(img) * 1.2
        img[img <= mn] = 0
        img[img > mn] = 1
        img = cv2.resize(img[0], (1600, 256))
        predict.append(mask2rle(img))
    kwargs['model_vals'].xcom_push(key='predict_arr', value=predict)
Code example #8
def save_segmentation(cam, weights, num, path, thresholds=[0.8, 0.5, 0.7, 0.7]):
    th = thresholds
    for img_idx in test_df.index:
        img_name = IMG_PATH + img_idx + ".jpg"
        # while IMG_LIST[k].split(".")[0] not in test_df.index: k = np.random.randint(0, len(IMG_LIST))
        img = cv2.resize(cv2.imread(img_name), (512, 352))
        for label_idx in [0,1,2,3]:
            mask_pred, probs = get_rle_probs(cam, weights, img_name, label_idx)
            label = LABELS[label_idx]
            rle_true = data_df.loc[img_idx.split('.')[0], "rle_" + label]
            rle_pred = mask2rle((mask_pred > th[label_idx]).astype(int))
            mask_true = rle2mask(rle_true)[::4,::4]

            skimage.io.imsave(path + img_idx + "_pred_" + label + ".jpg", (mask_pred > th[label_idx]).astype("uint8")*100)
            skimage.io.imsave(path + img_idx + "_heat_" + label + ".jpg", (mask_pred*100).astype("uint8"))
            skimage.io.imsave(path + img_idx + "_true_" + label + ".jpg", mask_true*100)
            dice = dice_coef(rle_true, rle_pred, probs, th[label_idx])
            print("Dice = " + str(np.round(dice,3)))
Code example #9
def mask_ensemble_csv(csvs):
    sample_sub_path = '/data/Clouds_Classify/sample_submission.csv'
    sample_sub = pd.read_csv(sample_sub_path)

    sample_sub['label'] = sample_sub['Image_Label'].apply(
        lambda x: x.split('_')[1])
    sample_sub['im_id'] = sample_sub['Image_Label'].apply(
        lambda x: x.split('_')[0])

    image_name_list = np.unique(sample_sub['im_id'].values).tolist()

    sub_list = []
    for csv_path in csvs:
        sub = pd.read_csv(csv_path)
        sub['im_id'] = sub['Image_Label'].apply(lambda x: x.split('_')[0])
        sub_list.append(sub)

    encoded_pixels = []
    for index, image_name in enumerate(tqdm.tqdm(image_name_list)):
        # image = utils.get_img(image_name, file_name='test_images')
        mask_sum = np.zeros((350, 525, 4), dtype=np.float32)
        for sub in sub_list:
            mask = utils.make_mask(sub,
                                   image_name=image_name,
                                   shape=(350, 525))  # [H, W, 4]
            mask_sum += mask
        ensemble_mask = np.where(mask_sum < len(sub_list) // 2 + 1, 0, 1)
        # utils.visualize(image_name, image, ensemble_mask)
        for i in range(4):
            rle = utils.mask2rle(ensemble_mask[:, :, i])
            encoded_pixels.append(rle)

    sample_sub['EncodedPixels'] = encoded_pixels
    sample_sub.to_csv('./sub/tta_ensemble_submission_5unet_3fpn_1resnet34.csv',
                      columns=['Image_Label', 'EncodedPixels'],
                      index=False)
Code example #10
def predict(args):
    #model = create_model(args.encoder_type, ckp=args.ckp).cuda()
    #model = nn.DataParallel(model)
    #runner = SupervisedRunner(model=model)
    class_params, runner = find_class_params(args)
    #runner = create_runner(args)

    test_loader = get_test_loader(args.encoder_type, args.batch_size)

    loaders = {"test": test_loader}

    encoded_pixels = []
    image_id = 0
    for test_batch in tqdm(loaders['test']):
        runner_out = runner.predict_batch(
            {"features": test_batch[0].cuda()})['logits']
        for batch in runner_out:
            for probability in batch:

                probability = probability.cpu().detach().numpy()
                if probability.shape != (350, 525):
                    probability = cv2.resize(probability,
                                             dsize=(525, 350),
                                             interpolation=cv2.INTER_LINEAR)
                predict, num_predict = post_process(
                    sigmoid(probability), class_params[image_id % 4][0],
                    class_params[image_id % 4][1])
                if num_predict == 0:
                    encoded_pixels.append('')
                else:
                    r = mask2rle(predict)
                    encoded_pixels.append(r)
                image_id += 1

    sub = pd.read_csv(os.path.join(settings.DATA_DIR, 'sample_submission.csv'))
    sub['EncodedPixels'] = encoded_pixels
    sub.to_csv(args.out, columns=['Image_Label', 'EncodedPixels'], index=False)
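
Example #10 (and example #15 below) also applies a sigmoid helper before post-processing; presumably this is just the element-wise logistic function, sketched here as an assumption:

import numpy as np

def sigmoid(x):
    """Element-wise logistic sigmoid."""
    return 1.0 / (1.0 + np.exp(-x))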
Code example #11
    augment_test = Compose([
        Normalize(mean=(test_mean, test_mean, test_mean),
                  std=(test_std, test_std, test_std)),
        ToFloat(max_value=1.)
    ], p=1)

    ########################################################################
    # do some simple checking
    if args.test_run:
        # check rle2mask and mask2rle
        mask_df = pd.read_csv(TRAIN_MASKS).set_index(['ImageId_ClassId']).fillna('-1')
        for i, pixel in enumerate(mask_df['EncodedPixels']):
            if pixel != '-1':
                rle_pass = mask2rle(rle2mask(pixel, 1600, 256))
                if rle_pass != pixel:
                    print(i)  # report rows where the RLE round trip fails

        # check dataloader
        steel_ds = SteelDataset(TRAIN_FILES, args, mask_df=mask_df)
        steel_ds_train = SteelDataset(TRAIN_FILES,
                                      args,
                                      mask_df=mask_df,
                                      augment=augment_train)
        steel_ds_valid = SteelDataset(VALID_FILES,
                                      args,
                                      mask_df=mask_df,
                                      augment=augment_valid)
        res = steel_ds_train[1]
        image, mask = res[0], res[1]
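
As a usage note, the RLE round-trip check from example #11 can be reproduced with the mask2rle / rle2mask sketches given after examples #1 and #3 (synthetic data, purely illustrative; it assumes those sketches are in scope):

import numpy as np

# build a small synthetic mask at the Severstal resolution (256 x 1600)
mask = np.zeros((256, 1600), dtype=np.uint8)
mask[10:20, 100:200] = 1

rle = mask2rle(mask)
assert np.array_equal(rle2mask(rle, 1600, 256), mask), 'RLE round trip failed'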
Code example #12
original_size = (1400, 2100)
new_size = (350, 525)  # ResNet
interpolation = cv2.INTER_CUBIC

df = pd.read_csv(os.path.join(dataset_dir, 'train.csv'))

print('update train.csv ...')

# resize the mask regions to the new size (same aspect ratio)
for idx, row in tqdm(df.iterrows()):
    encoded_pixels = row['EncodedPixels']
    if pd.notna(encoded_pixels):
        mask = rle2mask(encoded_pixels, shape=original_size[::-1])
        mask = cv2.resize(mask, new_size[::-1], interpolation=interpolation)

        rle = mask2rle(mask)
        df.at[idx, 'EncodedPixels'] = rle

df.to_csv(os.path.join(dataset_dir, '350_525_train.csv'), index=False)


# Resizing Train and Test Images

train_images_dir = os.path.join(dataset_dir, 'train_images')
train_image_files = os.listdir(train_images_dir)

test_images_dir = os.path.join(dataset_dir, 'test_images')
test_image_files = os.listdir(test_images_dir)

preprocessed_train_image_dir = os.path.join(processed_dataset_dir, 'train_images')
preprocessed_test_image_dir = os.path.join(processed_dataset_dir, 'test_images')
Code example #13
File: test.py Project: polmonroig/cloud_segmentation
shape = (1400, 2100, 3)
test_dataset = ImageDataset(utils.TEST_IMAGES, os.listdir(utils.TEST_IMAGES),
                            None, transforms, shape, True)
batch_size = 1
data_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=4)

encodes = [["Image_Label", "EncodedPixels"]]
for i, data in enumerate(data_loader):
    image, path = data
    image = image.to(device)
    out = model(image.view(-1, 3, 350, 525))
    out = out.cpu().detach().numpy()
    print(str(i) + "/" + str(len(os.listdir(utils.TEST_IMAGES))))
    plt.imshow(utils.conv_image(image[0]))
    plt.show()
    for mask, cat in zip(out[0], utils.CLASSES):
        current_name = path[0] + "_" + cat
        mask, n_masks = utils.post_process(mask, 0.65, 10000)
        if n_masks != 0:
            encodes.append([current_name, utils.mask2rle(mask)])
        else:
            encodes.append([current_name, ""])

with open("submission.csv", 'w', newline='') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    for row in encodes:
        wr.writerow(row)
Code example #14
    batch_pred_masks = (batch_pred_masks1 + batch_pred_masks2 +
                        batch_pred_masks3) / 3
    # Predicted output shape is (320, 480, 4)
    # 4 = 4 classes: Fish, Flower, Gravel, Sugar

    for j, idx in enumerate(batch_idx):
        # Prediction for the j-th image in the batch
        pred_masks = batch_pred_masks[j]

        for k in range(pred_masks.shape[-1]):
            pred_mask = pred_masks[..., k].astype('float32')

            if pred_mask.shape != (350, 525):
                pred_mask = cv2.resize(pred_mask,
                                       dsize=(525, 350),
                                       interpolation=cv2.INTER_LINEAR)

            pred_mask, num_predict = post_process(pred_mask, threshold,
                                                  min_size[k], (350, 525))

            if num_predict == 0:
                encoded_pixels.append('')
            else:
                r = mask2rle(pred_mask)
                encoded_pixels.append(r)

# Submission
sub_df['EncodedPixels'] = encoded_pixels
sub_df.to_csv('submission.csv',
              columns=['Image_Label', 'EncodedPixels'],
              index=False)
Code example #15
    2: (0.7, 10000),
    3: (0.6, 10000)
}

encoded_pixels = []
image_id = 0
for test_batch in tqdm.tqdm(test_loader):
    test_batch = {"features": test_batch[0].to(DEVICE)}
    output = model(test_batch["features"])
    for batch in output:
        for probability in batch:

            probability = probability.cpu().detach().numpy()
            if probability.shape != (350, 525):
                probability = cv2.resize(probability,
                                         dsize=(525, 350),
                                         interpolation=cv2.INTER_LINEAR)
            predict, num_predict = utils.post_process(
                sigmoid(probability), class_params[image_id % 4][0],
                class_params[image_id % 4][1])
            if num_predict == 0:
                encoded_pixels.append('')
            else:
                r = utils.mask2rle(predict)
                encoded_pixels.append(r)
            image_id += 1

sub['EncodedPixels'] = encoded_pixels
sub.to_csv('submission.csv',
           columns=['Image_Label', 'EncodedPixels'],
           index=False)
Code example #16
def predict(best_threshold, min_size, device, transforms):
    test_dataset = SegmentationDataset(data_folder=config_main['path_to_data'], transforms=AUGMENTATIONS_TEST, phase='test')
    test_loader = DataLoader(test_dataset, batch_size=config['batch_size'], shuffle=False, num_workers=16, drop_last=False)

    models = []
    for weight in glob.glob(os.path.join(config['weights'], config['name'], 'cosine/') + "*.pth"):
        model = pydoc.locate(config['model'])(**config['model_params'])
        model.load_state_dict(torch.load(weight))
        model = model.to(device)
        model.eval()
        models.append(model)

    if len(config['cls_predict_test']) > 0:
        print("Use classification model results")
        cls_df = pd.read_csv(config['cls_predict_test'])
        cls_df['is_mask_empty'] = cls_df['EncodedPixels'].map(lambda x: 1 if x==0 else 0)
        cls_df.index = cls_df.Image_Label.values
        cls_df.drop_duplicates(inplace=True)
    else:
        cls_df = None

    predictions = []
    image_names = []

    with torch.no_grad():
        for i, batch in enumerate(tqdm(test_loader)):
            fnames = batch["filename"]
            images = batch["image"].to(device)
            batch_preds = np.zeros((images.size(0), 4, TRAIN_SHAPE[0], TRAIN_SHAPE[1]), dtype=np.float32)
            batch_preds_test_shape = np.zeros((images.size(0), 4, TEST_SHAPE[0], TEST_SHAPE[1]), dtype=np.float32)
            if config['type'] == 'crop':
                for model in models:
                    if config['TTA'] == 'true':
                        model = tta.SegmentationTTAWrapper(model, transforms)
                    tmp_batch_preds = np.zeros((images.size(0), 4, TRAIN_SHAPE[0], TRAIN_SHAPE[1]), dtype=np.float32)
                    for step in np.arange(0, TRAIN_SHAPE[1], 384)[:-1]:
                        tmp_pred = torch.sigmoid(model(images[:, :, :, step:step + 448])).cpu().numpy()
                        tmp_batch_preds[:, :, :, step:step + 448] += tmp_pred
                    tmp_batch_preds[:, :, :, 384:384 + 64] /= 2
                    tmp_batch_preds[:, :, :, 2 * 384:2 * 384 + 64] /= 2
                    tmp_batch_preds[:, :, :, 3 * 384:3 * 384 + 64] /= 2
                    batch_preds += tmp_batch_preds
            else:
                for model in models:
                    if config['TTA'] == 'true':
                        model = tta.SegmentationTTAWrapper(model, transforms)
                    batch_preds += torch.sigmoid(model(images)).cpu().numpy()
            batch_preds = batch_preds / len(models)

            for i in range(batch_preds.shape[0]):
                tmp = cv2.resize(np.moveaxis(batch_preds[i], 0, -1), (TEST_SHAPE[1], TEST_SHAPE[0]))
                batch_preds_test_shape[i] = np.moveaxis(tmp, -1, 0)

            for fname, preds in zip(fnames, batch_preds_test_shape):
                for cls, pred in enumerate(preds):
                    if cls_df is not None:
                        if cls_df.loc[fname + f"_{inv_map[cls]}"]['is_mask_empty'] == 1:
                            pred = np.zeros((TEST_SHAPE[0], TEST_SHAPE[1]))
                        else:
                            pred = post_process(pred, best_threshold, min_size, cls,
                                                use_dense_crf=config['use_dense_crf'],
                                                image=cv2.imread(test_dataset.images[i]) if config['use_dense_crf']=='true' else None,
                                                use_dilations=config['use_dilations'],
                                                use_poligonization=config['use_poligonization'])

                    else:
                        pred = post_process(pred, best_threshold, min_size, cls,
                                            use_dense_crf=config['use_dense_crf'],
                                            image=cv2.imread(test_dataset.images[i]) if config['use_dense_crf']=='true' else None,
                                            use_dilations=config['use_dilations'],
                                            use_poligonization=config['use_poligonization'])
                    rle = mask2rle(pred)
                    name = fname + f"_{inv_map[cls]}"
                    image_names.append(name)
                    predictions.append(rle)

    df = pd.DataFrame()
    df["Image_Label"] = image_names
    df["EncodedPixels"] = predictions
    df.to_csv(os.path.join(config['weights'], config['name'], "submission.csv"), index=False)
Code example #17
    def search_parameter(self):
        """Use Bayesian optimization to determine the thresholds for each label."""
        self.eval_net()

        # store the parameters
        self.dicPara = {}

        def cal_dice(thres_seg, size_seg, thres_after=0.2, thres_oth=-float('inf'), size_oth=0):
            # thres_after defaults to 0.2 (an assumed value) when it is not part of the search space
            ipos = 0
            dice = 0.0
            if self.args.use_weight:
                nnormal = self.nweight
            else:
                nnormal = len(self.dataloader.dataset)

            for pred, other, true_rle in zip(preds, others, trues):
                # post process
                true = rle2mask(true_rle, self.args.width, self.args.height)
                pred = post_process_single(pred, other, thres_seg, size_seg, thres_after, thres_oth, size_oth)
                if self.args.use_weight:
                    dice += dice_metric(true, pred)*self.weight[ipos]
                else:
                    dice += dice_metric(true, pred)
                ipos += 1

            return dice/nnormal

        preds, trues, others = [], [], []
        bfirst = True
        categories = [0,1,2,3]

        for category in categories:
            ipos = 0
            # get the prediction and save it
            with torch.no_grad():
                for data in tqdm(self.dataloader):
                    images, labels = data[0], data[1]
                    for image_raw, label_raw in zip(images, labels):
                        # flip and predict
                        output_merge, output_other = self.predict_flip(image_raw, category)
                        true_mask = label_raw[:,:,category].detach().numpy().astype(int)
                        if bfirst:
                            trues.append(mask2rle(true_mask))
                            preds.append(output_merge)
                            others.append(output_other)
                        else:
                            trues[ipos] = mask2rle(true_mask)
                            preds[ipos] = output_merge
                            others[ipos] = output_other
                        ipos += 1
            bfirst = False
            
            # use Bayesian optimization to determine the thresholds
            if self.args.output == 0:
                pbounds = {'thres_seg': (0.5, 0.7), 'size_seg': (500, 6000)}
            elif self.args.output == 1:
                pbounds = {'thres_seg': (0.5, 0.7), 'size_seg': (500, 6000),
                           'thres_oth': (0.2, 0.7), 'size_oth': (500, 6000)}
            elif self.args.output == 2:
                pbounds = {'thres_seg': (0.5, 0.7), 'size_seg': (1000, 3000),
                           'thres_after': (0.1, 0.5), 'thres_oth': (0.3, 0.7), 'size_oth': (500, 3000)}
            optimizer = BayesianOptimization(f=cal_dice, pbounds=pbounds, random_state=1)
            # adjust the Bayesian optimization budget
            if self.args.test_run or self.args.epoch < 5:
                optimizer.maximize(init_points=15, n_iter=1)
            else:
                optimizer.maximize(init_points=300, n_iter=100)

            params = optimizer.max['params']
            self.dicPara['thres_seg{:d}'.format(category+1)] = params['thres_seg']
            self.dicPara['size_seg{:d}'.format(category+1)] = params['size_seg']
            # 'thres_after' is only searched when output == 2; use the cal_dice default otherwise
            self.dicPara['thres_after{:d}'.format(category+1)] = params.get('thres_after', 0.2)
            if self.args.output > 0:
                self.dicPara['thres_oth{:d}'.format(category+1)] = params['thres_oth']
                self.dicPara['size_oth{:d}'.format(category+1)] = params['size_oth']
       
        print(self.dicPara)
        return
Code example #18
    def predict_dataloader(self, to_rle=False, fnames=None):
        if self.dicPara is None:
            self.search_parameter()

        if to_rle and fnames is None:
            raise ValueError('File names are not given.')
        # evaluate the net
        self.eval_net()
        
        dicPred = dict()
        for classid in range(self.args.category):
            dicPred['Class '+str(classid+1)] = []
            dicPred['Dice '+str(classid+1)] = []
            dicPred['True '+str(classid+1)] = []
        dicSubmit = {'ImageId_ClassId':[], 'EncodedPixels':[]}
        dice, preds = 0.0, []
        diceW = 0.0
        ipos = 0
        def area_ratio(mask):
            return mask.sum()/self.args.height/self.args.width
        
            
        with torch.no_grad():
            for data in tqdm(self.dataloader):
                # load the data
                images, labels = data[0].to(self.device), data[1].to(self.device)
                images = images.permute(0, 3, 1, 2)

                output_masks, output_labels = self.predict_flip_batch(images)
                
                for output_mask, output_label, label_raw in zip(output_masks, output_labels, labels):
                    # using simple threshold and output the result
                    output_thres = post_process(output_mask, output_label, self.dicPara)
                    # transfer into the rles
                    # record the predicted labels
                    for category in range(self.args.category):
                        # to rle if required
                        if to_rle:
                            fname = fnames[ipos]
                            fname_short = fname.split('/')[-1]+'_{:d}'.format(category+1)
                            dicSubmit['ImageId_ClassId'].append(fname_short)
                            rle = mask2rle(output_thres[:,:,category])
                            dicSubmit['EncodedPixels'].append(rle)
                        dicPred['Class {:d}'.format(category+1)].append(area_ratio(output_thres[:,:,category]))
                        
                        if not self.isTest:
                            dice_cat = dice_metric(label_raw[:,:,category].detach().cpu().numpy(), output_thres[:,:,category])
                            dicPred['Dice {:d}'.format(category+1)].append(dice_cat)
                            dicPred['True {:d}'.format(category+1)].append(area_ratio(label_raw[:,:,category].detach().cpu().numpy()))
                            # add to the final dice
                            # print(self.weight.shape, ipos)
                            dice  += dice_cat
                            diceW += dice_cat*self.weight[ipos]                            
                    ipos += 1
        
        keys = [key for key in dicPred.keys()]
        for key in keys:
            if len(dicPred[key]) == 0:
                dicPred.pop(key, None)

        # regularize result
        diceW =  diceW/self.nweight/self.args.category
        dice  =  dice/len(self.dataloader.dataset)/self.args.category
        print("Weighted Dice {:.4f}\t Unweighted Dice {:.4f}".format(diceW, dice))

        return dice, dicPred, dicSubmit