def generate_prob_map_hr(cfg, file_name, sorted_abnormal_patches, model_hr,
                         time, patch_out_size, b_map, p_map):
    """Run the high-resolution classifier over the pre-sorted abnormal
    patches and write its predictions into the binary / probability maps.

    `time` is a shared TimeMeter-like object (has .value()) passed in by
    the caller so the elapsed-time prints stay on one clock.
    Returns the updated (b_map, p_map) pair.
    """
    timer = time
    slide = extract_patch_fun.single_img_process(file_name, None, None, None,
                                                 False)
    dataset = dataloader_fun.gm_cls_DataLoader_hr(sorted_abnormal_patches,
                                                  slide._img, 0)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=cfg.gm_batch_size,
                                         shuffle=False,
                                         num_workers=cfg.gm_work_num)

    print('start inference the model')
    probs = _get_label_prob(loader, model_hr)
    print('Done! %.4fs' % timer.value())

    # argmax over the two class columns gives the hard label;
    # column 1 is kept as the per-patch probability.
    labels = np.argmax(probs, axis=1)
    abnormal_probs = probs[:, 1]

    print('start fill the output into the map')
    b_map = _fill_list_into_map(sorted_abnormal_patches, b_map, labels,
                                patch_out_size)
    p_map = _fill_list_into_map(sorted_abnormal_patches, p_map,
                                abnormal_probs, patch_out_size)
    print('Done! %.4fs' % timer.value())

    return b_map, p_map
def generate_prob_map(cfg, model, file_name):
    """Build binary and probability maps for one slide with the FCN model.

    Extracts the background mask, derives the output-map geometry via
    cal_new_len, runs inference over the tissue patches and fills the
    predictions back into the maps.
    Returns (resized_img, b_map, p_map).
    """
    timer = timemeter.TimeMeter()
    slide = extract_patch_fun.single_img_process(file_name, None, None, None,
                                                 False)

    print('start extract background ')
    img, mask = slide._generate_img_bg_mask()
    print('Done! %.4fs' % timer.value())

    size_raw = slide._img.level_dimensions[0]
    # Ratio between the raw slide height and the (smaller) mask height.
    mask_frac = size_raw[1] * 1.0 / mask.shape[0]

    # Output-map geometry derived from raw dimensions and window size.
    row = cal_new_len(size_raw[1])
    col = cal_new_len(size_raw[0])
    patch_out_size = cal_new_len(cfg.windows_size)
    size_out = (col, row)

    img = img.resize(size_out)

    b_map = np.zeros((row, col)).astype(np.uint8)
    p_map = np.zeros((row, col)).astype(np.float32)

    print('start get input list ')
    input_list = _get_input_list(mask, mask_frac, cfg.windows_size, size_raw,
                                 size_out, patch_out_size)
    print('Done! %.4fs' % timer.value())
    print('get %d input patch' % len(input_list))

    dataset = dataloader_fun.gm_fcn_DataLoader(input_list, slide._img,
                                               cfg.windows_size)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=cfg.gm_batch_size,
                                         shuffle=False,
                                         num_workers=cfg.gm_work_num)

    print('start inference the model')
    probs = _get_label_prob(loader, model)
    print('Done! %.4fs' % timer.value())
    # Hard label from argmax; column 1 kept as the abnormal probability.
    labels = np.argmax(probs, axis=1)
    abnormal_probs = probs[:, 1]

    print('start fill the output into the map')
    b_map = _fill_list_into_map(input_list, b_map, labels, patch_out_size)
    p_map = _fill_list_into_map(input_list, p_map, abnormal_probs,
                                patch_out_size)
    print('Done! %.4fs' % timer.value())

    return img, b_map, p_map
def patch_filter(cfg, model, file_name, level):
    """Low-resolution pass: score every tissue patch at pyramid `level`
    and rank the abnormal ones for the later high-resolution stage.

    Returns (img, sorted_abnormal_patches, timer, patch_out_size,
    b_map, p_map) — the timer and the still-empty maps are handed on
    to generate_prob_map_hr by the caller.
    """
    timer = timemeter.TimeMeter()
    slide = extract_patch_fun.single_img_process(file_name, None, None, None,
                                                 False)

    print('start extract background ')
    img, mask = slide._generate_img_bg_mask_ForTest()
    print('Done! %.4fs' % timer.value())

    mask_frac = slide._times_target_level_divide_rescaled_mask
    size_raw = slide._img.level_dimensions[level]
    scale_time = slide._rescaled_times

    patch_out_size = cal_new_len_lr(cfg.patch_size, mask_frac)
    row = cal_new_len_lr(size_raw[1], mask_frac)
    col = cal_new_len_lr(size_raw[0], mask_frac)

    # Empty maps; they get filled later by the high-resolution stage.
    b_map = np.zeros((row, col)).astype(np.uint8)
    p_map = np.zeros((row, col)).astype(np.float32)

    print('start get input list ')
    input_list = _get_input_list_lr(mask, cfg.patch_size, size_raw, scale_time,
                                    mask_frac, patch_out_size)
    print('Done! %.4fs' % timer.value())
    print('get %d input patch' % len(input_list))

    dataset = dataloader_fun.gm_cls_DataLoader(input_list, slide._img,
                                               cfg.patch_size, level)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=cfg.gm_batch_size,
                                         shuffle=False,
                                         num_workers=cfg.gm_work_num)

    print('start inference the model')
    sorted_abnormal_patches = _sort_abnormal_patches(loader, model, input_list)
    print('Done! %.4fs' % timer.value())

    return img, sorted_abnormal_patches, timer, patch_out_size, b_map, p_map
# ---- Example 4: alternate, stride-based implementation of generate_prob_map ----
def generate_prob_map(cfg, model, file_name):
    """Stride-based variant: downscale the slide by cfg.gm_stride, classify
    each patch with the model, and return (resized_img, b_map, p_map).

    NOTE(review): this redefines the generate_prob_map declared earlier in
    the file; at import time this later definition wins.
    """
    timer = timemeter.TimeMeter()
    slide = extract_patch_fun.single_img_process(file_name, None, None, None, False)

    print('start extract background ')
    img, mask = slide._generate_img_bg_mask()
    print('Done! %.4fs' % timer.value())

    size_raw = slide._img.level_dimensions[0]
    size_out = (int(size_raw[0] / cfg.gm_stride), int(size_raw[1] / cfg.gm_stride))
    # Effective downscale factor after the integer division above.
    frac = np.ceil(size_raw[0] / size_out[0])

    img = img.resize(size_out)
    mask = _np_resize(mask, size_out)

    b_map = np.zeros(mask.shape).astype(np.uint8)
    p_map = np.zeros(mask.shape).astype(np.float32)

    print('start get input list ')
    input_list = _get_input_list(cfg, mask, frac)
    print('Done! %.4fs' % timer.value())
    print('get %d input patch' % len(input_list))

    dataset = dataloader_fun.gm_cls_DataLoader(input_list, slide._img, cfg.patch_size)
    loader = torch.utils.data.DataLoader(dataset, batch_size=cfg.gm_batch_size,
                                         shuffle=False, num_workers=cfg.gm_work_num)

    print('start inference the model')
    probs = _get_label_prob(loader, model)
    print('Done! %.4fs' % timer.value())
    # Hard label from argmax; column 1 kept as the abnormal probability.
    labels = np.argmax(probs, axis=1)
    abnormal_probs = probs[:, 1]

    print('start fill the output into the map')
    b_map = _fill_list_into_map(input_list, b_map, labels)
    p_map = _fill_list_into_map(input_list, p_map, abnormal_probs)
    print('Done! %.4fs' % timer.value())

    return img, b_map, p_map