Code example #1
def anti_interp_2d_pack(image_block, info_dict: InfoDict, kernal_size=0):
    '''2d image anti-interpolation package: resize each slice back to the shape
    recorded before interpolation.
    :param image_block: 3d volume, axis order zyx
    :param info_dict: must carry info_dict.image_shape_before_interp and info_dict.interp_kind
    :param kernal_size: kernel size for median blurring of the interpolation result; 0 disables blurring
    :return image_anti_interp: volume resized to the pre-interpolation in-plane size, returned as np.float32
    :return info_dict: the info_dict, passed through unchanged
    '''

    if len(image_block.shape) != 3:
        # the input image has the wrong shape; exit with an error code
        Error.exit(ErrorCode.process_input_shape_error)

    if "image_shape_before_interp" not in info_dict:
        Error.exit(ErrorCode.process_module_error)

    raw_dtype = image_block.dtype
    image_block = check_for_cv2_dtype(image_block, raw_dtype)

    origin_x = info_dict.image_shape_before_interp[2]
    origin_y = info_dict.image_shape_before_interp[1]
    image_anti_interp = np.zeros((image_block.shape[0], origin_x, origin_y),
                                 np.float32)

    for i in range(image_block.shape[0]):
        image_one = interp_2d_yx(image_block[i, :, :], origin_x, origin_y,
                                 info_dict.interp_kind, kernal_size)
        image_anti_interp[i, :, :] = image_one

    return image_anti_interp, info_dict
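The snippet below is a minimal standalone sketch of the same restore-to-original-shape idea using only numpy and cv2 (no InfoDict or project helpers); the shapes, the random volume and the float32 conversion are illustrative assumptions, not taken from the codebase.

import cv2
import numpy as np

volume = np.random.randint(-1024, 2000, size=(4, 256, 256)).astype(np.int16)
shape_before_interp = (4, 512, 512)  # shape recorded before interpolation

restored = np.zeros(shape_before_interp, dtype=np.float32)
for i in range(volume.shape[0]):
    # cv2.resize expects the size as (width, height)
    restored[i] = cv2.resize(volume[i].astype(np.float32),
                             (shape_before_interp[2], shape_before_interp[1]),
                             interpolation=cv2.INTER_LINEAR)
print(restored.shape, restored.dtype)  # (4, 512, 512) float32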
Code example #2
def interp_2d_yx(image_2d,
                 row_size_new,
                 col_size_new,
                 kind=CV2_interp_type.linear,
                 kernal_size=0):
    '''2d image interpolation.
    :param image_2d: 2d image, axis order yx
    :param row_size_new: target number of rows (y)
    :param col_size_new: target number of columns (x)
    :param kind: interpolation method, cv2.INTER_LINEAR, cv2.INTER_NEAREST or cv2.INTER_CUBIC (slow)
    :param kernal_size: kernel size for median blurring of the interpolation result; 0 disables blurring
    :return: resized 2d image; its dtype matches image_2d
    '''
    if len(image_2d.shape) != 2:
        # the input image has the wrong shape; exit with an error code
        Error.exit(ErrorCode.process_input_shape_error)

    resize_slice = cv2.resize(image_2d, (col_size_new, row_size_new),
                              interpolation=kind)
    if kernal_size:
        # smoothes an image using the median filter
        image_new = cv2.medianBlur(resize_slice, kernal_size)
    else:
        image_new = resize_slice
    image_new = np.array(image_new, dtype=image_2d.dtype)

    return image_new
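As a quick standalone illustration of the resize-plus-optional-median-blur pattern above (the array and kernel size here are made up):

import cv2
import numpy as np

slice_2d = np.random.rand(256, 256).astype(np.float32)
# cv2.resize expects the size as (width, height), i.e. (cols, rows)
resized = cv2.resize(slice_2d, (512, 512), interpolation=cv2.INTER_LINEAR)
# median blurring of float32 data requires an odd kernel size of 3 or 5
smoothed = cv2.medianBlur(resized, 3)
print(resized.shape, smoothed.shape)  # (512, 512) (512, 512)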
Code example #3
def read_slice_w_filter(dcm_path):
    try:
        if not os.path.exists(dcm_path):
            Error.exit(ErrorCode.ld_ct_path_not_exist)
        scan = pydicom.dcmread(dcm_path, force=True)
    except PermissionError:
        return None

    return is_valid_image(scan)
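is_valid_image() is not shown above; as a hedged standalone sketch of what reading and filtering a single slice might look like without the project helpers, the PixelData check below is only a guess at the kind of validation being performed.

import os
import pydicom

def read_slice_standalone(dcm_path):
    """Hypothetical helper: return the dataset only if it can be read and carries image data."""
    if not os.path.exists(dcm_path):
        return None
    try:
        scan = pydicom.dcmread(dcm_path, force=True)
    except PermissionError:
        return None
    return scan if "PixelData" in scan else None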
Code example #4
def mask2file(mask_3d, info_dict, organ_name=None):
    """
    Compute the physical coordinates of a single ROI slice by slice and store them as a JSON file.
    :param mask_3d: 3d binary mask of the ROI
    :param info_dict: case information (goal_path, sop_list, ipp_list, iop, spacing_list, ...)
    :param organ_name: name of the organ / ROI
    :return: None (the contours are written under info_dict.goal_path)
    """
    # extended to support arbitrary organ_names ---by YY

    if not os.path.exists(info_dict.goal_path):
        Error.exit(ErrorCode.tofile_ouput_path_not_exist)
    if not organ_name:
        Error.exit(ErrorCode.tofile_json_name_is_none)

    response_ind = [
        a for a in range(mask_3d.shape[0]) if np.amax(mask_3d[a]) > 0
    ]
    # sop_list = [info_dict.sop_list[i] for i in response_ind]
    # ipp_list = [info_dict.ipp_list[i] for i in response_ind]
    # collect, for every slice that contains segmentation results, the information needed below (response_ind holds the slice indices)
    label_slice_list = []
    transform_matrix = grid2world_matrix(info_dict.iop, info_dict.spacing_list)

    info_dict = contour_type_mode_by_roi(organ_name, info_dict)

    for r in response_ind:
        slice_obj = slice_roi_contours(
            mask_3d[r],
            info_dict.sop_list[r],
            info_dict.ipp_list[r],
            transform_matrix,
            organ_name,
            contour_type=info_dict.contour_type,
            chain_mode=info_dict.chain_mode,
            smooth_polygon_times=info_dict.smooth_polygon_times,
            smooth_polgygon_degree=info_dict.smooth_polygon_degree)
        label_slice_list.extend(slice_obj)
    # with ProcessPoolExecutor() as executor:
    #     future_obj = executor.map(slice_roi_contours, mask_3d[response_ind],
    #                               sop_list, ipp_list,
    #                               itertools.repeat(transform_matrix, len(response_ind)),
    #                               itertools.repeat(organ_names[roi_ind], len(response_ind)),
    #                               itertools.repeat(info_dict.contour_type, len(response_ind)),
    #                               itertools.repeat(info_dict.chain_mode, len(response_ind))
    #                               )
    #     for ll_slice in future_obj:
    #         label_slice_list.extend(ll_slice)

    one_roi2json(label_slice_list, info_dict.goal_path)
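slice_roi_contours() is not shown here. As a rough standalone illustration of the mask-to-contour step it presumably builds on, the sketch below extracts pixel-space contours from one binary mask slice with OpenCV; the grid-to-world transform and the JSON serialisation of the real pipeline are omitted, and the fake ROI is an assumption for the example.

import cv2
import numpy as np

mask_slice = np.zeros((512, 512), dtype=np.uint8)
cv2.circle(mask_slice, (256, 256), 40, 1, -1)  # fake circular ROI

# OpenCV 4 returns (contours, hierarchy)
contours, _ = cv2.findContours(mask_slice, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
    points_yx = contour[:, 0, ::-1]  # (N, 2) array with columns (y, x)
    print(points_yx.shape)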
Code example #5
def check_shape(image,
                standar_shape=(512, 512),
                interp_kind=cv2.INTER_NEAREST):
    """
    Check the image size and, if it differs from the target shape (default (512, 512)), resize it to that shape.
    :param image: 3d volume whose in-plane size is checked
    :param standar_shape: target (rows, cols) shape
    :param interp_kind: cv2 interpolation method
    :return: the image, resized if necessary
    """
    assertor.type_assert(image,
                         np.ndarray,
                         error_code=ErrorCode.process_data_type_error,
                         msg='Assert pos: check_shape module')
    img_shape = image.shape
    raw_dtype = image.dtype
    image = check_for_cv2_dtype(image, raw_dtype)

    if len(img_shape) != 3:
        # the input image has the wrong shape; exit with an error code
        Error.exit(ErrorCode.process_input_shape_error)

    if img_shape[1] == standar_shape[0] and img_shape[2] == standar_shape[1]:
        return image

    resize_image = np.zeros(shape=(image.shape[0], standar_shape[0],
                                   standar_shape[1]),
                            dtype=image.dtype)
    for i in range(image.shape[0]):
        resize_image[i, :, :] = cv2.resize(
            image[i, :, :], (standar_shape[1], standar_shape[0]),
            interpolation=interp_kind)

    resize_image = anti_check_for_cv2_dtype(resize_image, raw_dtype)

    return resize_image


# if __name__ == '__main__':
#
#     test_data = np.zeros(shape=(3,1024, 1024),dtype=np.uint32)  #np.uint16
#     image = check_shape(test_data,(512,512))
#     print('dtype:',image.dtype)
#     print(image.shape)
Code example #6
def load_dcm_scan(info_dict):
    """
    Load a single CT series: pick out the valid DICOM image files, extract the series information and build the image volume.

    :param info_dict: must carry data_path, include_series (requested series) and related fields
    :return: image_3d (slices sorted by ascending ipp_z), info_dict (with sop_list and ipp_list added)
    """

    # 1. Keep only the files that pydicom can read successfully.
    #    Scan the data root directory and pick out the DICOM files.
    #    Two dicts are built (key = path, value = pydicom object):
    #    path_slices_dict holds the tomographic slices, path_rts_dict holds the RTSTRUCT files.
    path_slices_dicts, path_rts_dicts = scan4image_rt(info_dict.data_path)
    series_list = list(path_slices_dicts.keys())
    most_series = None
    if len(series_list) == 0:
        Error.exit(ErrorCode.ld_ct_load_fail)
    elif len(series_list) == 1:
        most_series = series_list[0]
    else:
        nb_slices_in_series = [len(path_slices_dicts[s]) for s in series_list]
        most_series = series_list[nb_slices_in_series.index(
            max(nb_slices_in_series))]

    # 2. Convert the pydicom objects into our own slice class and keep only the slices that carry valid image data.
    slice_data_dict, info_dict.slice_path = data_in_image_scans(
        path_slices_dicts[most_series])

    # 3. Extract the basic information of the scan series.
    info_dict = get_case_info(info_dict, slice_data_dict)

    # Filter the image slices and sort them according to the parameters.
    order_slice_list, info_dict = sort_filter_slices(info_dict,
                                                     slice_data_dict)

    # 4. Stack the slice images into a 3d volume.
    image_3d = np.stack([s.image for s in order_slice_list], axis=0)

    return image_3d, info_dict
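A rough standalone sketch of the sort-and-stack step, assuming a flat directory of uncompressed CT slices readable by pydicom; the project's series selection, custom slice class and info_dict bookkeeping are left out, and the function name is hypothetical.

import os
import numpy as np
import pydicom

def load_volume_standalone(dcm_dir):
    slices = []
    for name in os.listdir(dcm_dir):
        ds = pydicom.dcmread(os.path.join(dcm_dir, name), force=True)
        if "PixelData" in ds and "ImagePositionPatient" in ds:
            slices.append(ds)
    # sort by the z component of ImagePositionPatient, ascending (ipp_z)
    slices.sort(key=lambda ds: float(ds.ImagePositionPatient[2]))
    return np.stack([ds.pixel_array for ds in slices], axis=0)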
Code example #7
def image_interp(data, target_size, interpolation):
    """Interpolation helper (linear interpolation by default).
    # Arguments:
        data: 3d array to interpolate
        target_size: target (rows, cols) size after interpolation
        interpolation: cv2 interpolation method

    # Returns
        img_new: the interpolated image volume
    """

    if len(np.shape(data)) != 3:
        print('DataError: input data is not 3-dimensional')
        Error.exit(ErrorCode.process_input_shape_error)

    print('start interpolation......')

    z_old, rows_old, cols_old = np.shape(data)

    if len(target_size) == 2:
        rows_new = target_size[0]
        cols_new = target_size[1]
    elif len(target_size) == 1:
        rows_new = target_size[0]
        cols_new = target_size[0]
    else:
        rows_new = rows_old
        cols_new = cols_old

    img_new = np.zeros([z_old, rows_new, cols_new], dtype=np.float32)
    for i in range(z_old):
        # note: cv2.resize takes its size argument as width (cols_new) x height (rows_new)
        img_new[i, :, :] = cv2.resize(data[i, :, :], (cols_new, rows_new),
                                      interpolation=interpolation)

    print('complete interpolation......')
    return img_new
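Hypothetical usage of image_interp() as defined above (the input volume is made up): every slice of a (5, 256, 256) volume is resized to 512 x 512 with linear interpolation.

import cv2
import numpy as np

volume = np.random.rand(5, 256, 256).astype(np.float32)
resized = image_interp(volume, [512, 512], cv2.INTER_LINEAR)
print(resized.shape)  # (5, 512, 512)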
Code example #8
def crop_by_size_and_shift(imgs, image_size, center=None, pixely=0, pixelx=0):
    '''
    Cropping function (replacement for the old cutting()); the pixelx / pixely shifts are relative to the image centre.
    Note that the parameter order differs from the old cutting() and the shift parameter no longer needs to be passed in.
    :param imgs: 2d or 3d data to crop
    :param image_size: target (rows, cols) size
    :param center: crop centre; defaults to the image centre
    :param pixely: offset in y
    :param pixelx: offset in x
    :return: the cropped image(s)
    '''

    if len(imgs.shape) == 2:  # 2D image
        imgs = imgs.copy()
        image_sizeY = image_size[0]
        image_sizeX = image_size[1]

        if center is None:
            center = [imgs.shape[0] // 2, imgs.shape[1] // 2]

        pixely = int(center[0] - imgs.shape[0] // 2) + pixely
        pixelx = int(center[1] - imgs.shape[1] // 2) + pixelx

        #    z, x, y = np.shape(imgs)
        y, x = np.shape(imgs)
        shift = np.max([
            abs(pixely),
            abs(pixelx),
            np.max((abs(y - image_sizeY), abs(x - image_sizeX)))
        ])
        judge = sum([
            y > (image_sizeY + abs(pixely) * 2),
            x > (image_sizeX + abs(pixelx) * 2)
        ])
        imgs_new = []
        image_std = imgs
        #    for i, image_std in enumerate(imgs):
        if judge == 2:
            image_std = image_std[int((y - image_sizeY) / 2 +
                                      pixely):int((y + image_sizeY) / 2 +
                                                  pixely),
                                  int((x - image_sizeX) / 2 +
                                      pixelx):int((x + image_sizeX) / 2) +
                                  pixelx]
        #        imgs_new.append(image_std)
        else:
            image_new = np.min(image_std) * np.ones(
                [image_sizeY + shift * 2, image_sizeX + shift * 2],
                dtype=np.int32)
            image_new[int((image_sizeY + shift * 2 - y) /
                          2):int((image_sizeY + shift * 2 - y) / 2) + y,
                      int((image_sizeX + shift * 2 - x) /
                          2):int((image_sizeX + shift * 2 - x) / 2) +
                      x] = image_std
            y1, x1 = np.shape(image_new)
            image_std = image_new[int((y1 - image_sizeY) / 2 +
                                      pixely):int((y1 + image_sizeY) / 2 +
                                                  pixely),
                                  int((x1 - image_sizeX) / 2 +
                                      pixelx):int((x1 + image_sizeX) / 2) +
                                  pixelx]

        #    imgs_new = np.array(imgs_new, np.float32)
        imgs_new = image_std

    elif len(imgs.shape) == 3:  # 3D image
        imgs = imgs.copy()
        image_sizeY = image_size[0]
        image_sizeX = image_size[1]

        if center is None:
            center = [imgs.shape[1] // 2, imgs.shape[2] // 2]

        pixely = int(center[0] - imgs.shape[1] // 2) + pixely
        pixelx = int(center[1] - imgs.shape[2] // 2) + pixelx

        z, y, x = np.shape(imgs)
        #        x, y = np.shape(imgs)
        shift = np.max([
            abs(pixely),
            abs(pixelx),
            np.max((abs(y - image_sizeY), abs(x - image_sizeX)))
        ])
        judge = sum([
            y > (image_sizeY + abs(pixely) * 2),
            x > (image_sizeX + abs(pixelx) * 2)
        ])
        imgs_new = []
        image_std = imgs
        if judge == 2:
            for i, image_std in enumerate(imgs):
                image_std = image_std[int((y - image_sizeY) / 2 +
                                          pixely):int((y + image_sizeY) / 2 +
                                                      pixely),
                                      int((x - image_sizeX) / 2 +
                                          pixelx):int((x + image_sizeX) / 2) +
                                      pixelx]
                imgs_new.append(image_std)
        else:
            for i, image_std in enumerate(imgs):
                # pad the area outside imgs with the image minimum
                image_new = np.min(image_std) * np.ones(
                    [image_sizeY + shift * 2, image_sizeX + shift * 2],
                    dtype=np.int32)
                image_new[int((image_sizeY + shift * 2 - y) /
                              2):int((image_sizeY + shift * 2 - y) / 2) + y,
                          int((image_sizeX + shift * 2 - x) /
                              2):int((image_sizeX + shift * 2 - x) / 2) +
                          x] = image_std
                y1, x1 = np.shape(image_new)
                image_std = image_new[int((y1 - image_sizeY) / 2 +
                                          pixely):int((y1 + image_sizeY) / 2 +
                                                      pixely),
                                      int((x1 - image_sizeX) / 2 +
                                          pixelx):int((x1 + image_sizeX) / 2) +
                                      pixelx]
                imgs_new.append(image_std)

        imgs_new = np.array(imgs_new)
    else:
        Error.exit(ErrorCode.process_input_shape_error)

    return imgs_new
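Hypothetical usage of crop_by_size_and_shift() as defined above: a 3d volume is cropped to 320 x 320 around an explicit centre; the volume and the centre are made-up values.

import numpy as np

volume = np.random.randint(-1024, 2000, size=(3, 512, 512)).astype(np.int16)
cropped = crop_by_size_and_shift(volume, [320, 320], center=[260, 250])
print(cropped.shape)  # (3, 320, 320)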
Code example #9
def preprocess(image_raw, info_dict):

    print('\nBegin to preprocess')
    # posture correction
    orig_data = rotate_3D(image_raw,
                          info_dict.head_adjust_angle,
                          info_dict.head_adjust_center,
                          use_adjust=info_dict.use_head_adjust)

    # check for oversized (large) images
    orig_data, info_dict = check_large_image_zyx(orig_data, info_dict)

    z_orig_data, rows_orig_data, cols_orig_data = np.shape(orig_data)
    info_dict.image_shape_raw = [z_orig_data, rows_orig_data, cols_orig_data]

    classes_value = np.array(info_dict.classes_value, np.int8)
    index = []
    classe_orgen = info_dict.organ_classify_pos
    for i, value in enumerate(classes_value):
        if value in classe_orgen:
            index.append(i)
    if len(index) == 0:
        print('there is no 3rd classification')
        Error.exit(ErrorCode.ld_no_target_layer)

    # Eyes, lenses, optic nerves, optic chiasm, pituitary: to allow for head tilt (chin up or down), extend the slice range by 3 layers on each side.
    if index[0] - 3 > 0:
        index_floor = index[0] - 3
    elif index[0] - 2 > 0:
        index_floor = index[0] - 2
    elif index[0] - 1 > 0:
        index_floor = index[0] - 1
    else:
        index_floor = index[0]

    if index[-1] + 2 < len(classes_value):
        index_ceil = index[-1] + 3
    elif index[-1] + 1 < len(classes_value):
        index_ceil = index[-1] + 2
    else:
        index_ceil = index[-1] + 1

    data2D = orig_data[index_floor:index_ceil +
                       2, :, :]  # use the whole-body classification result to pick out the slices containing the target organs
    data2D[data2D > 2000] = 2000
    data2D[data2D < -1024] = -1024
    print('data.min(), data.max()', data2D.min(), data2D.max())
    data2D = (data2D - (-1024)) / (2000 - (-1024))
    data2D = np.array(data2D, np.float32)
    print('data.min(), data.max()', data2D.min(), data2D.max())
    data2D = (data2D - data2D.min()) / (data2D.max() - data2D.min())
    data2D = np.array(data2D, np.float32)

    info_dict.orig_images = data2D
    original_space = info_dict.spacing_list[1]
    if original_space <= 0.8 or original_space > 1.2:
        # rows_target_size = int(rows_orig_data * original_space / info_dict.target_spacing[0])     # the 2D model interpolates to 1.0
        # cols_target_size = int(cols_orig_data * original_space / info_dict.target_spacing[1])     # the 2D model interpolates to 1.0
        # data2D = image_interp(data2D, [rows_target_size, cols_target_size], cv2.INTER_LINEAR)
        data2D = interp_3d_zyx(data2D,
                               info_dict.spacing_list[1:],
                               info_dict.target_spacing,
                               kind=CV2_interp_type.linear)

    size_2D = np.shape(data2D)  # size after interpolation
    # STEP 3: cropping
    img_judge = data2D[int((index_ceil - index_floor) / 2), :, :]
    img_centre, img_box = judge_center(img_judge, 'image')
    img_centre = list(np.array(img_centre, np.int16))
    # crop to the model input size IMAGE_CROP_HEIGHT * IMAGE_CROP_WIDTH
    # data2D, four_corner = im_train_crop(data2D, img_centre, info_dict.model_input_size[0], info_dict.model_input_size[1])
    data2D = crop_by_size_and_shift(data2D, [320, 320], img_centre)

    info_dict.images = data2D

    # STEP 4: standardisation (subtract the mean, divide by the standard deviation)

    # STEP 5: build the network input, shape [z, x, y, 1], 2.5D
    imgs_train = np.zeros((1, 320, 320, 3))
    imgs_train = Crop_3D(data2D, 3, 1, imgs_train)
    imgs_train = imgs_train[1:, ...]
    # data2D = data2D.reshape(data2D.shape[0], data2D.shape[1], data2D.shape[2], 1)

    info_dict.size_2D = size_2D
    # info_dict.four_corner = four_corner
    info_dict.img_centre = img_centre
    info_dict.index_floor = index_floor
    info_dict.index_ceil = index_ceil

    return imgs_train, info_dict
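The windowing/normalisation step used above, shown in isolation: clip to the [-1024, 2000] HU range, then scale linearly to [0, 1] (the code above additionally re-normalises by the actual min/max afterwards). The input volume here is made up.

import numpy as np

hu = np.random.randint(-2000, 3000, size=(2, 64, 64)).astype(np.float32)
hu = np.clip(hu, -1024, 2000)
normalised = (hu - (-1024)) / (2000 - (-1024))
print(normalised.min(), normalised.max())  # both within [0.0, 1.0]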
Code example #10
def im_train_crop(imgs, img_centre, crop_height, crop_width):
    # check whether the original rows/cols exceed the crop size
    z, height, width = np.shape(imgs)  # height: number of rows, width: number of columns
    judge = sum([height > crop_height, width > crop_width])
    imgs_new = []

    if judge == 2:  # the original image is larger than the crop size
        width_center = int(img_centre[1])
        height_center = int(img_centre[0])

        half_crop_width = int(crop_width / 2)

        half_crop_height_u = int(crop_height * 4 / 5)
        half_crop_height_d = int(crop_height * 1 / 5)

        width_l = width_center - half_crop_width
        width_r = width_center + half_crop_width

        height_u = height_center - half_crop_height_u
        height_b = height_center + half_crop_height_d

        if (width_l < 0):
            width_l = 0
            width_r = crop_width
        if (width_r > width):
            width_l = width - crop_width
            width_r = width
        if (height_u < 0):
            height_u = 0
            height_b = crop_height
        if (height_b > height):
            height_u = height - crop_height
            height_b = height

        four_corner = [height_u, height_b, width_l, width_r]
        for i, imgs_slice in enumerate(imgs):
            imgs_slice = imgs_slice[height_u:height_b, width_l:width_r]
            imgs_new.append(imgs_slice)
    elif judge == 0:  # the original image is smaller than the crop size, so pad it
        image_std_new = np.ones([crop_height, crop_width],
                                dtype=np.int32)  # multiplied below by the image minimum, i.e. the background value
        if img_centre[0] - height / 2 < 0 or img_centre[
                0] + height / 2 > crop_height:
            if img_centre[0] - height / 2 < 0:
                height_u = 0
                height_b = int(height)
            else:
                height_u = int(crop_height - height)
                height_b = int(crop_height)
        else:
            height_u = int(img_centre[0] - height / 2)
            height_b = int(img_centre[0] + height / 2)

        if img_centre[1] - width / 2 < 0 or img_centre[
                1] + width / 2 > crop_width:
            if img_centre[1] - width / 2 < 0:
                width_l = 0
                width_r = int(width)
            else:
                width_l = int(crop_width - width)
                width_r = int(crop_width)
        else:
            width_l = int(img_centre[1] - width / 2)
            width_r = int(img_centre[1] + width / 2)

        four_corner = [height_u, height_b, width_l, width_r]
        for i, imgs_slice in enumerate(imgs):
            image_std_new = np.min(imgs_slice) * image_std_new
            image_std_new[height_u:height_b, width_l:width_r] = imgs_slice
            imgs_new.append(image_std_new)
    else:
        Error.exit(ErrorCode.process_clips_out_of_range)
    # convert imgs_new to the final dtype
    imgs_new = np.array(imgs_new, np.float32)

    return imgs_new, four_corner
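Hypothetical usage of im_train_crop() as defined above: a (2, 512, 512) volume is cropped to 320 x 320 around a chosen centre, and four_corner records the crop bounds; the inputs are made up.

import numpy as np

volume = np.random.rand(2, 512, 512).astype(np.float32)
cropped, four_corner = im_train_crop(volume, (300, 256), 320, 320)
print(cropped.shape, four_corner)  # (2, 320, 320) [44, 364, 96, 416]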
Code example #11
def interp_3d_zyx(img_volume,
                  spacing,
                  new_spacing,
                  kind=CV2_interp_type.linear,
                  kernel_size=0,
                  is_inference=True):
    """
    Interpolate each slice of the image volume.
    :param img_volume: 3d volume, preferred axis order (z, rows, cols), preferred dtype np.int16
    :param spacing: current (y, x) spacing
    :param new_spacing: target (y, x) spacing
    :param kind: interpolation method, cv2.INTER_LINEAR, cv2.INTER_NEAREST or cv2.INTER_CUBIC (slow)
    :param kernel_size: kernel size for median blurring of the interpolation result; 0 disables blurring
    :param is_inference: if True, print the shapes before and after interpolation
    :return: resized image volume; its dtype matches img_volume
    """
    # assertion guards
    assertor.equal_assert(len(spacing),
                          2,
                          error_code=ErrorCode.process_input_shape_error,
                          msg='Assert pos: interp_3d_zyx')
    assertor.equal_assert(len(new_spacing),
                          2,
                          error_code=ErrorCode.process_input_shape_error,
                          msg='Assert pos: interp_3d_zyx')

    if is_inference:
        print('     Original volume shape is %s' % str(img_volume.shape))

    if None in spacing or None in new_spacing:
        print('     target_spacing is None and interpolation is discarded\n')
        return img_volume

    # check the dtype and convert it for cv2 if necessary
    raw_dtype = img_volume.dtype
    img_volume = check_for_cv2_dtype(img_volume, raw_dtype)

    if len(img_volume.shape) != 3:
        # the input image has the wrong shape; exit with an error code
        Error.exit(ErrorCode.process_input_shape_error)

    img_volume = np.array(img_volume, dtype=img_volume.dtype)
    # z_size = img_volume.shape[0]
    # get new row,col from spacing  row - y, col - x
    row_size_new, col_size_new = trans_shape_by_spacing(
        img_volume.shape[1:], spacing, new_spacing)

    resize_image = np.zeros(shape=(img_volume.shape[0], row_size_new,
                                   col_size_new),
                            dtype=img_volume.dtype)
    for i in range(img_volume.shape[0]):
        resize_image[i, :, :] = interp_2d_yx(img_volume[i, :, :], row_size_new,
                                             col_size_new, kind, kernel_size)
    # restore the original dtype so the result matches the input

    resize_image = anti_check_for_cv2_dtype(resize_image, raw_dtype)
    if is_inference:
        print('     Shape after necessary interpolation is %s \n' %
              str(resize_image.shape))

    return resize_image
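trans_shape_by_spacing() is not shown above; the conventional conversion from pixel spacing to a new (rows, cols) size looks like the sketch below, given here only as an assumption about what that helper computes (the function name is illustrative).

def shape_from_spacing(shape_yx, spacing_yx, new_spacing_yx):
    rows, cols = shape_yx
    row_size_new = int(round(rows * spacing_yx[0] / new_spacing_yx[0]))
    col_size_new = int(round(cols * spacing_yx[1] / new_spacing_yx[1]))
    return row_size_new, col_size_new

print(shape_from_spacing((512, 512), (0.7, 0.7), (1.0, 1.0)))  # (358, 358)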
Code example #12
def anti_check_large_image_zyx(input_array: np.ndarray, info_dict: InfoDict):
    """
    Purpose: undo the special handling of images larger than (512, 512).
    Overall flow:
    1. Get the pre-resize (large) image shape and check it.
    2. If it is None, return the input unchanged.
    3. Put the corresponding spacing back into info_dict.
    4. Interpolate the input volume back to that shape and return it.
    """
    # assertion guard
    assertor.array_x_dims_assert(
        input_array,
        3,
        error_code=ErrorCode.process_input_shape_error,
        msg='Assert pos: interp_large_image_zyx')

    # feature-switch guard
    if not info_dict.use_large_image_check:
        return input_array, info_dict

    # 1. Get the original (large) image shape and check it
    large_image_shape = info_dict.large_image_shape
    # 2. If it is None, return the input unchanged
    if large_image_shape is None:
        return input_array, info_dict

    # 3. Put the original spacing back into info_dict
    large_raw_spacing = info_dict.large_raw_spacing
    if large_raw_spacing is None:
        print('large_image_shape exists, but large_raw_spacing is None')
        Error.exit(ErrorCode.process_data_type_error)
    info_dict.spacing_list = large_raw_spacing

    # 4. Interpolate the input back to the original shape
    rst_array = np.zeros(large_image_shape, dtype=input_array.dtype)
    input_z, input_y, input_x = large_image_shape
    for i in range(input_z):
        # note: the x and y sizes are passed in reverse order here
        rst_array[i, :, :] = interp_2d_yx(input_array[i, :, :],
                                          input_x,
                                          input_y,
                                          kind=CV2_interp_type.nearest)
    info_dict.image_shape_raw = large_image_shape

    return rst_array, info_dict


# if __name__ == '__main__':
#     info_dict = InfoDict()
#     info_dict.spacing_list = [1,1,1]
#     input_array = np.zeros((15,1024,512), dtype=np.int16)
#     out_array, info_dict = check_large_image_zyx(input_array, info_dict)
#     print(input_array.shape)
#     print(out_array.shape)
#     print(info_dict.spacing_list)
#     print(info_dict.large_image_shape)
#     print(info_dict.large_raw_spacing)
#     print()
#     new_array, info_dict = anti_check_large_image_zyx(out_array, info_dict)
#     print(input_array.shape)
#     print(out_array.shape)
#     print(new_array.shape)
#     print(info_dict.spacing_list)
#     print(info_dict.large_image_shape)
#     print(info_dict.large_raw_spacing)