Example No. 1
def translate_values_frame_sorted(
    x_frame_sorted: np.array,
    frame_sorted: np.array,
    bruker_translator_foo: Callable,
    x_dtype,
    result_dtype,
) -> np.array:
    assert x_dtype in (
        np.double, np.uint32
    ), f"Wrong x_dtype: Bruker code only uses np.double and np.uint32, not {x_dtype}."
    assert result_dtype in (
        np.double, np.uint32
    ), f"Wrong result_dtype: Bruker code only uses np.double and np.uint32, not {x_dtype}."
    if x_frame_sorted.dtype != x_dtype:
        x_frame_sorted = x_frame_sorted.astype(x_dtype)
    if frame_sorted.dtype != np.uint32:
        frame_sorted = frame_sorted.astype(np.uint32)
    result = np.empty(x_frame_sorted.shape, dtype=result_dtype)
    i_prev = 0
    frame_id_prev = frame_sorted[0]
    for i, frame_id in enumerate(frame_sorted):
        if frame_id != frame_id_prev:
            result[i_prev:i] = bruker_translator_foo(
                frame_id_prev,
                x_frame_sorted[i_prev:i],
            )
            i_prev = i
        frame_id_prev = frame_id
    result[i_prev:len(result)] = bruker_translator_foo(
        frame_id_prev,
        x_frame_sorted[i_prev:len(result)],
    )
    return result
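A minimal usage sketch, assuming the function above is in scope and NumPy is imported; the per-frame translator here is a hypothetical stand-in (it simply adds the frame id), not the real Bruker converter:

import numpy as np

def toy_translator(frame_id, values):
    # Hypothetical stand-in for a Bruker per-frame converter.
    return values.astype(np.double) + frame_id

frames = np.array([1, 1, 2, 2, 2, 3], dtype=np.uint32)
scans = np.array([10, 20, 10, 30, 40, 50], dtype=np.uint32)
out = translate_values_frame_sorted(scans, frames, toy_translator,
                                    x_dtype=np.uint32, result_dtype=np.double)
print(out)  # [11. 21. 12. 32. 42. 53.]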
Example No. 2
    def _coordinate(self,
                    img: np.array,
                    color_space="rgb",
                    imagenet_mean=False) -> np.array:
        if color_space == "yuv":
            img = img.astype(np.uint8)
            img = cv.cvtColor(img, cv.COLOR_BGR2YCrCb)
            img = img.transpose(2, 0, 1).astype(np.float32)
        elif color_space == "gray":
            img = img.astype(np.uint8)
            img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
            img = np.expand_dims(img, axis=0).astype(np.float32)
        else:
            img = img[:, :, ::-1].astype(np.float32)
            img = img.transpose(2, 0, 1)

        if imagenet_mean:
            height, width = img.shape[1], img.shape[2]
            mean = np.tile(self.mean.reshape(3, 1, 1), (1, height, width))
            std = np.tile(self.std.reshape(3, 1, 1), (1, height, width))
            img = (img - mean) / std
        else:
            img = (img - 127.5) / 127.5

        img = np.expand_dims(img, axis=0)
        img = torch.cuda.FloatTensor(img)

        return img
Example No. 3
def sample_fs(a: np.array, grid_sz: np.array = None, rescale=True):
    """Samples the Fourier series."""

    # Size of the fourier series
    sz = np.array([a.shape[2], 2 * a.shape[3] - 1], 'float32')

    # Default grid
    if grid_sz is None or (sz[0] == grid_sz[0] and sz[1] == grid_sz[1]):
        if rescale:
            return np.prod(sz) * cifft2(a)
        return cifft2(a)

    if sz[0] > grid_sz[0] or sz[1] > grid_sz[1]:
        raise ValueError(
            "Only grid sizes that are smaller than the Fourier series size are supported."
        )

    tot_pad = (grid_sz - sz).tolist()
    is_even = [s % 2 == 0 for s in sz]

    # Compute paddings
    pad_top = int((tot_pad[0] + 1) / 2) if is_even[0] else int(tot_pad[0] / 2)
    pad_bottom = int(tot_pad[0] - pad_top)
    pad_right = int((tot_pad[1] + 1) / 2)

    if rescale:
        return np.prod(grid_sz) * cifft2(
            _padding(a, (0, 0, 0, pad_right, pad_top, pad_bottom)),
            signal_sizes=grid_sz.astype('long').tolist())
    else:
        return cifft2(_padding(a, (0, 0, 0, pad_right, pad_top, pad_bottom)),
                      signal_sizes=grid_sz.astype('long').tolist())
Example No. 4
def bgr2ycbcr(img: np.array, only_y: bool = True):
    """ bgr image to ycbcr image,
    inspired by https://github.com/xinntao/BasicSR/blob/master/metrics/calculate_PSNR_SSIM.py
    :param img: np.array. expected types, uint8 & float
        uint8 : [0, 255]
        float : [0, 1]
    :param only_y: bool. return only y channel
    :return: np.array.
    """
    _dtype = img.dtype
    img = img.astype(np.float32)
    if _dtype != np.uint8:
        img *= 255.

    if only_y:
        rlt = np.dot(img,
                     [24.966, 128.553, 65.481]) / 255. + 16.
    else:
        rlt = np.matmul(img, [
            [24.966, 112., -18.214],
            [128.553, -74.203, -93.786],
            [65.481, -37.797, 112.]
        ]) / 255. + [16, 128, 128]

    if _dtype == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(_dtype)
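A short usage sketch, assuming bgr2ycbcr from above is in scope: a random uint8 BGR image is reduced to its luma (Y) channel, returned in the input dtype.

import numpy as np

bgr = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
y = bgr2ycbcr(bgr, only_y=True)
print(y.shape, y.dtype)  # (64, 64) uint8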
Example No. 5
def siddon_fp_arbitrary(projector: ct_projector, img: np.array,
                        det_center: np.array, det_u: np.array, det_v: np.array,
                        src: np.array) -> np.array:
    '''
    Conebeam forward projection with arbitrary geometry and flat panel. Using Siddon ray tracing.
    The size of the img will override that of projector.nx, projector.ny, projector.nz. The projection size
    will be [batch, nview, projector.nv, projector.nu].

    Parameters
    -------------------------
    img: np.array(float32) of size [batch, nz, ny, nx].
        The image to be projected, nz, ny, nx can be different than projector.nz, projector.ny, projector.nx.
        The projector will always use the size of the image.
    det_center: np.array(float32) of size [nview, 3].
        The center of the detector in mm. Each row records the center of detector as (z, y, x).
    det_u: np.array(float32) of size [nview, 3].
        The u axis of the detector. Each row is a normalized vector in (z, y, x).
    det_v: np.array(float32) of size [nview ,3].
        The v axis of the detector. Each row is a normalized vector in (z, y, x).
    src: np.array(float32) of size [nview, 3].
        The source positions in mm. Each row records the source position as (z, y, x).

    Returns
    -------------------------
    prj: np.array(float32) of size [batch, projector.nview, projector.nv, projector.nu].
        The forward projection.
    '''

    # make sure they are float32
    img = img.astype(np.float32)
    det_center = det_center.astype(np.float32)
    det_u = det_u.astype(np.float32)
    det_v = det_v.astype(np.float32)
    src = src.astype(np.float32)

    # allocate the projection of size [batch, nview, nv, nu]
    prj = np.zeros(
        [img.shape[0], det_center.shape[0], projector.nv, projector.nu],
        np.float32)

    module.cSiddonConeProjectionArbitrary.restype = c_int

    err = module.cSiddonConeProjectionArbitrary(
        prj.ctypes.data_as(POINTER(c_float)),
        img.ctypes.data_as(POINTER(c_float)),
        det_center.ctypes.data_as(POINTER(c_float)),
        det_u.ctypes.data_as(POINTER(c_float)),
        det_v.ctypes.data_as(POINTER(c_float)),
        src.ctypes.data_as(POINTER(c_float)), c_ulong(img.shape[0]),
        c_ulong(img.shape[3]), c_ulong(img.shape[2]), c_ulong(img.shape[1]),
        c_float(projector.dx), c_float(projector.dy), c_float(projector.dz),
        c_float(projector.cx), c_float(projector.cy), c_float(projector.cz),
        c_ulong(prj.shape[3]), c_ulong(prj.shape[2]), c_ulong(prj.shape[1]),
        c_float(projector.du), c_float(projector.dv), c_float(projector.off_u),
        c_float(projector.off_v))

    if err != 0:
        print(err)

    return prj
Example No. 6
def nlm(img: np.array,
        guide: np.array,
        d: float,
        search_size: Tuple[int, int, int],
        kernel_size: Tuple[int, int, int],
        kernel_std: float,
        eps: float = 1e-6) -> np.array:
    '''
    Non local mean denoising with guide.

    Parameters
    -----------------
    img: np.array(float32) of shape [batch, nz, ny, nx].
        The image to be denoised.
    guide: np.array(float32) of shape [batch, nz, ny, nx].
        The guide image for NLM.
    d: float.
        The larger the d, the weaker the guide. d should be estimated based on the noise level of guide.
    search_size: tuple of length 3.
        The search window size for averaging.
    kernel_size: tuple of length 3.
        The Gaussian kernel size used to calculate the distance between two points.
    kernel_std: float.
        Standard deviation of the Gaussian kernel.
    eps: float.
        Regularization factor.

    Returns
    -------------------
    res: np.array(float) of shape [batch, nz, ny, nx].
        The denoised image.
    '''
    kernel = np.zeros(kernel_size, np.float32)
    kernel[int(kernel_size[0] / 2),
           int(kernel_size[1] / 2),
           int(kernel_size[2] / 2)] = 1
    kernel = gaussian_filter(kernel, kernel_std)

    res = np.zeros(img.shape, np.float32)
    img = img.astype(np.float32)
    guide = guide.astype(np.float32)
    kernel = kernel.astype(np.float32)

    module.cNlm.restype = c_int
    err = module.cNlm(res.ctypes.data_as(POINTER(c_float)),
                      img.ctypes.data_as(POINTER(c_float)),
                      guide.ctypes.data_as(POINTER(c_float)),
                      kernel.ctypes.data_as(POINTER(c_float)), c_float(d * d),
                      c_float(eps), c_int(search_size[2]),
                      c_int(search_size[1]), c_int(search_size[0]),
                      c_ulong(img.shape[0]), c_ulong(img.shape[3]),
                      c_ulong(img.shape[2]), c_ulong(img.shape[1]),
                      c_int(kernel_size[2]), c_int(kernel_size[1]),
                      c_int(kernel_size[0]))

    if err != 0:
        print(err)

    return res
Example No. 7
def distance_driven_bp(projector: ct_projector,
                       prj: np.array,
                       det_center: np.array,
                       src: np.array,
                       branchless: bool = False) -> np.array:
    '''
    Distance driven backprojection for tomosynthesis. It assumes that the detector has
    u=(1,0,0) and v = (0,1,0).
    The backprojection should be along the z-axis (main axis for distance driven projection).
    The size of the img will override that of projector.nx, projector.ny, projector.nz. The projection size
    will be [batch, nview, projector.nv, projector.nu].

    Parameters
    -------------------------
    prj: np.array(float32) of size [batch, nview, nv, nu].
        The projection to be backprojected. It will override the default shape predefined,
        i.e. projector.nview, projector.nv, projector.nu.
    det_center: np.array(float32) of size [nview, 3].
        The center of the detector in mm. Each row records the center of detector as (z, y, x).
    src: np.array(float32) of size [nview, 3].
        The source positions in mm. Each row records the source position as (z, y, x).
    branchless: bool
        If True, use the branchless mode (double precision required).

    Returns
    -------------------------
    img: np.array(float32) of size [batch, projector.nz, projector.ny, projector.nx].
        The backprojected image.
    '''

    prj = prj.astype(np.float32)
    det_center = det_center.astype(np.float32)
    src = src.astype(np.float32)

    img = np.zeros([prj.shape[0], projector.nz, projector.ny, projector.nx],
                   np.float32)

    type_projector = 1 if branchless else 0

    module.cDistanceDrivenTomoBackprojection.restype = c_int
    err = module.cDistanceDrivenTomoBackprojection(
        img.ctypes.data_as(POINTER(c_float)),
        prj.ctypes.data_as(POINTER(c_float)),
        det_center.ctypes.data_as(POINTER(c_float)),
        src.ctypes.data_as(POINTER(c_float)), c_ulong(img.shape[0]),
        c_ulong(img.shape[3]), c_ulong(img.shape[2]), c_ulong(img.shape[1]),
        c_float(projector.dx), c_float(projector.dy), c_float(projector.dz),
        c_float(projector.cx), c_float(projector.cy), c_float(projector.cz),
        c_ulong(prj.shape[3]), c_ulong(prj.shape[2]), c_ulong(prj.shape[1]),
        c_float(projector.du), c_float(projector.dv), c_float(projector.off_u),
        c_float(projector.off_v), c_int(type_projector))

    if err != 0:
        print(err)

    return img
Example No. 8
    def render(self, camera: np.array, light1: np.array):
        self.ctx.clear()

        self.prog['u_light1'].write(light1.astype('f4'))
        self.prog['u_ambient'].write(np.array(0.01).astype('f4'))
        self.prog['u_projection'].write(self.projection.astype('f4'))
        self.prog['u_modelView'].write(camera.astype('f4'))
        self.vao.render()
Example No. 9
 def __init__(self, bottom_left: Coord, top_right: Coord,
              orig_dim: np.array, state_dim: np.array):
     self.dx = np.abs(top_right.x - bottom_left.x) / float(orig_dim[0])
     self.dy = np.abs(top_right.y - bottom_left.y) / float(orig_dim[1])
     self.bottom_left = bottom_left
     self.top_right = top_right
     self.dim_rescale = np.divide(orig_dim, state_dim.astype(float))
     self.dim_rescale_inv = np.divide(state_dim, orig_dim.astype(float))
     self.state_dim = state_dim
Example No. 10
def _numpy_downcast(array: np.array) -> np.array:
    dtype = array.dtype
    if dtype in [np.int32, np.float32]:
        return array
    elif dtype == np.int64:
        return array.astype(np.int32)
    elif dtype == np.float64:
        return array.astype(np.float32)
    else:
        raise ValueError(f"Array type {dtype} is not supported!")
Example No. 11
def create_percent_diff(area_high: np.array, area_low: np.array) -> np.array:
    """Mask with expected loss of quality.
    [-100%..1%] - [0..99]
    [1%..100%] - [101..200]
    0% - 100
    """
    diff = np.round(
        (area_high.astype(float) - area_low.astype(float)) / 256 * 100) + 100
    assert (diff < 0).sum() + (diff > 200).sum() == 0
    return diff.astype("uint8")
Example No. 12
def get_transform(v1: np.array, v2: np.array) -> Tuple[float, float]:
    norm_1 = np.linalg.norm(v1)
    norm_2 = np.linalg.norm(v2)
    ratio = min(1, max(-1, np.dot(v1, v2) / (norm_1 * norm_2)))
    angle = math.acos(ratio)
    scale = norm_2 / norm_1

    if np.linalg.det([v1.astype(np.float32), v2.astype(np.float32)]) > 0:
        angle = -angle

    return angle, scale
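A usage sketch, assuming get_transform from above plus numpy and math are imported: it returns the rotation angle (in radians) and the scale factor that map v1 onto v2.

import numpy as np

angle, scale = get_transform(np.array([1.0, 0.0]), np.array([0.0, 2.0]))
print(angle, scale)  # approximately -1.5708 (-pi/2) and 2.0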
Example No. 13
def fbp_bp(projector: ct_projector, prj: np.array,
           angles: np.array) -> np.array:
    '''
    Fanbeam backprojection with circular equiangular detector. Ray driven
    and weighted for FBP.

    Parameters
    ----------------
    prj: np.array(float32) of size [batch, nview, nv, nu].
        The projection to be backprojected. The size does not need to be the same
        with projector.nview, projector.nv, projector.nu.
    angles: np.array(float32) of size [nview].
        The projection angles in radians.

    Returns
    --------------
    img: np.array(float32) of size [batch, projector.nz, projector.ny, projector.nx]
        The backprojected image.
    '''

    prj = prj.astype(np.float32)
    angles = angles.astype(np.float32)
    img = np.zeros([prj.shape[0], projector.nz, projector.ny, projector.nx],
                   np.float32)

    module.cfbpFanBackprojection.restype = c_int

    err = module.cfbpFanBackprojection(img.ctypes.data_as(POINTER(c_float)),
                                       prj.ctypes.data_as(POINTER(c_float)),
                                       angles.ctypes.data_as(POINTER(c_float)),
                                       c_ulong(img.shape[0]),
                                       c_ulong(img.shape[3]),
                                       c_ulong(img.shape[2]),
                                       c_ulong(img.shape[1]),
                                       c_float(projector.dx),
                                       c_float(projector.dy),
                                       c_float(projector.dz),
                                       c_float(projector.cx),
                                       c_float(projector.cy),
                                       c_float(projector.cz),
                                       c_ulong(prj.shape[3]),
                                       c_ulong(prj.shape[2]),
                                       c_ulong(prj.shape[1]),
                                       c_float(projector.du / projector.dsd),
                                       c_float(projector.dv),
                                       c_float(projector.off_u),
                                       c_float(projector.off_v),
                                       c_float(projector.dsd),
                                       c_float(projector.dso))

    if err != 0:
        print(err)

    return img
Example No. 14
def normalize(im: np.array) -> np.array:
    """
    Linear normalization
    http://en.wikipedia.org/wiki/Normalization_%28image_processing%29
    """
    im = im.astype(float)
    minval = np.min(im)
    maxval = np.max(im)
    if minval != maxval:
        im -= minval
        im *= (255.0 / (maxval - minval))
    return im.astype(np.uint8)
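A quick sketch, assuming normalize from above: the values are stretched linearly so the minimum maps to 0 and the maximum to 255.

import numpy as np

im = np.array([[10, 20], [30, 40]])
print(normalize(im))  # 0, 85, 170, 255 after stretching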
Example No. 15
def siddon_fp(projector: ct_projector, img: np.array,
              angles: np.array) -> np.array:
    '''
    Fanbeam forward projection with circular equiangular detector. Siddon ray driven.

    Parameters
    ----------------
    img: np.array(float32) of size [batch, nz, ny, nx]
        The image to be projected.
    angles: np.array(float32) of size [nview]
        The projection angles in radians.

    Returns
    --------------
    prj: np.array(float32) of size [batch, projector.nview, projector.nv, projector.nu]
        The forward projection.
    '''

    img = img.astype(np.float32)
    angles = angles.astype(np.float32)
    prj = np.zeros(
        [img.shape[0], len(angles), projector.nv, projector.nu], np.float32)

    module.cSiddonFanProjection.restype = c_int

    err = module.cSiddonFanProjection(prj.ctypes.data_as(POINTER(c_float)),
                                      img.ctypes.data_as(POINTER(c_float)),
                                      angles.ctypes.data_as(POINTER(c_float)),
                                      c_ulong(img.shape[0]),
                                      c_ulong(img.shape[3]),
                                      c_ulong(img.shape[2]),
                                      c_ulong(img.shape[1]),
                                      c_float(projector.dx),
                                      c_float(projector.dy),
                                      c_float(projector.dz),
                                      c_float(projector.cx),
                                      c_float(projector.cy),
                                      c_float(projector.cz),
                                      c_ulong(prj.shape[3]),
                                      c_ulong(prj.shape[2]),
                                      c_ulong(prj.shape[1]),
                                      c_float(projector.du / projector.dsd),
                                      c_float(projector.dv),
                                      c_float(projector.off_u),
                                      c_float(projector.off_v),
                                      c_float(projector.dsd),
                                      c_float(projector.dso))

    if err != 0:
        print(err)

    return prj
Example No. 16
def preprocess_input(image: np.array) -> np.array:
    """preprocess image function
    Args:
        image (np.array): image as numpy array
    Returns:
        np.array: preprocessed image as numpy array
    """
    image = image.astype(np.float32)
    if image.shape[2] == 3:
        channel_means = [123.68, 116.779, 103.939]
        return (image - [[channel_means]]).astype(np.float32)
    else:
        return image.astype(np.float32)
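A usage sketch, assuming preprocess_input from above: the per-channel ImageNet means are subtracted from a 3-channel image.

import numpy as np

img = np.full((2, 2, 3), 128, dtype=np.uint8)
out = preprocess_input(img)
print(out[0, 0])  # approximately [ 4.32  11.22  24.06]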
Example No. 17
def write_bin_grid(path: str, grid: np.array) -> None:
    """
    Function for writing a NonLinLoc binary grid

    Parameters
    ----------
    path : str
        Path for the binary file
    grid : numpy array
        Three-dimensional numpy array containing the values for the grid.
    """
    grid = np.flip(grid, axis=2).reshape((1, grid.size))
    grid.astype("float32").tofile(path)
Example No. 18
 def save_as_bin_file(self,
                      buffer: np.array,
                      pre_string: str,
                      dtype_save=np.uint16) -> None:
     """ Saves selected volume in main directory with the established dimensions """
     # TODO: Rethink how to handle loading/saving dimensions tuples!!!
     vol_dims = self.create_vol_dims_suffix
     print(vol_dims)
     filename_saving = pre_string + f'_{self.oct_dims[0]}x{self.oct_dims[1]}_' + '.bin'
     _path_saving = os.path.join(self.dir_main, filename_saving)
     print(f"Saving selected volume to file {filename_saving}... ")
     buffer.astype(dtype_save).tofile(_path_saving)
     print("[DONE] Saving selected volume!")
Example No. 19
def draw_masks(frame: np.array,
               detections: Sequence["Detection"],
               alpha: float = 0.4,
               color=(255, 0, 0)):
    if detections is None:
        return frame
    frame = frame.astype(np.float32)

    for d in detections:
        mask = d.mask
        idx = np.nonzero(mask)
        frame[idx[0], idx[1], :] *= 1.0 - alpha
        frame[idx[0], idx[1], :] += alpha * np.array(color)
    return frame.astype(np.uint8)
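A usage sketch, assuming draw_masks from above; SimpleNamespace stands in for the Detection type, which only needs a binary mask attribute here.

import numpy as np
from types import SimpleNamespace

frame = np.zeros((4, 4, 3), dtype=np.uint8)
mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1
blended = draw_masks(frame, [SimpleNamespace(mask=mask)], alpha=0.4)
print(blended[2, 2])  # roughly [102   0   0]: 40% red blended over black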
Example No. 20
    def __init__(self,
                 image_paths0: list,
                 image_paths1: list,
                 image_attrs0: np.array,
                 image_attrs1: np.array,
                 labels: np.array,
                 transform: transforms.Compose = None):

        self.image_paths0 = image_paths0
        self.image_paths1 = image_paths1
        self.image_attrs0 = image_attrs0.astype(np.float32)
        self.image_attrs1 = image_attrs1.astype(np.float32)
        self.labels = labels
        self.transform = transform
Example No. 21
def modify_coord_dtype(coord: np.array, dtype: str):
    if dtype == 'datetime':
        return pd.to_datetime(coord)

    if dtype == 'int':
        return coord.astype(int)

    if dtype == 'float':
        return coord.astype(float)

    if dtype == 'str':
        return coord.astype(str)

    return coord
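A short sketch, assuming modify_coord_dtype from above plus numpy and pandas: string timestamps become a DatetimeIndex, and numeric strings can be cast to int.

import numpy as np

coords = np.array(["2021-01-01", "2021-01-02"])
print(modify_coord_dtype(coords, "datetime"))           # DatetimeIndex(...)
print(modify_coord_dtype(np.array(["1", "2"]), "int"))  # [1 2]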
Example No. 22
def get_phenotype(icd10_codes: Union[str, List[str]] = "I84",
                  samples: np.array = None,
                  max_samples: int = None,
                  balance_pheno: str = None,
                  random_state=42):
    """
    if samples argument is provided from genetic file, then find common set of samples and output ordered phenotype

    if `max_samples` is provided, then over-sample the data so that we have `max_samples/2` for both cases and controls
    """
    icd10_codes = [icd10_codes] if not isinstance(icd10_codes, list) else icd10_codes
    pheno_df_list = [
        icd10_pheno_matrix[icd10_primary_cols].isin([icd10_code]).any(axis=1).astype(int)
        for icd10_code in icd10_codes
    ]
    pheno_df = pd.concat(pheno_df_list, axis=1)
    pheno_df.columns = icd10_codes

    if samples is not None:
        geno_pheno_sample_index_mask = np.isin(samples.astype(int),
                                               pheno_df.index)
        pheno_geno_samples_common_set = samples[
            geno_pheno_sample_index_mask].astype(int)
        pheno_df_ordered = pheno_df.loc[list(pheno_geno_samples_common_set), :]
        pheno_df_ordered = pheno_df_ordered.loc[
            ~pheno_df_ordered.index.duplicated(keep="first"), :]
        sample_index = np.argwhere(geno_pheno_sample_index_mask).reshape(-1)

        if max_samples is not None:
            if balance_pheno is None:
                raise ValueError(
                    "Need to specify `balance_pheno` param: phenotype to balance during subsampling"
                )
            pheno_df_ordered = upsample_pheno(pheno_df_ordered, balance_pheno,
                                              max_samples, random_state)

            sorter = np.argsort(samples.astype(int))
            sample_index = sorter[np.searchsorted(samples.astype(int),
                                                  pheno_df_ordered.index,
                                                  sorter=sorter)]

        assert np.allclose(
            samples[sample_index].astype(int), pheno_df_ordered.index
        ), "sample mismatch between genotype and phenotype, something wrong with the `get_phenotype` function!"

        pheno_df_ordered.index = pheno_df_ordered.index.astype(str)
        return pheno_df_ordered
    return pheno_df
Example No. 23
    def plot_mask(self, img: np.array, mask: np.array):
        if len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        img = img.astype(np.uint8)
        mask = mask.astype(np.uint8)

        for i, c in enumerate(mask):
            contours, _ = cv2.findContours(c, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_NONE)
            color = self.colors[i] if self.colors else (0, 255, 0)
            for j in range(len(contours)):
                cv2.polylines(img, contours[j], True, color, 2)

        return img
Example No. 24
    def update(self, x_input: array, d_input: array) -> None:
        """Perform the update pass.

        Args:
            x_input: ``[N, in_size]`` matrix. If ``in_trans`` is set, transposed.
            d_input: ``[N, out_size]`` matrix. If ``out_trans`` is set, transposed.
        """
        if self.is_cuda:
            x_tensor = from_numpy(x_input.astype('float32')).cuda()
            d_tensor = from_numpy(d_input.astype('float32')).cuda()
            return self.tile.update(x_tensor, d_tensor, self.bias,
                                    self.in_trans, self.out_trans)

        return self.tile.update_numpy(x_input, d_input, self.bias,
                                      self.in_trans, self.out_trans)
Example No. 25
def _filter_sobel(image: np.array, axis: str, krnsize: int):
    """ The Sobel kernel filtering """
    assert axis in ('x', 'y')
    if axis == 'x':
        return cv.Sobel(image.astype(np.float32),
                        cv.CV_32F,
                        1,
                        0,
                        ksize=krnsize)
    elif axis == 'y':
        return cv.Sobel(image.astype(np.float32),
                        cv.CV_32F,
                        0,
                        1,
                        ksize=krnsize)
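A usage sketch, assuming _filter_sobel from above and an OpenCV import (cv): horizontal gradients of a vertical step edge, computed with a 3x3 kernel.

import numpy as np

step = np.zeros((5, 5), dtype=np.uint8)
step[:, 3:] = 255
gx = _filter_sobel(step, axis='x', krnsize=3)
print(gx.dtype, gx.shape)  # float32 (5, 5)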
Example No. 26
    def preprocess(self, data: np.array) -> np.array:
        """Pre-process the data by reshaping and normalizing, if necessary.

        Args:
            data: Array of data.

        Returns:
            (np.array): Pre-processed data.

        """

        data = data.astype("float32")

        if self.input_shape:
            data = data.reshape(self.input_shape)

        if self.normalize:
            # Gathers the lower and upper bounds of normalization
            low, high = self.normalize[0], self.normalize[1]

            # Gathers the minimum and maximum values of the data
            _min, _max = tf.math.reduce_min(data), tf.math.reduce_max(data)

            # Normalizes the data between `low` and `high`
            data = (high - low) * ((data - _min) / (_max - _min)) + low

        return data
Example No. 27
    def query_ids_by_embedding(
        self,
        query_emb: np.array,
        filters: Optional[dict] = None,
        top_k: int = 10,
        index: Optional[str] = None,
        return_embedding: Optional[bool] = None,
    ) -> List[Document]:
        """
        Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.

        :param query_emb: Embedding of the query (e.g. gathered from DPR)
        :param filters: Optional filters to narrow down the search space.
                        Example: {"name": ["some", "more"], "category": ["only_one"]}
        :param top_k: How many documents to return
        :param index: (SQL) index name for storing the docs and metadata
        :param return_embedding: To return document embedding
        :return:
        """
        if filters:
            raise Exception(
                "Query filters are not implemented for the FAISSDocumentStore."
            )
        if not self.faiss_index:
            raise Exception(
                "No index exists. Use 'update_embeddings()` to create an index."
            )

        query_emb = query_emb.astype(np.float32)
        return self.faiss_index.search(query_emb, top_k)
Example No. 28
def process_stats(stats_np: np.array, api_mapping: pd.DataFrame):
    if stats_np.shape[0] < 1:
        return blank_stats()

    stats_results = pd.DataFrame(
        stats_np.astype(np.float64),
        columns=[
            'NNS', 'WBT', 'distance_segment', 'azimuth_delta', 'sidenns_heel',
            'sidenns_toe', 'distance_2d_mean', 'distance_2d_std',
            'distance_2d_min', 'distance_2d_25percentile',
            'distance_2d_50percentile', 'distance_2d_75percentile',
            'distance_2d_max', 'distance_3d_mean', 'distance_3d_std',
            'distance_3d_min', 'distance_3d_25percentile',
            'distance_3d_50percentile', 'distance_3d_75percentile',
            'distance_3d_max', 'distance_vertical_mean',
            'distance_vertical_std', 'distance_vertical_min',
            'distance_vertical_25percentile', 'distance_vertical_50percentile',
            'distance_vertical_75percentile', 'distance_vertical_max',
            'theta_mean', 'theta_std', 'theta_min', 'theta_25percentile',
            'theta_50percentile', 'theta_75percentile', 'theta_max'
        ],
    ).assign(
        WBT=lambda idf: idf[['WBT']].astype(np.int64).merge(
            api_mapping, how='left', left_on='WBT', right_on='API_ID')['API'],
        NNS=lambda idf: idf[['NNS']].astype(np.int64).merge(
            api_mapping, how='left', left_on='NNS', right_on='API_ID')['API'],
        sidenns_heel=lambda idf: idf['sidenns_heel'].pipe(side_np_to_str),
        sidenns_toe=lambda idf: idf['sidenns_toe'].pipe(side_np_to_str),
    )
    return stats_results
Example No. 29
    def write_array(self,
                    numpy_array: NumpyArray,
                    name: str,
                    attrs: Dict[str, str] = None):
        """Write array with name and optional attrs to OMX file.

        Args:
            numpy_array: Numpy array
            name: name to use for the OMX key
            attrs: additional attribute key value pairs to write to OMX file
        """
        if self._mode not in ["a", "w"]:
            raise Exception(f"{self._file_path}: open in read-only mode")
        shape = numpy_array.shape
        if len(shape) == 2:
            chunkshape = (1, shape[0])
        else:
            chunkshape = None
        if self._mask_max_value:
            numpy_array[numpy_array > self._mask_max_value] = 0
        numpy_array = numpy_array.astype(dtype="float64", copy=False)
        self._omx_file.create_matrix(name,
                                     obj=numpy_array,
                                     chunkshape=chunkshape,
                                     attrs=attrs)
Example No. 30
def gms_preprocess(gms: np.array) -> np.array:
    gms = gms.astype(float)
    i1 = gms <= 1
    i2 = gms > 1
    gms[i1] = 1 + np.tanh(gms[i1])
    gms[i2] = np.log2(gms[i2])
    return gms
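A quick sketch, assuming gms_preprocess from above: values at or below 1 are squashed with 1 + tanh(x), values above 1 are compressed with log2.

import numpy as np

gms = np.array([0.5, 1.0, 4.0])
print(gms_preprocess(gms))  # approximately [1.462 1.762 2.   ]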
Example No. 31
    def __init__(self, n_input: int, n_output: int, W: np.array, b: np.array, activation: Elemwise = T.tanh):
        """
        A layer of a neural network, computes s(Wx + b) where s is a nonlinearity and x is the input vector.

        :parameters:
            - n_input: input dimensionality
            - n_output: output dimensionality
            - W: np.array, shape=(n_input, n_output)
                Weight matrix.
            - b: np.array, shape=(n_output,)
                Bias vector.
            - activation : theano.tensor.elemwise.Elemwise
                Activation function for layer output
        """
        assert W.shape == (n_input, n_output), \
            'W does not match the expected dimensionality (%d, %d) != %s' % (n_input, n_output, W.shape)
        assert b.shape == (n_output,), 'b does not match the expected dimensionality (%d,) != %s' % (n_output, b.shape)

        self.n_input = n_input
        self.n_output = n_output
        # All parameters should be shared variables.
        # They're used in this class to compute the layer output,
        # but are updated elsewhere when optimizing the network parameters.
        # Note that we are explicitly requiring that W_init has the theano.config.floatX dtype
        self.W = theano.shared(value=W.astype(theano.config.floatX),
                               # The name parameter is solely for printing purposes
                               name='W',
                               # Setting borrow=True allows Theano to use user memory for this object.
                               # It can make code slightly faster by avoiding a deep copy on construction.
                               # For more details, see
                               # http://deeplearning.net/software/theano/tutorial/aliasing.html
                               borrow=True)

        # We can force our bias vector b to be a column vector using numpy's reshape method.
        # When b is a column vector, we can pass a matrix-shaped input to the layer
        # and get a matrix-shaped output, thanks to broadcasting (described below)
        self.b = theano.shared(value=b.astype(theano.config.floatX),
                               name='b',
                               borrow=True)

        self.activation = activation

        # We'll compute the gradient of the cost of the network with respect to the parameters in this list.
        self.params = [self.W, self.b]