def generate_sketch(img, low_val=0.1, high_val=0.2):
    """Generate an image sketch, using a Canny filter with a lower threshold and a high threshold. This implementation
    uses the MATLAB implementation and thus a MATLAB engine, since no 3D python implementation is available. If you dont
    have MATLAB simply use the gradient image or a sobel filter (method available in numpy and scipy)
     :param img: input image
     :param low_val: lower threshold of Canny filter
     :param high_val: higher threshold of Canny filter
     :return: sketch of image (canny edges weighted by gradient magnitude)
     """
    norm_img = normalize_image(img, 0, 1, 'float32')

    # start MATLAB engine
    eng = matlab.engine.start_matlab()
    img_list = matlab.double(norm_img.tolist())

    # apply canny
    edges = eng.edge3(img_list, 'approxcanny',
                      matlab.double([low_val, high_val]))
    # from MATLAB to numpy
    edges_np = edges._data
    edges_np = np.reshape(edges_np, (img.shape[2], img.shape[1], img.shape[0]))
    edges_np = np.transpose(edges_np, (2, 1, 0))
    # we want a magnitude-weighted edge image
    magnitudes = generic_gradient_magnitude(
        normalize_image(norm_img, 0, 1, 'float32'), sobel)
    norm_magnitudes = normalize_image(magnitudes, 0, 1, 'float32')
    return edges_np * norm_magnitudes * 255
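A minimal MATLAB-free fallback, following the docstring's own suggestion: skip the Canny step and use the Sobel gradient magnitude directly as the sketch. The inline min-max scaling is a stand-in for the external normalize_image helper.

import numpy as np
from scipy.ndimage import generic_gradient_magnitude, sobel

def generate_sketch_sobel(img):
    # min-max normalize to [0, 1] (stand-in for normalize_image)
    norm = (img - img.min()) / (img.max() - img.min())
    # the Sobel gradient magnitude works for both 2D and 3D inputs
    mag = generic_gradient_magnitude(norm.astype('float32'), sobel)
    mag = (mag - mag.min()) / (mag.max() - mag.min())
    return mag * 255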
Example #2
def calculate(inputFile):
    a = np.loadtxt(inputFile)  
    b = np.reshape(a, (540, 410, 138)) 
    o = generic_gradient_magnitude(b, sobel)  # magnitude
    dx = ndimage.sobel(b, 0)  # x derivative
    dy = ndimage.sobel(b, 1)  # y derivative
    dz = ndimage.sobel(b, 2)  # z derivative

    r = pd.DataFrame({'gx':dx.reshape(-1), 'gy':dy.reshape(-1), 'gz':dz.reshape(-1)}) 
    R = r.loc[(r['gx'] != 0) & (r['gy'] != 0) & (r['gz'] != 0)]  # about 400 000 
    S = np.array(R.values)
    L = len(S)

    output_file = open(f"output/{inputFile.replace('.txt','')}.csv", 'w')
    for az in range(0, 360):      # azimuth angle in degrees
        for el in range(0, 90):   # elevation angle in degrees
            # unit vector pointing in the (azimuth, elevation) direction
            i = math.cos(el * math.pi / 180) * math.sin(az * math.pi / 180)
            j = math.cos(el * math.pi / 180) * math.cos(az * math.pi / 180)
            k = math.sin(el * math.pi / 180)
            g = np.array([i, j, k])
            All_list = np.dot(S, g.T)
            A = pd.DataFrame(All_list, columns=['dot'])
            z = A.loc[A['dot'] > 0]   # gradients aligned with g
            f = A.loc[A['dot'] < 0]   # gradients opposing g
            Z = z.mean()[0]
            F = f.mean()[0]
            l1 = len(z)
            l2 = len(f)
            V = (l1 * Z - l2 * F) / (l1 + l2)
            output_file.write(','.join([str(az), str(el), str(l1), str(Z), str(l2), str(F), str(V)]) + '\n')
    output_file.close()
    print(f'{inputFile} calculate done!')  
Example #3
def generate_random_telegraph_noise(
    how_many: int = 20000,
    save_to_file: bool = True,
    filename: Optional[str] = None,
) -> np.ndarray:
    """ """
    condensed_data_all = np.empty(
        [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)]
    )

    for niter in range(how_many):
        condensed_data = np.empty(
            [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)]
        )
        x = np.ones(N_2D)
        s = 1
        # for n_switches in range(0, 1):

        lam = np.random.uniform(0, 0.2, 1)
        trnsp = np.random.randint(2, size=1)

        poisson = np.random.poisson(lam=lam, size=N_2D)
        poisson[poisson > 1] = 1
        # flip the telegraph state wherever a Poisson event occurred
        for ix in range(N_2D[0]):
            for iy in range(N_2D[1]):
                if poisson[ix, iy] == 1:
                    s *= -1
                x[ix, iy] *= s
        if trnsp:
            x = x.T

        x = (x + 1) / 2

        noise_spect = fp.frequencies2(x)
        noise_spect = fp.frequenciesshift(noise_spect)
        noise_spect = np.abs(noise_spect)

        grad = generic_gradient_magnitude(x, sobel)

        index = nt.config["core"]["data_types"]["signal"]
        condensed_data[index, 0, :] = x.flatten()

        index = nt.config["core"]["data_types"]["frequencies"]
        condensed_data[index, 0, :] = noise_spect.flatten()

        index = nt.config["core"]["data_types"]["gradient"]
        condensed_data[index, 0, :] = grad.flatten()

        condensed_data_all = np.concatenate(
            (condensed_data_all, condensed_data), axis=1
        )

    if save_to_file:
        if filename is None:
            filename = "random_telegraph_noise.npy"
        path = os.path.join(nt.config["db_folder"], filename)
        np.save(path, condensed_data_all)

    return condensed_data_all
Example #4
def detect_boundarymask(data, nonvalue):
    """Return a binary boundary mask: zeros in `data` are temporarily replaced
    by `nonvalue` so that the Sobel gradient magnitude flags the transitions,
    which are then thresholded at 1."""
    np.place(data, data == 0, nonvalue)
    gradient = generic_gradient_magnitude(data, sobel)
    gradient = np.where(gradient < 1, 0, 1)
    np.place(data, data == nonvalue, 0)

    return gradient
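A quick toy check of detect_boundarymask (assuming the module-level imports below): a single hot pixel in a zero background yields a small binary ring where the gradient magnitude exceeds 1.

import numpy as np
from scipy.ndimage import generic_gradient_magnitude, sobel

toy = np.zeros((5, 5))
toy[2, 2] = 4.0
print(detect_boundarymask(toy, nonvalue=-9999))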
Example #5
def rst_3d(image, radii, alpha, beta):
    from scipy.ndimage import sobel, generic_gradient_magnitude, gaussian_filter
    from time import time
    #initiating the output image array
    t0 = time()

    output = np.zeros(image.shape)
    workingDims = image.shape

    O_n = np.zeros(workingDims, np.int16)
    M_n = np.zeros(workingDims, np.int16)

    #calculating the gradients in all directions and the magnitude image
    grad_image = generic_gradient_magnitude(image, sobel)
    gx = sobel(image, 0)
    gy = sobel(image, 1)
    gz = sobel(image, 2)

    #cutoff beta for removing some of the smaller gradients
    gthres = np.amax(grad_image) * beta

    #calculating negatively affected pixels
    gpx = np.multiply(
        np.divide(gx,
                  grad_image,
                  out=np.zeros(gx.shape),
                  where=grad_image != 0), radii).round().astype(int)
    gpy = np.multiply(
        np.divide(gy,
                  grad_image,
                  out=np.zeros(gy.shape),
                  where=grad_image != 0), radii).round().astype(int)
    gpz = np.multiply(
        np.divide(gz,
                  grad_image,
                  out=np.zeros(gz.shape),
                  where=grad_image != 0), radii).round().astype(int)

    for coords, gnorm in np.ndenumerate(grad_image):
        if gnorm > gthres:
            i, j, k = coords
            pnve = (i - gpx[i, j, k], j - gpy[i, j, k], k - gpz[i, j, k])
            O_n[pnve] -= 1
            M_n[pnve] -= gnorm

    O_n = np.abs(O_n)
    O_n = O_n / float(np.amax(O_n))

    M_max = float(np.amax(np.abs(M_n)))
    M_n = M_n / M_max

    F_n = np.multiply(np.power(O_n, alpha), M_n)

    s = gaussian_filter(F_n, 0.5 * radii)
    t1 = time()
    print("Time taken for 3D RST:", t1 - t0)

    return s
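Hypothetical usage of rst_3d: a bright cube in an otherwise empty volume should produce a radial-symmetry response concentrated near its centre.

import numpy as np

vol = np.zeros((32, 32, 32))
vol[12:20, 12:20, 12:20] = 1.0
response = rst_3d(vol, radii=4, alpha=2, beta=0.1)
print(response.shape, float(response.min()), float(response.max()))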
Example #6
    def _init_coords(self):
        logging.info('2d peaks: starting')

        # Loop over 2d slices.
        for z in range(self.canvas.image.shape[0]):
            image_2d = (self.canvas.image[z, :, :]).astype(np.float32)

            # Edge detection.
            edges = ndimage.generic_gradient_magnitude(image_2d, ndimage.sobel)

            # Adaptive thresholding.
            sigma = 49.0 / 6.0
            thresh_image = np.zeros(edges.shape, dtype=np.float32)
            ndimage.gaussian_filter(edges,
                                    sigma,
                                    output=thresh_image,
                                    mode='reflect')
            filt_edges = edges > thresh_image

            del edges, thresh_image

            # Prevent border effect
            if (self.canvas.restrictor is not None
                    and self.canvas.restrictor.mask is not None):
                filt_edges[self.canvas.restrictor.mask[z, :, :]] = 1

            # Distance transform
            dt = ndimage.distance_transform_edt(1 - filt_edges).astype(
                np.float32)

            # Use a specific seed for the noise so that results are reproducible
            # regardless of what happens before the policy is called.
            state = np.random.get_state()
            np.random.seed(42)
            idxs = skimage.feature.peak_local_max(
                dt + np.random.random(dt.shape) * 1e-4,
                indices=True,
                min_distance=3,
                threshold_abs=0,
                threshold_rel=0)
            zs = np.full((idxs.shape[0], 1), z, dtype=np.int64)
            idxs = np.concatenate((zs, idxs), axis=1)
            np.random.set_state(state)

            # Update self.coords with indices found at this z index
            logging.info('2d peaks: found %d local maxima at z index %d',
                         idxs.shape[0], z)
            self.coords = np.concatenate(
                (self.coords, idxs)) if z != 0 else idxs

        self.coords = np.array(
            sorted([(z, y, x) for z, y, x in self.coords],
                   reverse=self.sort_reverse))

        logging.info('2d peaks: found %d total local maxima',
                     self.coords.shape[0])
Example #7
def correct_normalizations(
    filename: str,
    db_folder: Optional[str] = None,
) -> None:
    """"""
    if db_folder is None:
        db_folder = nt.config["db_folder"]

    path = os.path.join(db_folder, filename)

    all_data = np.load(path)

    data = all_data[:, :, :-1]
    labels = all_data[:, :, -1]

    sg_indx = nt.config["core"]["data_types"]["signal"]

    images = np.copy(data[sg_indx])
    images = images.reshape(images.shape[0], -1)

    high_current_images = np.max(images, axis=1)
    high_current_ids = np.where(high_current_images > 1)[0]

    # print(len(high_current_ids))

    for exid in high_current_ids:
        # print(np.max(data[sg_indx, exid]))
        sig = data[sg_indx, exid]
        sig = (sig - np.min(sig)) / (np.max(sig) - np.min(sig))
        sig = sig * 0.3  # assume it's dots and highest current is not max current
        data[sg_indx, exid] = sig

        freq_spect = fp.fft2(sig.reshape(50, 50))
        freq_spect = np.abs(fp.fftshift(freq_spect))

        grad = generic_gradient_magnitude(sig.reshape(50, 50), sobel)

        index = nt.config["core"]["data_types"]["frequencies"]
        data[index, exid, :] = freq_spect.flatten()

        index = nt.config["core"]["data_types"]["gradient"]
        data[index, exid, :] = grad.flatten()

    n = list(data.shape)
    n[-1] += 1

    data_w_labels = np.zeros(n)
    data_w_labels[:, :, -1] = labels
    data_w_labels[:, :, :-1] = data

    path = os.path.join(db_folder, filename)
    np.save(path, data_w_labels)
Example #8
    def _init_coords(self, membrane_bias=False):
        logging.info('peaks: starting')
        # Edge detection.
        im = self.canvas.image.astype(np.float32)
        if membrane_bias:
            im = im[..., 0] * ((im[..., 1] / 2) + 0.5)
        else:
            im = im[..., 0]
        edges = ndimage.generic_gradient_magnitude(im, ndimage.sobel)

        # Adaptive thresholding.
        sigma = 49.0 / 6.0
        thresh_image = np.zeros(edges.shape, dtype=np.float32)
        ndimage.gaussian_filter(edges,
                                sigma,
                                output=thresh_image,
                                mode='reflect')
        filt_edges = edges > thresh_image

        del edges, thresh_image

        # This prevents a border effect where the large amount of masked area
        # screws up the distance transform below.
        if (self.canvas.restrictor is not None
                and self.canvas.restrictor.mask is not None):
            filt_edges[self.canvas.restrictor.mask] = 1

        logging.info('peaks: filtering done')
        dt = ndimage.distance_transform_edt(1 - filt_edges).astype(np.float32)
        logging.info('peaks: edt done')

        # Use a specific seed for the noise so that results are reproducible
        # regardless of what happens before the policy is called.
        state = np.random.get_state()
        np.random.seed(42)
        idxs = skimage.feature.peak_local_max(
            dt + np.random.random(dt.shape) * 1e-4,
            indices=True,
            min_distance=3,
            threshold_abs=0,
            threshold_rel=0)
        np.random.set_state(state)

        # After skimage upgrade to 0.13.0 peak_local_max returns peaks in
        # descending order, versus ascending order previously.  Sort ascending to
        # maintain historic behavior.
        idxs = np.array(sorted((z, y, x) for z, y, x in idxs))
        idxs = idxs[np.random.permutation(len(idxs))]  # Shuffle for ensembling

        logging.info('peaks: found %d local maxima', idxs.shape[0])
        self.coords = idxs
def edge_mask():
    mri = '/home/dieudonnem/hpc/out/derivative/suiter_cnn/dataset_sence/sub-1/mask/suiter/r04_sub-testanat_acq-0p8mm_rec-MEMPRAGEnomotion_T1w/L_CrusI.nii'
    img = nib.load(mri)
    data = img.get_fdata()
    edge = generic_gradient_magnitude(data, sobel)
    # clean edge
    edge[edge < 0.7 * np.max(edge)] = 0
    new_img = nib.Nifti1Image(edge, img.affine, img.header)
    # save edge
    os.makedirs(os.path.join('/home/dieudonnem/hpc/out/', 'yolo'),
                exist_ok=True)
    nib.save(new_img, os.path.join('/home/dieudonnem/hpc/out/', 'yolo',
                                   'name'))
    print('edge saved')
Example #10
def generate_current_drop(
    how_many: int = 20000,
    save_to_file: bool = True,
    filename: Optional[str] = None,
) -> np.ndarray:
    """ """
    condensed_data_all = np.empty(
        [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)]
    )

    for niter in range(how_many):
        condensed_data = np.empty(
            [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)]
        )
        xm, ym = np.meshgrid(np.linspace(0, 50, 50), np.linspace(0, 50, 50))
        drop = np.sqrt((xm + ym) ** 2)
        drop = (drop - np.min(drop)) / (np.max(drop) - np.min(drop))

        amp = np.random.uniform(0, 10, 1)
        offset = np.random.uniform(-5, 5, 1)

        drop = np.tanh(amp * drop + offset)
        drop = (drop - np.min(drop)) / (np.max(drop) - np.min(drop))

        drop_freq = fp.frequencies2(drop)
        drop_freq = fp.frequenciesshift(drop_freq)
        drop_freq = np.abs(drop_freq)

        grad = generic_gradient_magnitude(drop, sobel)

        index = nt.config["core"]["data_types"]["signal"]
        condensed_data[index, 0, :] = drop.flatten()

        index = nt.config["core"]["data_types"]["frequencies"]
        condensed_data[index, 0, :] = drop_freq.flatten()

        index = nt.config["core"]["data_types"]["gradient"]
        condensed_data[index, 0, :] = grad.flatten()

        condensed_data_all = np.concatenate(
            (condensed_data_all, condensed_data), axis=1
        )

    if save_to_file:
        if filename is None:
            filename = "current_drop.npy"
        path = os.path.join(nt.config["db_folder"], filename)
        np.save(path, condensed_data_all)

    return condensed_data_all
def compute_eng_grad(img):
    """
    Computes the energy of an image using gradient magnitude

    Args:
        img (n,m,4 numpy matrix): RGB image with an additional mask layer.

    Returns:
        n,m numpy matrix: Gradient energy map of the provided image
    """
    bw_img = rgb_to_gray(img)
    eng = generic_gradient_magnitude(bw_img, sobel)
    # note: the Gaussian gradient magnitude below overwrites the Sobel result
    eng = gaussian_gradient_magnitude(bw_img, 1)
    return normalize(eng)
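A self-contained sketch of the same idea, with naive inline stand-ins for the external rgb_to_gray and normalize helpers:

import numpy as np
from scipy.ndimage import gaussian_gradient_magnitude

def compute_eng_grad_demo(rgb):
    gray = rgb[..., :3].mean(axis=-1)           # naive grayscale stand-in
    eng = gaussian_gradient_magnitude(gray, 1)  # smoothed gradient energy
    # normalize to [0, 1]; the epsilon guards against a constant image
    return (eng - eng.min()) / (eng.max() - eng.min() + 1e-12)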
def findEdgeSobel(image, sigma, amin, amax, output, testlog):
    if (sigma > 0):
        smooth = nd.gaussian_filter(image, sigma)
        if (output):
            testlog = outputTestImage(smooth, 'smooth', 'filtered image',
                                      testlog)
    else:
        smooth = image

    edges = nd.generic_gradient_magnitude(smooth, derivative=nd.sobel)
    if (output):
        testlog = outputTestImage(edges, 'edge', 'edge image', testlog)

    edgemax = edges.max()
    tedges = ma.masked_inside(edges, edgemax * amin, edgemax * amax)
    return tedges, testlog
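A hypothetical call (with the module-level aliases the function relies on). Note that ma.masked_inside hides responses between amin*max and amax*max, so the returned edge image keeps only the weakest and strongest responses unmasked.

import numpy as np
import numpy.ma as ma
import scipy.ndimage as nd

img = np.random.rand(64, 64)
tedges, log = findEdgeSobel(img, sigma=1.0, amin=0.1, amax=0.9,
                            output=False, testlog=[])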
Example #14
    def _init_coords(self):
        logging.info('peaks: starting')

        # Edge detection.
        edges = ndimage.generic_gradient_magnitude(
            self.canvas.image.astype(np.float32), ndimage.sobel)

        # Adaptive thresholding.
        sigma = 49.0 / 6.0
        thresh_image = np.zeros(edges.shape, dtype=np.float32)
        ndimage.gaussian_filter(edges,
                                sigma,
                                output=thresh_image,
                                mode='reflect')
        filt_edges = edges > thresh_image

        del edges, thresh_image

        # This prevents a border effect where the large amount of masked area
        # screws up the distance transform below.
        if (self.canvas.restrictor is not None
                and self.canvas.restrictor.mask is not None):
            filt_edges[self.canvas.restrictor.mask] = 1

        logging.info('peaks: filtering done')
        dt = ndimage.distance_transform_edt(1 - filt_edges).astype(np.float32)
        logging.info('peaks: edt done')

        # Use a specific seed for the noise so that results are reproducible
        # regardless of what happens before the policy is called.
        state = np.random.get_state()
        np.random.seed(42)
        idxs = skimage.feature.peak_local_max(
            dt + np.random.random(dt.shape) * 1e-4,
            indices=True,
            min_distance=3,
            threshold_abs=0,
            threshold_rel=0)
        np.random.set_state(state)

        logging.info('peaks: found %d local maxima', idxs.shape[0])
        self.coords = idxs
Example #15
def generate_white_noise(
    how_many: int = 20000,
    save_to_file: bool = True,
    filename: Optional[str] = None,
) -> np.ndarray:
    """ """
    condensed_data_all = np.empty(
        [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)]
    )

    for niter in range(how_many):
        condensed_data = np.empty(
            [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)]
        )
        coeff = np.random.normal(0, 1, N_2D)
        noise = np.abs(fp.ifrequencies2(coeff))
        grad = generic_gradient_magnitude(noise, sobel)

        index = nt.config["core"]["data_types"]["signal"]
        condensed_data[index, 0, :] = noise.flatten()

        index = nt.config["core"]["data_types"]["frequencies"]
        condensed_data[index, 0, :] = coeff.flatten()

        index = nt.config["core"]["data_types"]["gradient"]
        condensed_data[index, 0, :] = grad.flatten()

        condensed_data_all = np.concatenate(
            (condensed_data_all, condensed_data), axis=1
        )

    if save_to_file:
        if filename is None:
            filename = "white_noise.npy"
        path = os.path.join(nt.config["db_folder"], filename)
        np.save(path, condensed_data_all)

    return condensed_data_all
def save_edge(list_img):
    for img in list_img:
        mri = nib.load(os.path.join(input, img))
        data = mri.get_fdata()
        label = np.unique(data)
        print(len(np.unique(data)))
        for lab in label:
            # select area with specified label
            area = np.zeros(data.shape)
            area[data == lab] = lab
            # find edge of the area
            edge = generic_gradient_magnitude(area, sobel)
            print(np.min(edge), np.max(edge))
            # clean edge
            edge[edge < 0.5 * np.max(edge)] = 0
            new_img = nib.Nifti1Image(edge, mri.affine, mri.header)
            # saving
            name = soft + '_lab-' + str(int(lab))
            folder_out = img.split('.')
            folder_out = folder_out[0]
            if not os.path.exists(os.path.join(save_dir, folder_out)):
                os.mkdir(os.path.join(save_dir, folder_out))
            nib.save(new_img, os.path.join(save_dir, folder_out, name))
Example #17
def prep_data(
    dataset: nt.Dataset,
    category: str,
) -> List[np.ndarray]:
    """Remove NaNs, normalize by normalization constants and reshape into the
    target shape.
    Shape convention: shape = [datatypes, #samples, #features].
    We return one sample per readout method with all configured data types.
    """
    assert category in nt.config["core"]["features"].keys()
    if len(dataset.power_spectrum) == 0:
        dataset.compute_power_spectrum()

    condensed_data_all = []

    for readout_method in dataset.readout_methods.keys():
        signal = dataset.data[readout_method].values
        dimension = dataset.dimensions[readout_method]

        shape = tuple(nt.config["core"]["standard_shapes"][str(dimension)])
        condensed_data = np.empty(
            (len(nt.config["core"]["data_types"]), 1, np.prod(shape)))

        relevant_features = nt.config["core"]["features"][category]
        features = []

        if dataset.features:
            for feat in relevant_features:
                features.append(dataset.features[readout_method][feat])

        # double check if current range is correct:
        if np.max(signal) > 1:
            min_curr = np.min(signal)
            max_curr = np.max(signal)
            signal = (signal - min_curr) / (max_curr - min_curr)
            # assume we are talking dots and high current was not actually
            # device_max_signal
            dataset.data[readout_method].values = signal * 0.3
            dataset.compute_power_spectrum()

        data_resized = resize(signal,
                              shape,
                              anti_aliasing=True,
                              mode="constant").flatten()

        grad = generic_gradient_magnitude(signal, sobel)
        gradient_resized = resize(grad,
                                  shape,
                                  anti_aliasing=True,
                                  mode="constant").flatten()
        power = dataset.power_spectrum[readout_method].values
        frequencies_resized = resize(power,
                                     shape,
                                     anti_aliasing=True,
                                     mode="constant").flatten()

        pad_width = len(data_resized.flatten()) - len(features)
        features = np.pad(
            features,
            (0, pad_width),
            "constant",
            constant_values=nt.config["core"]["fill_value"],
        )

        index = nt.config["core"]["data_types"]["signal"]
        condensed_data[index, 0, :] = data_resized

        index = nt.config["core"]["data_types"]["frequencies"]
        condensed_data[index, 0, :] = frequencies_resized

        index = nt.config["core"]["data_types"]["gradient"]
        condensed_data[index, 0, :] = gradient_resized

        index = nt.config["core"]["data_types"]["features"]
        condensed_data[index, 0, :] = features

        condensed_data_all.append(condensed_data)

    return condensed_data_all
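The shape convention from the docstring, illustrated with hypothetical sizes: with four data types, one sample per readout method and a 50x50 standard shape, each entry of the returned list looks like this.

import numpy as np

demo = np.empty((4, 1, 50 * 50))  # (datatypes, #samples, #features)
assert demo.shape == (4, 1, 2500)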
Example #18
    def segment(self,
                img,
                well_radius=800,
                well_mask_radius=765,
                include_intermediate_results=False,
                **kwargs):
        # Assume image is single plane z-stack and grab first 2D image to process
        assert img.ndim == 3
        assert img.shape[0] == 1
        img = img[0]

        logger.debug(
            'Running 2x segmentation on image with shape %s, type %s (args: well_radius = %s, well_mask_radius = %s, include_intermediate_results=%s)',
            img.shape, img.dtype, well_radius, well_mask_radius,
            include_intermediate_results)

        # Remove outliers, convert to float
        img = ndi.median_filter(img, size=(3, 3))
        img = img_as_float(img)

        # Apply bandpass and compute gradients
        img_bp = ndi.gaussian_filter(img, sigma=6) - ndi.gaussian_filter(
            img, sigma=10)
        img_gr = ndi.generic_gradient_magnitude(img_bp, ndi.sobel)

        # Get and apply well mask translation
        img_well = get_circle_mask(well_radius, img_gr.shape)
        shifts = feature.register_translation(img_gr, img_well)[0]
        img_well = get_circle_mask(well_mask_radius,
                                   img_gr.shape,
                                   translation=shifts)
        img_gm = img_gr * img_well

        # Apply local threshold and cleanup binary result
        img_bm = img_gm > filters.threshold_local(img_gm, 255)
        img_bm = ndi.binary_fill_holes(img_bm, structure=morphology.disk(1))
        img_bm = morphology.binary_opening(img_bm, selem=morphology.disk(8))

        # Run segmentation
        img_dt = ndi.distance_transform_edt(img_bm)
        img_dt = ndi.gaussian_filter(img_dt, sigma=1)
        img_pk = morphology.label(
            feature.peak_local_max(img_dt, indices=False, min_distance=8))
        img_obj = segmentation.watershed(-img_dt, img_pk,
                                         mask=img_bm).astype(np.uint16)
        img_bnd = img_obj * segmentation.find_boundaries(
            img_obj, mode='inner', background=0)

        # Compile list of object image results (and append intermediates if necessary)
        img_seg = [img_obj, img_obj, img_bnd, img_bnd]
        if include_intermediate_results:
            to_uint16 = lambda im: exposure.rescale_intensity(
                im, out_range='uint16').astype(np.uint16)
            img_seg += [
                to_uint16(img_bp),
                segmentation.find_boundaries(img_well,
                                             mode='inner',
                                             background=0).astype(np.uint16),
                to_uint16(img_gm),
                to_uint16(img_dt),
                img_pk.astype(np.uint16)
            ]

        # Stack and add new axis to give to (z, ch, h, w)
        img_seg = np.stack(img_seg)[np.newaxis]
        assert img_seg.dtype == np.uint16, 'Expecting 16bit result, got type {}'.format(
            img_seg.dtype)
        assert img_seg.ndim == 4, 'Expecting 4D result, got shape {}'.format(
            img_seg.shape)
        return img_seg
Example #19
import numpy as np
import random
import skimage.feature
import skimage.io
from scipy import ndimage

data = skimage.io.imread(
    "/home/x903102883/FFN_LM_v0.2/core/tool/skeletons/test_data/eboyden-1_(768, 2816, 0)_raw_binary.tif"
)
print(data.shape)
skel = np.zeros(data.shape)
mask_sk = (data >= 0)
print(np.sum(mask_sk))
bi_mask = np.zeros(data.shape)
bi_mask[mask_sk] = 1
print(np.sum(bi_mask))

edges = ndimage.generic_gradient_magnitude(bi_mask.astype(np.float32),
                                           ndimage.sobel)
sigma = 49.0 / 6.0
thresh_image = np.zeros(edges.shape, dtype=np.float32)
ndimage.gaussian_filter(edges, sigma, output=thresh_image, mode='reflect')
filt_edges = edges > thresh_image

del edges, thresh_image
dt = ndimage.distance_transform_edt(1 - filt_edges).astype(np.float32)

state = np.random.get_state()
np.random.seed(42)
idxs = skimage.feature.peak_local_max(dt + np.random.random(dt.shape) * 1e-4,
                                      indices=True,
                                      min_distance=1,
                                      threshold_abs=0,
                                      threshold_rel=0)
Example #21
def grad_image(image):
    from scipy.ndimage import sobel, generic_gradient_magnitude, gaussian_filter
    grad = generic_gradient_magnitude(image, sobel)
    return grad
Example #22
def generate_one_f_noise(
    how_many: int = 20000,
    save_to_file: bool = True,
    filename: Optional[str] = None,
) -> np.ndarray:
    """ """
    fx_1d = fp.frequenciesshift(fp.frequenciesfreq(1000, d=0.02))

    condensed_data_all = np.empty(
        [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)]
    )

    for niter in range(how_many):

        condensed_data = np.empty(
            [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)]
        )

        fx, fy = np.meshgrid(fx_1d, fx_1d, indexing="ij")
        f = np.sqrt(fx ** 2 + fy ** 2)

        f[f > 0] = np.divide(1, f[f > 0])

        # if low_pass_cutoff is not None:
        #     f[f > low_pass_cutoff] = 0

        # if high_pass_cutoff is not None:
        # f[f < high_pass_cutoff] = 0

        exponents = np.random.uniform(low=0, high=2 * np.pi, size=f.shape)
        power_spect = np.multiply(f, np.exp(1j * exponents))

        noise = np.abs(fp.ifrequencies2(power_spect))
        noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))

        grad = generic_gradient_magnitude(noise, sobel)

        noise = resize(noise, N_2D, anti_aliasing=True, mode="constant").flatten()

        grad = resize(grad, N_2D, anti_aliasing=True, mode="constant").flatten()

        power_spect = resize(
            np.abs(power_spect), N_2D, anti_aliasing=True, mode="constant"
        ).flatten()

        index = nt.config["core"]["data_types"]["signal"]
        condensed_data[index, 0, :] = noise

        index = nt.config["core"]["data_types"]["frequencies"]
        condensed_data[index, 0, :] = power_spect

        index = nt.config["core"]["data_types"]["gradient"]
        condensed_data[index, 0, :] = grad

        condensed_data_all = np.concatenate(
            (condensed_data_all, condensed_data), axis=1
        )

    if save_to_file:
        if filename is None:
            filename = "one_over_f_noise.npy"
        path = os.path.join(nt.config["db_folder"], filename)
        np.save(path, condensed_data_all)

    return condensed_data_all
Example #23
def generate_random_blobs(
    how_many: int = 20000,
    save_to_file: bool = True,
    filename: Optional[str] = None,
    n_blobs: int = 15,
    stdx: Optional[List[float]] = None,
    stdy: Optional[List[float]] = None,
) -> np.ndarray:
    """ """
    if stdx is None:
        stdx = [0.3, 0.8]
    if stdy is None:
        stdy = [0.3, 0.8]

    condensed_data_all = np.empty(
        [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)]
    )

    for niter in range(how_many):
        condensed_data = np.empty(
            [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)]
        )
        x = np.linspace(-1, 1)
        y = np.linspace(-1, 1)
        x, y = np.meshgrid(x, y)
        z = np.zeros(N_2D)
        for n_blob in range(n_blobs):
            z += gauss2d(
                x,
                y,
                mx=np.random.uniform(-1, 1, 1),
                my=np.random.uniform(-1, 1, 1),
                sx=np.random.uniform(*stdx, 1),
                sy=np.random.uniform(*stdy, 1),
            )
        z = (z - np.min(z)) / (np.max(z) - np.min(z))

        noise_spect = fp.frequencies2(z)
        noise_spect = fp.frequenciesshift(noise_spect)
        noise_spect = np.abs(noise_spect)

        grad = generic_gradient_magnitude(z, sobel)

        index = nt.config["core"]["data_types"]["signal"]
        condensed_data[index, 0, :] = z.flatten()

        index = nt.config["core"]["data_types"]["frequencies"]
        condensed_data[index, 0, :] = noise_spect.flatten()

        index = nt.config["core"]["data_types"]["gradient"]
        condensed_data[index, 0, :] = grad.flatten()

        condensed_data_all = np.concatenate(
            (condensed_data_all, condensed_data), axis=1
        )

    if save_to_file:
        if filename is None:
            filename = "random_blobs.npy"
        path = os.path.join(nt.config["db_folder"], filename)
        np.save(path, condensed_data_all)

    return condensed_data_all
Example #24
convert2lab = False

#array= io.imread(ruta1)
array = ji.read_tiff(ruta1, 1)
#array = np.transpose(array, (1, 0, 2))
print(array.shape)
print('Obtaining superstructures')
segments = slic(array,
                compactness=compactness,
                n_segments=numSegments,
                multichannel=False,
                convert2lab=convert2lab)
print('Number of SV: ', len(np.unique(segments)))
segments += 1
array = array.astype('int64')
mag = ndi.generic_gradient_magnitude(array, ndi.sobel, float)
mag *= 255.0 / np.max(mag)  # normalize (Q&D)

print('Obtaining RAG')
rag = graph.rag_boundary(segments, mag, connectivity=1)
print("Merging RAG's segments")
segments2 = graph.merge_hierarchical(segments,
                                     rag,
                                     253,
                                     in_place_merge=True,
                                     rag_copy=False,
                                     merge_func=ji.merge_boundary,
                                     weight_func=ji.weight_boundary)
print('Final Number of SV: ', len(np.unique(segments2)))

Example #25
                      default=True,
                      action='store_true')
    parser.add_option("--nocanny",
                      dest="canny",
                      help="Don't use canny filter",
                      action='store_false')
    (options, args) = parser.parse_args()

    if len(args) != 2:
        parser.error("Incorrect number of arguments")

    inim = volumeFromFile(args[0], dtype='ushort')
    outim = volumeFromInstance(inim, args[1])
    ''' 3D Sobel filter (doesn't always work) '''
    if options.sobel:
        outim.data[::] = ndimage.generic_gradient_magnitude(
            inim.data, ndimage.sobel)
        options.canny = 0
    ''' 2D Canny filter '''
    # http://scipy-lectures.github.io/advanced/image_processing/auto_examples/plot_canny.html
    # canny(image, sigma=1.0, low_threshold=0.1, high_threshold=0.2, mask=None)
    if options.canny:
        for i in range(inim.sizes[0]):
            t = inim.getHyperslab((i, 0, 0), (1, inim.sizes[1], inim.sizes[2]))
            t.shape = (inim.sizes[1], inim.sizes[2])
            c = filter.canny(t, sigma=options.sigma)
            outim.data[i::] = c

    if options.canny or options.sobel:
        outim.writeFile()
        outim.closeVolume()
    else:
Example #26
low = 0.0; high = 1.0
for i in range(10):
    frame_filter = filters.rank.enhance_contrast_percentile(frame_filter, morphology.disk(5), p0=low, p1=high)
    low, high = low+.05, high-.05
    ax[0].clear()
    ax[0].imshow(frame_filter)
    plt.pause(.001)


vol = vol_orig.copy()
vol = vol[:,:,0:5000]
vol = img_as_float(vol)
vol = 1.-vol

vol = logistic_image(vol, this_log)
vol_sob = ndimage.generic_gradient_magnitude(vol, ndimage.sobel)
vol_sob = (vol_sob - np.min(vol_sob)) / (np.max(vol_sob) - np.min(vol_sob))

params = np.ndarray(shape=(5,5000))
params[0,:] = ix
params[1,:] = iy
params[2,:] = rad
params[3,:] = rad
params[4,:] = 0

ellipse_mask = np.ndarray(shape=vol_sob.shape, dtype=bool)
ypts = np.linspace(0, vol_sob.shape[0] - 1, vol_sob.shape[0])
xpts = np.linspace(0, vol_sob.shape[1] - 1, vol_sob.shape[1])

# note: the meshgrid below overwrites the linspace coordinates above
ypts, xpts = np.meshgrid(range(vol_sob.shape[1]), range(vol_sob.shape[0]))
ypts, xpts = ypts.astype(float), xpts.astype(float)
Example #27
def intensity_distance_seeds(image_data, resolution, axis=0, erosion_radius=16, min_sep=24, visualize=False):
    """Create seed locations maximally distant from a Sobel filter.

    Parameters
    ----------
    image_data : ndarray
    resolution : ndarray
    axis : int, optional
        Axis along which to slice the volume to generate seeds in 2D. If
        None, the volume is processed in 3D.
    erosion_radius : int, optional
        L_infinity norm radius of the structuring element for eroding
        components.
    min_sep : int, optional
        L_infinity minimum separation of seeds in nanometers.

    Returns
    -------
    list of ndarray
    """
    # Late import as this is the only function using Scikit.
    from skimage import morphology

    structure = np.ones(np.floor_divide(erosion_radius, resolution) * 2 + 1)

    if axis is None:
        def slices():
            yield (slice(None), slice(None), slice(None))
    else:
        structure = structure[axis]

        def slices():
            # yield index tuples usable directly for NumPy slicing
            for i in range(image_data.shape[axis]):
                s = [slice(None)] * 3
                s[axis] = i
                yield tuple(s)

    sobel = np.zeros_like(image_data)
    thresh = np.zeros_like(image_data)
    transform = np.zeros_like(image_data)
    skmax = np.zeros_like(image_data)
    for s in slices():
        image_slice = image_data[s]
        if axis is not None and not np.any(image_slice):
            logging.debug('Skipping blank slice.')
            continue
        logging.debug('Running Sobel filter on image shape %s', image_data.shape)
        sobel[s] = ndimage.generic_gradient_magnitude(image_slice, make_prewitt(int((24 / resolution).max() * 2 + 1)))
        # sobel = ndimage.grey_dilation(sobel, size=(5,5,3))
        logging.debug('Running distance transform on image shape %s', image_data.shape)

        # For low res images the sobel histogram is unimodal. For now just
        # threshold the histogram at the mean.
        thresh[s] = sobel[s] < np.mean(sobel[s])
        thresh[s] = ndimage.binary_erosion(thresh[s], structure=structure)
        transform[s] = ndimage.distance_transform_cdt(thresh[s])
        # Remove missing sections from distance transform.
        transform[s][image_slice == 0] = 0
        logging.debug('Finding local maxima of image shape %s', image_data.shape)
        skmax[s] = morphology.thin(morphology.extrema.local_maxima(transform[s]))

    if visualize:
        viewer = WrappedViewer()
        viewer.add(image_data, name='Image')
        viewer.add(sobel, name='Filtered')
        viewer.add(thresh.astype(float), name='Thresholded')
        viewer.add(transform.astype(float), name='Distance')
        viewer.add(skmax, name='Seeds', shader=get_color_shader(0, normalized=False))
        viewer.print_view_prompt()

    mask = np.zeros(np.floor_divide(min_sep, resolution) + 1)
    mask[0, 0, 0] = 1
    seeds = np.transpose(np.nonzero(skmax))
    for seed in seeds:
        if skmax[tuple(seed)]:
            lim = np.minimum(mask.shape, skmax.shape - seed)
            skmax[tuple(map(slice, seed, seed + lim))] = mask[tuple(map(slice, lim))]

    seeds = np.transpose(np.nonzero(skmax))

    return seeds
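Hypothetical usage. make_prewitt is defined elsewhere in the source, so a trivial stand-in (ignoring the requested kernel size) is assumed here purely for illustration.

import numpy as np
from scipy import ndimage

def make_prewitt(size):  # hypothetical stand-in for the external helper
    return ndimage.prewitt

volume = np.random.rand(4, 64, 64).astype(np.float32)
seeds = intensity_distance_seeds(volume, resolution=np.array([40, 4, 4]),
                                 axis=0, erosion_radius=16, min_sep=24)
print(len(seeds))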
Example #28
def save_augmented_data(
    original_raw_data: np.ndarray,
    new_path: str,
    new_filename: str,
    mult_factor: int,
    write_period: int = 200,
    max_samples: int = 20000,
    data_types: Optional[List[str]] = None,
) -> None:
    """Augment existing examples with random transformations and periodically
    append the new signal, frequency and gradient data to file."""
    # TODO: Is this method finished? `data_types` is currently unused.
    if data_types is None:
        data_types = ["signal", "frequencies"]
    total_counter = 0
    write_curr = 0
    shape = (50, 50)
    new_path = os.path.join(new_path, new_filename)

    index_sig = nt.config["core"]["data_types"]["signal"]
    index_freq = nt.config["core"]["data_types"]["frequencies"]
    index_grad = nt.config["core"]["data_types"]["gradient"]
    n_indx = len(nt.config["core"]["data_types"])

    condensed_data_all = np.empty((n_indx, 0, np.prod(shape) + 1))

    original_images = np.squeeze(original_raw_data[index_sig, :, :-1])
    print(original_images.shape)
    original_labels = original_raw_data[:, :, -1][0]
    print(original_labels.shape)

    if not os.path.exists(new_path):
        np.save(new_path, condensed_data_all)

    stop = False
    for it in range(mult_factor):
        for orig_image, orig_label in zip(original_images, original_labels):
            #         print(orig_image.shape)
            orig_image = orig_image.reshape(50, 50)
            condensed_data = np.empty((n_indx, 1, np.prod(shape) + 1))

            new_img = random_transformation(orig_image, single=False)
            condensed_data[index_sig, 0, :] = np.append(new_img.flatten(), orig_label)

            dtrnd = sg.detrend(new_img, axis=0)
            dtrnd = sg.detrend(dtrnd, axis=1)

            frequencies_res = fp.frequencies2(dtrnd)
            frequencies_res = np.abs(fp.frequenciesshift(frequencies_res))
            data_frq = resize(
                frequencies_res, (50, 50), anti_aliasing=True, mode="constant"
            ).flatten()

            condensed_data[index_freq, 0, :] = np.append(data_frq, orig_label)
            #             labels_all.append(orig_label)

            grad = generic_gradient_magnitude(new_img, sobel)
            gradient_resized = resize(
                grad, shape, anti_aliasing=True, mode="constant"
            ).flatten()
            condensed_data[index_grad, 0, :] = np.append(gradient_resized, orig_label)

            condensed_data_all = np.append(condensed_data_all, condensed_data, axis=1)

            write_curr += 1
            total_counter += 1
            if write_curr >= write_period:
                # save to file

                n = list(condensed_data_all.shape)
                n[-1] += 1

                previous_data = np.load(new_path)

                all_data = np.append(previous_data, condensed_data_all, axis=1)

                np.save(new_path, all_data)

                condensed_data_all = np.empty((n_indx, 0, np.prod(shape) + 1))
                write_curr = 0
            if total_counter >= max_samples:
                stop = True
                break
        if stop:
            break

    previous_data = np.load(new_path)

    all_data = np.append(previous_data, condensed_data_all, axis=1)

    np.save(new_path, all_data)
Example #29
# Boundary RAG
directorio = Path(
    "C:/Users/Juan Ignacio/Documents/Movistar Cloud/TFM/img_muestra_y_destino/"
)
imagen1 = "prueba_slic_100_pequena.tif"
imagen2 = 'prueba_slic_100.tif'
ruta1 = directorio / imagen2

prueba1 = ji.read_tiff(ruta1)
numSegments = 2200000  # 1/30; 19000 for the 2D test; 100000 for the small slic100 image
superp = slic(prueba1, n_segments=numSegments, compactness=0.1,
              multichannel=False, convert2lab=False)  # superpixel over-segmentation
superp += 1
print('Number of SV: ', len(np.unique(superp)))

prueba1 = prueba1.astype('int64')  # Precision
mag = ndi.generic_gradient_magnitude(prueba1, ndi.sobel,
                                     float)  # Float for rag_boundary
#mag *= 255.0 / np.max(mag)  # normalize (Q&D)
ragb = graph.rag_boundary(superp, mag, connectivity=1)
# CLUST "CORTAR"
umbralc = 1000
superp_co = graph.cut_threshold(superp, ragb, umbralc)
print('Intermediate Number of SV: ', len(np.unique(superp_co)))
superp_co += 1
# CLUST "UNIR"
umbralb = 3500
ragb2 = graph.rag_boundary(superp_co, mag, connectivity=1)
superp_un = graph.merge_hierarchical(superp_co,
                                     ragb2,
                                     umbralb,
                                     in_place_merge=True,
                                     rag_copy=False,
Example #30
def gradient(a):
    a = ndimage.gaussian_filter(a, 1.5)
    a = ndimage.generic_gradient_magnitude(a, ndimage.sobel)
    a = numpy.abs(a)
    return a
Example #31
        help="Use canny filter [default]",
        default=True, action='store_true')
    parser.add_option("--nocanny", dest="canny",
        help="Don't use canny filter",
        action='store_false')
    (options, args) = parser.parse_args()

    if len(args) != 2:
        parser.error("Incorrect number of arguments")

    inim = volumeFromFile(args[0], dtype='ushort')
    outim = volumeFromInstance(inim, args[1])

    ''' 3D Sobel filter (doesn't always work) '''
    if options.sobel:
        outim.data[::] = ndimage.generic_gradient_magnitude(inim.data, ndimage.sobel)
        options.canny = 0

    ''' 2D Canny filter '''
    # http://scipy-lectures.github.io/advanced/image_processing/auto_examples/plot_canny.html
    # canny(image, sigma=1.0, low_threshold=0.1, high_threshold=0.2, mask=None)
    if options.canny:
        for i in range(inim.sizes[0]):
            t = inim.getHyperslab((i,0,0),(1,inim.sizes[1],inim.sizes[2]))
            t.shape = (inim.sizes[1], inim.sizes[2])
            c = filter.canny(t, sigma=options.sigma)
            outim.data[i::] = c
                      
    if options.canny or options.sobel:
        outim.writeFile()
        outim.closeVolume()
Example #32
import os
import nibabel as nib
import numpy as np
from scipy.ndimage import sobel, generic_gradient_magnitude

walk_dir = '/home/dieudonnem/hpc/out/comparaison/suiter_cnn/dataset_sence/sub-1/mask'

for root, subdirs, files in os.walk(walk_dir):
    for name in files:
        # input
        path_input = os.path.join(root, name)
        img = nib.load(path_input)
        data = img.get_fdata()
        print(path_input, '>> load')
        # process
        edge = generic_gradient_magnitude(data, sobel)
        edge[edge < 0.7 * np.max(edge)] = 0  # cleaning
        print(path_input, '>> processed')
        # output
        path_output = path_input.split('mask')[0] + 'edge' + path_input.split(
            'mask')[1]
        os.makedirs(os.path.dirname(path_output), exist_ok=True)
        print(path_output, '>> created')
        # saving
        new_img = nib.Nifti1Image(edge, img.affine, img.header)
        nib.save(new_img, path_output)
        print(path_output, '>> save')
def create_mask(input_dir, soft, mask_flag=True, edge_flag=True):
    """
    :param input_dir: folder input where there are the folder img
    :param soft: you must chose between the stings 'cnn' or 'suit' or 'suiter'
    :param mask_flag : Boolean input. set it to True if you want mask saving
    :param edge_flag : Boolean input. set it to True if you want edge saving
    :return: all the mask coresponding to all output label of the specified soft hpc>>out>>'soft'>>dataset_sence>>
                sub-1>>derivative>>mask>>'img_folder'  >> all the mask
    """
    print(soft_select.get(soft))
    list_folder = os.listdir(input_dir)
    list_folder.sort()
    for folder in list_folder:
        pref = soft_select.get(soft)[1]
        img = [
            x for x in os.listdir(os.path.join(input_dir, folder))
            if x.startswith(pref)
        ]
        img = img[0]
        mri = nib.load(os.path.join(input_dir, folder, img))
        data = mri.get_fdata()
        label = np.unique(data)
        for lab in label:
            if mask_flag:
                ### mask
                mask = np.zeros(data.shape)
                mask[data == lab] = 1
                mask = nib.Nifti1Image(mask, mri.affine, mri.header)
                # save mask
                os.makedirs(os.path.join('/home/dieudonnem/hpc/out/', soft,
                                         'dataset_sence/sub-1', 'derivative',
                                         'mask', folder),
                            exist_ok=True)
                set_name = soft_select.get(soft)[0]
                name = set_name.get(lab)
                nib.save(
                    mask,
                    os.path.join('/home/dieudonnem/hpc/out/', soft,
                                 'dataset_sence/sub-1', 'derivative', 'mask',
                                 folder, name))
                print(soft, '>>', folder, '>>', name, 'mask saved')
            if edge_flag:
                ### edge
                # select area with specified label
                area = np.zeros(data.shape)
                area[data == lab] = lab
                # find edge of the area
                edge = generic_gradient_magnitude(area, sobel)
                # clean edge
                edge[edge < 0.5 * np.max(edge)] = 0
                new_img = nib.Nifti1Image(edge, mri.affine, mri.header)
                # save edge
                os.makedirs(os.path.join('/home/dieudonnem/hpc/out/', soft,
                                         'dataset_sence/sub-1', 'derivative',
                                         'edge', folder),
                            exist_ok=True)
                set_name = soft_select.get(soft)[0]
                name = set_name.get(lab)
                nib.save(
                    new_img,
                    os.path.join('/home/dieudonnem/hpc/out/', soft,
                                 'dataset_sence/sub-1', 'derivative', 'edge',
                                 folder, name))
                print(soft, '>>', folder, '>>', name, 'edge saved')
Example #34
def plot(input_path, file_name, output_path):
    # data = np.fromfile('t3/reconstruction_sarlo.rec', dtype='float32', sep='')  # modify this to read the file
    data = np.fromfile(input_path + '/' + file_name, dtype='float32', sep='')
    data = data.reshape([array_size_z, array_size_y, array_size_x])

    # apply a 3-D Sobel filter
    data = nd.generic_gradient_magnitude(data, nd.sobel)


# plot 1 and title
    fig1 = plt.figure()
    fig1.suptitle(file_name)

# turn into gray figure
    plt.gray()

    subfig1 = fig1.add_subplot(2,2,1)
    subfig1.imshow(data[:,:,100])

    subfig2 = fig1.add_subplot(2,2,2)
    subfig2.imshow(data[:,:,300])

    subfig3 = fig1.add_subplot(2,2,3)
    subfig3.imshow(data[:,:,500])

    subfig4 = fig1.add_subplot(2,2,4)
    subfig4.imshow(data[:,:,700])

# save image to the output    
    plt.savefig(output_path+'/'+'sobel_X_'+file_name+'.png', bbox_inches=0)


# plot 3 and title
    fig3 = plt.figure()
    fig3.suptitle(file_name)

    subfig7 = fig3.add_subplot(3,1,1)
    subfig7.imshow(data[100,:,:])

    subfig8 = fig3.add_subplot(3,1,2)
    subfig8.imshow(data[200,:,:])

    subfig9 = fig3.add_subplot(3,1,3)
    subfig9.imshow(data[300,:,:])

# save image to the output    
    plt.savefig(output_path+'/'+'sobel_Z_'+file_name+'.png', bbox_inches=0)

# plot 2 and title
    fig2 = plt.figure()
    fig2.suptitle(file_name)
    
    subfig5 = fig2.add_subplot(2,1,1)
    subfig5.imshow(data[:,100,:])
    #subfig5.set_title('100')

    subfig6 = fig2.add_subplot(2,1,2)
    subfig6.imshow(data[:,200,:])
   # subfig6.set_title('200')

# save image to the output    
    plt.savefig(output_path+'/'+'sobel_Y_'+file_name+'.png', bbox_inches=0)
    
    # show the fig2 for reference
    fig2.show()