Example No. 1
def detect_fluid_contours(input_image, max_rpe_row, min_ilm_row):
    md_img = apply_median_filter(input_image)
    gray = cv.cvtColor(md_img, cv.COLOR_BGR2GRAY)

    chv = chan_vese(gray,
                    mu=0.25,
                    lambda1=1,
                    lambda2=1,
                    tol=1e-3,
                    max_iter=200,
                    dt=0.5,
                    init_level_set="checkerboard",
                    extended_output=True)

    data = chv[1].copy()

    im_max = 255
    data = abs(data.astype(np.float64) /
               data.max())  # normalize the data to 0 - 1
    data = im_max * data  # Now scale by 255
    ls_img = data.astype(np.uint8)

    temp = apply_k_mean_clustering(ls_img)
    temp = apply_canny_edge(temp)
    # out_img2, areas = find_ret_contours(temp, max_rpe_row, min_ilm_row)

    return temp
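Below is a minimal, self-contained sketch of just the Chan-Vese level-set step used above, assuming only NumPy and scikit-image; the project-specific helpers (apply_median_filter, apply_k_mean_clustering, apply_canny_edge) are omitted, and a downsampled sample image from skimage.data stands in for the OCT input.

import numpy as np
from skimage import data as skdata
from skimage.segmentation import chan_vese

gray = skdata.camera()[::4, ::4] / 255.0  # downsampled sample image in place of the OCT scan
segmentation, phi, energies = chan_vese(gray, mu=0.25, lambda1=1, lambda2=1,
                                        tol=1e-3, dt=0.5,
                                        init_level_set="checkerboard",
                                        extended_output=True)

# Rescale the level-set function phi to 0-255, mirroring detect_fluid_contours.
ls = np.abs(phi.astype(np.float64) / phi.max())
ls_img = (255 * ls).astype(np.uint8)
print(ls_img.shape, ls_img.dtype)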
Example No. 2
        def InterpolateSingleFrameCellFromDict(track, missingframe, dict):

            coords_before = dict[track][missingframe - 1]['Coordinates']
            coords_after = dict[track][missingframe + 1]['Coordinates']

            img_before = DDN.Utils.makeImageFromCoordinates(coords_before)
            img_after = DDN.Utils.makeImageFromCoordinates(coords_after)
            interpolated = np.zeros(shape)

            for i in range(0, 154):
                interpolated[i] = interp_shape(img_before[i], img_after[i],
                                               0.5)

            data = interpolated > 0
            out = data.astype(np.uint8)
            out[out > 0] = 255
            props = regionprops(out)

            #from the props we need to return Area, Centroid and Coordinates
            Area = props[0]['Area']
            Centroid = props[0]['Centroid']
            Coordinates = props[0]['Coordinates']

            dict[track][missingframe] = {}
            dict[track][missingframe]['Area'] = Area
            dict[track][missingframe]['Centroid'] = Centroid
            dict[track][missingframe]['Coordinates'] = Coordinates
            cell_track[missingframe] = 999
            dict[track]['Track'] = cell_track
            print('Fixed track ' + str(track) + ' at timepoint ' +
                  str(missingframe))

            return dict
Example No. 3
def threshold_yen(data):
    tot = np.sum(data)
    data_f = data.astype(np.float64)

    norm_hist = data_f / tot
    # cumulative probability functions
    P = np.zeros(256)
    P1 = np.zeros(256)
    P2 = np.zeros(256)

    P[0] = norm_hist[0]
    P1[0] = norm_hist[0]**2
    for i in range(1, 256):
        P[i] = P[i - 1] + norm_hist[i]
        P1[i] = P1[i - 1] + (norm_hist[i]**2)

    for i in reversed(range(255)):
        P2[i] = P2[i + 1] + (norm_hist[i + 1]**2)

    threshold = -1
    mmax = float('-inf')  # start below any attainable criterion value
    for i in range(256):
        c1 = math.log(P1[i] * P2[i]) if P1[i] * P2[i] > 0.0 else 0.0
        c2 = math.log(P[i] *
                      (1.0 - P[i])) if P[i] * (1.0 - P[i]) > 0.0 else 0.0
        aux = -1.0 * c1 + 2.0 * c2
        if aux > mmax:
            mmax = aux
            threshold = i

    return threshold
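A short usage sketch, assuming threshold_yen receives a 256-bin grayscale histogram (as its indexing implies); the random image and the imports below are placeholders for whatever the surrounding module provides.

import math
import sys

import numpy as np

img = (np.random.rand(128, 128) * 255).astype(np.uint8)  # placeholder 8-bit image
hist = np.bincount(img.ravel(), minlength=256)
t = threshold_yen(hist)
binary = img > t
print('Yen threshold:', t)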
Example No. 4
def saveImageFile(data, savefile):
    """
    save a numpy matrix as a tiff image
    """
    out = data.astype(np.uint8)
    writer = omeTifWriter.OmeTifWriter(savefile)
    writer.save(out)
Example No. 5
def readMSTARFile(filename):
    # raw_input('Enter the mstar file to read: ')

    # print filename

    f = open(filename, 'rb')

    a = b''

    phoenix_header = []

    tmp = 'PhoenixHeaderVer'
    while tmp.encode() not in a:
        a = f.readline()

    a = f.readline()

    tmp = 'EndofPhoenixHeader'
    while tmp.encode() not in a:
        phoenix_header.append(a)
        a = f.readline()

    data = np.fromfile(f, dtype='>f4')

    # print data.shape

    # magdata = data[:128*128]
    # phasedata = data[128*128:]

    # if you want to print an image
    # imdata = magdata*255

    # imdata = imdata.astype('uint8')

    targetSerNum = '-'

    for line in phoenix_header:
        # print line
        if ('TargetType').encode() in line:
            targetType = line.strip().split(b'=')[1].strip()
        elif ('TargetSerNum').encode() in line:
            targetSerNum = line.strip().split(b'=')[1].strip()
        elif ('NumberOfColumns').encode() in line:
            cols = int(line.strip().split(b'=')[1].strip())
        elif ('NumberOfRows').encode() in line:
            rows = int(line.strip().split(b'=')[1].strip())

    label = targetType  # + '_' + targetSerNum

    roffset = (rows - 128) // 2
    coffset = (cols - 128) // 2
    data = data[:rows * cols]
    data = data.reshape((rows, cols))
    data = data[roffset:(128 + roffset), coffset:(128 + coffset)]

    # plt.imshow(data)
    # plt.show()

    return data.astype('float32'), label, targetSerNum
Example No. 6
def saveImageFileBinary(data, savefile):
    """
    save a numpy matrix as a tiff image in binary format (all non-zero pixels = 255)
    """
    data = data > 0
    out = data.astype(np.uint8)
    out[out > 0] = 255
    writer = omeTifWriter.OmeTifWriter(savefile)
    writer.save(out)
Example No. 7
def magnitude_spectrum0(gray_img):
    f = np.fft.fft2(gray_img)  # 2D fast Fourier transform
    fshift = np.fft.fftshift(f)  # shift the low-frequency components to the center of the image
    data = 20 * np.log(np.abs(fshift))  # log-magnitude of the complex spectrum: the frequency-domain energy spectrum
    # We only care about the spectrum's intensity along each direction, which reflects how
    # densely pixels line up in that direction; normalize the spectrum to a 0-255 grayscale image.
    data = (255 / np.max(data)) * data
    data = data.astype(np.uint8)  # convert to an 8-bit channel, back to a binary-like image
    return data
Example No. 8
def magnitude_spectrum(gray_img):
    # perform fast Fourier transform of the image to find frequency information
    f = np.fft.fft2(gray_img)  # 2D fast Fourier transform
    fshift = np.fft.fftshift(f)  # shift the low-frequency components to the center of the image
    data = 20 * np.log(np.abs(fshift))  # log-magnitude of the complex spectrum: the frequency-domain energy spectrum
    # data = np.log(np.abs(fshift))  # alternative: log-magnitude without the factor of 20
    # We only care about the spectrum's intensity along each direction, which reflects how
    # densely pixels line up in that direction; the 0-255 normalization below is left commented out.
    # data = (250/np.max(data))*data
    data = data.astype(np.uint8)  # convert to an 8-bit channel, back to a binary-like image
    # invert black and white to obtain the final spectrum image
    data = cv2.bitwise_not(data)  # invert black and white pixels
    return data
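A minimal usage sketch for the two spectrum functions above; 'input.png' is a placeholder path, and the cv2/numpy imports are the ones the snippets already rely on.

import cv2
import numpy as np

gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)  # placeholder file
spectrum = magnitude_spectrum(gray)
cv2.imwrite('spectrum.png', spectrum)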
Example No. 9
    def entropy(self, data):
        if (self.entropyMethod != 'probFreq'):
            data = data * 1000
            data = np.abs(data.astype(int))
            data[data < 0] = 0.
            hist = np.bincount(data)
            data2 = hist[np.where(hist > 0)]
            p_data = data2 / float(len(data))  # calculates the probabilities
        else:
            p_data = data / (np.sum(data))
            p_data = p_data[np.where(p_data > 0)]
        entropy = sc.stats.entropy(p_data)  # input probabilities to get the entropy
        return entropy
Example No. 10
def runAOD(img):
    model = 'AOD_Net.caffemodel'

    net = caffe.Net('DeployT.prototxt', model, caffe.TEST)
    batchdata = []
    data = img / 255.0
    data = data.transpose((2, 0, 1))
    batchdata.append(data)
    net.blobs['data'].data[...] = batchdata

    net.forward()

    data = net.blobs['sum'].data[0]
    data = data.transpose((1, 2, 0))
    data = data * 255.0
    return data.astype(np.uint8)
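A hedged usage sketch for runAOD; 'hazy.jpg' and the 'DeployT.prototxt' / 'AOD_Net.caffemodel' files the function loads are placeholders that must exist locally, and the caffe/numpy imports are assumed from the surrounding module.

import cv2

hazy = cv2.imread('hazy.jpg')      # BGR uint8 image
dehazed = runAOD(hazy)             # the function scales the input to 0-1 internally
cv2.imwrite('dehazed.jpg', dehazed)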
Example No. 11
    def loadPPFilesV6(path, files):
        # Possibly not correct version - from preprocessV6,
        # likely updated in later scripts
        if isinstance(files, str):
            files = [files]
        elif isinstance(files, pd.DataFrame):
            files = set(files.iloc[:, 0])
        else:
            files = set(files)

        nFiles = len(files)
        loaded = np.zeros([nFiles, dims[0], dims[1], dims[2]],
                          dtype=np.float16)

        # Note: 'loaded' is allocated but never filled here; only the last
        # file read ends up in 'data' and is returned.
        for fn in files:
            data = np.load(path + fn)['arr_0']

        return (data.astype(np.int16))
Example No. 12
def prepare_data(data, dilate_iterations=1, sigma=0.5):
    """Returns the given binary data, its skeleton and the thickened skeleton.

    The skeleton of a given 2D or 3D array is computed, then it is thickened
    using morphological dilation with `dilate_iterations` and smoothed with
    help of Gaussian filter of specified `sigma`.

    Parameters
    ----------
    data : ndarray
        2D or 3D binary array which will be processed.

    dilate_iterations : integer
        Indicates the number of iterations for thickening the skeleton.

    sigma : float
        Indicates the sigma of Gaussian filter used in smoothing of skeleton.

    Returns
    -------
    arrays : tuple of 2D or 3D arrays
        The original array, its skeleton and the thickened skeleton.
    """
    data_8bit = data.astype(np.uint8)
    data_8bit = ndi.binary_fill_holes(data_8bit).astype(np.uint8)

    if data.ndim == 3:
        skeleton = data  #morphology.skeletonize_3d(data_8bit)
        plt.imshow(skeleton)
    elif data.ndim == 2:
        skeleton = data  # morphology.skeletonize(data_8bit)
    else:
        raise ValueError(
            'Incorrect number of data dimensions: only 2D and 3D arrays are supported.'
        )

    skeleton_thick = ndi.binary_dilation(
        skeleton, iterations=dilate_iterations).astype(np.float32)
    skeleton_thick = ndi.gaussian_filter(skeleton_thick, sigma)
    plt.imshow(skeleton_thick)

    return (data, skeleton, skeleton_thick)
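A small usage sketch with a synthetic 2D binary image; the dilate_iterations and sigma values are illustrative, and numpy, scipy.ndimage (as ndi) and matplotlib.pyplot (as plt) are assumed to be imported as in the function above.

import numpy as np

mask = np.zeros((64, 64), dtype=bool)
mask[20:44, 30:34] = True  # a thin bar to thicken and smooth
original, skeleton, skeleton_thick = prepare_data(mask, dilate_iterations=2, sigma=1.0)
print(skeleton_thick.shape, skeleton_thick.dtype)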
Example No. 13
from scipy import signal
from scipy.ndimage import gaussian_filter
import scipy.stats as st

from scipy.signal import convolve2d as conv2

from skimage import color, data, restoration
import scipy
import scipy.signal
from scipy.stats import norm

import numpy as np
import nrrd

# filename = 'testdata.nrrd'
data, header = nrrd.read('../../../data/16-05-05/000.nrrd')  # nrrd.read returns (data, header)
data = data.astype('float')
print(data.shape)

MIP = data[0, 0:, 0:, 150]
MIP /= np.max(MIP)

# MIP = np.random.rand(MIP.shape[0], MIP.shape[1])

# print(MIP.shape)
# for i in range(0,200):
#   MIP[100,i] = 2635
# MIP[100,i+1] = 2635
# for i in range (450):
#   slice = data[0,:,:,i]
#   MIP += slice
# print(MIP.shape)
Example No. 14
def transformDataTo255(data):
    min_val = np.amin(data)
    data -= min_val
    max_val = np.amax(data)
    data = (data / max_val) * 255
    return data.astype(int)
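A short usage sketch; note that transformDataTo255 shifts its input in place via the in-place subtraction, so pass a copy if the original array must be preserved.

import numpy as np

arr = np.random.rand(4, 4) * 10.0
scaled = transformDataTo255(arr.copy())
print(scaled.min(), scaled.max())  # 0 ... 255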
Example No. 15
        def InterpolateTwoFramesFromDict(track, missingframe1, missingframe2):

            coords_before = tracked_cell_dict[track][missingframe1 -
                                                     1]['Coordinates']
            coords_after = tracked_cell_dict[track][missingframe2 +
                                                    1]['Coordinates']

            img_before = makeImageFromCoordinates(coords_before)
            img_after = makeImageFromCoordinates(coords_after)
            interpolated_middle = np.zeros(shape)

            #saveImageFile(img_before, 'output\\t\\1.tif')
            #saveImageFile(img_after, 'output\\t\\4.tif')

            for i in range(0, 154):
                interpolated_middle[i] = interp_shape(img_before[i],
                                                      img_after[i], 0.5)

            data = interpolated_middle > 0
            out = data.astype(np.uint8)
            out[out > 0] = 255
            props = regionprops(out)

            interpolated_middle_img = makeImageFromCoordinates(
                props[0]['Coordinates'])

            interpolated_1 = np.zeros(shape)
            interpolated_2 = np.zeros(shape)

            for i in range(0, 154):
                interpolated_1[i] = interp_shape(img_before[i],
                                                 interpolated_middle_img[i],
                                                 0.5)

            for i in range(0, 154):
                interpolated_2[i] = interp_shape(interpolated_middle_img[i],
                                                 img_after[i], 0.5)

            data = interpolated_1 > 0
            out = data.astype(np.uint8)
            out[out > 0] = 255
            props1 = regionprops(out)

            #saveImageFile(out, 'output\\t\\2.tif')

            data = interpolated_2 > 0
            out = data.astype(np.uint8)
            out[out > 0] = 255
            props2 = regionprops(out)

            #saveImageFile(out, 'output\\t\\3.tif')

            #from the props we need to return Area, Centroid and Coordinates
            Area = props1[0]['Area']
            Centroid = props1[0]['Centroid']
            Coordinates = props1[0]['Coordinates']

            tracked_cell_dict[track][missingframe1] = {}
            tracked_cell_dict[track][missingframe1]['Area'] = Area
            tracked_cell_dict[track][missingframe1]['Centroid'] = Centroid
            tracked_cell_dict[track][missingframe1][
                'Coordinates'] = Coordinates
            cell_track[missingframe1] = 999
            tracked_cell_dict[track]['Track'] = cell_track

            Area = props2[0]['Area']
            Centroid = props2[0]['Centroid']
            Coordinates = props2[0]['Coordinates']

            tracked_cell_dict[track][missingframe2] = {}
            tracked_cell_dict[track][missingframe2]['Area'] = Area
            tracked_cell_dict[track][missingframe2]['Centroid'] = Centroid
            tracked_cell_dict[track][missingframe2][
                'Coordinates'] = Coordinates
            cell_track[missingframe2] = 999
            tracked_cell_dict[track]['Track'] = cell_track

            print('Fixed track ' + str(track) + ' at timepoints ' +
                  str(missingframe1) + ' ' + str(missingframe2))
Example No. 16
def function_z(image_array):
    data = (image_array - image_array.min()) / (image_array.max() -
                                                image_array.min()) * 255
    dataarray = data.astype(np.uint8)
    return dataarray
Example No. 17
#### Log out of the system ####
bs.logout()

a = np.arange(1,len(df['open'])+1)
df.insert(2,'index',a)
df = df.fillna(0)
print(df.columns.values.tolist())
data = df.values
data = data[:, 2:]
for i in range(data.shape[0]):
    for j in range(data.shape[1]):
        # print([i, j])
        if data[i, j] == '':
            data[i, j] = 0
        data[i, j] = float(data[i, j])
data = data.astype(float)


rnn_unit = 10

lstm_layers = 2

input_size = 5

output_size = 1
lr = 0.001

save_name = '.model.ckpt'
cpt_name = '/home/cqiuac/'

def get_train_data(batch_size=60, time_step=20, train_begin=0, train_end=2000):
Example No. 18
        if (dmax > 0.6 * globalMax) or np.isclose(seeds, s1).any():
            #print('123')
            final_mask = skimage.segmentation.flood_fill(
                final_mask, start, 200)
        #t = final_mask
        #t = t.astype(np.uint8)
        #omg = Image.fromarray(t)
        #omg.show()
        #return final_mask
        final_mask[final_mask != 200] = 0
        final_mask[final_mask == 200] = 255
        #t = final_mask
        #t = t.astype(np.uint8)
        #omg = Image.fromarray(t)
        #omg.show()
        print(dmax)
    return final_mask


im = Image.open(
    "/Users/vladimirlisovoi/desktop/учеба/diplom/W-Net-Pytorch/1.png")
data = np.array(im)
fd = data.astype(np.float64)
fd = rgb2gs(fd)
fd = fesi(fd)
#fd = vid(fd)
fd = fd.astype(np.uint8)
omg = Image.fromarray(fd)
omg.show()
omg.save("/Users/vladimirlisovoi/desktop/out2.png")
Example No. 19
print('\n')
print('--------------------------------\n')
print('--------------- 3D -------------\n')
print('--------------------------------\n')

trf3D = pysparse.MRTransform3D(type_of_multiresolution_transform=type_mr_3D,
                               type_of_lifting_transform=3,
                               number_of_scales=nb_scales,
                               iter=3,
                               type_of_filters=1,
                               use_l2_norm=False,
                               nb_procs=0,
                               verbose=1)

analysis_data, nb_band_per_scale = trf3D.transform(data.astype(np.double),
                                                   save=False)

print('-----------------------------------')

print('NB BANDS 3D:', len(analysis_data))
print('SHAPE FOR EACH BAND', [s.shape for s in analysis_data])
print('BANDS SHAPE:', nb_band_per_scale)
print('-----------------------------------')

# np.save('/volatile/bsarthou/cube_trans_mallat_bind.npy',
#         np.array(analysis_data))

# coeffs, coeffs_shape = flatten(analysis_data)
# print('SHAPE:', coeffs_shape)
# print(coeffs)