Example #1
def load_raw(path: str, site: str, multipage: bool = True):
    """Raw data loader

    This function takes a path to an experiment folder and 
    loads specified site data into a numpy array.

    Output array will be of shape (n_frames, 3, 1, x_size, y_size), where
    channel 0 (second dimension) is phase, channel 1 is retardance and
    channel 2 is brightfield

    Args:
        path (str):
            path to the experiment folder
        site (str):
            position type ex: "C5-Site_0", "pos0", etc.
        multipage (bool, optional): default=True
            whether the folder contains stabilized multipage tiffs;
            only multipage tiffs are supported at present

    Returns:
        np.array: numpy array as described above

    """
    if not multipage:
        raise NotImplementedError(
            "loading non-stabilized, non-multipage tiffs not supported")

    fullpath = path + '/' + site

    multi_tif_retard = 'img__Retardance__stabilized.tif'
    multi_tif_phase = 'img_Phase2D_stabilized.tif'
    multi_tif_bf = 'img_Brightfield_computed_stabilized.tif'

    _, ret = cv2.imreadmulti(fullpath + '/' + multi_tif_retard,
                             flags=cv2.IMREAD_ANYDEPTH)
    _, phase = cv2.imreadmulti(fullpath + '/' + multi_tif_phase,
                               flags=cv2.IMREAD_ANYDEPTH)
    _, bf = cv2.imreadmulti(fullpath + '/' + multi_tif_bf,
                            flags=cv2.IMREAD_ANYDEPTH)
    ret = np.array(ret)
    phase = np.array(phase)
    bf = np.array(bf)

    assert ret.shape == phase.shape == bf.shape

    n_frame, x_size, y_size = ret.shape[:3]
    out = np.empty(shape=(n_frame, 3, 1, x_size, y_size))
    out[:, 0, 0] = phase
    out[:, 1, 0] = ret
    out[:, 2, 0] = bf

    return out
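A minimal usage sketch for the loader above; the experiment path and site name below are placeholders, not taken from the original project.

# Hypothetical call: adjust the experiment folder and site name to your data.
stack = load_raw('/data/experiment_01', 'C5-Site_0')
# channel order along axis 1: 0 = phase, 1 = retardance, 2 = brightfield
print(stack.shape)  # e.g. (n_frames, 3, 1, 2048, 2048)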
Example #2
def read_tif(path,
             rgb=False,
             eq_method=None,
             channel_mode=None,
             clahe_kwargs={
                 'clipLimit': 20,
                 'tileGridSize': (8, 8)
             }):
    ims = cv2.imreadmulti(path)[1]
    if not ims:
        print(f'{path} failed to read.')
        return False
    for i, im in enumerate(ims):
        if eq_method:
            ims[i] = im_equalize(ims[i],
                                 method=eq_method,
                                 clahe_kwargs=clahe_kwargs)
        if rgb:
            ims[i] = cv2.cvtColor(ims[i], cv2.COLOR_GRAY2RGB)
    if channel_mode == 'max':
        return np.clip(np.stack(ims).max(axis=0), 0, 255)
    elif channel_mode == 'stack':
        return np.stack(ims)

    return ims
Example #3
def findLines(imgPath, page):

    imgs = cv2.imreadmulti(imgPath)
    img = imgs[1][page]
    blur = cv2.medianBlur(img, 3)
    thresh = 150

    #Binarization of image - Make it strictly black or white 0 or 255
    pre = cv2.threshold(blur, 210, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

    height = img.shape[0]
    width = img.shape[1]
    vlines = 0

    #Identify vertical lines
    for i in range(width):
        single = pre[:, i:i + 1][:, 0]

        if len(single) > 0:
            vLines1 = findZeroPattern(single)
            for line in vLines1:
                if line[1] - line[0] >= thresh:
                    vlines += 1

    return vlines
Example #4
def Heatmap(videofile, threshold, scale_percent, img_path, img_name,
            outliers=True):
    """Takes frequency accumulation array from
    GenerateHeatMap.GetFreqArray() and plots it as
    a colored meshgrid.

    Yellow pixels are at max frequency, blue pixels are
    minimum frequency (cmap = 'plasma')"""
    # Reading video file
    ret, img = cv2.imreadmulti(videofile, flags=cv2.IMREAD_GRAYSCALE)
    # obtaining frequency array
    if outliers is True:
        z = GetFreqArray(videofile, threshold, scale_percent)
    else:
        z = GetFreqArray(videofile, threshold, scale_percent, outliers=False)
    # Generating x and y axes in shape of image frame
    width = int(img[0].shape[1] * scale_percent / 100)
    height = int(img[0].shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    frame_resized = cv2.resize(img[0], dim, interpolation=cv2.INTER_AREA)
    pixel_X = np.arange(0, frame_resized.shape[1])
    pixel_Y = np.arange(0, frame_resized.shape[0])
    # Mapping frequency array onto the x and y axes
    fig = plt.pcolormesh(pixel_X, pixel_Y, z, cmap='plasma')
    plt.xlabel('Pixel Count')
    plt.ylabel('Pixel Count')
    plt.title('Frequency Heat Map')
    # picture is saved in file location designated by user
    plt.savefig(img_path + '/' + img_name + '.png', bbox_inches='tight')
    return fig
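A hedged usage sketch for Heatmap: it assumes GetFreqArray is importable in the same module and that the TIFF stack exists at the given path; all paths and parameter values are placeholders.

# Hypothetical call; the stack path, threshold and output names are assumptions.
fig = Heatmap('/data/events_stack.tif', threshold=5, scale_percent=50,
              img_path='/data/output', img_name='heatmap_run1')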
Example #5
def imortTiftoVideo(filePath):
    numChan = 2
    numZoomLevles = 4
    etval, mats = cv2.imreadmulti(filePath)
    #8 images for each frame
    #TODO Generalize
    allFrames = []
    for matIndex in range(0, len(mats), 8):
        frame = []
        optChan = []
        #optChan.append(mats[matIndex])
        #optChan.append(mats[matIndex+1])
        #optChan.append(mats[matIndex+2])
        optChan.append(mats[matIndex + 3])
        floChan = []
        #floChan.append(mats[matIndex+4])
        floChan.append(mats[matIndex + 5])
        #floChan.append(mats[matIndex+6])
        #floChan.append(mats[matIndex+7])
        frame.append(optChan)
        frame.append(floChan)
        allFrames.append(frame)
    video = Video(allFrames)
    del mats
    return (video)
Example #6
def loadData(filePath, series, cropUppLeft=-1, cropDownRight=-1):
    matList = []
    numZoomIn = 4
    numChan = 2
    #loading channel 0
    for channel in range(numChan):
        #List containg all zoom in levels
        zoomList = []
        for zoomLevel in range(numZoomIn):
            cleanWorking = "rm ./YeastTrack/VideoData/WorkingData/*"
            os.system(cleanWorking)

            comand = "./bftools/bfconvert -nolookup"
            seriesFlag = " -series " + str(series)
            channelFlag = " -channel " + str(channel)
            zoomFlag = " -z " + str(zoomLevel)
            cropFlag = " -crop 0,0,512,512"
            fileFlag = " " + filePath
            tifPath = " ./YeastTrack/VideoData/WorkingData/working.tif"

            cmd = comand + seriesFlag + cropFlag + channelFlag + zoomFlag + fileFlag + tifPath
            os.system(cmd)
            path = "./YeastTrack/VideoData/WorkingData/working.tif"
            retval, mats = cv2.imreadmulti(path)
            zoomList.append(mats)
        matList.append(zoomList)

    return (matList)
Example #7
 def test_GetFreqArray(self):
     """Tests GetFreqArray functionality"""
     test_ret, test_img = cv2.imreadmulti(test_vid1,
                                          flags=cv2.IMREAD_GRAYSCALE)
     # Setting Resizing Dimensions
     scale_percent = 1
     width = int(test_img[0].shape[1] * scale_percent / 100)
     height = int(test_img[0].shape[0] * scale_percent / 100)
     dim = (width, height)
     test_img1_resized = cv2.resize(test_img[0],
                                    dim,
                                    interpolation=cv2.INTER_AREA)
     test_thresh = 3
     test_fn1 = GenerateHeatMap.GetFreqArray(test_vid1,
                                             test_thresh,
                                             scale_percent,
                                             outliers=False)
     test_fn2 = GenerateHeatMap.GetFreqArray(test_vid1,
                                             test_thresh,
                                             scale_percent,
                                             outliers=True)
     # Testing output size
     assert len(test_fn1) == len(test_img1_resized),\
         "Output1 is the wrong shape"
     assert len(test_fn2) == len(test_img1_resized),\
         "Output2 is the wrong shape"
     # Testing output type
     assert type(test_fn1) == np.ndarray,\
         "Output1 is the wrong type"
     assert type(test_fn2) == np.ndarray,\
         "Output2 is the wrong type"
Example #8
def tiffstack_to_avi(path, fps=8, save_dir=None):
    '''Saves the tiffstack at the local 'path' to a similarly named avi.
    TODO: simplify this io a bit.'''

    if save_dir is None:
        save_dir = path[:path.find('.')] + '.avi'
    start = time.time()
    boo, cap = cv.imreadmulti(path)
    # numpy shape is (rows, cols[, channels]): rows give the height, cols the width
    height = int(cap[0].shape[0])
    width = int(cap[0].shape[1])
    try:
        chnl_no = int(cap[0].shape[2])
    except IndexError:
        chnl_no = 0  # single-channel frames carry no depth axis
        print('Error: depth shape not given, returning 0')
    # uncompressed YUV 4:2:0 chroma subsampled
    fourcc = cv.VideoWriter_fourcc('I', '4', '2', '0')
    writer = cv.VideoWriter()
    retval = writer.open(save_dir, fourcc, fps, (width, height), 1)
    assert (writer.isOpened())  #assert the writer is properly initialized

    for i in range(len(cap)):
        #TODO: make this step faster by using something like the (missing) cv.GrabFrame command
        frame = cap[i]
        writer.write(frame)
    writer.release()
    end = time.time()
    print('{} seconds elapsed reading and writing video to avi.'.format(
        np.around(end - start)))
    return save_dir
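A brief usage sketch; the path is a placeholder, and the frames are assumed to be 8-bit BGR so that the color VideoWriter accepts them.

# Hypothetical call: converts the multipage TIFF to an AVI next to it.
avi_path = tiffstack_to_avi('/data/stack.tif', fps=8)
print('Wrote', avi_path)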
Example #9
 def test_GetIntensityArray(self):
     assert os.stat(test_vid).st_size > 0,\
         "File is empty"
     test_ret, test_img = cv2.imreadmulti(test_vid,
                                          flags=cv2.IMREAD_GRAYSCALE)
     # Setting Resizing Dimensions
     scale_percent = 1
     width = int(test_img[0].shape[1] * scale_percent / 100)
     height = int(test_img[0].shape[0] * scale_percent / 100)
     dim = (width, height)
     # Checking for valid dimensions
     self.assertNotIn(0, dim, msg="Invalid dimensions")
     test_img_resized = cv2.resize(test_img[0],
                                   dim,
                                   interpolation=cv2.INTER_AREA)
     test_thresh1 = 5
     test_thresh2 = 50000
     test_fn1 = GenerateIntensityMap.GetIntensityArray(
         test_vid, test_thresh1, scale_percent)
     test_fn2 = GenerateIntensityMap.GetIntensityArray(
         test_vid, test_thresh2, scale_percent)
     # Testing output size
     assert test_fn1.ndim > 0,\
         "Output 1 has incorrect dimensionality"
     assert test_fn2.ndim > 0,\
         "Output 2 has incorrect dimensionality"
     assert len(test_fn1) == len(test_img_resized),\
         "Output1 is the wrong shape"
     assert len(test_fn2) == len(test_img_resized),\
         "Output2 is the wrong shape"
     # Testing output type
     assert type(test_fn1) == np.ndarray,\
         "Output1 is the wrong type"
     assert type(test_fn2) == np.ndarray,\
         "Output2 is the wrong type"
Example #10
def tifftoPng(imageLocation):
    Images = []
    multiBoolean, Images = cv2.imreadmulti(imageLocation, Images)
    imageNames = []
    for i, image in enumerate(Images):
        cv2.imwrite(f'{imageLocation}_{i}.png', image)
        imageNames = [*imageNames, f'{imageLocation}_{i}.png']
    return imageNames
Example #11
def load_image(path, *, scale=0.41, max_size=None):
    _, frames = cv.imreadmulti(str(path), flags=cv.IMREAD_COLOR)  # TODO(thomasjo): Use cv.IMREAD_UNCHANGED instead?
    # frames = [preprocess_image(frame, scale, max_size) for frame in frames]

    main_image = frames[0]
    aux_images = frames[1:] if len(frames) > 1 else None

    return main_image, aux_images
Example #12
def skrap():
    path = "/home/klas/Documents/Chalmers/ExamensArbete/YeastTrack/VideoData/WorkingData/working.tif"

    retval, mats = cv2.imreadmulti(path)
    #retval, mats = cv2.imread(path)

    for i in range(len(mats)):
        cv2.imshow("Funka", mats[i])
        cv2.waitKey()
Example #13
def save_traj(k, output_path=None):
    input_path = DATA_ROOT + '/Data/DynamicPatches/%s/mg_traj_%s.tif' % (
        k.split('/')[0], k.split('/')[1])
    # images = tifffile.imread(input_path)
    _, images = cv2.imreadmulti(input_path, flags=cv2.IMREAD_ANYDEPTH)
    images = np.array(images)
    if output_path is None:
        output_path = './%s.gif' % (k[:9] + '_' + k[10:])
    imageio.mimsave(output_path, images)
    return
Example #14
 def load_images_from_folder(self, folder, is_img):
     images = []
     for filename in os.listdir(folder):
         if is_img: 
             img = cv2.imread(os.path.join(folder,filename))
             if img is not None: images.append(img)
         else: 
             _, masks = cv2.imreadmulti(os.path.join(folder,filename), [], cv2.IMREAD_ANYDEPTH)
             if len(masks) != 0: 
                 images.append(masks)
     return images
Example #15
	def build(self):
		path = r'D:\onedrive\program/for_liuyuan_rotation\data/test.tiff'
		ret,video=cv2.imreadmulti(path,flags=cv2.IMREAD_ANYDEPTH)
		demo=ImageStackCapture(stack=video)
		ret,img = demo.read()
		print(img)
		img = img/np.max(img)*255
		img = img.astype(np.uint8)
		cv2.imshow('img',cv2.resize(img,(512,512)))
		cv2.waitKey(0)
		return demo
Example #16
def check_image_size(img_path):
    print('Check image size of: ', img_path)
    # check if a folder of png/tif files or a single stack to load
    if os.path.isfile(img_path) == 1:
        filename, file_extension = os.path.splitext(img_path)
        if file_extension == '.h5':
            print('Reading H5 image file')
            data = h5py.File(img_path, 'r')
            keys = list(data.keys())
            imagesize = data[keys[0]].shape
            return imagesize

        elif file_extension == '.tif':
            retflag, im = cv2.imreadmulti(img_path, flags=cv2.IMREAD_UNCHANGED)
            if retflag is True:
                imarray = np.array(im)
                imagesize = imarray.shape  # the list of pages has no .shape; use the stacked array
                return (imagesize[1], imagesize[2], imagesize[0])
            else:
                raise Exception(
                    "Something went wrong while loading the multipage TIF file"
                )
        '''
        elif file_extension == '.png':
            im = Image.open(img_path)
            imarray = np.array(im)
            imagesize = imarray.shape
            return imagesize
        '''
    elif os.path.isdir(img_path) == 1:
        file_list = read_files_in_folder(img_path)[0]
        png_list = [f for f in file_list if f.lower().endswith('.png')]
        tif_list = [
            f for f in file_list if f.lower().endswith(('.tif', '.tiff'))
        ]
        if len(tif_list) + len(png_list) == 0:
            print('No Tifs or PNGs found in the directory')
            return
        else:
            # only read tif or pngs if ambiguous
            if len(png_list) > len(tif_list):
                file_list = png_list
            else:
                file_list = tif_list

            filename = os.path.join(img_path, file_list[0])
            print('Reading file: ', filename)
            imarray = cv2.imread(filename, -1)
            imagesize = (imarray.shape[0], imarray.shape[1], len(file_list))
            return imagesize

    else:
        raise Exception('No images found')
Example #17
 def read(path, fmt=None):
     if not os.path.exists(path):
         raise FileNotFoundError('!! File `{}` does not exist.'.format(path))
     # Read image all frames in specified .tif file
     _, images = cv2.imreadmulti(path)
     img_seq = np.stack(images)
     # Process data according to given fmt
     if fmt in ('grey', 'duplicated_channel'): img_seq = img_seq[..., 0]
     console.show_status('Read `{}`, data shape: {}'.format(
         os.path.basename(path), img_seq.shape))
     # Wrap data into Video object and return
     return Video(img_seq)
Example #18
def preprocessImage(filename: str, standard_mask):
    imgs = cv2.imreadmulti(filename)
    mask = get_shape(imgs[1][0])
    mask_border = detect_border(mask)

    #plt.imshow(standard_mask)
    #plt.show()

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    imgs[1][0] = clahe.apply(imgs[1][0])
    #border = getBorder(imgs[1][0])
    img = imgs[1][1][mask_border[0]:mask_border[1],
                     mask_border[2]:mask_border[3]]  # gene expression
    img3 = imgs[1][0][mask_border[0]:mask_border[1],
                      mask_border[2]:mask_border[3]]  # microscopic images
    #mask = mask[mask_border[0]:mask_border[1],mask_border[2]:mask_border[3]]
    #img2 = imgs[1][0]
    """
    #img = img * mask
    plt.imshow(img3)
    plt.show()
    
    bias = 40
    img = img[max(0,border[0]-bias):min(1023,border[1]+bias),max(0,border[2]-bias):min(2047,border[3]+bias)]
    img3 = img3[max(0,border[0]-bias):min(1023,border[1]+bias),max(0,border[2]-bias):min(2047,border[3]+bias)]
    img2[max(0,border[0]-5-bias):max(0,border[0]+5-bias),:] = 255
    img2[min(1023,border[1]+5+bias):min(1023,border[1]+5+bias),:] = 255
    img2[:,max(0,border[2]-5-bias):max(0,border[2]+5-bias)] = 255
    img2[:,min(2047,border[3]+5+bias):min(2047,border[3]+5+bias)] = 255
    """
    """img = rescale_luminorsity(img)"""

    img = cv2.resize(img, (2048, 1024), interpolation=cv2.INTER_CUBIC)
    img3 = cv2.resize(img3, (2048, 1024), interpolation=cv2.INTER_CUBIC)
    mask = cv2.resize(mask, (2048, 1024), interpolation=cv2.INTER_CUBIC)
    img3, img = reshape2(img3, img, mask, standard_mask)
    #plt.imshow(img3)
    #plt.show()
    #plt.imshow(img)
    #plt.show()
    """
    a = 2
    img[:,0:400] = img[:,0:400] * float(a)
    img[img > 255] = 255
    img = np.round(img)
    img = img.astype(np.uint8)
    """

    return [img, img3]
Example #19
def imortTiftoVideoNew(filePath):
    numChan = 2
    numZoomLevles = 4
    etval, mats = cv2.imreadmulti(filePath)
    #8 images for each frame
    #TODO Generalize
    allFrames = []
    for matIndex in range(0, len(mats) - 1, 2):
        frame = []
        frame.append(mats[matIndex])
        frame.append(mats[matIndex + 1])
        allFrames.append(frame)

    video = Video(allFrames)
    del mats
    return (video)
Example #20
def preprocessImages():
    filepath = "./Dataset/"
    savepath3 = "./Embryos/"
    savepath = "./GeneExpression/"
    savepath2 = "./Rectangles/"
    filenames = load_filenames(filepath)

    imgs = cv2.imreadmulti(filepath + filenames[0])
    mask = get_shape(imgs[1][0])
    mask_border = detect_border(mask)
    mask = mask[mask_border[0]:mask_border[1], mask_border[2]:mask_border[3]]
    mask = cv2.resize(mask, (2048, 1024), interpolation=cv2.INTER_CUBIC)
    for filename in tqdm(filenames):
        img2save = preprocessImage(filepath + filename, mask)
        cv2.imwrite(savepath + filename, img2save[0])
        cv2.imwrite(savepath2 + filename, img2save[1])
        cv2.imwrite(savepath3 + filename, img2save[1])
Example #21
def aug0():
    data_path = r'K:\BIGCAT\Projects\EM\data\Other data\EM CA1 hippocampus region of brain'
    if 1:
        imgs_path = fullfile(data_path, 'testing_groundtruth.tif')
        ims = cv2.imreadmulti(imgs_path)[1]
        for i in range(len(ims)):
            save_path = fullfile(data_path, 'test masks',
                                 'image_{}.jpg'.format(i))
            cv2.imwrite(save_path, ims[i])
        print(len(ims))
    if 0:
        trainingset_augmentation(
            fullfile(data_path, 'gray images'),
            output_width=256,
            output_height=256,
            samples=10000,
            ground_truth_path=fullfile(data_path, 'masks'),
            output_dir=fullfile(data_path, 'aug gray images'),
            ground_truth_output_dir=fullfile(data_path, 'aug masks'))
Example #22
def get_images_from_tif(filedict):
    """ Returns a list of images from the TIF file.
        This function writes the bytes_array out to a temporary file
        and uses cv2.imreadmulti() to convert it.
        This is inefficient since the data is already in memory.
    """
    temp = NamedTemporaryFile(delete=False)
    temp.write(filedict['bytes_array'])
    temp.close()
    _, images = cv2.imreadmulti(temp.name, np.ndarray(0), cv2.IMREAD_GRAYSCALE)
    os.unlink(temp.name)
    final_images = []
    if len(images) > 2:
        images = images[:-1]
    for image in images:
        if sum(cv2.mean(image[:, :200])) < 250 and sum(cv2.mean(image[:, -200:])) < 250:
            final_images.append(image)
            
    return final_images
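Regarding the inefficiency noted in the docstring: recent OpenCV releases (4.7 and later) expose cv2.imdecodemulti, which decodes a multi-page image directly from a byte buffer. A sketch under that assumption, reusing the filedict layout from above; the helper name is hypothetical.

import cv2
import numpy as np

def get_images_from_tif_inmemory(filedict):
    # Decode the multi-page TIFF straight from the bytes already in memory,
    # avoiding the temporary file (requires cv2.imdecodemulti, OpenCV >= 4.7).
    buf = np.frombuffer(filedict['bytes_array'], dtype=np.uint8)
    ok, images = cv2.imdecodemulti(buf, cv2.IMREAD_GRAYSCALE)
    return list(images) if ok else []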
Example #23
 def test_GetIntensityArray(self):
     test_ret, test_img = cv2.imreadmulti(test_vid,
                                          flags=cv2.IMREAD_GRAYSCALE)
     # Setting Resizing Dimensions
     scale_percent = 1
     width = int(test_img[0].shape[1] * scale_percent / 100)
     height = int(test_img[0].shape[0] * scale_percent / 100)
     dim = (width, height)
     test_img_resized = cv2.resize(test_img[0],
                                   dim,
                                   interpolation=cv2.INTER_AREA)
     test_thresh = 5
     test_fn = GenerateIntensityMap.GetIntensityArray(
         test_vid, test_thresh, scale_percent)
     # Testing output size
     assert len(test_fn) == len(test_img_resized),\
         "Output is the wrong shape"
     # Testing output type
     assert type(test_fn) == np.ndarray,\
         "Output is the wrong type"
Example #24
 def test_GetFreqCounts(self):
     """Tests GetFreqCounts functionality"""
     test_ret, test_img = cv2.imreadmulti(test_vid1,
                                          flags=cv2.IMREAD_GRAYSCALE)
     # Setting Resizing Dimensions
     scale_percent = 1
     width = int(test_img[0].shape[1] * scale_percent / 100)
     height = int(test_img[0].shape[0] * scale_percent / 100)
     dim = (width, height)
     test_img_resized = cv2.resize(test_img[0],
                                   dim,
                                   interpolation=cv2.INTER_AREA)
     test_thresh = 3
     # Calling function to test
     test_fn = GenerateHeatMap.GetFreqCounts(test_img_resized, test_thresh)
     # Testing output shape
     assert len(test_fn) == len(test_img_resized),\
         "Output is the wrong shape"
     # Testing output type
     assert type(test_fn) == np.ndarray,\
         "Output is the wrong type"
Example #25
def run_tiff(file_path, progress_percent):
    angle = 0
    ret, video = cv2.imreadmulti(file_path, flags=cv2.IMREAD_ANYDEPTH)
    video_labeled, table = [], []
    idx = 1
    for frame in video[:]:
        img_label, angle_new = process(frame)
        angle_new = float('{0:.2f}'.format(angle_new))
        rotation = cal_rotation(angle, angle_new)
        rotation = float('{0:.2f}'.format(rotation))
        table.append([angle, rotation, angle_new])
        video_labeled.append(img_label)
        angle = angle_new
        idx += 1
        progress_percent['value'] = idx / len(video) * 100
        # print(table[-1])
        # cv2.imshow('img',cv2.resize(img_label,(512,512)))
        # if cv2.waitKey(0) & 0xFF == ord('q'):
        #     break
    return video_labeled, table
Example #26
def read_imgs(img_folder, color=False, extension="tif"):  # folder containing the images
    if not os.path.exists(img_folder):
        print(img_folder + " does not exist")
        sys.exit()
    if color is False:  # read as grayscale
        imread_type = cv2.IMREAD_GRAYSCALE | cv2.IMREAD_ANYDEPTH
    elif color is True:  # read as color
        imread_type = cv2.IMREAD_COLOR
    if os.path.isfile(img_folder):
        _, img = cv2.imreadmulti(img_folder, flags=imread_type)
        img = np.array(img)
    elif os.path.isdir(img_folder):
        file_list = sorted(glob.glob(os.path.join(img_folder, "*." + extension)))
        tmp = cv2.imread(file_list[0], imread_type)  # first image
        img = np.empty(
            np.concatenate(([len(file_list)], tmp.shape)), dtype=tmp.dtype
        )  # pre-allocated container for all images
        img[0] = tmp  # put the first image into the container
        for i in range(1, len(file_list)):  # load the rest, from the second image on
            img[i] = cv2.imread(file_list[i], imread_type)
        print("Loaded " + str(len(file_list)) + " images from " + img_folder + ".")
    return img
Example #27
def getLineInfo(DF, imagepath, page_count):
    DF_new = pd.DataFrame()
    tempar = pd.DataFrame()

    ret, imgs = cv2.imreadmulti(imagepath)

    for i in range(page_count):

        if len(imgs[i].shape) == 3:
            imgs[i] = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2GRAY)
        blur = cv2.medianBlur(imgs[i], 3)
        pre = cv2.threshold(blur, 210, 255,
                            cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        vlines, hlines = findLines(pre)
        height = pre.shape[0]
        width = pre.shape[1]

        hlines = [(hline[0] / width, hline[1] / height, hline[2] / width,
                   hline[3] / height) for hline in hlines]
        vlines = [(vline[0] / width, vline[1] / height, vline[2] / width,
                   vline[3] / height) for vline in vlines]

        file_split = os.path.basename(imagepath).split(".")
        filename = file_split[0]
        file_extn = "." + file_split[-1]

        temp = DF['OriginalFile'] == filename + "_" + str(i) + file_extn
        tempar = DF[temp].copy()

        if tempar.shape[0] > 0:
            tempar[lineInfo] = tempar.apply(findLinesClose,
                                            args=(hlines, vlines),
                                            axis=1)
            if DF_new.shape[0] == 0:
                DF_new = tempar
            else:
                DF_new = pd.concat([DF_new, tempar])

    return DF_new
Example #28
def GetIntensityArray(videofile, threshold, scale_percent):
    """Finds pixel coordinates within a videofile (.tif, .mp4) for pixels
    that are above a brightness threshold, then accumulates the
    brightness event intensities for each coordinate,
    outputting it as a 2-D array in the same size as the video frames

    Input:
    -videofile: file containing an image stack of fluorescent events
    -threshold: minimum brightness for detection
    -scale_percent: helps resize image for faster computing speeds

    Output: 2-d Array of accumulated intensity values for each pixel above
    a calculated brightness threshold in the video"""
    # Reading video file and convert to grayscale
    ret, img = cv2.imreadmulti(videofile, flags=cv2.IMREAD_GRAYSCALE)
    # Setting Resizing Dimensions
    width = int(img[0].shape[1] * scale_percent / 100)
    height = int(img[0].shape[0] * scale_percent / 100)
    dim = (width, height)
    img_resized = cv2.resize(img[0], dim, interpolation=cv2.INTER_AREA)
    # Creating empty array to add intensity values to
    int_array = np.zeros(np.shape(img_resized))
    for frame in range(len(img)):
        # Resize Frame
        frame_resized = cv2.resize(img[frame],
                                   dim,
                                   interpolation=cv2.INTER_AREA)
        intensity = GetIntensityValues(frame_resized, threshold)
        # Get the coordinates of the pixels above the threshold
        row, col = np.where(intensity >= 1)
        for r, c in zip(row, col):
            # Add each event's intensity to int_array at the event location
            int_array[r, c] += intensity[r, c]
    return int_array
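A hypothetical call, assuming GetIntensityValues is importable from the same module; the file path and parameter values are placeholders.

# Hypothetical call: accumulate per-pixel intensities over the whole stack.
intensity_map = GetIntensityArray('/data/fluorescence_stack.tif',
                                  threshold=5, scale_percent=50)
print(intensity_map.shape)  # matches the resized frame dimensions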
Example #29
def imageimporter_large(img_path, area, z_stack, outfolder):
    print('Image importer loading ... ')
    print(img_path)

    # check if a folder of png/tif files or a single stack to load
    name_with_path, ext = os.path.splitext(img_path)

    if ext:
        if '.h5' in ext:
            h5file = h5py.File(img_path, 'r')
            keys = list(h5file.keys())
            # read the first dataset as a numpy array (a plain list of datasets cannot be sliced)
            imgstack = h5file[keys[0]][()]
            imgstack = imgstack[area[0]:area[1],
                                area[2]:area[3], z_stack[0]:z_stack[1]]
            print('Processed size:', imgstack.shape)
        elif ext.lower() in ['.tif', '.tiff']:
            retflag, im = cv2.imreadmulti(img_path, flags=cv2.IMREAD_UNCHANGED)
            if retflag is True:
                imarray = np.array(im)
                # imarray = np.expand_dims(imarray, axis=len(imarray.shape))
                imgstack = imarray[z_stack[0]:z_stack[1], area[0]:area[1],
                                   area[2]:area[3]]
                imagesize = imgstack.shape
                print('Successfully read image stack with', str(
                    imagesize[0]), 'images\n')
            else:
                raise Exception("Something went wrong while loading the multi-page TIF file")

    elif os.path.isdir(img_path):
        file_list = read_files_in_folder(img_path)[0]
        png_list = [f for f in file_list if f.lower().endswith('.png')]
        tif_list = [f for f in file_list if f.lower().endswith(('.tif', '.tiff'))]
        tif_list_len = len(tif_list)
        png_list_len = len(png_list)

        if tif_list_len + png_list_len == 0:
            print('No Tifs or PNGs found in training directory')
            return
        else:
            if tif_list_len > png_list_len:
                file_list = tif_list
            else:
                file_list = png_list

            tempdir = os.path.join(outfolder, 'temp')
            if not os.path.isdir(tempdir):
                os.mkdir(tempdir)

            tempmat_infile = os.path.join(tempdir, 'infiles.txt')
            with open(tempmat_infile, 'w') as f:
                for fl in range(z_stack[0], z_stack[1]):
                    f.write(os.path.join(img_path, file_list[fl]) + '\n')

            tempmat_outfile = os.path.join(tempdir, 'outfiles.txt')

            with open(tempmat_outfile, 'w') as f:
                for fl in range(z_stack[0], z_stack[1]):
                    f.write(os.path.join(
                        tempdir, file_list[fl][:-3] + 'tif') + '\n')

            crop_png(
                tempmat_infile,
                tempmat_outfile,
                area[0],
                area[1],
                area[2],
                area[3])

            print('Reading images')
            imgstack = np.array([cv2.imread(os.path.join(
                tempdir, file_list[i][:-3] + 'tif'), -1) for i in range(z_stack[0], z_stack[1])])
            # shape = np.shape(imgstack)
            # print (shape)

    else:
        raise Exception('No images found')

    # Add padding
    # Left and upper side
    if area[0] == 0 and imgstack.shape[1] <= 1012:  # first in y
        imgstack = np.concatenate(
            (np.flipud(imgstack[:, 1:13, :]), imgstack), axis=1)
    if area[2] == 0 and imgstack.shape[2] <= 1012:  # then in x
        imgstack = np.concatenate(
            (np.fliplr(imgstack[:, :, 1:13]), imgstack), axis=2)

    x_size = imgstack.shape[1]
    y_size = imgstack.shape[2]

    # Right and lower end

    if x_size < 1024:
        max_padsize = 1024 - x_size
        max_padsize = min(max_padsize, 12)
        imgstack = np.concatenate((imgstack, np.flipud(
            imgstack[:, x_size - max_padsize - 1:x_size - 1, :])), axis=1)

    if y_size < 1024:
        max_padsize = 1024 - y_size
        max_padsize = min(max_padsize, 12)
        imgstack = np.concatenate((imgstack, np.fliplr(
            imgstack[:, :, y_size - max_padsize - 1:y_size - 1])), axis=2)

    # Add zeros to fill 1024*1024 Image size
    x_size = imgstack.shape[1]
    y_size = imgstack.shape[2]

    if x_size < 1024 or y_size < 1024:
        temp_img = np.zeros(
            (imgstack.shape[0], 1024, 1024), dtype=imgstack.dtype)
        temp_img[:, 0:imgstack.shape[1], 0:imgstack.shape[2]] = imgstack
        imgstack = temp_img

    return imgstack
Example #30
 def open_tiff_stack(self):
     tiff_stack = cv2.imreadmulti(self.file_path,
                                  flags=(cv2.IMREAD_UNCHANGED
                                         | cv2.IMREAD_ANYDEPTH))
     return tiff_stack
Example #31
import cv2
import numpy as np
import pprint

mylist = []
loaded = cv2.imreadmulti(mats=mylist, filename="2page.tiff", flags=cv2.IMREAD_ANYCOLOR)
print(loaded)
print(len(mylist))
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(mylist)