def save_fseg_segmentations_MSRA(images, in_dir, out_dir, param_path, train=False):
    fparams = np.load(param_path, allow_pickle=True).item()
    if 'f_Segs' not in os.listdir(out_dir):
        os.mkdir(out_dir+'/f_Segs')

    for fimg in images:
        img = sio.imread(in_dir+fimg)
        gt = sio.imread(in_dir+fimg[0:-3]+'png')/255
        segs = {}
        for g in range(0,15):

            f_seg = np.zeros(img.shape[0:2],dtype=np.int32)
            felseg(img, f_seg, fparams['sigma'][g], float(fparams['scale'][g]), int(fparams['min_size'][g]))
            saliency = []
            segments = []
            f_seg+=1
            segments_temp = np.unique(f_seg)
            for segment in segments_temp:
                sal_temp = calc_saliency_score(segment,f_seg,gt)
            if not train or sal_temp >= 0:
                segments.append(segment)
                saliency.append(int(sal_temp))
            segs[str(g)]={}
            segs[str(g)]['segmap']= f_seg
            segs[str(g)]['seglist']= segments
            segs[str(g)]['labels']= saliency
        np.save(out_dir+'/f_Segs/'+fimg[0:-4],segs)
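The calc_saliency_score helper is not shown above; a minimal sketch that is consistent with the `sal_temp >= 0` training filter might look like this (thresholds and logic are assumptions, not the original implementation):

def calc_saliency_score(segment, seg_map, gt, hi=0.7, lo=0.3):
    # Fraction of this segment's pixels that fall inside the ground-truth mask.
    overlap = gt[seg_map == segment].mean()
    if overlap >= hi:
        return 1   # salient
    if overlap <= lo:
        return 0   # background
    return -1      # ambiguous; dropped at training time by the >= 0 filter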
def produce_smoothed_images(get_component, replace_component, bins, output_path, paths):

    start_img = io.imread(paths[0])
    start_cdf = get_cdf(get_component(start_img), bins)

    end_img = io.imread(paths[-1])
    end_cdf = get_cdf(get_component(end_img), bins)

    delta_cdf = end_cdf - start_cdf

    for i, path in enumerate(paths[1:-1]):
        percentage = i / len(paths[1:-1])
        target_cdf = start_cdf + (delta_cdf * percentage)

        img = io.imread(path)
        values = get_component(img)
        cdf = get_cdf(values, bins)

        # In order to match the length of "bins" for the interpolation below
        # we prepend a 0
        target_cdf = numpy.insert(target_cdf, 0, 0)
        cdf = numpy.insert(cdf, 0, 0)

        matched = match(values, cdf, target_cdf, bins)
        matched = matched.reshape(values.shape)

        img = replace_component(img, matched)

        result_path = os.path.join(output_path, os.path.basename(path))
        io.imsave(result_path, img)
        print('Done with', result_path)
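The get_cdf and match helpers are not shown; a minimal sketch consistent with the zero-padding above (assumed, not the original implementation):

def get_cdf(values, bins):
    # Normalized cumulative histogram over the given bin edges;
    # its length is len(bins) - 1, hence the zero prepended by the caller.
    hist, _ = numpy.histogram(values, bins=bins)
    cdf = numpy.cumsum(hist).astype(float)
    return cdf / cdf[-1]

def match(values, cdf, target_cdf, bins):
    # Histogram matching: push each value through the source CDF, then
    # invert the target CDF by interpolation over the bin edges.
    quantiles = numpy.interp(values.ravel(), bins, cdf)
    return numpy.interp(quantiles, target_cdf, bins)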
Example #3
def test_bicubic():
    origin = io.imread('baby_GT[gray].bmp')
    im_jor = io.imread('baby_JOR[gray].bmp')
    im_my = io.imread("baby_MY[gray].bmp")

    image = io.imread('baby_GT.bmp')
    shape = origin.shape

    if len(shape) == 3:
        test_img = image[:shape[0]-shape[0] % 3, :shape[1]-shape[1] % 3, :]
    else:
        test_img = image[:shape[0]-shape[0] % 3, :shape[1]-shape[1] % 3]

    lim = imresize(test_img, 1/3.0, 'bicubic')
    mim = imresize(lim, 2.0, 'bicubic')
    rim = imresize(lim, 3.0, 'bicubic')

    lim = np.asarray(tc.rgb2ycbcr(lim)[:, :, 0], dtype=float)
    image = np.asarray(tc.rgb2ycbcr(test_img)[:, :, 0], dtype=float)
    mim = np.asarray(tc.rgb2ycbcr(mim)[:, :, 0], dtype=float)
    rim = np.asarray(tc.rgb2ycbcr(rim)[:, :, 0], dtype=float)

    print(psnr(image * 1.0, rim * 1.0))
    print(psnr(image * 1.0, im_my[0:504, 0:504] * 1.0))

    plt.subplot(221)
    plt.imshow(image, interpolation="none", cmap=cm.gray)
    plt.subplot(222)
    plt.imshow(np.abs(rim), cmap=cm.gray)
    plt.subplot(223)
    plt.imshow(np.abs(im_my), interpolation="none", cmap=cm.gray)
    plt.subplot(224)
    plt.imshow(np.abs(im_jor), interpolation="none", cmap=cm.gray)

    plt.show()
def compute_mean_image(training_data_path, testing_data_path, save_flag=True, save_file=''):
    print('computing mean images')
    folder = os.listdir(training_data_path)
    trainNum = len(folder)
    init_flag = True
    for f in folder:
        img = skimage.img_as_float( skio.imread(training_data_path+f) )
        if init_flag:
            mean_image = img
            init_flag = False
        else:
            mean_image += img
    
    folder = os.listdir(testing_data_path)
    testNum = len(folder)
    for f in folder:
        img = skimage.img_as_float( skio.imread(testing_data_path+f) )
        mean_image += img
    
    mean_image /= (trainNum + testNum)
    
    
    if len(mean_image.shape) == 2:
        # if gray, (h, w) -> (1, h, w)
        tmp = np.zeros((1, mean_image.shape[0], mean_image.shape[1]))
        tmp[0, ...] = mean_image
        mean_image = tmp
    else:
        # if color, swap (h, w, ch) to (ch, h, w)
        mean_image = mean_image.swapaxes(1, 2)
        mean_image = mean_image.swapaxes(0, 1)
    if save_flag:
        with open(save_file, 'wb') as f:
            np.save(f, mean_image)
    return mean_image
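A hedged usage sketch (file names are placeholders) showing how the saved (ch, h, w) mean could be used to center a new image:

mean_image = np.load('mean_image.npy')
img = skimage.img_as_float(skio.imread('some_image.png'))
if img.ndim == 2:
    img = img[np.newaxis, ...]    # (h, w) -> (1, h, w)
else:
    img = img.transpose(2, 0, 1)  # (h, w, ch) -> (ch, h, w)
centered = img - mean_image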
    def run_simple(self):
        u'''Simple prediction'''
        if len(self.datapath) >= 2:
            # Use only two previous images
            af_img = io.imread(self.datapath[0])
            bf_img = io.imread(self.datapath[1])
            
            #af_img = io.imread(r'./viptrafficof_02.png')
            #bf_img = io.imread(r'./viptrafficof_03.png')

            # Convert to gray image
            af_gray = (color.rgb2gray(af_img) * 255).astype(np.uint8)
            bf_gray = (color.rgb2gray(bf_img) * 255).astype(np.uint8)

            # Calculate density flow
            # Small -> WHY?
            flow = cv2.calcOpticalFlowFarneback(bf_gray, af_gray, None,
                0.5, 6, 20, 10, 5, 1.2, 0)
            print(flow.shape, flow[:, :, 0].min(), flow[:, :, 1].max())
            self.before = bf_gray
            self.after = af_gray
            #self.result = self.current
            self.result = transform(af_img, flow)
            
            # Color code the result for better visualization of optical flow. 
            # Direction corresponds to Hue value of the image. 
            # Magnitude corresponds to Value plane
            
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            hsv = np.zeros_like(af_img)
            hsv[...,1] = 255
            hsv[...,0] = ang*180/np.pi/2
            hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            self.optical = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)        
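The transform helper used above is not shown; a plausible flow-based warp with OpenCV might look like this (an assumption, not the original implementation):

def transform(img, flow):
    # Warp img by the dense flow field using backward remapping.
    h, w = flow.shape[:2]
    grid_x, grid_y = np.meshgrid(np.arange(w), np.arange(h))
    map_x = (grid_x + flow[..., 0]).astype(np.float32)
    map_y = (grid_y + flow[..., 1]).astype(np.float32)
    return cv2.remap(img, map_x, map_y, interpolation=cv2.INTER_LINEAR)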
Example #6
def compare_in_path(path):
	filenames = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]

	# shuffle because I started the script several times without finishing the process
	random.shuffle(filenames)

	counter = 0
	for idx, f1 in enumerate(filenames):

		# don't compare two files twice
		for f2 in filenames[(idx+1):]:
			a1 = io.imread(os.path.join(path, f1))
			a2 = io.imread(os.path.join(path, f2))

			s = ssim(a1, a2, multichannel=True)

			# 0.5 seems reasonable, but one could try around here (maybe 0.4)
			if s > 0.5:
				print(f1 + '   ' + f2 + '\t\t' + str(s))

				# remove the second file
				os.remove(os.path.join(path, f2))

				filenames.remove(f2)

		
		counter += 1
		print ('  * ' + str(counter) + ' *  ' )
		if counter > 100: break
def extract_features():
    des_type = 'HOG'

    # If feature directories don't exist, create them
    if not os.path.isdir(pos_feat_ph):
        os.makedirs(pos_feat_ph)

    # If feature directories don't exist, create them
    if not os.path.isdir(neg_feat_ph):
        os.makedirs(neg_feat_ph)

    print "Calculating the descriptors for the positive samples and saving them"
    for im_path in glob.glob(os.path.join(pos_im_path, "*")):
        #print im_path
        
        im = imread(im_path, as_grey=True)
        if des_type == "HOG":
            fd = hog(im, orientations, pixels_per_cell, cells_per_block, visualize, normalize)
        fd_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        fd_path = os.path.join(pos_feat_ph, fd_name)
        joblib.dump(fd, fd_path)
    print "Positive features saved in {}".format(pos_feat_ph)

    print "Calculating the descriptors for the negative samples and saving them"
    for im_path in glob.glob(os.path.join(neg_im_path, "*")):
        im = imread(im_path, as_grey=True)
        if des_type == "HOG":
            fd = hog(im,  orientations, pixels_per_cell, cells_per_block, visualize, normalize)
        fd_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        fd_path = os.path.join(neg_feat_ph, fd_name)
    
        joblib.dump(fd, fd_path)
    print "Negative features saved in {}".format(neg_feat_ph)

    print "Completed calculating features from training images"
def create_train_data():
    train_data_path = os.path.join(data_path, 'train')
    images = os.listdir(train_data_path)
    total = int(len(images) / 2)

    imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
    imgs_mask = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)

    i = 0
    print('-'*30)
    print('Creating training images...')
    print('-'*30)
    for image_name in images:
        if 'mask' in image_name:
            continue
        image_mask_name = image_name.split('.')[0] + '_mask.tif'
        img = imread(os.path.join(train_data_path, image_name), as_grey=True)
        img_mask = imread(os.path.join(train_data_path, image_mask_name), as_grey=True)

        img = np.array([img])
        img_mask = np.array([img_mask])

        imgs[i] = img
        imgs_mask[i] = img_mask

        if i % 100 == 0:
            print('Done: {0}/{1} images'.format(i, total))
        i += 1
    print('Loading done.')

    np.save('imgs_train.npy', imgs)
    np.save('imgs_mask_train.npy', imgs_mask)
    print('Saving to .npy files done.')
Example #9
def count_bubble(image_filename, ref_filename, plot_show=0):
    
    image = io.imread(gv.__DIR__ + gv.__TrainImageDir__ + \
                      image_filename)
    ref_image = io.imread(gv.__DIR__ + gv.__TrainImageDir__ + \
                      ref_filename)

    image_gray = rgb2gray(image)
    ref_gray = rgb2gray(ref_image)

    # Constants
    Window_Size = 5
           
    pre_image = pre.noise_reduction(image_gray,
                                    ref_gray,
                                    Window_Size,
                                    mode = 0)
    seg_image = segmentation(pre_image,'self_design')
    perimeters = perimeter_exaction(seg_image, image, image_filename)
    if plot_show == 1:
        fig, ax = plt.subplots(1,3)
        ax[0].imshow(image)
        ax[0].set_title('Original')
        ax[1].imshow(seg_image, cmap=plt.cm.gray)
        ax[1].set_title('Segmentation')
        result = io.imread(gv.__DIR__ + gv.cu__image_dir + image_filename)
        ax[2].imshow(result)
        ax[2].set_title('Result')
        plt.show()
       
    return perimeters
Example #10
def plot_ori_gt_ann(imgid):
    path_ori = ori_dir +'/'+ imgid + '.jpg'
    path_gt = gt_dir +'/'+ imgid + '.png'
    path_ann = ann_dir +'/'+ imgid + '.png'

    image_file_ori = io.imread(path_ori)
    image_file_gt = io.imread(path_gt)
    image_file_ann = io.imread(path_ann)

    f = plt.figure()
    gs = gridspec.GridSpec(1, 3, width_ratios=[1, 1, 1])

    ax1 = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1])
    ax3 = plt.subplot(gs[2])

    ax1.imshow(image_file_ori,aspect='auto')
    ax2.imshow(image_file_gt,aspect='auto')
    ax3.imshow(image_file_ann,aspect='auto')

    ax1.axis('off')
    ax2.axis('off')
    ax3.axis('off')

    out_filepath = out_dir+'/'+imgid+'_ori_gt_ann.png'
    f.savefig(out_filepath,bbox_inches = 'tight')
    plt.close(f)
Example #11
def load_images(random_state=1234):
    train_df = pd.read_csv("data/train.csv", index_col="id", usecols=[0])
    depths_df = pd.read_csv("data/depths.csv", index_col="id")
    train_df = train_df.join(depths_df)
    test_df = depths_df[~depths_df.index.isin(train_df.index)]
    print(">>> train_df:",train_df.shape)
    print(train_df.head())
    print(">>> test_df:", test_df.shape)
    print(test_df.head())
    train_df["images"] = [gradmag(np.array(imread(path_train_images+"{}.png".format(idx)))) for idx in tqdm(train_df.index)]
    train_df["masks"] = [np.array(load_img(path_train_masks+"{}.png".format(idx),grayscale=True))/255 for idx in tqdm(train_df.index)]
    train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
    train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
    print("*** TRAIN ***")
    print(train_df.head())
    print("*** TEST ***")
    print(test_df.head())
    ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split(
        train_df.index.values,
        np.array(train_df.images.tolist()).reshape(-1, img_size_target, img_size_target, 1),
        np.array(train_df.masks.tolist()).reshape(-1, img_size_target, img_size_target, 1),
        train_df.coverage.values,
        train_df.z.values,
        test_size=0.2,
        stratify=train_df.coverage_class,
        random_state=random_state)
    #Data augmentation
    x_train2 = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
    y_train2 = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0)
    print(x_train2.shape)
    print(y_valid.shape)
    x_test = np.array([gradmag(np.array(imread(path_test_images+"{}.png".format(idx)))) for idx in tqdm(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1)
    return x_train2, x_valid, y_train2, y_valid, x_test, test_df.index.values
Example #12
def compare_png(img1, img2, eps=0.99):
    """check whether img1 and img2 are similar

       we use structural similarity (SSIM) to compare them.

       SSIM generates values between 0 and 1, where 1 represents
       identical images

       If the SSIM result is greater than eps, this method returns True.

    """
    im1 = imread(img1)
    im2 = imread(img2)
    if len(im1.shape) == 2 or im1.shape[-1] == 1:
        # only one color channel
        mssim = compare_ssim(im1, im2)
    elif HAVE_MULTICHANNEL_SSIM:
        # multi color channel
        mssim = compare_ssim(im1, im2, multichannel=True)
    else:
        # We have to do multichannel ssim ourselves
        nch = im1.shape[-1]
        mssim = np.empty(nch)
        for chan in range(nch):
            # use copy to generate contiguous array and avoid warning
            ch_result = compare_ssim(
                im1[..., chan].copy(), im2[..., chan].copy())
            mssim[..., chan] = ch_result
        mssim = mssim.mean()
    return mssim > eps
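A hedged usage sketch (file names are placeholders):

if compare_png('expected.png', 'rendered.png', eps=0.95):
    print('images are effectively identical (SSIM above threshold)')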
Example #13
def test_multi_page_gif():
    img = imread(os.path.join(data_dir, 'no_time_for_that.gif'))
    assert img.shape == (24, 280, 500, 3), img.shape
    img2 = imread(os.path.join(data_dir, 'no_time_for_that.gif'),
                  img_num=5)
    assert img2.shape == (280, 500, 3)
    assert_allclose(img[5], img2)
Example #14
def main():

	t0 = time.time()

	print "Preparing Inputs..."
	pi.prepareInputs()

	ndsm = io.imread("C:\\bertud_temp\\ndsm.tif")
	classified = io.imread("C:\\bertud_temp\\classified.tif")
	classified = classified[0:len(ndsm),0:len(ndsm[0])]
	slope = io.imread("C:\\bertud_temp\\slope.tif")
	numret = io.imread("C:\\bertud_temp\\numret.tif")

	print "Generating Initial Mask..."
	initialMask = ma.generateInitialMask(ndsm,classified,slope,numret)

	io.imsave("C:\\bertud_temp\\initialMask.tif",initialMask)


	pieces = br.performBoundaryRegularizationV2(initialMask,numProcesses=3)

	finalMask = ma.buildFinalMask(pieces,initialMask)

	io.imsave("C:\\bertud_temp\\finalMask.tif",finalMask)

	# pickle.dump(pieces,open("E:/BertudV2/pieces.pickle","wb"))

	t1 = time.time()

	print "Finished everything in ",round(t1-t0,2),"s."
Example #15
def test(classifier, pca):
	building = io.imread("http://www.nps.gov/tps/images/briefs/14-commercial-building.jpg")
	building = transform.resize(building, (200, 200, 3))
	building = color.rgb2gray(building)
	building = building.reshape(1, -1)
	# building = pca.transform(building)
	print(building)
	print(classifier.predict(building)[0])
	print(to_cat[str(classifier.predict(building)[0])] + " (expect building)")
	# print classifier.predict_proba(building)

	snow = io.imread("http://farm4.static.flickr.com/3405/3332148397_92d89db2ab.jpg")
	snow = transform.resize(snow, (200, 200, 3))
	snow = color.rgb2gray(snow)
	snow = snow.reshape(1, -1)
	# snow = pca.transform(snow)
	print(snow)
	print(to_cat[str(classifier.predict(snow)[0])] + " (expect snow)")
	# print classifier.predict_proba(snow)


	flower = io.imread("https://upload.wikimedia.org/wikipedia/commons/f/fd/Daisy_flower_green_background.jpg")
	flower = transform.resize(flower, (200, 200, 3))
	flower = color.rgb2gray(flower)
	flower = flower.reshape(1, -1)
	# flower = pca.transform(flower)
	print(to_cat[str(classifier.predict(flower)[0])] + " (expect plant)")
def make_masks():
    path = '/path/to/JSRT/All247images/'
    for i, filename in enumerate(os.listdir(path)):
        left = io.imread('/path/to/JSRT/Masks/left lung/' + filename[:-4] + '.gif')
        right = io.imread('/path/to/JSRT/Masks/right lung/' + filename[:-4] + '.gif')
        io.imsave('/path/to/JSRT/new/' + filename[:-4] + 'msk.png', np.clip(left + right, 0, 255))
        print('Mask', i, filename)
Example #17
    def load_sequence(self, sequence):
        """Load a sequence of images/frames

        Auxiliary function that loads a sequence of frames with
        the corresponding ground truth and their filenames.
        Returns a dict with the images in [0, 1], their corresponding
        labels, their subset (i.e. category, clip, prefix) and their
        filenames.
        """
        from skimage import io
        image_batch, mask_batch, filename_batch = [], [], []

        for prefix, img_name in sequence:

            img = io.imread(os.path.join(self.image_path, img_name + ".bmp"))
            img = img.astype(floatX) / 255.

            mask = np.array(io.imread(os.path.join(self.mask_path,
                                                   img_name + ".tif")))
            mask = mask.astype('int32')

            # Add to minibatch
            image_batch.append(img)
            mask_batch.append(mask)
            filename_batch.append(img_name)

        ret = {}
        ret['data'] = np.array(image_batch)
        ret['labels'] = np.array(mask_batch)
        ret['subset'] = prefix
        ret['filenames'] = np.array(filename_batch)
        return ret
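A hedged usage sketch (the dataset object and frame names are assumptions):

# batch = dataset.load_sequence([('clip01', 'frame_0001'), ('clip01', 'frame_0002')])
# batch['data'].shape    -> (2, H, W, 3), floats in [0, 1]
# batch['labels'].shape  -> (2, H, W), int32 label maps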
Example #18
def main():
    try:
        if len(sys.argv) <= 1:
            print('Error: Filename Required')
        elif len(sys.argv) == 2:
            print('Error: Background Filename Required')
        else:
            # Constants
            Window_Size = 5
            image_name = sys.argv[1]
            ref_name = sys.argv[2]

            image = rgb2gray(io.imread(image_name))
            ref = rgb2gray(io.imread(ref_name))

            part_image, region, angle = pre.interest_region(image, plot_image=0)
            ref_rotate = rotate(ref, angle)
            part_ref = ref_rotate[region[0]:region[1], region[2]:region[3]]

            pre_image = pre.noise_reduction(part_image, part_ref, Window_Size, mode=0)
            io.imsave('pre_image.jpg', pre_image)

    except KeyboardInterrupt:
        print("Shutdown requested... exiting")
    except Exception:
        traceback.print_exc(file=sys.stdout)
    sys.exit(0)
def selectRandomPoints(imageFPath, centerLinePointCount, nonWormPointCount):
    centerLinePoints = []
    nonWormPoints = []
    imageFPath = Path(imageFPath)
    im_mask = skio.imread(str(imageFPath.parent / 'fluo_worm_mask.png')) > 0
    # Dilate mask out substantially so that non-worm pixels we select are not adjacent to the worm
    im_mask = scipy.ndimage.morphology.binary_dilation(im_mask, iterations=100)
    im_centerline = skio.imread(str(imageFPath.parent / 'fluo_worm_mask_skeleton.png')) > 0
    labels = skimage.measure.label(im_centerline)
    regions = skimage.measure.regionprops(labels)
    if len(regions) == 0:
        raise RuntimeError('Mask skeleton image contains no regions.')
    else:
        if len(regions) > 1:
            raise RuntimeError('Mask skeleton image contains multiple regions.')
        else:
            coords = regions[0].coords
            if centerLinePointCount > len(coords):
                print('warning: centerLinePointCount exceeds number of pixels in skeleton... ' + \
                      'All skeleton points will be returned without duplication, which is still ' + \
                      'fewer points than requested.')
                centerLinePointCount = len(coords)
            centerLinePoints = coords[numpy.random.choice(range(len(coords)), size=centerLinePointCount, replace=False)]
            while len(nonWormPoints) < nonWormPointCount:
                y = numpy.random.randint(im_mask.shape[0])
                x = numpy.random.randint(im_mask.shape[1])
                if im_mask[y, x] or math.sqrt((y - 1079.5)**2 + (x - 1279.5)**2) > 1000 or (y, x) in nonWormPoints:
                    continue
                nonWormPoints.append((y, x))
    return numpy.array(centerLinePoints, dtype=numpy.uint32), numpy.array(nonWormPoints, dtype=numpy.uint32)
Example #20
def run_illum(args):
    """Run illumination correction.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    if args.file_list is not None:
        args.images.extend([fn.rstrip() for fn in args.file_list])
    il = pre.find_background_illumination(args.images, args.radius,
                                          args.quantile, args.stretchlim,
                                          args.use_mask, args.mask_offset,
                                          args.mask_close, args.mask_erode)
    if args.verbose:
        print('illumination field:', type(il), il.dtype, il.min(), il.max())
    if args.save_illumination is not None:
        io.imsave(args.save_illumination, il / il.max())
    base_fns = [pre.basefn(fn) for fn in args.images]
    ims_out = [fn + args.output_suffix for fn in base_fns]
    mask_fns = [fn + '.mask.tif' for fn in base_fns]
    ims = (io.imread(fn) for fn in args.images)
    for im, fout, mask_fn in zip(ims, ims_out, mask_fns):
        if os.path.isfile(mask_fn):
            mask = io.imread(mask_fn).astype(bool)
        else:
            mask = np.ones(im.shape, bool)
        im = pre.correct_image_illumination(im, il,
                                            args.stretchlim_output, mask)
        io.imsave(fout, im)
Example #21
    def __init__(self, source, fps, transparent=True, repeat=False, ismask=False):

        VideoClip.__init__(self, ismask=ismask)

        self.directory = source
        self.fps = fps
        self.pics = sorted(["%s/%s" % (source, f) for f in os.listdir(source)
                            if not f.endswith('.txt')])

        first_frame = imread(self.pics[0])
        self.size = first_frame.shape[:2][::-1]

        if first_frame.shape[2] == 4:  # transparent png

            if ismask:
                def get_frame(t):
                    return 1.0 * imread(self.pics[int(self.fps * t)])[:, :, 3] / 255
            else:
                def get_frame(t):
                    return imread(self.pics[int(self.fps * t)])[:, :, :3]

            if transparent:
                self.mask = DirectoryClip(source, fps, ismask=True)

        else:

            def get_frame(t):
                return imread(self.pics[int(self.fps * t)])

        self.get_frame = get_frame
        self.duration = 1.0 * len(self.pics) / self.fps
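A hedged usage sketch (directory name is a placeholder):

# clip = DirectoryClip('frames/', fps=24)
# frame = clip.get_frame(0.5)   # frame nearest to t = 0.5 s
# clip.duration                 # len(frames) / fps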
Example #22
def iris_scan_orb_android(file_name):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/'+ file_name))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS9.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    percent = len(matches12) / len(descriptors1) * 100

    return percent
Example #23
	def put_image(self, image_path):
		# print("Loading the image")
		self.image = io.imread(image_path, as_grey=True)
		self.image = transform.resize(self.image, (50, 50))
		self.image_scaled = self.image.copy()
		self.image_scaled *= 1 / self.image_scaled.max()
def readTestingFragment(datapath, fragList, imgSize=(1,224,224), meanImage=[]):
    ch, ih, iw = imgSize
    fragLen = len(fragList)
    if ch == 1:
        X = np.zeros((fragLen, 1, ih, iw))
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            # print(f)
            img = skimage.img_as_float(skio.imread(datapath+f) )
#            img -= meanImage
            X[idx, 0, ...] = img
    elif ch == 3:
        X = np.zeros((fragLen, 3, ih, iw))
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            img = skimage.img_as_float(skio.imread(datapath+f) )
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
#            img -= meanImage
            X[idx, ...] = img
    X -= np.tile(meanImage, [fragLen, 1, 1, 1])
    return X
def read_input_OLE():
	alphabet=["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
	#alphabet=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,17,19,20,21,2]
	n=70
	train_data=[]
	train_solutions=[]
	for i in range(n):
		for letter in alphabet:
			train_data.append(np.asarray(flatten_image(imread("chars74k-lite/chars74k-lite/"+letter+"/"+letter+"_"+str(i)+".jpg"))))
			train_data[-1] = np.divide(train_data[-1],255)
			train_solutions.append(ord(letter))
	#
	m=88
	test_data=[]
	test_solutions=[]
	for j in range(n,m):
		for letter in alphabet:
			test_data.append(np.asarray(flatten_image(imread("chars74k-lite/chars74k-lite/"+letter+"/"+letter+"_"+str(j)+".jpg"))))
			test_data[-1] = np.divide(test_data[-1],255)
			test_solutions.append(ord(letter))
	#
	print(len(train_data))
	print(len(train_solutions))
	print(len(test_data))
	print(len(test_solutions))
	return train_data, train_solutions, test_data, test_solutions
def computeMeanImage(trainingPath, testingPath, savePath, imgSize):
    ch, ih, iw = imgSize
    meanImage = np.zeros((ch, ih, iw))
    print('computing mean image')
    folder = os.listdir(trainingPath)
    trainNum = 0
    for f in folder:
        if not f.endswith('.jpg'):
            continue
        img = skimage.img_as_float( skio.imread(trainingPath+f) )
        trainNum += 1
        if ch == 3:
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
        meanImage += img
    
    folder = os.listdir(testingPath)
    testNum = 0
    for f in folder:
        if not f.endswith('.jpg'):
            continue
        img = skimage.img_as_float( skio.imread(testingPath+f) )
        testNum += 1
        if ch == 3:
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
        meanImage += img
    meanImage /= (trainNum + testNum)
    with open(savePath, 'wb') as f:
        np.save(f, meanImage)
def readTrainingFragment(datapath, fragList, imgSize=(1,224,224), meanImage=[], classNum=10):
    ch, ih, iw = imgSize
    fragLen = len(fragList)
    if ch == 1:
        X = np.zeros((fragLen, 1, ih, iw))
        Y = np.zeros((fragLen), dtype=int)
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            # print(f)
            label = int(f[0])
            img = skimage.img_as_float(skio.imread(datapath+f) )
#            img -= meanImage
            X[idx, 0, ...] = img
            Y[idx] = label
    elif ch == 3:
        X = np.zeros((fragLen, 3, ih, iw))
        Y = np.zeros((fragLen), dtype=int)
        idx = -1
        print('reading data')
        for f in fragList:
            idx += 1
            label = int(f[0])
            img = skimage.img_as_float(skio.imread(datapath+f) )
            img = img.swapaxes(1, 2)
            img = img.swapaxes(0, 1)
#            img -= meanImage
            X[idx, ...] = img
            Y[idx] = label
    X -= np.tile(meanImage, [fragLen, 1, 1, 1])
    Y = np_utils.to_categorical(Y, classNum)
    return X, Y
Example #28
def iris_scan_orb(request):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # print("Matched: ", len(matches12), " of ", len(descriptors1))
    percent = len(matches12) / len(descriptors1) * 100

    # print("Percent Match - ", percent, "%")

    """if percent > 80:
        print("Matched!")
    else:
        print("Not Matched!")"""

    return render(request, 'scan.html', {'percent': percent})
Example #29
    def load_sequence(self, sequence):
        """Load a sequence of images/frames

        Auxiliary function that loads a sequence of frames with
        the corresponding ground truth and their filenames.
        Returns a dict with the images in [0, 1], their corresponding
        labels, their subset (i.e. category, clip, prefix) and their
        filenames.
        """
        from skimage import io
        X = []
        Y = []
        F = []

        for prefix, frame in sequence:
            img = io.imread(os.path.join(self.image_path, frame))
            mask = io.imread(os.path.join(self.mask_path, frame))

            img = img.astype(floatX) / 255.
            mask = mask.astype('int32')

            X.append(img)
            Y.append(mask)
            F.append(frame)

        ret = {}
        ret['data'] = np.array(X)
        ret['labels'] = np.array(Y)
        ret['subset'] = prefix
        ret['filenames'] = np.array(F)
        return ret
def compare(file1, file2):
    image1 = io.imread(file1, as_grey = True)
    image2 = io.imread(file2, as_grey = True)
    image1 = feature.canny(image1)
    image2 = feature.canny(image2)

    return ssim(image1, image2)
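A hedged usage sketch (file names are placeholders):

score = compare('frame_001.png', 'frame_002.png')
print('edge-map SSIM: %.3f' % score)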
                     help="Path to BigTiff file",
                     required=True)
 parser.add_argument(
     "-maskPrefix",
     help="Mask file prefix: /path/to/bigtiff.tif[:-4]{MaskPrefix}",
     required=True)
 opt = parser.parse_args()
 print(opt)
 #
 pathTiff = opt.pathBigTiff
 pathMsk = '%s%s' % (pathTiff[:-4], opt.maskPrefix)
 checkFile(pathTiff)
 checkFile(pathMsk)
 tiffReader = BigTiffReader(pathTiff)
 #
 imgMsk = (io.imread(pathMsk) > 0)
 lstRowsMsk, lstColsMsk = np.where(imgMsk)
 nrMsk, ncMsk = imgMsk.shape
 ncTiffL0, nrTiffL0 = tiffReader.layerSizes[0]
 lstRowsTiffL0 = np.round(nrTiffL0 * (lstRowsMsk + 0.5) / nrMsk)
 lstColsTiffL0 = np.round(ncTiffL0 * (lstColsMsk + 0.5) / ncMsk)
 #
 dirOut = '%s_MaskTiles' % (pathTiff[:-4])
 fidxTiles = '%s/idx-tiles.txt' % dirOut
 if not os.path.isdir(dirOut):
     os.mkdir(dirOut)
 #
 numTiles = len(lstRowsTiffL0)
 sizTileL0 = 256
 sizTileL0d2 = int(256 / 2)
 with open(fidxTiles, 'w') as f:
Example #32
    des_type = args["descriptor"]

    print(des_type)

    # If feature directories don't exist, create them
    if not os.path.isdir(pos_feat_ph):
        os.makedirs(pos_feat_ph)

    # If feature directories don't exist, create them
    if not os.path.isdir(neg_feat_ph):
        os.makedirs(neg_feat_ph)

    print "Calculating the descriptors for the positive samples and saving them"
    for im_path in glob.glob(os.path.join(pos_im_path, "*")):
        # im = imread(im_path, as_grey=True)
        im = imread(im_path, as_grey=True)
        #im = cv2.resize(im, (100,40), interpolation=cv2.INTER_CUBIC)

        if des_type == "HOG":
           #fd =     hog(im, orientations=8, pixels_per_cell=(16, 16),
                    #cells_per_block=(1, 1), visualise=True)
           #fd, hog_image = hog(im, orientations=9, pixels_per_cell=(8, 8),
           #         cells_per_block=(3, 3), visualise=True)
           #fd          = hog(im, orientations, pixels_per_cell, cells_per_block, visualize, normalize)
           fd = hog(im, orientations, pixels_per_cell, cells_per_block, visualise=None, block_norm='L1')
        fd_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        fd_path = os.path.join(pos_feat_ph, fd_name)
        joblib.dump(fd, fd_path)
        print "feature pos: {}, shape {}".format(fd, fd.shape )
    print "Positive features saved in {}".format(pos_feat_ph)
Example #33
import scipy.io as sio
import matplotlib.pyplot as plt
import imageio
import Segment
import cv2
from skimage import io, color
from sklearn.mixture import GaussianMixture as GMM
from sklearn.cluster import AgglomerativeClustering as AC
import numpy as np
from sklearn import metrics

img = 'BSDS_small/train/2092.jpg'
rgb = io.imread(img)
# plt.imshow(imageio.imread(img))
# plt.show()

# Load .mat
gt=sio.loadmat(img.replace('jpg', 'mat'))

#Load segmentation from sixth human
segm=gt['groundTruth'][0,5][0][0]['Segmentation']
print(segm.shape)
# plt.imshow(segm, cmap=plt.get_cmap('hot'))
# plt.colorbar()
# plt.show()
scale_percent = 50  # percent of original size
width = int(rgb.shape[1] * scale_percent / 100)
height = int(rgb.shape[0] * scale_percent / 100)
dim = (width, height)
resized = cv2.resize(rgb, dim, interpolation = cv2.INTER_AREA)
Example #34
    def transform_sub(self, X, y=None):
        # print('not running in parallel')
        # img = io.imread(X[0])
        # self.final_size = (np.round(self.size_percentage*np.asarray(img.shape))).astype(np.int)

        X_out = []

        # remove later!!
        rotations = [[0, 0, 1]]
        if self.augmentation or self.multi_column:
            coordinates = [[[0, 0.8], [0, 0.8]], [[0.2, 1], [0, 0.8]],
                           [[0, 0.8], [0.2, 1]], [[0.2, 1], [0.2, 1]],
                           [[0.1, 0.9],
                            [0.1, 0.9]]]  # corner/center coordinates used to crop the image
            if self.aug_rotate:
                rotations = [[-5, 0.05, 0.95], [0, 0.05, 0.95],
                             [5, 0.05, 0.95]]
        else:
            coordinates = [[[0., 1], [
                0., 1.
            ]]]  #if not augmented, just crop one image from the center

        antes1 = datetime.now()  # overall preprocessing timer (used when DEBUG >= 1)
        for filename in X:

            if self.DEBUG >= 2:
                # print('file ', i, ' of ', len(X))
                antes2 = datetime.now()

            img = io.imread(filename)
            if self.DEBUG >= 2:
                print('imread time =', datetime.now() - antes2)

            #convert color images to gray scale
            if len(img.shape) == 3:
                img = (255 * color.rgb2gray(io.imread(filename))).astype(
                    np.uint8)

            img = img.astype(np.float32)

            if self.gauss_noise:
                img = self.add_random_noise(img)

            if self.high_pass:
                img = self.get_high_pass(img)

            if self.low_pass:
                img = self.get_low_pass(img)

            if self.roi:
                img = self.get_roi(img)  #get only the foreground

            for ang, s, e in rotations:

                if self.augmentation:
                    h, w = img.shape
                    img_rot = misc.imrotate(img, ang)
                    img_rot = img_rot[int(s * h):int(e * h),
                                      int(s * w):int(e * w)]
                else:
                    img_rot = img

                for xy1, xy2 in coordinates:  #if augmented crop corners and center. If not, just one crop covering the whole image
                    h, w = img_rot.shape
                    imgs_augmented = []
                    img_crop = img_rot[int(xy1[0] * h):int(xy1[1] * h),
                                       int(xy2[0] * w):int(xy2[1] * w)]
                    imgs_augmented.append(img_crop)  #append the regular image
                    if self.augmentation or self.multi_column:
                        imgs_augmented.append(
                            img_crop[::, ::-1]
                        )  #if augmentation, append mirrored image

                    for img_final in imgs_augmented:

                        if self.DEBUG >= 2:
                            antes4 = datetime.now()

                        # img_final = img_final[120:-120, 100:-100]  # remove later: forced crop

                        #get the final size to ensure that all images will have this size. This is a workaround for biometrika 2011 sensor since it has images with different sizes
                        #if final_size ==None:
                        #    final_size = (np.round(self.size_percentage*np.asarray(img_final.shape))).astype(np.int)

                        sample = misc.imresize(img_final, self.final_size)
                        if self.DEBUG >= 2:
                            print('resize time =', datetime.now() - antes4)

                        if self.feature_extractor is not None:
                            if self.DEBUG >= 2:
                                antes3 = datetime.now()

                            sample = self.feature_extractor.extract(sample)

                            if self.DEBUG >= 2:
                                print('feature_extractor time =', datetime.now() - antes3)
                                print('Sample.shape =', sample.shape)
                                sys.stdout.flush()  # force print when running child/sub processes
                        sample = sample.reshape(-1).astype(np.float32)
                        X_out.append(sample)

        if self.DEBUG >= 1:
            print('preprocessing time =', datetime.now() - antes1)
            sys.stdout.flush()  # force print when running child/sub processes
        X_out = np.asarray(X_out)
        if self.ZCA:
            X_out = self.apply_ZCA(X_out)
        return X_out
Example #35
    return sorted(l, key=alphanum_key)


if __name__ == "__main__":
    f = open(full_path + '/results/results.txt', 'w')
    c = open(full_path + '/results/cards.txt', 'w')
    neural_net = load_model(full_path + '/models/neural_model.h5')
    random_forest = pick.load(
        open(full_path + "/models/random_forest.pkl", 'rb'))

    pictures = os.listdir(full_path + '/data/test_dataset/')
    sorted_pictures = natural_sort(pictures)
    print('--- Running through test images...')
    for picture in sorted_pictures:
        print(picture)
        img = imread(full_path + '/data/test_dataset/' + picture)
        card_list = cd.card_recognition(img, neural_net)
        c.write(str(card_list) + '\n')
        hand = cph.get_poker_hand(random_forest, card_list)
        if (hand == 0):
            f.write(picture +
                    ' --- Nothing in hand; not a recognized poker hand.\n')
        elif (hand == 1):
            f.write(
                picture +
                ' --- One pair; one pair of equal ranks within five cards.\n')
        elif (hand == 2):
            f.write(
                picture +
                ' --- Two pairs; two pairs of equal ranks within five cards.\n'
            )
Example #36
import matplotlib.pyplot as plt
from skimage import measure, io, morphology, transform, util
from skimage import filters, color, data
from skimage.morphology import closing, square
from skimage.segmentation import clear_border
import matplotlib.patches as mpatches
import numpy as np

img = io.imread('./data/x_train_png/3.png')
test = io.imread('./pic/standerpic/numbers_1_10.png')

threshold = filters.threshold_yen(test)
test = (test > threshold)
test = transform.resize(test, (28, 28))
print(test)
io.imshow(test)
io.show()
import os

from tqdm import tqdm
import numpy as np
from skimage.io import imread, imsave
from skimage.transform import resize

in_dir = 'unzippedIntervalFaces/data/%s/1.6/'
img_size = (256, 256)
out_dir = 'vox'
format = '.jpg'

if not os.path.exists(out_dir):
    os.makedirs(out_dir)

for partition in ['train', 'test']:
    par_dir = os.path.join(out_dir, partition)
    if not os.path.exists(par_dir):
        os.makedirs(par_dir)
    celebs = open(partition + '_vox1.txt').read().splitlines()
    for celeb in tqdm(celebs):
        celeb_dir = in_dir % celeb
        for video_dir in os.listdir(celeb_dir):
            for part_dir in os.listdir(os.path.join(celeb_dir, video_dir)):
                result_name = celeb + "-" + video_dir + "-" + part_dir + format
                part_dir = os.path.join(celeb_dir, video_dir, part_dir)
                images = [
                    resize(imread(os.path.join(part_dir, img_name)), img_size)
                    for img_name in sorted(os.listdir(part_dir))
                ]
                if len(images) > 100 or len(images) < 4:
                    print("Warning sequence of len - %s" % len(images))
                result = np.concatenate(images, axis=1)
                imsave(os.path.join(par_dir, result_name), result)
def get_img(data_path, img_size=64, grayscale_images=True):
    # Getting image array from path:
    img = imread(data_path, as_gray=grayscale_images)
    img = resize(img, (img_size, img_size, 1 if grayscale_images else 3))
    return img
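A hedged usage sketch (the path is a placeholder):

# x = get_img('data/train/cat_001.png', img_size=64)
# x.shape  -> (64, 64, 1), float values produced by resize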
Example #39
    # nuclei segmentation
    nucleiMask = S3NucleiSegmentationWatershed(nucleiPM, nucleiCrop,
                                               args.logSigma, TMAmask,
                                               args.nucleiFilter,
                                               args.nucleiRegion)
    del nucleiPM
    # cytoplasm segmentation
    if args.segmentCytoplasm == 'segmentCytoplasm':
        count = 0
        if args.crop == 'noCrop' or args.crop == 'dearray' or args.crop == 'plate':
            cyto = np.empty((len(
                args.CytoMaskChan), nucleiCrop.shape[0], nucleiCrop.shape[1]),
                            dtype=np.uint16)
            for iChan in args.CytoMaskChan:
                cyto[count, :, :] = skio.imread(imagePath, key=iChan)
                count += 1
        else:
            cyto = np.empty((len(args.CytoMaskChan), rect[3], rect[2]),
                            dtype=np.int16)
            for iChan in args.CytoMaskChan:
                cytoFull = skio.imread(imagePath, key=iChan)
                cyto[count, :, :] = cytoFull[int(PMrect[0]):int(PMrect[0] +
                                                                PMrect[2]),
                                             int(PMrect[1]):int(PMrect[1] +
                                                                PMrect[3])]
                count += 1
        cyto = np.amax(cyto, axis=0)
        cytoplasmMask, nucleiMaskTemp, cellMask = S3CytoplasmSegmentation(
            nucleiMask, cyto, TMAmask, args.cytoMethod, args.cytoDilation)
        exportMasks(nucleiMaskTemp, nucleiCrop, outputPath, filePrefix,
Example #40
    def eval(self):
        """Evaluate a trained FCN model using mean IoU
            metric.
        """
        s_iou = 0
        s_pla = 0
        # evaluate iou per test image
        eps = np.finfo(float).eps
        for key in self.test_keys:
            # load a test image
            image_path = os.path.join(self.args.data_path, key)
            image = skimage.img_as_float(imread(image_path))
            segmentation = self.segment_objects(image)
            # load test image ground truth labels
            gt = self.test_dictionary[key]
            i_pla = 100 * (gt == segmentation).all(axis=(2)).mean()
            s_pla += i_pla

            i_iou = 0
            n_masks = 0
            # compute mask for each object in the test image
            # including background
            for i in range(self.n_classes):
                if np.sum(gt[..., i]) < eps:
                    continue
                mask = segmentation[..., i]
                intersection = mask * gt[..., i]
                union = np.ceil((mask + gt[..., i]) / 2.0)
                intersection = np.sum(intersection)
                union = np.sum(union)
                if union > eps:
                    iou = intersection / union
                    i_iou += iou
                    n_masks += 1

            # average iou per image
            i_iou /= n_masks
            if not self.args.train:
                log = "%s: %d objs, miou=%0.4f ,pla=%0.2f%%"\
                      % (key, n_masks, i_iou, i_pla)
                print_log(log, self.args.verbose)

            # accumulate all image ious
            s_iou += i_iou
            if self.args.plot:
                self.evaluate(key, image)

        n_test = len(self.test_keys)
        m_iou = s_iou / n_test
        self.miou_history.append(m_iou)
        np.save("miou_history.npy", self.miou_history)
        m_pla = s_pla / n_test
        self.mpla_history.append(m_pla)
        np.save("mpla_history.npy", self.mpla_history)
        if m_iou > self.miou and self.args.train:
            log = "\nOld best mIoU=%0.4f, New best mIoU=%0.4f, Pixel level accuracy=%0.2f%%"\
                    % (self.miou, m_iou, m_pla)
            print_log(log, self.args.verbose)
            self.miou = m_iou
            print_log("Saving weights... %s"\
                      % self.weights_path,\
                      self.args.verbose)
            self.fcn.save_weights(self.weights_path)
        else:
            log = "\nCurrent mIoU=%0.4f, Pixel level accuracy=%0.2f%%"\
                    % (m_iou, m_pla)
            print_log(log, self.args.verbose)
l_pair = [
          [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13],
          [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3],
          [2, 4], [3, 5], [4, 6], [5, 7]
          ]

time1 = time.time()
annFile = 'path/name.json'
imageDir = 'path/name.jpg'
f = open(annFile, encoding='utf-8')
jsonFile = json.load(f)
length = len(jsonFile)
string = jsonFile[0]['image']
img_id = re.findall(r"\d+\d*", string)

I = io.imread(imageDir)
plt.imshow(I)
plt.axis('off')

for k in range(0, length):
    sks = np.array(l_pair)-1
    keypoints = jsonFile[k]['keypoints']
    kp = np.array(keypoints)
    x = kp[0::3]
    y = kp[1::3]
    for sk in sks:
        plt.plot(x[sk], y[sk], linewidth=0.5, color=[1, 1, 0])
plt.show()
plt.close()
time2 = time.time()
print('time cost = %.2f min' % ((time2 - time1) / 60))
Example #42
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import bruit_multiplicatif
import filtrage_median
from skimage import io
import matplotlib.pyplot as plt
from math import log10
from copy import deepcopy

image = io.imread('lena.jpg').astype(float)
image2 = deepcopy(image)

# Image size (assumes a square image)
taille = image.shape[0]
pbruit = 0
psignal = 0

# ---------------- ADD NOISE ------------------
bruit_multiplicatif.bruit(image)

# ---------------- DENOISE --------------------
filtrage_median.debruit(image)

# SNR: ratio of signal power to noise power, in dB
for x in range(0, taille):
    for y in range(0, taille):
        psignal = psignal + (image2[x][y])**2
        pbruit = pbruit + (image2[x][y] - image[x][y])**2
snr = 10 * log10(psignal / pbruit)
def decodePixels(x_dim: int,
                 y_dim: int,
                 codebook: str,
                 bit_len: int,
                 img_path_list: List[str],
                 img_prefix: str,
                 threshold: float = 0.5176) -> pd.DataFrame:
    """
    Decodes a list of images in a pixelwise manner, assigning each pixel to the closest barcode in the codebook.

    Parameters
    ----------
        x_dim : int
            X dimension of input images
        y_dim : int
            Y dimension of input images
        codebook : str
            Path to codebook
        bit_len : int
            Length of the expected bins = number of images in img_path_list
        img_path_list : List[str]
            List of paths to the input images
        img_prefix : str
            Prefix used to sort the input images in ascending order
        threshold : float
            Distance threshold for a pixel vector to be assigned to a barcode vector.

    Returns
    -------
        pd.DataFrame
           Dataframe with every pixel being assigned to the closest barcode in the codebook.

    """
    codebook_df = parseBarcodes(codebook, bit_len)

    # It is important to sort on the passed img_prefix, because the images must be iterated in order
    r = re.compile(rf"{img_prefix}(\d+)")

    def key_func(m):
        return int(r.search(m).group(1))

    img_path_list.sort(key=key_func)

    # Convert images to float for a correct distance comparison
    image_list = [img_as_float(io.imread(img)) for img in img_path_list]
    rows_list = []  # stores the rows of the DataFrame that will be returned
    # Iterate over every pixel
    for x in range(0, x_dim):
        for y in range(0, y_dim):
            # The attribute dict stores the key/value pairs of the row's entries
            attribute_dict = {}
            attribute_dict['X'] = x
            attribute_dict['Y'] = y
            pixel_vector = createPixelVector(x, y, image_list)
            minimal_distance = np.inf
            gene_label = ""
            gene_name = ""
            barcode = ""
            for row in codebook_df.itertuples():
                distance = np.linalg.norm(row.Vector - pixel_vector)
                if distance < minimal_distance:
                    minimal_distance = distance
                    gene_label = row.Index
                    gene_name = row.Gene
                    barcode = row.Barcode
            attribute_dict['Barcode'] = barcode
            attribute_dict['Distance'] = minimal_distance
            attribute_dict['Gene'] = gene_name
            # If minimal distance not passing the threshold, it will be labeled as background
            if minimal_distance > threshold:
                gene_label = 0
            attribute_dict['Gene_Label'] = gene_label
            rows_list.append(attribute_dict)
    result_df = pd.DataFrame(rows_list)
    return result_df
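The createPixelVector helper is not shown; a minimal sketch consistent with how it is called above (assumed, not the original implementation):

def createPixelVector(x, y, image_list):
    # Stack the intensity of pixel (x, y) across all coding rounds,
    # in the same (sorted) order as the input image list.
    return np.array([img[x, y] for img in image_list])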
Example #44
# read the 3dmm eigenvectors
face3dmm = dataloader.Face3DMM()

# read the model checkpoint
model = densenet
model.load_state_dict(torch.load("model/chkpoint_000.pt"))
model.cuda()

# read the adjustment network checkpoint
adjustmentnet.load_state_dict(torch.load("model/chkpoint_adj_000.pt"))
adjustmentnet.cuda()

# read the img file from system argument
filenamein = sys.argv[1]
original = io.imread(filenamein)
new_h,new_w,d = original.shape

# reshape image to 224x224
batchsize=1
resized = cv2.resize(original,(224,224),interpolation=cv2.INTER_AREA)
img = torch.Tensor(resized).float().cuda()
img = img.permute(2,0,1).unsqueeze(0)

# apply model on input
y_pred = model(img)
alphas = y_pred[:,:199]
betas = y_pred[:,199:228]
s = y_pred[:,228]
t = torch.tanh(y_pred[:,229:231])
r = torch.tanh(y_pred[:,231:235]) * (3.14/4)
Example #45
def plot():

    # Rescales the original image to replicate the scanner's resolution and converts to greyscale for simplicity
    print("\nSimulating using " + this +
          " using Filtered Back Projection (FBP) for reconstruction")
    image = imread(this, as_gray=True)
    image2 = skt.rescale(image,
                         scale=1 / scale,
                         mode='reflect',
                         multichannel=False)
    scaledres = int((image.shape[0]) / scale)
    print("Scanner resolution is set to " + str(scaledres) + " x " +
          str(scaledres))

    a1 = fig.add_subplot(221)
    a2 = fig.add_subplot(222)

    # Displays to original picture in greyscale
    a1.imshow(image, cmap=plt.cm.Greys_r)
    a1.set_title("Original Image")

    #Variable to change the number of total slices. set to scanner resolution
    numofSlices = max(image2.shape)

    # Creates the sinogram
    print("Creating sinogram...")
    #theta = np.linspace(0., degree, max(image2.shape), endpoint=False)
    theta = np.linspace(0., 180, numofSlices, endpoint=False)
    sinogram = skt.radon(image2, theta=theta, circle=True)
    print("Sinogram complete")

    a2.imshow(sinogram,
              cmap=plt.cm.Greys_r,
              extent=(0, 180, 0, sinogram.shape[0]),
              aspect='auto')
    a2.set_title("Radon transform\n(Sinogram)")
    a2.set_xlabel("Projection angle (deg)")
    a2.set_ylabel("Projection position (pixels)")

    a3 = fig.add_subplot(223)
    a4 = fig.add_subplot(224)

    # Reconstructs the image from the sinogram using FBP
    print("Image being reconstructed")
    reconstruction_fbp = skt.iradon(sinogram, theta=theta, circle=True)
    print("Reconstruction complete.")

    #Calculates the error
    error = reconstruction_fbp - image2
    print('FBP rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2)))
    rerror = 'Scanner resolution is set to ' + str(scaledres) + ' x ' + str(
        scaledres) + '\nFBP rms reconstruction error: %.3g' % np.sqrt(
            np.mean(error**2))
    imkwargs = dict(vmin=-0.2, vmax=0.2)

    a3.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
    a3.set_title('Reconstruction with\nFiltered back projection')

    a4.imshow(reconstruction_fbp - image2, cmap=plt.cm.Greys_r, **imkwargs)
    a4.set_title('Reconstruction error\ndifference')

    fig.tight_layout()
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.get_tk_widget().grid(row=2, columnspan=5)
    Label(window, text=rerror, font=("Ariel", 10), fg="red").grid(row=3,
                                                                  sticky=SE)
    canvas.draw()
Example #46
    return model_robust, inliers


###################################################################################

if __name__ == "__main__":

    image_files = sorted(os.listdir("input"))
    for img in image_files:
        if img.split(".")[-1].lower() not in ["jpg", "jpeg", "png"]:
            image_files.remove(img)

    images = []
    for imgN in image_files:
        print("Reading in file {}".format(imgN))
        img = imread("input/{}".format(imgN))
        # img = resize(img, (img.shape[0] / 2, img.shape[1] / 2))
        images.append(img)

    start = time.time()
    n = 0
    print("Image  {}".format(n))
    imsave("aligned/aligned{:02d}.jpg".format(n), images[n])

    print("   detecting features")
    keypoints1, descriptors1 = detectFeatures(images[n])
    print("   Time Elapsed = {:.3f}".format(time.time() - start))

    tform = SimilarityTransform(scale=1)

    for n in range(1, len(images)):
Example #47
from skimage import io, util, color, feature
import sys
from scipy import ndimage as nd
import numpy as np

img = util.img_as_float(color.rgb2gray(io.imread(sys.argv[1])))

# Canny edge detector
gaussian_img = nd.gaussian_filter(img, int(sys.argv[2]))
canny = feature.canny(gaussian_img)
# io.imsave(sys.argv[3], util.img_as_uint(canny))

# Prewitt operators
prk_x = np.asarray([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
prk_y = np.asarray([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
"""
# Sobel
prk_x = np.asarray([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
prk_y = np.asarray([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
"""

grad_x = nd.convolve(gaussian_img, prk_x, mode='nearest')
grad_y = nd.convolve(gaussian_img, prk_y, mode='nearest')

grad_x_y = np.sqrt(grad_x**2 + grad_y**2)

io.imsave('prewitt.png', util.img_as_uint(grad_x_y))
Example #48
# %%
src_dir = './Datasets/Zurich_dataset_v1.0/images_tif'
tar_dir = './Datasets/Zurich'


dir_IR = f'{tar_dir}/IR'
dir_RGB = f'{tar_dir}/RGB'
if not os.path.exists(dir_IR):
    os.makedirs(dir_IR)
if not os.path.exists(dir_RGB):
    os.makedirs(dir_RGB)

id_tif = 11

img_name = f'zh{id_tif}.png'    
img_BGR = skio.imread(f'{dir_RGB}/{img_name}')
img_RGB = img_BGR[..., ::-1]


img_en = np.zeros(img_RGB.shape, dtype=np.uint8)
for c in range(img_RGB.shape[-1]):
    p1, p99 = np.percentile(img_RGB[..., c], (1, 99))
    img_en[..., c] = exposure.rescale_intensity(img_RGB[..., c], in_range=(p1, p99))


# %%
def result_montage(dataset, n=3):
#    dataset='Zurich'
#    modality='A'
#    n=3
Example #49
def load_data(path=None,
              fls_file='',
              al_file='',
              flip=None,
              flip_fls_file=None,
              filtersize=3):
    """Load files in a directory (from a .fls file) using hyperspy. 

    For more information on how to organize the directory and load the data, as 
    well as how to setup the .fls file please refer to the README or the 
    TIE_template.ipynb notebook. 

    Args:
        path (str): Location of data directory. 
        fls_file (str): Name of the .fls file which contains the image names and
            defocus values. 
        al_file (str): Name of the aligned stack image file. 
        flip (bool): True if using a flip stack, False otherwise. Uniformly
            thick films can be reconstructed without a flip stack, though the
            electrostatic phase shift will then not be reconstructed.
        flip_fls_file (str): Name of the .fls file for the flip images if they 
            are not named the same as the unflip files. Will only be applied to 
            the /flip/ directory. 
        filtersize (int): (`optional`) The images are processed with a median 
            filter to remove hot pixels which occur in experimental data. This 
            should be set to 0 for simulated data, though generally one would 
            only use this function for experimental data. 
    
    Returns:
        list: List of length 3, containing the following items: 

        - imstack: array of hyperspy signal2D objects (one per image)
        - flipstack: array of hyperspy signal2D objects, empty array if 
          flip == False  
        - ptie: TIE_params object holding a reference to the imstack and many
          other parameters.  

    """

    unflip_files = []
    flip_files = []

    # Finding the unflip fls file
    path = os.path.abspath(path)
    if not fls_file.endswith('.fls'):
        fls_file += '.fls'
    if os.path.isfile(os.path.join(path, fls_file)):
        fls_full = os.path.join(path, fls_file)
    elif os.path.isfile(os.path.join(path, 'unflip', fls_file)):
        fls_full = os.path.join(path, 'unflip', fls_file)
    elif os.path.isfile(os.path.join(path, 'tfs', fls_file)) and not flip:
        fls_full = os.path.join(path, 'tfs', fls_file)
    else:
        print("fls file could not be found.")
        sys.exit(1)

    if flip_fls_file is None:  # one fls file given
        fls = []
        with open(fls_full) as file:
            for line in file:
                fls.append(line.strip())

        num_files = int(fls[0])
        if flip:
            for line in fls[1:num_files + 1]:
                unflip_files.append(os.path.join(path, 'unflip', line))
            for line in fls[1:num_files + 1]:
                flip_files.append(os.path.join(path, 'flip', line))
        else:
            if os.path.isfile(os.path.join(path, 'tfs', fls[2])):
                tfs_dir = 'tfs'
            else:
                tfs_dir = 'unflip'
            for line in fls[1:num_files + 1]:
                unflip_files.append(os.path.join(path, tfs_dir, line))

    else:  # there are 2 fls files given
        if not flip:
            print(
                textwrap.dedent("""
                You probably made a mistake.
                You're defining both unflip and flip fls files but have flip=False.
                Proceeding anyway; will only load the unflip stack (if it doesn't break).\n"""
                                ))
        # find the flip fls file
        if not flip_fls_file.endswith('.fls'):
            flip_fls_file += '.fls'
        if os.path.isfile(os.path.join(path, flip_fls_file)):
            flip_fls_full = os.path.join(path, flip_fls_file)
        elif os.path.isfile(os.path.join(path, 'flip', flip_fls_file)):
            flip_fls_full = os.path.join(path, 'flip', flip_fls_file)
        else:
            # without this branch, flip_fls_full would be undefined below
            print("flip fls file could not be found.")
            sys.exit(1)

        fls = []
        flip_fls = []
        with open(fls_full) as file:
            for line in file:
                fls.append(line.strip())

        with open(flip_fls_full) as file:
            for line in file:
                flip_fls.append(line.strip())

        assert int(fls[0]) == int(flip_fls[0])
        num_files = int(fls[0])
        for line in fls[1:num_files + 1]:
            unflip_files.append(os.path.join(path, "unflip", line))
        for line in flip_fls[1:num_files + 1]:
            flip_files.append(os.path.join(path, "flip", line))

    # Actually load the data using hyperspy
    imstack = hs.load(unflip_files)
    if flip:
        flipstack = hs.load(flip_files)
    else:
        flipstack = []

    # convert scale dimensions to nm
    for sig in imstack + flipstack:
        sig.axes_manager.convert_units(units=['nm', 'nm'])

    if unflip_files[0][-4:] != '.dm3' and unflip_files[0][-4:] != '.dm4':
        # if not dm3's then they generally don't have the title metadata.
        for sig in imstack + flipstack:
            sig.metadata.General.title = sig.metadata.General.original_filename

    # load the aligned tifs and update the dm3 data to match
    # The data from the dm3's will be replaced with the aligned image data.
    try:
        al_tifs = io.imread(os.path.join(path, al_file))
    except FileNotFoundError as e:
        print('Incorrect aligned stack filename given.')
        raise e

    if flip:
        tot_files = 2 * num_files
    else:
        tot_files = num_files

    for i in range(tot_files):
        # pull slices from the correct axis; assumes the stack has fewer slices than the images have rows
        if al_tifs.shape[0] < al_tifs.shape[2]:
            im = al_tifs[i]
        elif al_tifs.shape[0] > al_tifs.shape[2]:
            im = al_tifs[:, :, i]
        else:
            print("Bad stack\n Or maybe the second axis is slice axis?")
            print('Loading failed.\n')
            sys.exit(1)

        # then median filter to remove "hot pixels"
        im = median_filter(im, size=filtersize)

        # and assign to appropriate stack
        if i < num_files:
            print('loading unflip:', unflip_files[i])
            imstack[i].data = im
        else:
            j = i - num_files
            print('loading flip:', flip_files[j])
            flipstack[j].data = im

    # read the defocus values
    defvals = fls[-(num_files // 2):]
    assert num_files == 2 * len(defvals) + 1
    defvals = [float(i) for i in defvals]  # defocus values +/-

    # Create a TIE_params object
    ptie = TIE_params(imstack, flipstack, defvals, flip, path)
    print('Data loaded successfully.')
    return (imstack, flipstack, ptie)
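
# Hypothetical usage sketch; the directory layout, file names, and option
# values below are placeholders, not taken from the source:
if __name__ == '__main__':
    imstack, flipstack, ptie = load_data(
        path='./experiment_01',         # contains unflip/ and flip/ subdirs
        fls_file='dataset.fls',         # image names and defocus values
        al_file='aligned_stack.tif',    # pre-aligned image stack
        flip=True,                      # a flip stack is present
        filtersize=3)                   # median filter for hot pixels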
Example #50
def inpaint_image(image, mask):
    image = np.array(image.copy())
    mask = np.array(mask.copy())
    # multichannel=True keeps the channel axis from being rescaled too
    image_orig = rescale(image, 1.0 / 5.0, anti_aliasing=False,
                         multichannel=True)
    mask = color.rgb2gray(mask)
    rescaled_mask = rescale(mask, 1.0 / 5.0, anti_aliasing=False)
    thresh = threshold_otsu(rescaled_mask)
    binary = rescaled_mask > thresh
    image_defect = image_orig.copy()
    # zeroing the masked pixels affects all channels at once, so no
    # per-channel loop is needed
    image_defect[np.where(binary)] = 0
    image_result = inpaint.inpaint_biharmonic(image_defect, binary,
                                              multichannel=True)
    image_result = rescale(image_result, 5.0, anti_aliasing=False,
                           multichannel=True)

    return image_result, mask

RESULT_DIR = os.path.join(config.IHS_DIR, "result")

# create the result directory itself (os.path.dirname(RESULT_DIR) would
# only check the parent, which already exists)
if not os.path.exists(RESULT_DIR):
    os.makedirs(RESULT_DIR)

if __name__ == "__main__":
    image = io.imread(os.path.join(config.IHS_DIR, 'input.png'))
    mask = io.imread(os.path.join(config.IHS_DIR, 'mask.png'))

    image_result, mask = inpaint_image(image, mask)
    io.imsave(os.path.join(RESULT_DIR, "image_orig.png"), image)
    io.imsave(os.path.join(RESULT_DIR, "image_result.png"), image_result)

    def getIm(self):
        im = io.imread(self.fname)
        im = np.divide(im, np.mean(im))
        return im
Example #52
import time

import numpy as np
import tensorflow as tf

from skimage import img_as_float
from skimage.io import imread, imsave

from guided_filter_tf.guided_filter import guided_filter

## GuidedFilter
print('GuidedFilter:')
## check forward
# forward on img
rgb = img_as_float(imread('test/rgb.jpg'))
gt = img_as_float(imread('test/gt.jpg'))
# HWC -> NCHW with a leading batch axis, as guided_filter expects
x, y = [tf.constant(i.transpose((2, 0, 1))[None]) for i in [rgb, gt]]
output = guided_filter(x, y, 64, 0)

with tf.Session() as sess:
    start_time = time.time()
    r = sess.run(output)
    end_time = time.time()
print('\tForward on img ...')
print('\t\tTime: {}'.format(end_time - start_time))

r = r.squeeze().transpose(1, 2, 0)
r = np.asarray(r.clip(0, 1) * 255, dtype=np.uint8)
imsave('test/r.jpg', r)
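
# Note: the session-based code above targets the TensorFlow 1.x API.
# Under TensorFlow 2.x it would need the v1 compatibility layer, roughly
# (an assumption, not part of the original):
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()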
Example #53
    def __init__(self):
        image = io.imread("1.jpg")
        pos, orient = script3.getRobotState(image)
        self.pos = pos
        self.orientation = orient
def extractLetter(ref):
	ref = rgb2gray(ref)
	# a, b = compare_images(img, ref)	
	img1 = get(rgb2gray(imread('letters/a-new.crop.png')))
	img2 = get(rgb2gray(imread('letters/b-new.crop.png')))
	img3 = get(rgb2gray(imread('letters/c-new.crop.png')))
	img4 = get(rgb2gray(imread('letters/d-new.jpg')))

	c1 = c2d(img1, ref, mode='same')  # baseline
	c2 = c2d(img2, ref, mode='same')
	c3 = c2d(img3, ref, mode='same')
	c4 = c2d(img4, ref, mode='same')

	result = np.array([c1.max(), c2.max(), c3.max(), c4.max()])
	letters = ['a', 'b', 'c', 'd']
	ridx = np.unravel_index(result.argmax(), result.shape)  # index of the best match

	if (letters[ridx[0]] == 'd' and result[ridx[0]] < 190):  # weak 'd' match: take the next best
		result[ridx[0]] = 0
		ridx = np.unravel_index(result.argmax(), result.shape)

	return letters[ridx[0]], result[ridx[0]]



# refim = (imgBinarize(rgb2gray(imread('letters/d-new.jpg'))))	
# im = (imgBinarize(rgb2gray(imread('letters/all-letters.jpg'))))
# # extractLetter(ref)


# table = buildRefTable(refim)
# acc = matchTable(im, table)
# val, ridx, cidx = findMaxima(acc)

# # find the half-width and height of template
# hheight = np.floor(refim.shape[0] / 2) + 1
# hwidth = np.floor(refim.shape[1] / 2) + 1

# cstart = max(cidx - hwidth, 1)
# cend = min(cidx + hwidth, im.shape[1] - 1)
# # mcq_col = im[0:im.shape[0]-1, cstart:cend] # column containing all mcq's

# # imsave('match-col-orig.jpg', mcq_col) # saving the matched component

# # convolving our reference image on mcq column
# # acc = matchTable(mcq_col, table)
# # val, ridx, cidx = findMaxima(acc)

# # find coordinates of the box
# rstart = max(ridx - hheight, 1)
# rend = min(ridx + hheight, im.shape[0] - 1)
# # cstart = max(cidx - hwidth, 1)
# # cend = min(cidx + hwidth, im.shape[1] - 1)

# # draw the box
# im[rstart:rend, cstart] = 255
# im[rstart:rend, cend] = 255

# im[rstart, cstart:cend] = 255
# im[rend, cstart:cend] = 255

# plt.imshow(im, cmap='gray')
# plt.show()
with open(os.path.join(model_dir, "model_specification"), "r") as ms:
    model_name = ms.readline().strip()

my_classifier = get_model(model_name)
my_classifier.load_state_dict(torch.load(model_path))
my_classifier.to(device)
my_classifier.eval()
transform = preprocess.make_transform(mode="predict")

plt.ion()
camera_url = "http://*****:*****@ip:8080/photo.jpg"
while True:
    r = requests.get(camera_url)
    f = io.BytesIO(r.content)
    im = imread(f)
    im = rotate(im, -90, resize=True)
    im = im[420:-420, :]
    im = transform(np.float32(im))
    im = im.view(1, 3, 224, 224)
    show_im = np.clip(im[0].numpy().transpose(1, 2, 0), 0, 1)
    
    plt.clf()
    plt.imshow(show_im)
    plt.draw()
    plt.pause(0.01)
    
    with torch.no_grad():
        pred = my_classifier(im.to(device))
        pred = torch.argmax(pred, axis=1).cpu().item()
        print(pred, labels[pred])
def verify_image(img_file):
    """Return True if the file can be decoded as an image."""
    try:
        io.imread(img_file)
    except Exception:
        # a bare except would also swallow KeyboardInterrupt and SystemExit
        return False
    return True
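
# Hypothetical usage sketch: keep only the files that actually decode as
# images ("downloads" is a placeholder path, not from the source):
valid_files = [f for f in os.listdir("downloads")
               if verify_image(os.path.join("downloads", f))]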
Example #57
def rows(outdir, imgdir, method, auto, selem, reset, n_clusters):
    ''' Run row analyses on imgdir, output figures and export to
    outdir '''

    if not osp.isdir(outdir):
        log.debug(f"Creating output directory {outdir}")
        os.mkdir(osp.abspath(outdir))
    if not osp.isdir(osp.join(outdir, 'pickles')):
        os.mkdir(osp.join(outdir, 'pickles'))
    if not osp.isdir(osp.join(outdir, 'temp')):
        os.mkdir(osp.join(outdir, 'temp'))

    columns = [
        'Image#', 'Row#', 'num_objs', 'area', 'Shape_PC1', 'Shape_PC2',
        'mean_R', 'mean_G', 'mean_B', 'mean_H', 'mean_S', 'mean_V'
    ]
    data = pd.DataFrame(columns=columns)

    # set defaults
    resp_thresh = STARTING_THRESH
    resp_selem = selem
    rep = False
    for img_file in os.listdir(imgdir):
        log.info(f"Processing {img_file}")
        saved_analysis_file = osp.join(
            outdir, 'pickles', '.'.join(img_file.split('.')[0:-1]) + '.pkl')
        if osp.exists(saved_analysis_file) and not reset:
            log.info(f"Loading data from previous analysis on {img_file}")
            with open(saved_analysis_file, 'rb') as analysis_file:
                row_data = pkl.load(analysis_file)
            data = _append_row(data, row_data, img_file)
            continue

        image = imread(osp.join(imgdir, img_file))

        if not auto:
            plt.imshow(image)
            plt.show()

        resp_selem = selem  # Reset the default binary closing matrix size

        if method == 'kmeans':
            while True:
                filter_file = osp.join(outdir, 'temp',
                                       osp.basename(img_file)[:-3] + 'npy')
                if osp.exists(filter_file) and not (reset or rep):
                    filter = np.load(filter_file)
                    _, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15, 15))
                    ax1.imshow(filter, cmap='gray')

                else:
                    filter = get_filter_kmeans(image,
                                               n_clusters=n_clusters,
                                               opening_selem=resp_selem,
                                               auto=auto)
                    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15, 15))
                    ax1.imshow(filter)

                rows = sorted_regions(filter)

                ax2.imshow(image)

                row_data = {}
                for row_num, row in enumerate(rows):
                    row_data[row_num] = row_analysis(image,
                                                     row,
                                                     row_num,
                                                     plt_axis=ax2)

                plt.savefig(osp.join(outdir, osp.basename(img_file)))
                if not auto:
                    plt.tight_layout()
                    plt.show()

                if not auto and _kmeans_reset_prompt():
                    continue
                else:
                    np.save(filter_file, filter)
                    break

        elif method == 'thresh':
            while True:
                filter = get_filter_thresh(image,
                                           std_factor=resp_thresh,
                                           opening_selem=resp_selem,
                                           auto=auto)
                # the reset prompt is only issued further below, after the
                # results have been displayed (and never in auto mode)

                rows = sorted_regions(filter)

                fig, ax = plt.subplots()
                ax.imshow(image)

                row_data = {}

                for row_num, row in enumerate(rows):
                    row_data[row_num + 1] = row_analysis(image,
                                                         row,
                                                         row_num,
                                                         plt_axis=ax)

                plt.savefig(osp.join(outdir, osp.basename(img_file)))
                if not auto:
                    plt.tight_layout()
                    plt.show()
                    rep, resp_thresh, resp_selem = _thresh_reset_prompt(
                        resp_thresh, resp_selem)
                    if rep:
                        continue

                break

        else:
            click.echo(f'Invalid method designation: {method}')
            return False

        data = _append_row(data, row_data, img_file)

        log.info("Caching row analysis")
        with open(saved_analysis_file, 'wb') as analysis_file:
            pkl.dump(row_data, analysis_file)

    log.info("Exporting data to csv")
    data = data.sort_values(['Image#', 'Row#'])
    data[columns].to_csv(osp.join(outdir, "export.csv"))
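
# Hypothetical direct invocation of rows(); the argument values below are
# placeholders (in the project this is presumably exposed as a click
# command, whose decorators are not shown in this snippet):
if __name__ == '__main__':
    rows(outdir='results', imgdir='field_images', method='kmeans',
         auto=True, selem=5, reset=False, n_clusters=3)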
Example #58
    def __init__(self, src):
        self.im = io.imread("1.jpg")
Example #59
valid_x = next(os.walk(valid_x_path))[2]
valid_y = next(os.walk(valid_y_path))[2]

# Initializing arrays for training, validation, and testing data
x_train = np.zeros((len(train_x), HEIGHT, WIDTH, CHANNELS), dtype = np.uint8)
y_train = np.zeros((len(train_y), HEIGHT, WIDTH, 1), dtype = np.uint8)
x_test = np.zeros((len(testing), HEIGHT, WIDTH, CHANNELS), dtype = np.uint8)
x_valid = np.zeros((len(valid_x), HEIGHT, WIDTH, CHANNELS), dtype = np.uint8)
y_valid = np.zeros((len(valid_y), HEIGHT, WIDTH, 1), dtype = np.uint8)

# Resizing training images
print("\nResizing training images")

for n, id_ in tqdm(enumerate(train_x), total = len(train_x)):
	path = training_x_path + id_
	img = imread(path)
	img = resize(img, (HEIGHT, WIDTH), mode = 'constant', preserve_range = True)
	x_train[n] = img
    
# Resizing + processing training masks to have pixel values between 0 and 1
print("Resizing training masks")

for n, id_ in tqdm(enumerate(train_y), total = len(train_y)):
	path = training_y_path + id_
	img = imread(path)
	img = np.expand_dims(resize(img, (HEIGHT, WIDTH), mode = 'constant', preserve_range = True), axis = -1)
	img = np.round(img/255.)
	y_train[n] = img
    
# Resizing testing images
print("Resizing test images")
#https://flothesof.github.io/removing-background-scikit-image.html
from skimage import io as skio
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage import filters
from scipy import ndimage as ndi
from skimage import morphology
import os

cwd = os.getcwd()

my_path = os.path.join(cwd, "training_set", "bananas")
img = skio.imread(os.path.join(my_path, 'bimage13.jpg'))
# skimage loads images as RGB, so use the RGB2GRAY conversion code
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobel = filters.sobel(gray)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['figure.dpi'] = 200
blurred = filters.gaussian(sobel, sigma=1.5)
light_spots = np.array((gray > 250).nonzero()).T
dark_spots = np.array((gray < 3).nonzero()).T
bool_mask = np.zeros(gray.shape, dtype=bool)  # np.bool is removed in recent NumPy
bool_mask[tuple(light_spots.T)] = True
bool_mask[tuple(dark_spots.T)] = True
seed_mask, num_seeds = ndi.label(bool_mask)
ws = morphology.watershed(blurred, seed_mask)  # in skimage >= 0.19 this lives at skimage.segmentation.watershed
background = max(set(ws.ravel()), key=lambda g: np.sum(ws == g))
background_mask = (ws == background)
rows, cols = background_mask.shape