def get_tag_detections(im):
    #
    # Because of a bug in the tag detector, it doesn't seem to
    # detect tags larger than a certain size. To work around this
    # limitation, we detect tags at two different image scales and
    # use the result with more detections.
    #
    assert len(im.shape) == 2
    im4 = imrescale(im, 1./4)

    im  = img_as_ubyte(im)
    im4 = img_as_ubyte(im4)

    detections1 = AprilTagDetector().detect(im)
    detections4 = AprilTagDetector().detect(im4)
    for d in detections4:
        d.c[0] *= 4.
        d.c[1] *= 4.

    # note that everything other than the tag center is wrong
    # in detections4

    if len(detections4) > len(detections1):
        return detections4
    else:
        return detections1
    def insert_db(self, mode, image, label, features, channel_no, inverse):
        if inverse:
            image_ubyte = 255 - img_as_ubyte(image)
        else:
            image_ubyte = img_as_ubyte(image)

        image_ubyte = numpy.transpose(image_ubyte, (2, 0, 1))
                
        image_string = image_ubyte.tobytes()

        if features is not None:
            delimiter = '!@#$'
            self.datum.data = image_string + delimiter + features
        elif channel_no > 3:
            selem = disk(6)
            w_tophat = white_tophat(image_ubyte, selem)
            b_tophat = black_tophat(image_ubyte, selem)
            self.datum.data = image_string + w_tophat.tobytes() + b_tophat.tobytes()
        else:
            self.datum.data = image_string

        if label is not None:
            self.datum.label = int(label)                
    
        serialized = self.datum.SerializeToString()
        
        if mode == 'train':
            self.train_batch.Put("%08d" % self.train_no, serialized)                    
            self.train_no += 1
        elif mode == 'valid':
            self.valid_batch.Put("%08d" % self.valid_no, serialized)                    
            self.valid_no += 1
        elif mode == 'test':
            self.test_batch.Put("%08d" % self.test_no, serialized)                    
            self.test_no += 1
Example #3
def mse(image_a, image_b):
    # the 'Mean Squared Error' between the two images is the sum of
    # the squared differences between the two images, divided by the
    # number of pixels; NOTE: the two images must have the same dimensions
    image_a = util.img_as_ubyte(image_a)
    image_b = util.img_as_ubyte(image_b)
    err = np.sum((image_a.astype("float") - image_b.astype("float")) ** 2)
    err /= float(image_a.shape[0] * image_a.shape[1])
    
    # return the MSE, the lower the error, the more "similar"
    # the two images are
    return err
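A minimal usage sketch (assumes scikit-image is installed and mse is defined as above):

import numpy as np
from skimage import data, util

original = data.camera()
noisy = util.random_noise(original)  # float image in [0, 1]
print(mse(original, noisy))          # lower values mean more similar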
Example #4
def absolute_error(image_a, image_b):
    """
    Sum of pixel differences
    Images - 2d numpy arrays
    """
    image_a = util.img_as_ubyte(image_a)
    image_b = util.img_as_ubyte(image_b)
    # cast to a signed type so the subtraction cannot wrap around uint8
    return np.sum(
        np.absolute(
            image_a.view(np.ndarray).astype(np.int16) -
            image_b.view(np.ndarray).astype(np.int16)
        )
    )
Example #5
    def _write_to_file(self, new_bands, pan, **kwargs):

        # Read cloud/ice coverage from the quality band (BQA)
        coverage = self._calculate_cloud_ice_perc()

        self.output("Final Steps", normal=True, arrow=True)

        suffix = 'bands_%s_pan' % "".join(map(str, self.bands))

        output_file = join(self.dst_path, self._filename(suffix=suffix))

        with rasterio.open(output_file, 'w', **kwargs) as output:
            for i, band in enumerate(new_bands):
                # Color Correction
                band = numpy.multiply(band, pan)
                band = self._color_correction(band, self.bands[i], 0, coverage)

                output.write_band(i + 1, img_as_ubyte(band))

                new_bands[i] = None

        self.output("Writing to file", normal=True, color='green', indent=1)

        return output_file
Example #6
def HairRemover(image, debug=None):
    # =================================================================
    # extract hair as morphologically thin structures
    # -----------------------------------------------------------------

    # convert to Lab color space
    Lab_image = rgb2labnorm(image)
    L = img_as_ubyte(Lab_image[..., 0])

    # a hard threshold is then applied to the difference between
    # the luminance before and after morphological closing
    # the dark pigmented elements have a large intensity in the
    # difference image
    LClose = morph_close(L)
    LDiff = LClose - L

    # Threshold to create mask for inpainting
    # set all pixels > 11.9 -> 255 and < 12 -> 0
    # dilate by 1 to remove boundaries
    threshold = 10.0    # original comment and code did not match... -JH

    # threshold operation is directly performed on LDiff
    mask = (morph_dilate(LDiff) >= threshold) * 1.

    result = Inpainter(image, mask, 5)

    if debug is not None:
        debug["inpaintingMask"] = mask
        debug["hairRemoved"] = result
    
    return result
Example #7
    def compute(self, src):
        image = img_as_ubyte(src)

        # denoise image (denoise_tv_chambolle returns a float image)
        denoised = denoise_tv_chambolle(image, weight=0.05)
        denoised_equalized = exposure.equalize_hist(denoised)

        # find continuous regions (low gradient) --> markers
        # (rank filters expect unsigned-int images, hence img_as_ubyte)
        markers = rank.gradient(img_as_ubyte(denoised_equalized), disk(5)) < 10
        markers = ndi.label(markers)[0]

        # local gradient
        gradient = rank.gradient(img_as_ubyte(denoised), disk(2))

        # labels
        labels = watershed(gradient, markers)

        # display results ('spectral' was renamed 'nipy_spectral' in matplotlib)
        fig, axes = plt.subplots(2, 3)
        axes[0, 0].imshow(image)
        axes[0, 1].imshow(denoised, cmap=plt.cm.nipy_spectral, interpolation='nearest')
        axes[0, 2].imshow(markers, cmap=plt.cm.nipy_spectral, interpolation='nearest')
        axes[1, 0].imshow(gradient, cmap=plt.cm.nipy_spectral, interpolation='nearest')
        axes[1, 1].imshow(labels, cmap=plt.cm.nipy_spectral, interpolation='nearest', alpha=.7)
        plt.show()
Example #8
    def _write_to_file(self, new_bands, suffix=None, **kwargs):

        # Read cloud coverage from mtl file
        cloud_cover = self._read_cloud_cover()

        self.output("Final Steps", normal=True, arrow=True)

        output_file = '%s_bands_%s' % (self.scene, "".join(map(str, self.bands)))

        if suffix:
            output_file += suffix

        output_file += '.TIF'
        output_file = join(self.dst_path, output_file)

        with rasterio.open(output_file, 'w', **kwargs) as output:
            for i, band in enumerate(new_bands):
                # Color Correction
                band = self._color_correction(band, self.bands[i], 0, cloud_cover)

                output.write_band(i + 1, img_as_ubyte(band))

                new_bands[i] = None
        self.output("Writing to file", normal=True, color='green', indent=1)

        return output_file
Example #9
    def fit(self, X, y=None):
        num = self.patch_num // X.size
        data = []
        for item in X:
            img = imread(str(item[0]))
            img = img_as_ubyte(rgb2gray(img))
            #img = self.binary(img)  # binarize
            tmp = extract_patches_2d(img, self.patch_size, max_patches=num,
                                     random_state=np.random.RandomState())
            data.append(tmp)

        data = np.vstack(data)
        data = data.reshape(data.shape[0], -1)
        data = np.asarray(data, 'float32')

        # after binarization the 0-1 scaling below would be unnecessary
        data = data - np.min(data, 0)
        data = data / (np.max(data, 0) + 0.0001)  # 0-1 scaling

        self.rbm = BernoulliRBM(n_components=self.n_components,
                                learning_rate=self.learning_rate,
                                n_iter=self.n_iter,
                                batch_size=self.batch_size,
                                verbose=True)
        self.rbm.fit(data)
        return self
Example #10
def convert_to_saturation(fn, out_fn, rescale=True):
    """
    Generate the saturation channel as a grayscale image.
    """

# ImageMagick 18s
#     execute_command('convert %(fn)s -colorspace HSL -channel G %(out_fn)s' % {'fn': fn, 'out_fn': out_fn})

#     t = time.time()
    img = imread(fn)
#     sys.stderr.write('Read image: %.2f seconds\n' % (time.time() - t)) # ~4s

#     t1 = time.time()
    ma = img.max(axis=-1)
    mi = img.min(axis=-1)
#     sys.stderr.write('compute min and max color components: %.2f seconds\n' % (time.time() - t1)) # ~5s

#     t1 = time.time()
    s = np.nan_to_num(mi / ma.astype(float))
#     sys.stderr.write('min over max: %.2f seconds\n' % (time.time() - t1)) # ~2s

#     t1 = time.time()
    if rescale:
        pmax = s.max()
        pmin = s.min()
        s = (s - pmin) / (pmax - pmin)
#     sys.stderr.write('rescale: %.2f seconds\n' % (time.time() - t1)) # ~3s

#     t1 = time.time()
    cv2.imwrite(out_fn, img_as_ubyte(s))
Example #11
def extractAndStoreFeatures(inputFolder, outputFolder):

    # List all files
    fileList = os.listdir(inputFolder)
    # Select only files that end with .png
    imagesList = [f for f in fileList if f.endswith(".png")]

    for filename in imagesList:
        imagepath = inputFolder + "/" + filename
        outputpath = outputFolder + "/" + filename + ".feat"

        if os.path.exists(outputpath):
            print("Features for " + imagepath + " already exist. Delete the file if you want to replace them.")
            continue

        print("Extracting features for " + imagepath)

        image = io.imread(imagepath, as_gray=True)
        # Convert the image to bytes (pixels with values 0-255)
        image = util.img_as_ubyte(image)

        # Extract the features
        feats = feature_extractor.extractFeatures(image)

        # Save the features to a file
        outputFile = open(outputpath, "wb")
        pickle.dump(feats, outputFile)
        outputFile.close()
Example #12
def extractAndStoreFeatures(inputFolder, items, outputFolder):
    extension = '.jpg'
    X = np.zeros(shape=(cfg.num_train_images, cfg.num_features))
    y = np.zeros(shape=(cfg.num_train_images, 1))
    number_of_images = 0
    for index_label, name_label in enumerate(items):  # For each item...
        imagesPath = inputFolder + '/' + name_label  # Each label corresponds to a folder
        fileList = os.listdir(imagesPath)  # List all files
        imagesList = [f for f in fileList if f.endswith(extension)]  # Keep only files with the desired extension
        for filename in imagesList:
            current_imagePath = imagesPath + '/' + filename
            print('Extracting features for ' + current_imagePath)
            image = io.imread(current_imagePath, as_gray=True)
            image = util.img_as_ubyte(image)  # Convert the image to bytes (pixels with values 0-255)
            X[number_of_images] = feature_extractor.extractFeatures(image)  # Extract the features
            y[number_of_images] = index_label  # Assign the label at the end of X when saving the data set
            number_of_images += 1
    print(number_of_images)

    # Save the data set to a .data file in the Data folder.
    np.savetxt(outputFolder,      # file name
               np.c_[X, y],       # array to save
               fmt='%.2f',        # formatting, 2 digits in this case
               delimiter=',',     # column delimiter
               newline='\n',      # new line character
               comments='# ')     # character to use for comments
Example #13
def save_windows(boxes, imagePath):
    image_color = io.imread(imagePath, as_gray=False)
    image_color = util.img_as_ubyte(image_color)
    imageFilename = os.path.basename(imagePath)  # Get the filename
    imageBasename = os.path.splitext(imageFilename)[0]  # Take off the extension
    annotationsFilePath = cfg.annotationsFolderPath + 'gt.' + imageBasename + '.txt'
    annotatedBoxes = utils.readINRIAAnnotations(annotationsFilePath)
    signalTypes = utils.readINRIAAnnotationsDetection(annotationsFilePath)
    signalTypes = list(reversed(signalTypes))
    count = 0
    for box in boxes:
        if box[0] < 0 or box[1] < 0:
            continue
        if box[2] >= image_color.shape[1] or box[3] >= image_color.shape[0]:
            continue
        annotated = 'NONSIGNAL'
        for idx in range(0, len(annotatedBoxes)):
            aBox = annotatedBoxes[idx]
            currentRatio = computeOverlap(box, aBox)
            currentRatio = math.ceil(currentRatio * 10) / 10
            if currentRatio > 0.5:
                annotated = signalTypes[idx]
                break
        crop = image_color[box[1]:box[3], box[0]:box[2]]
        filename = imageBasename + '.' + str(count) + '.' + annotated + '.jpg'  # Build the crop name
        crop = resize(crop, (32, 32))
        io.imsave('Crops/' + filename, crop)  # Save the crop
        print('Crop saved')
        count += 1
Example #14
def test_compare_8bit_vs_16bit():
    # filters applied to an 8-bit image or a 16-bit image (with only 8 bits
    # of real dynamic range) should give identical results

    image8 = util.img_as_ubyte(data.camera())
    image16 = image8.astype(np.uint16)
    assert_equal(image8, image16)

    methods = [
        "autolevel",
        "bottomhat",
        "equalize",
        "gradient",
        "maximum",
        "mean",
        "subtract_mean",
        "median",
        "minimum",
        "modal",
        "enhance_contrast",
        "pop",
        "threshold",
        "tophat",
    ]

    for method in methods:
        func = getattr(rank, method)
        f8 = func(image8, disk(3))
        f16 = func(image16, disk(3))
        assert_equal(f8, f16)
Example #15
    def saver(stepName, img, dbg=None, mode=mode):
        path = (processedDir / str(imgName)).with_suffix(".{}.png".format(stepName) if stepName else ".png")

        if mode == 'cache' and processedDir and imgName:
            mode = 'save'
            if path.exists():
                print("Loading cached image:", path)
                img = ski.img_as_ubyte(io.imread(str(path)))
                mode = 'done'
            elif img is None:
                print("Caching image:", path)
                img = ski.img_as_ubyte(io.imread(str(imgName)))

        assert img is not None
        
        if mode == 'save' and processedDir and imgName:
            try:
                print("Saving:", img.shape, img.dtype, path.name, flush=True, )
                pil_img = PIL.Image.fromarray(img_as_ubyte(img))
                pil_img.save(str(path))
                if dbg:
                    dbg.saved_path = path
            except Exception as err:
                print("Error Saving:",path, err, flush=True, )

        elif mode == 'plot':
            plt.imshow(img)
            plt.suptitle(stepName+" "+imgName.name)
            plt.show(block=True)
            plt.close()

        return img
Example #16
def ndarray_to_pil(arr, format_str=None):
    """Export an ndarray to a PIL object.

    Parameters
    ----------
    Refer to ``imsave``.

    """
    if arr.ndim == 3:
        arr = img_as_ubyte(arr)
        mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]

    elif format_str in ['png', 'PNG']:
        mode = 'I;16'
        mode_base = 'I'

        if arr.dtype.kind == 'f':
            arr = img_as_uint(arr)

        elif arr.max() < 256 and arr.min() >= 0:
            arr = arr.astype(np.uint8)
            mode = mode_base = 'L'

        else:
            arr = img_as_uint(arr)

    else:
        arr = img_as_ubyte(arr)
        mode = 'L'
        mode_base = 'L'

    if arr.ndim == 2:
        im = Image.new(mode_base, arr.T.shape)
        try:
            im.frombytes(arr.tobytes(), 'raw', mode)
        except AttributeError:
            im.frombytes(arr.tostring(), 'raw', mode)

    else:
        try:
            im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
                                 arr.tobytes())
        except AttributeError:
            im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
                                  arr.tostring())
    return im
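A minimal usage sketch (assumes Pillow and numpy are importable as in this module):

import numpy as np

gradient = np.linspace(0, 1, 256).reshape(16, 16)      # float image in [0, 1]
pil_img = ndarray_to_pil(np.dstack([gradient] * 3))    # 3 channels -> 'RGB'
print(pil_img.mode, pil_img.size)                      # RGB (16, 16)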
Example #17
def inplace_augment(data, outdir, fold=1, tparams=None, reset=False):
    output_json = osp.join(outdir, 'inplace_augment/data.json')
    if not os.path.exists(output_json) or reset:
        
        od = osp.join(outdir, 'inplace_augment')
        if not osp.exists(od):
            os.makedirs(od)
        
        if tparams is None:
            tparams = {}
            tparams['samples_per_image'] = 5
            tparams['shear'] = (-5, 30)
            tparams['order'] = 1            #bilinear
            tparams['selem_size'] = (3, 4)  #max size for square selem for erosion, dilation
            
        tparams['rotate'] = (0, 1)
        tparams['hpad'] = (0, 1)
        tparams['vpad'] = (0, 1)
            
        augmented = []
        for datum in data:
            dat = copy.deepcopy(datum)
            augmented.append(dat)

            if datum['split'] == 'train':
                datum['region_proposals'] = datum['gt_boxes'][:2] #smaller memory footprint, needed
                path, f = osp.split(datum['id'])
                for i in range(tparams['samples_per_image']):
                    img = imread(datum['id'])
                    if img.ndim == 3:
                        img = img_as_ubyte(rgb2gray(img))
                        
                    out = img.copy()
                    boxes = datum['gt_boxes']
                    for jj, b in enumerate(reversed(boxes)):
                        try: #Some random values for weird boxes give value errors, just handle and ignore
                            b = close_crop_box(img, b)
                            word = img[b[1]:b[3], b[0]:b[2]]
                            aug = augment(word, tparams, keep_size=True)
                        except ValueError:
                            continue
                            
                        out[b[1]:b[3], b[0]:b[2]] = aug
                    
                    new_path = osp.join(od, f[:-4] + '_%d.png' % i)
                    imsave(new_path, out)
                    new_datum = copy.deepcopy(datum)
                    new_datum['id'] = new_path
                    augmented.append(new_datum)
                
        with open(output_json, 'w') as f:
            json.dump(augmented, f)
    
    else: #otherwise load the json
        with open(output_json) as f:
            augmented = json.load(f) 
            
    return augmented
Example #18
def prepare_for_display(npy_img):
    '''Convert a 2D or 3D numpy array of any dtype into a
    3D numpy array with dtype uint8. This array will
    be suitable for use in passing to gui toolkits for
    image display purposes.

    Parameters
    ----------
    npy_img : ndarray, 2D or 3D
        The image to convert for display

    Returns
    -------
    out : ndarray, 3D dtype=np.uint8
        The converted image. This is guaranteed to be a contiguous array.

    Notes
    -----
    If the input image is floating point, it is assumed that the data
    is in the range of 0.0 - 1.0. No check is made to assert this
    condition. The image is then scaled to be in the range 0 - 255
    and then cast to np.uint8.

    For all other dtypes, the array is simply cast to np.uint8.

    If a 2D array is passed, the single channel is replicated
    to the 2nd and 3rd channels.

    If the array contains an alpha channel, this channel is
    ignored.

    '''
    if npy_img.ndim < 2:
        raise ValueError('Image must be 2D or 3D array')

    height = npy_img.shape[0]
    width = npy_img.shape[1]

    out = np.empty((height, width, 3), dtype=np.uint8)
    npy_img = img_as_ubyte(npy_img)

    if npy_img.ndim == 2 or \
       (npy_img.ndim == 3 and npy_img.shape[2] == 1):
        npy_plane = npy_img.reshape((height, width))
        out[:, :, 0] = npy_plane
        out[:, :, 1] = npy_plane
        out[:, :, 2] = npy_plane

    elif npy_img.ndim == 3:
        if npy_img.shape[2] == 3 or npy_img.shape[2] == 4:
            out[:, :, :3] = npy_img[:, :, :3]
        else:
            raise ValueError('Image must have 1, 3, or 4 channels')

    else:
        raise ValueError('Image must have 2 or 3 dimensions')

    return out
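A minimal usage sketch (assumes numpy is imported as np, as in this module):

gray = np.random.random((4, 5))      # float image in [0, 1]
rgb = prepare_for_display(gray)
print(rgb.shape, rgb.dtype)          # (4, 5, 3) uint8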
Example #19
    def setup(self):
        self.image = np.zeros((640, 640))
        self.image[320:-320, 320:-320] = 1

        self.image = ndi.rotate(self.image, 15, mode='constant')
        self.image = ndi.gaussian_filter(self.image, 4)
        self.image += 0.2 * np.random.random(self.image.shape)

        self.image_ubyte = util.img_as_ubyte(np.clip(self.image, 0, 1))
Example #20
def punchhole_removal(im):
    ''' Check for punch holes and remove them. '''
    import numpy as np
    from PIL import Image
    from skimage.color import rgba2rgb, rgb2gray
    from skimage.transform import hough_circle, hough_circle_peaks
    from skimage.feature import canny
    from skimage.draw import disk  # skimage.draw.circle was replaced by disk
    from skimage.util import img_as_ubyte

    # maximum number of peaks to find; reduced from 99 to 24 to avoid
    # filling spurious punch holes
    max_peaks = 24

    img = np.array(im)            # load picture
    img_rgb = rgba2rgb(img)       # convert to RGB
    img_gray = rgb2gray(img_rgb)  # convert to gray
    image = img_as_ubyte(img_gray)
    rows, cols = image.shape      # note: numpy shape is (rows, cols)
    x1 = punchhole_margin
    x2 = int(rows - punchhole_margin)
    y1 = int(cols - punchhole_margin)
    y2 = punchhole_margin

    edges = canny(image, 3, 10, 40)     # perform Canny to detect the edges
    hough_radii = np.arange(31, 34, 1)  # radius range with a step of 1
    hough_res = hough_circle(edges, hough_radii) # detect the circles centres coordinates

    # Select the most prominent circles based on the max_peaks
    accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii,total_num_peaks=max_peaks)

    for center_y, center_x, radius in zip(cy, cx, radii):

        # if the circle centre falls in a border region, get the dominant
        # color near the hole and fill the hole with a linear gradient of
        # the dominant color
        if (((0 < center_y < rows) and (0 < center_x < y2)) or
                ((0 < center_y < rows) and (y1 < center_x < cols)) or
                ((0 < center_y < x1) and (0 < center_x < cols)) or
                ((x2 < center_y < rows) and (0 < center_x < cols))):

            index = 0
            rr, cc = disk((center_y, center_x), radius + 1, shape=img.shape)
            dominantpix = dominantcolor(center_x, center_y, radius, img)           
            dark_grad = [dominantpix[0], dominantpix[1],dominantpix[2]]
            light_grad = [dominantpix[0]+1, dominantpix[1]+1, dominantpix[2]+1]
            #white_grad = [255,255,255]
            RGBA_list = lineargradient(dark_grad, light_grad, len(list(rr)))
          
            for i , j in zip(list(rr), list(cc)):
                pixlist = RGBA_list[index]
                pixtuple = tuple(pixlist)
                img[i,j]= (pixtuple[0], pixtuple[1], pixtuple[2], 255)
                index += 1
           
    finalimage = Image.fromarray(img)

    return finalimage
Example #21
    def ondoubleclick(self, event):
        global THRESH

        self.jpg_path = self.selected_image.GetFilePath()
 
        if (self.jpg_path):
            Img = wx.Image(self.jpg_path, wx.BITMAP_TYPE_JPEG)
 
            resized_image = resize(Img, self.MaxImageSize_x, self.MaxImageSize_y)   # function that re-sizes an image based on def resize()
            self.Image.SetBitmap(wx.BitmapFromImage(resized_image))     # displays image in wx.Python GUI/frame
 
            self.image = Image.open(self.jpg_path)                      # Opens the file path and saves the image into self.image
 
            if self.image.mode != "RGB":
                dlg = wx.MessageDialog(self, "The image you selected is not a RGB image", style=wx.ICON_ERROR)
                dlg.ShowModal()
                dlg.Destroy()
                return
 
 
            self.graylevel_image = rgb_to_gray_level(self.image)
            self.vert_array = vertical_grayscale(self.graylevel_image)
            self.interpolation_result = sum(self.vert_array)
 
            self.plotpanel = wx.Panel(self, -1, size=(800, 200), pos = (10, 570))
            #self.plotpanel = wx.BoxSizer(wx.HORIZONTAL)
 
            self.figure = matplotlib.figure.Figure(dpi=100, figsize=(8.5,2))
            self.axes = self.figure.add_subplot(111)
            self.axes.plot(self.vert_array)
            self.canvas = FigCanvas(self.plotpanel, -1, self.figure)
 
            self.binarized_image = rgb_to_gray_level(self.image)        # Converts the image that was just imported to grayscale
            binarize(self.binarized_image, THRESH)                              # Binarizes the now grayscale image
            #print THRESH
            self.defect_count = pixel_counter(self.binarized_image)     # Counts the black pixels in the image and returns them to defect_count
 
            self.quote7 = wx.StaticText(self, label= str(self.defect_count), pos = (200, 300))  # Displays the defect count number in the GUI
            self.quote8 = wx.StaticText(self, label= str(self.interpolation_result), pos = (200, 340))   # Displays the grayscale summation in the GUI
 
            wx_image = piltoimage(self.binarized_image)
            resized_binarized = resize(wx_image, self.MaxImageSize_x, self.MaxImageSize_y)
 
            self.binImage.SetBitmap(wx.BitmapFromImage(resized_binarized))     # displays image in wx.Python GUI/frame
 
 
            # This section imports the image from the selected path, then converts it to an entropy image.
            # Once the entropy ndarray is generated, it has to be converted so wx can display it in the frame.
            entropyimage = skimage.color.rgb2gray(mpimg.imread(self.jpg_path))
            ubyte_entropyimage = img_as_ubyte(entropyimage)

        self.Refresh()
        event.Skip()
Example #22
def get_gabor_desc(img, gdesc, w_size, scale=1.0, mask=None, _ncpus=None):
    """
    Extract local Gabor descriptors by scanning an image.

    :param img: numpy.ndarray
      Input intensity (grey-scale) image.

    :param gdesc: txtgrey.GaborDescriptor
      The parameters of the Gabor wavelets to be used.

    :param w_size: integer
      Window size (the sliding window is square-shaped).

    :param scale: float
      The image may be scaled prior to any descriptor extraction.

    :param mask: numpy.ndarray
      A mask (logical image) indicating the object regions
      in the image.

    :return: list
      A list with the local descriptors corresponding to each position
      of the sliding window. Each element of the list is a vector
      containing the coordinates of the local window (first 4 elements)
      and the 2 vectors of values for the local Gabor descriptors (one
      with the mean responses and one with the variances).
    """

    assert (img.ndim == 2)

    img_ = rescale(img, scale)
    if mask is not None:
        assert (mask.ndim == 2)
        assert (mask.shape == img.shape)
        mask = img_as_ubyte(resize(mask, img_.shape))

    img_iterator = sliding_window(img_.shape, (w_size, w_size), step=(w_size, w_size))  # non-overlapping windows

    res = []
    if mask is None:
        with ProcessPoolExecutor(max_workers=_ncpus) as executor:
            for w_coords in img_iterator:
                time.sleep(0.01)
                res.append(executor.submit(_gabor_worker, img_, gdesc, w_coords))
    else:
        th = w_size * w_size / 20.0   # consider only those windows with more than 5% pixels from object
        with ProcessPoolExecutor(max_workers=_ncpus) as executor:
            for w_coords in img_iterator:
                time.sleep(0.01)
                if mask[w_coords[0]:w_coords[1], w_coords[2]:w_coords[3]].sum() > th:
                    res.append(executor.submit(_gabor_worker, img_, gdesc, w_coords))

    desc = []
    for f in as_completed(res):
        desc.append(f.result())

    return desc
Example #23
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    gray = img_as_ubyte(rgb2gray(img))  # convert to gray scale
    im_bin = gray >= cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)  # adaptive-threshold binarization
    edges = canny(im_bin, sigma=.1)  # get edges from binary image
    edges = img_as_ubyte(edges)
    squares = []
    # cv2.findContours returns 3 values in OpenCV 3 and 2 values in OpenCV 4;
    # taking the last two keeps this working on both
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    for cnt in contours:
        cnt_len = cv2.arcLength(cnt, True)
        cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
        if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
            cnt = cnt.reshape(-1, 2)
            max_cos = np.max(
                [angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
            if max_cos < 0.1:
                squares.append(cnt)
    return squares
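The angle_cos helper used above is not defined on this page; the classic definition from OpenCV's squares.py sample (reproduced here as an assumption about what this code expects) is:

def angle_cos(p0, p1, p2):
    # cosine of the angle at vertex p2, formed by points p0 and p1
    d1, d2 = (p0 - p2).astype('float'), (p1 - p2).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))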
Example #24
def imshow(arr):
    """Display an image, using PIL's default display command.

    Parameters
    ----------
    arr : ndarray
       Image to display.  Images of dtype float are assumed to be in
       [0, 1].  Images of dtype uint8 are in [0, 255].

    """
    Image.fromarray(img_as_ubyte(arr)).show()
Example #25
def test_compare_autolevels():
    # autolevel and percentile autolevel with p0=0.0 and p1=1.0
    # should return the same arrays

    image = util.img_as_ubyte(data.camera())

    selem = disk(20)
    loc_autolevel = rank.autolevel(image, selem=selem)
    loc_perc_autolevel = rank.autolevel_percentile(image, selem=selem, p0=0.0, p1=1.0)

    assert_equal(loc_autolevel, loc_perc_autolevel)
Example #26
    def test_compare_8bit_vs_16bit(self, method):
        # filters applied to an 8-bit image or a 16-bit image (with only 8 bits
        # of real dynamic range) should give identical results
        image8 = util.img_as_ubyte(data.camera())[::2, ::2]
        image16 = image8.astype(np.uint16)
        assert_equal(image8, image16)

        func = getattr(rank, method)
        f8 = func(image8, disk(3))
        f16 = func(image16, disk(3))
        assert_equal(f8, f16)
Example #27
def extractFeaturesSingleImage(imageOriginal, outputpath, imagepath):

    # Convert the image to bytes (pixels with values 0-255)
    image = util.img_as_ubyte(imageOriginal)

    # Extract the features
    feats = feature_extractor.extractFeatures(image, imagepath)

    # Save the features to a file
    outputFile = open(outputpath, "wb")
    pickle.dump(feats, outputFile)
    outputFile.close()
Example #28
def generate_region_proposals(f):
    img = imread(f)
    if img.ndim == 3:
        img = img_as_ubyte(rgb2gray(img))
        
    m = img.mean()
    threshold_range = np.arange(0.6, 1.01, 0.1) * m
    C_range = range(3, 50, 2)  # horizontal range
    R_range = range(3, 50, 2)  # vertical range
    region_proposals = find_regions(img, threshold_range, C_range, R_range)
    region_proposals, _ = utils.unique_boxes(region_proposals)
    return region_proposals
Example #29
def watershed(image):
    """ the watershed algorithm """
    if len(image.shape) != 2:
        raise TypeError("The input image must be gray-scale")

    h, w = image.shape
    image = cv2.equalizeHist(image)
    image = denoise_bilateral(image, sigma_color=0.1, sigma_spatial=10)  # sigma_range was renamed sigma_color
    image = rescale_intensity(image)
    image = img_as_ubyte(image)
    image = rescale_intensity(image)
    # com.debug_im(image)

    _, thres = cv2.threshold(image, 80, 255, cv2.THRESH_BINARY_INV)

    distance = ndi.distance_transform_edt(thres)
    # peak_local_max no longer supports indices=False; build the mask manually
    peak_coords = peak_local_max(distance, labels=thres, min_distance=5)
    local_maxi = np.zeros_like(distance, dtype=bool)
    local_maxi[tuple(peak_coords.T)] = True

    # com.debug_im(thres)
    # implt = plt.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
    # plt.show()

    markers = ndi.label(local_maxi, np.ones((3, 3)))[0]
    labels = ws(-distance, markers, mask=thres)
    labels = np.uint8(labels)
    # result = np.round(255.0 / np.amax(labels) * labels).astype(np.uint8)
    # com.debug_im(result)

    segments = []
    for idx in range(1, np.amax(labels) + 1):

        indices = np.where(labels == idx)
        left = np.amin(indices[1])
        right = np.amax(indices[1])
        top = np.amin(indices[0])
        down = np.amax(indices[0])

        # region = labels[top:down, left:right]
        # m = (region > 0) & (region != idx)
        # region[m] = 0
        # region[region >= 1] = 1
        region = image[top:down, left:right]
        cont = Contour(mask=region)
        cont.lt = [left, top]
        cont.rb = [right, down]
        segments.append(cont)

    return segments
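A minimal usage sketch (the file name is hypothetical; assumes the imports this function relies on, e.g. cv2, scipy.ndimage as ndi, and the skimage helpers, are available):

import cv2

gray = cv2.imread("cells.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input image
segments = watershed(gray)
print(len(segments), "segments found")
for cont in segments[:3]:
    print("top-left:", cont.lt, "bottom-right:", cont.rb)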
Example #30
def extract_hough_circle(img_rgb, img_gray, out_filepath):
    # Canny
    img = img_as_ubyte(img_gray)
    edges = canny(img, sigma=3, low_threshold=10, high_threshold=50)

    # fig, ax = plt.subplots(nrows=1, ncols=1)
    # ax.imshow(edges, cmap=plt.cm.gray)
    # ax.axis('off')    
    # ax.set_title('Canny Edges for Hough Circle', fontsize=18)
    # plt.tight_layout()
    # plt.savefig('canny_edges_for_hough_circle.png')

    # Detect
    min_radius, max_radius, step_radius = 15, 30, 1
    plausible_radii = np.arange(min_radius, max_radius, step_radius)

    hough_circles = hough_circle(edges, plausible_radii)

    centers = []
    accums = []
    radii = []
    for radius, h in zip(plausible_radii, hough_circles):
        n_extracted_circle = 1 # ...for each radius
        peaks = peak_local_max(h, num_peaks=n_extracted_circle)
        centers.extend(peaks)
        accums.extend(h[peaks[:, 0], peaks[:, 1]])
        radii.extend([radius] * n_extracted_circle)

    # Draw the most prominent circles
    n_top_circle = 15
    fig, ax = plt.subplots(ncols=1, nrows=1)
    for idx in np.argsort(accums)[::-1][:n_top_circle]:
        center_x, center_y = centers[idx]
        center_color = (0, 225, 0)
        img_rgb[center_x, center_y] = center_color

        radius = radii[idx]
        perim_color = (255, 0, 0)
        perim_x_list, perim_y_list = circle_perimeter(center_y, center_x, radius)
        # if all(i < img_rgb.shape[1] for i in perim_x_list) and all(i < img_rgb.shape[0] for i in perim_y_list):
        #     img_rgb[perim_y_list, perim_x_list] = perim_color
        for perim_x, perim_y in zip(perim_x_list, perim_y_list):
            if perim_x < img_rgb.shape[1] and perim_y < img_rgb.shape[0]:
                img_rgb[perim_y, perim_x] = perim_color

    ax.imshow(img_rgb, cmap=plt.cm.gray)
    ax.axis('off')
    ax.set_title('Hough Circle', fontsize=18)
    plt.tight_layout()
    plt.savefig(out_filepath)
    plt.close(fig)
Example #31
def _img_as_ubyte(x):
    # map pixels equal to 0.3 to a fixed byte value; convert the rest normally
    out = np.zeros(x.shape)
    out[x == 0.3] = 77
    out[x != 0.3] = img_as_ubyte(x)[x != 0.3]
    return out
Example #32
def CreateVideoSlow(videooutname, clip, Dataframe, tmpfolder, dotsize,
                    colormap, alphavalue, pcutoff, trailpoints, cropping, x1,
                    x2, y1, y2, delete, DLCscorer, bodyparts2plot,
                    outputframerate, Frames2plot, bodyparts2connect,
                    skeleton_color, draw_skeleton, displaycropped):
    ''' Creating individual frames with labeled body parts and making a video'''
    #scorer=np.unique(Dataframe.columns.get_level_values(0))[0]
    #bodyparts2plot = list(np.unique(Dataframe.columns.get_level_values(1)))

    if displaycropped:
        ny, nx = y2 - y1, x2 - x1
    else:
        ny, nx = clip.height(), clip.width()

    fps = clip.fps()
    if outputframerate is None:  # by default, same as the input rate
        outputframerate = clip.fps()

    nframes = len(Dataframe.index)
    duration = nframes / fps

    print("Duration of video [s]: ", round(duration, 2), ", recorded with ",
          round(fps, 2), "fps!")
    print("Overall # of frames: ", int(nframes),
          "with cropped frame dimensions: ", nx, ny)
    print("Generating frames and creating video.")
    df_likelihood = np.empty((len(bodyparts2plot), nframes))
    df_x = np.empty((len(bodyparts2plot), nframes))
    df_y = np.empty((len(bodyparts2plot), nframes))

    for bpindex, bp in enumerate(bodyparts2plot):
        df_likelihood[bpindex, :] = Dataframe[DLCscorer, bp,
                                              'likelihood'].values
        if cropping and not displaycropped:
            df_x[bpindex, :] = Dataframe[DLCscorer, bp, 'x'].values + x1
            df_y[bpindex, :] = Dataframe[DLCscorer, bp, 'y'].values + y1
        else:
            df_x[bpindex, :] = Dataframe[DLCscorer, bp, 'x'].values
            df_y[bpindex, :] = Dataframe[DLCscorer, bp, 'y'].values

    colors = get_cmap(len(bodyparts2plot), name=colormap)
    if draw_skeleton:
        #recode the bodyparts2connect into indices for df_x and df_y for speed
        bpts2connect = []
        index = np.arange(len(bodyparts2plot))
        for pair in bodyparts2connect:
            if pair[0] in bodyparts2plot and pair[1] in bodyparts2plot:
                bpts2connect.append([
                    index[pair[0] == np.array(bodyparts2plot)][0],
                    index[pair[1] == np.array(bodyparts2plot)][0]
                ])

    nframes_digits = int(np.ceil(np.log10(nframes)))
    if nframes_digits > 9:
        raise Exception(
            "Your video has more than 10**9 frames, we recommend chopping it up."
        )

    if Frames2plot is None:
        Index = range(nframes)
    else:
        Index = []
        for k in Frames2plot:
            if k >= 0 and k < nframes:
                Index.append(int(k))

    for index in tqdm(range(nframes)):
        imagename = tmpfolder + "/file" + str(index).zfill(
            nframes_digits) + ".png"
        if os.path.isfile(imagename):
            image = img_as_ubyte(
                clip.load_frame())  #still need to read (so counter advances!)
        else:
            plt.axis('off')
            image = img_as_ubyte(clip.load_frame())
            if index in Index:  #then extract the frame!
                if cropping and displaycropped:
                    image = image[y1:y2, x1:x2]
                else:
                    pass

                plt.figure(frameon=False,
                           figsize=(nx * 1. / 100, ny * 1. / 100))
                plt.subplots_adjust(left=0,
                                    bottom=0,
                                    right=1,
                                    top=1,
                                    wspace=0,
                                    hspace=0)
                plt.imshow(image)

                # Adds skeleton to the video
                ####################
                if draw_skeleton:
                    for pair in bpts2connect:
                        if (df_likelihood[pair[0], index] > pcutoff) and (
                                df_likelihood[pair[1], index] > pcutoff):
                            plt.plot(
                                [df_x[pair[0], index], df_x[pair[1], index]],
                                [df_y[pair[0], index], df_y[pair[1], index]],
                                color=skeleton_color,
                                alpha=alphavalue)

                for bpindex, bp in enumerate(bodyparts2plot):
                    if df_likelihood[bpindex, index] > pcutoff:
                        if trailpoints > 0:
                            plt.scatter(
                                df_x[bpindex][max(0, index -
                                                  trailpoints):index],
                                df_y[bpindex][max(0, index -
                                                  trailpoints):index],
                                s=dotsize**2,
                                color=colors(bpindex),
                                alpha=alphavalue * .75)
                            # the present point is drawn less transparently than the trail
                            plt.scatter(df_x[bpindex, index],
                                        df_y[bpindex, index],
                                        s=dotsize**2,
                                        color=colors(bpindex),
                                        alpha=alphavalue)

                        else:
                            plt.scatter(df_x[bpindex, index],
                                        df_y[bpindex, index],
                                        s=dotsize**2,
                                        color=colors(bpindex),
                                        alpha=alphavalue)

                plt.xlim(0, nx - 1)
                plt.ylim(0, ny - 1)

                plt.axis('off')
                plt.subplots_adjust(left=0,
                                    bottom=0,
                                    right=1,
                                    top=1,
                                    wspace=0,
                                    hspace=0)
                plt.gca().invert_yaxis()
                plt.savefig(imagename)

                plt.close("all")

    start = os.getcwd()
    os.chdir(tmpfolder)
    print("All labeled frames were created, now generating video...")
    ## One can change the parameters of the video creation script below:
    # See ffmpeg user guide: http://ffmpeg.org/ffmpeg.html#Video-and-Audio-file-format-conversion
    #
    try:
        subprocess.call([
            'ffmpeg', '-framerate',
            str(clip.fps()), '-i', 'file%0' + str(nframes_digits) + 'd.png',
            '-r',
            str(outputframerate), '../' + videooutname
        ])
    except FileNotFoundError:
        print(
            "Ffmpeg not correctly installed, see https://github.com/AlexEMG/DeepLabCut/issues/45"
        )

    if delete:
        for file_name in glob.glob("*.png"):
            os.remove(file_name)
    os.chdir(start)
Example #33
See Wikipedia_ for more details on the algorithm.

.. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing)

"""

from scipy import ndimage
import matplotlib.pyplot as plt

from skimage.morphology import disk
from skimage.segmentation import watershed
from skimage import data
from skimage.filters import rank
from skimage.util import img_as_ubyte

image = img_as_ubyte(data.camera())

# denoise image
denoised = rank.median(image, disk(2))

# find continuous region (low gradient) --> markers
markers = rank.gradient(denoised, disk(5)) < 10
markers = ndimage.label(markers)[0]

# local gradient
gradient = rank.gradient(denoised, disk(2))

# process the watershed
labels = watershed(gradient, markers)

# display results
Example #34
import numpy as np
import matplotlib.pyplot as plt

from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
from skimage import io
from skimage.color import rgb2gray

noise_mask = np.full((2160, 3840), 28, dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30

noise = (noise_mask * np.random.random(noise_mask.shape) -
         0.5 * noise_mask).astype(np.uint8)
img = noise + 128

image_file = io.imread("./images/study.png")
image_in_greyscale = rgb2gray(image_file)
image = img_as_ubyte(image_in_greyscale)

entr_img = entropy(image * noise, disk(10))

fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(10, 4))

img0 = ax0.imshow(image_in_greyscale, cmap="gray")
ax0.set_title("Object")
ax1.imshow(noise, cmap="gray")
ax1.set_title("Noisy image")
ax2.imshow(entr_img, cmap="viridis")
ax2.set_title("Local entropy")

fig.tight_layout()

fig, (ax0, ax1) = plt.subplots(ncols=2,
Example #35
    def read(self, name, flatten=True):
        return img_as_ubyte(imread(name, flatten))
Example #36
        # strip the first channel
        if len(annot.shape) > 2:
            if annot.shape[2] != 3:
                annot = annot[:, :, 0]
            else:
                annot = rgb2lab(annot)
                annot = annot[:, :, 0]
        # label the annotations nicely to prepare for future filtering operations
        annot = skimage.morphology.label(annot)
        total_objects += len(np.unique(annot)) - 1

        # find boundaries
        boundaries = skimage.segmentation.find_boundaries(annot, mode='outer')

        # BINARY LABEL

        # prepare buffer for binary label
        label_binary = np.zeros((annot.shape + (3,)))

        # write binary label
        label_binary[(annot == 0) & (boundaries == 0), 0] = 1
        label_binary[(annot != 0) & (boundaries == 0), 1] = 1
        label_binary[boundaries == 1, 2] = 1

        label_binary = img_as_ubyte(label_binary)
        # save it (img_as_ubyte above already converted the image to the 0-255 range)
        skimage.io.imsave(boundary_labels + '{}_'.format(index) + 't' + filename[-7:-3] + 'png', label_binary)
    index += 1
Example #37
import math
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage.draw import disk
from skimage.color import rgb2gray
from skimage import io, data, measure, morphology, color
from skimage.measure import label, regionprops, regionprops_table
from skimage.util import img_as_ubyte
from skimage.transform import rotate

script, target = sys.argv

img = io.imread(target)
xc, yc, t = img.shape  # image size (rows, cols, channels)
grayscale = rgb2gray(img)
img = img_as_ubyte(grayscale[0:xc, 0:yc])

#The section below here I think is actually aimed at measuring the pictures

label_img = label(img)
regions = regionprops(label_img)

fig, ax = plt.subplots()
ax.imshow(img, cmap=plt.cm.gray)

for props in regions:
    x0 = xc
    y0 = yc

    x1 = x0 + .5 * props.minor_axis_length
Example #38
def convert_to_ubyte(img, vmin, vmax):
    """Convert image from float to unsigned byte"""
    img_scl = (img - vmin) / (vmax - vmin)

    return img_as_ubyte(img_scl)
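Note that img_as_ubyte raises for float values outside [-1, 1], so inputs that fall outside the [vmin, vmax] window will make this function fail. A defensive variant (a sketch, assuming numpy is imported as np):

def convert_to_ubyte_clipped(img, vmin, vmax):
    """Like convert_to_ubyte, but tolerates values outside [vmin, vmax]."""
    img_scl = np.clip((img - vmin) / (vmax - vmin), 0.0, 1.0)
    return img_as_ubyte(img_scl)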
Example #39
from skimage import io, util, color
import sys
import numpy

def hist_generate(image, bins):
    out = [0] * bins
    for pixel in numpy.nditer(image):
        out[pixel] += 1
    return out

def hist_equalize(hist):
    hist_norm = hist / numpy.sum(hist)
    out = numpy.add.accumulate(hist_norm)
    return out

img = util.img_as_ubyte(io.imread(sys.argv[1], as_gray=True))
levels = 256
hist = hist_generate(img, levels)
hist_eq = hist_equalize(hist)
img_new = numpy.ubyte((levels - 1) * hist_eq[img])
io.imshow(img_new)
io.show()
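The per-pixel Python loop in hist_generate is slow on large images; an equivalent vectorized version using numpy's bincount (a sketch with the same inputs and outputs for ubyte images):

def hist_generate_fast(image, bins):
    # count occurrences of each gray level in one vectorized pass
    return numpy.bincount(image.ravel(), minlength=bins).tolist()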
Example #40
                for f in cmpd_imgs if fview in f
            ]
            dmso_imgs = [
                read_image(fname=os.path.join(imgpath, f), verbose=False)
                for f in ctrl_imgs if fview in f
            ]

            imglist.append(np.stack(imgs, axis=-1))
            titles.append(d)
            imglist.append(np.stack(dmso_imgs, axis=-1))
            titles.append('DMSO')

    hoechst = [img[:, :, 0] for img in imglist]
    # threshold and convert to ubyte
    hoechst = [threshold_img(img, method='otsu') for img in hoechst]
    hoechst = [img_as_ubyte(img) for img in hoechst]

    segf = SegfreeProfiler(tile_size=(10, 10),
                           n_block_types=10,
                           n_supblock_types=10)
    nucl_prof = segf.fit_transform(hoechst)
    pickle.dump(segf, open("segf_nuclei.pkl", "wb"))
    # segf = pickle.load(open("segf_nuclei.pkl", "rb"))

    # and segmentation-free profiles for all channels combined
    segf = SegfreeProfiler(tile_size=(10, 10),
                           n_block_types=20,
                           n_supblock_types=50)
    imgs_norm = [normalize_channels(img) for img in imglist]
    cell_prof = segf.fit_transform(imgs_norm)
    pickle.dump(segf, open("segf_cells.pkl", "wb"))
Example #41
files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in files:
    print(f)

# Load multiple images in a collections object
file_names = glob.glob('./*.jpg')
images = io.imread_collection(file_names)
image1 = images[0]
image2 = images[1]

print(image1.shape, type(image1), image1.dtype)
print(image2.shape, type(image2), image2.dtype)

image2_grey = color.rgb2gray(image2)
image2_grey = util.img_as_ubyte(image2_grey)
print(image2_grey.shape, type(image2_grey), image2_grey.dtype)
print(image2_grey[:10])

# Display images
fig, axes = plt.subplots(1,3)
fig.suptitle('Mr. Chairman from Iron Chef')
axes[0].imshow(image2)
axes[0].axis('off')
axes[0].set_title('Colour')
axes[1].imshow(image2_grey, cmap=plt.cm.gray)
axes[1].axis('off')
axes[1].set_title('GrayScale')
axes[2].imshow(util.invert(image2_grey), cmap=plt.cm.gray)
axes[2].axis('off')
axes[2].set_title('GrayScale - Inverted')
Example #42
Note that the accumulator size is built to be larger than the
original picture in order to detect centers outside the frame.
Its size is extended by two times the larger radius.

"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data, color
from skimage.transform import hough_circle
from skimage.feature import canny, peak_local_max
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte

# Load picture and detect edges
image = img_as_ubyte(data.coins()[0:95, 70:370])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)

fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))

# Detect two radii
hough_radii = np.arange(15, 30, 2)
hough_res = hough_circle(edges, hough_radii)

centers = []
accums = []
radii = []

for radius, h in zip(hough_radii, hough_res):
    # For each radius, extract two circles
    peaks = peak_local_max(h, num_peaks=2)
Example #43
def adaptiveEqualization(img: np.ndarray,
                         clip_limit: float = 0.03) -> np.ndarray:
    img_f = img_as_float(img)
    logging.info(img_f[0, :4])
    # pass clip_limit by keyword: the second positional parameter of
    # equalize_adapthist is kernel_size, not clip_limit
    return img_as_ubyte(exposure.equalize_adapthist(img_f, clip_limit=clip_limit))
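A quick usage sketch (assumes scikit-image's bundled sample data):

from skimage import data

eq = adaptiveEqualization(data.moon())
print(eq.dtype, eq.min(), eq.max())  # uint8 output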
Example #44
    def test_hdx_rgb_roundtrip(self):
        from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
        img_rgb = self.img_rgb
        conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
                              rgb_from_hdx)
        assert_equal(img_as_ubyte(conv), img_rgb)
Example #45
from skimage.util import img_as_ubyte
from skimage.morphology import disk
from skimage.filters import rank, threshold_otsu

import matplotlib
import matplotlib.pyplot as plt

import numpy as np

from PIL import Image

myim = Image.open('../crops/KNMP-VIII_F_69______2C2O_0006_o.ppm').convert('L')

print(np.array(myim).shape)

matplotlib.rcParams['font.size'] = 9
img = img_as_ubyte(np.copy(np.asarray(myim)))

index_row = np.tile(np.arange(img.shape[0]).reshape(img.shape[0], 1), (1, img.shape[1]))
index_col = np.tile(np.arange(img.shape[1]), (img.shape[0], 1))

print(index_row.shape)
print(index_col.shape)

radius = 15
selem = disk(radius)

local_otsu = rank.otsu(img, selem)
threshold_global_otsu = threshold_otsu(img)
Example #46
if cropping:
    clip = clip.crop(
        y1=y1, y2=y2, x1=x1, x2=x2)  # one might want to adjust

print("Duration of video [s]: ", clip.duration, ", recorded with ", fps,
      "fps!")
print("Overall # of frames: ", nframes_approx,"with cropped frame dimensions: ", clip.size)

start = time.time()
PredicteData = np.zeros((nframes_approx, 3 * len(cfg['all_joints_names'])))
clip.reader.initialize()
print("Starting to extract posture")
for index in tqdm(range(nframes_approx)):
    #image = img_as_ubyte(clip.get_frame(index * 1. / fps))
    image = img_as_ubyte(clip.reader.read_frame())
    # Thanks to Rick Warren for the following snippet:
    # if close to the end of the video, start checking whether two adjacent
    # frames are identical; this should only happen when moviepy has reached
    # the final frame. If two adjacent frames are identical, terminate the loop.
    if index == int(nframes_approx - frame_buffer * 2):
        last_image = image
    elif index > int(nframes_approx - frame_buffer * 2):
        if (image == last_image).all():
            nframes = index
            print("Detected frames: ", nframes)
            break
        else:
            last_image = image
    pose = getpose(image, cfg, outputs)
    PredicteData[index, :] = pose.flatten()  # NOTE: thereby cfg['all_joints_names'] should be same order as bodyparts!
Example #47
    denom = X + Y
    denom[denom == 0] = np.inf  # np.infty was removed from numpy
    frac = num / denom

    chi_sqr = 0.5 * np.sum(frac, axis=2)

    # Generate a similarity measure. It needs to be low when distance is high
    # and high when distance is low; taking the reciprocal will do this.
    # Chi squared will always be >= 0, add small value to prevent divide by 0.
    similarity = 1 / (chi_sqr + 1.0e-4)

    return similarity


# Load the `skimage.data.coins` image
img = img_as_ubyte(data.coins())

# Quantize to 16 levels of grayscale; this way the output image will have a
# 16-dimensional feature vector per pixel
quantized_img = img // 16

# Select the coin from the 4th column, second row.
# Co-ordinate ordering: [x1,y1,x2,y2]
coin_coords = [184, 100, 228, 148]   # 44 x 44 region
coin = quantized_img[coin_coords[1]:coin_coords[3],
                     coin_coords[0]:coin_coords[2]]

# Compute coin histogram and normalize
coin_hist, _ = np.histogram(coin.flatten(), bins=16, range=(0, 16))
coin_hist = coin_hist.astype(float) / np.sum(coin_hist)
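For context, the fragment above is the tail of a per-pixel histogram-similarity workflow; a sketch of how the pieces fit together, using skimage's windowed histogram (the disk(30) window size is an assumption):

from skimage.filters import rank
from skimage.morphology import disk

def windowed_histogram_similarity(image, footprint, reference_hist, n_bins):
    # per-pixel normalized histograms over a sliding window
    px_histograms = rank.windowed_histogram(image, footprint, n_bins=n_bins)
    X = px_histograms
    Y = reference_hist[np.newaxis, np.newaxis, :]
    num = (X - Y) ** 2
    denom = X + Y
    denom[denom == 0] = np.inf
    chi_sqr = 0.5 * np.sum(num / denom, axis=2)
    # low chi-squared distance -> high similarity
    return 1 / (chi_sqr + 1.0e-4)

similarity = windowed_histogram_similarity(quantized_img, disk(30), coin_hist, 16)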
Example #48
    def _ish_target_slices(self, image):
        grey = _grey_scaled(image, self._target_scale)
        grey = util.img_as_ubyte(grey)
        return grey
Example #49
import os
import sys

import matplotlib.pyplot as plt
from PIL import Image

from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
from skimage.filters.rank import gradient

# First example: object detection.

# Second example: texture detection.

path = '/Users/rgirish28/Documents/Strip JPEGs/'

image1 = Image.open(os.path.join(path, sys.argv[1]))

image = img_as_ubyte(image1)

image = image[:, :, 1]
fig, (ax0, ax1) = plt.subplots(ncols=2,
                               sharex=True,
                               sharey=True,
                               subplot_kw={"adjustable": "box"})  # "box-forced" was removed from matplotlib

img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)

img1 = ax1.imshow(gradient(image, disk(5)), cmap=plt.cm.jet)
ax1.set_title("Gradient")
Example #50
def evaluate_multianimal_crossvalidate(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    pbounds=None,
    edgewisecondition=True,
    target="rpck_train",
    inferencecfg=None,
    init_points=20,
    n_iter=50,
    dcorr=10.0,
    leastbpts=1,
    printingintermediatevalues=True,
    modelprefix="",
    plotting=False,
):
    """
    Crossvalidate inference parameters on evaluation data; optimal parameters will be stored in "inference_cfg.yaml".

    They will then be used for inference (for analysis of videos). Performs Bayesian Optimization with https://github.com/fmfn/BayesianOptimization

    This is a crucial step. The most important variable (in inferencecfg) to cross-validate is minimalnumberofconnections. Pass
    a reasonable range to optimize (e.g. if you have 5 edges, from 1 to 5; if you have 4 bodyparts and 11 connections, from 3 to 9).

    config: string
        Full path of the config.yaml file as a string.

    shuffle: int, optional
        An integer specifying the shuffle index of the training dataset used for training the network. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    pbounds: dictionary of variables with ranges to crossvalidate.
        By default: pbounds = {
                        'pafthreshold': (0.05, 0.7),
                        'detectionthresholdsquare': (0, 0.9),
                        'minimalnumberofconnections': (1, # connections in your skeleton),
                    }

    inferencecfg: dict, OPTIONAL
        For the variables that are *not* crossvalidated, the parameters from inference_cfg.yaml are used, or
        you can overwrite them by passing a dictionary with your preferred parameters.

    edgewisecondition: bool, default True
        Estimates Euclidean distances for each skeleton edge and uses those distance for excluding possible connections.
        If false, uses only one distance for all bodyparts (which is obviously suboptimal).

    target: string, default='rpck_train'
        What metric to optimize. Options are pck/rpck/rmse on train/test set.

    init_points: int, optional (default=10)
        Number of random initial explorations. Probing random regions helps diversify the exploration space.
        Parameter from BayesianOptimization.

    n_iter: int, optional (default=20)
        Number of iterations of Bayesian optimization to perform.
        The larger it is, the higher the likelihood of finding a good extremum.
        Parameter from BayesianOptimization.

    dcorr: float,
        Distance threshold for percent correct keypoints / relative percent correct keypoints (see paper).

    leastbpts: integer (should be a small number)
        If an animal has at most this many body parts visible in an image, the image will not be used
        for cross validation. Imagine e.g. if only a single bodypart is present; then, if animals need
        a certain minimal number of bodyparts for assembly (minimalnumberofconnections),
        this might not be predictable.

    printingintermediatevalues: bool, default True
        Whether intermediate metrics (RMSE/hits/...) per sample should be printed.


    Examples
    --------

    first run evaluate:

    deeplabcut.evaluate_network(path_config_file,Shuffles=[shuffle],plotting=True)

    Then e.g. for finding inference parameters to minimize rmse on test set:

    deeplabcut.evaluate_multianimal_crossvalidate(path_config_file,Shuffles=[shuffle],target='rmse_test')
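
    Custom search bounds can also be supplied; for instance (the range below is illustrative, not a recommendation):

    deeplabcut.evaluate_multianimal_crossvalidate(path_config_file, pbounds={'pafthreshold': (0.1, 0.6)}, target='rmse_test')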
    """
    from deeplabcut.pose_estimation_tensorflow.lib import crossvalutils
    from deeplabcut.utils import auxfun_multianimal, auxiliaryfunctions
    from easydict import EasyDict as edict

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, "all"
    )
    colors = visualization.get_cmap(len(comparisonbodyparts), name=cfg["colormap"])

    # wild guesses for a wide range:
    maxconnections = len(cfg["skeleton"])
    minconnections = 1  # len(cfg['multianimalbodyparts'])-1

    _pbounds = {
        "pafthreshold": (0.05, 0.7),
        "detectionthresholdsquare": (
            0,
            0.9,
        ),  # TODO: set to minimum (from pose_cfg.yaml)
        "minimalnumberofconnections": (minconnections, maxconnections),
    }
    if pbounds is not None:
        _pbounds.update(pbounds)

    if "rpck" in target or "pck" in target:
        maximize = True

    if "rmse" in target:
        maximize = False  # i.e. minimize

    for shuffle in Shuffles:
        evaluationfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetEvaluationFolder(
                    trainFraction, shuffle, cfg, modelprefix=modelprefix
                )
            ),
        )
        auxiliaryfunctions.attempttomakefolder(evaluationfolder, recursive=True)

        datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
            trainingsetfolder, trainFraction, shuffle, cfg
        )
        _, trainIndices, testIndices, _ = auxiliaryfunctions.LoadMetadata(
            os.path.join(cfg["project_path"], metadatafn)
        )
        modelfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetModelFolder(
                    trainFraction, shuffle, cfg, modelprefix=modelprefix
                )
            ),
        )
        path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
        try:
            dlc_cfg = load_config(str(path_test_config))
        except FileNotFoundError:
            raise FileNotFoundError(
                "It seems the model for shuffle %s and trainFraction %s does not exist."
                % (shuffle, trainFraction)
            )

        # Check which snapshots are available and sort them by # iterations
        Snapshots = np.array(
            [
                fn.split(".")[0]
                for fn in os.listdir(os.path.join(str(modelfolder), "train"))
                if "index" in fn
            ]
        )
        Snapshots = Snapshots[np.argsort([int(s.split("-")[1]) for s in Snapshots])]
        snapindex = -1
        dlc_cfg["init_weights"] = os.path.join(
            str(modelfolder), "train", Snapshots[snapindex]
        )  # setting weights to corresponding snapshot.
        trainingsiterations = (dlc_cfg["init_weights"].split(os.sep)[-1]).split("-")[
            -1
        ]  # read how many training siterations that corresponds to.

        DLCscorer, _ = auxiliaryfunctions.GetScorerName(
            cfg, shuffle, trainFraction, trainingsiterations, modelprefix=modelprefix
        )

        path_inference_config = Path(modelfolder) / "test" / "inference_cfg.yaml"
        if inferencecfg is None:  # then load or initialize
            inferencecfg = auxfun_multianimal.read_inferencecfg(
                path_inference_config, cfg
            )
        else:
            inferencecfg = edict(inferencecfg)
            auxfun_multianimal.check_inferencecfg_sanity(cfg, inferencecfg)

        inferencecfg.topktoretain = np.inf
        inferencecfg, opt = crossvalutils.bayesian_search(
            config,
            inferencecfg,
            _pbounds,
            edgewisecondition=edgewisecondition,
            shuffle=shuffle,
            trainingsetindex=trainingsetindex,
            target=target,
            maximize=maximize,
            init_points=init_points,
            n_iter=n_iter,
            acq="ei",
            dcorr=dcorr,
            leastbpts=leastbpts,
            modelprefix=modelprefix,
        )

        # update number of individuals to retain.
        inferencecfg.topktoretain = len(cfg["individuals"]) + 1 * (
            len(cfg["uniquebodyparts"]) > 0
        )

        # calculating results at the best solution
        DataOptParams, poses_gt, poses = crossvalutils.compute_crossval_metrics(
            config, inferencecfg, shuffle, trainingsetindex, modelprefix
        )

        path_inference_config = str(path_inference_config)
        # print("Quantification:", DataOptParams.head())
        DataOptParams.to_hdf(
            path_inference_config.split(".yaml")[0] + ".h5",
            "df_with_missing",
            format="table",
            mode="w",
        )
        DataOptParams.to_csv(path_inference_config.split(".yaml")[0] + ".csv")
        print("Saving optimal inference parameters...")
        print(DataOptParams.to_string())
        auxiliaryfunctions.write_plainconfig(path_inference_config, dict(inferencecfg))

        # Store best predictions
        max_indivs = max(pose.shape[0] for pose in poses)
        bpts = dlc_cfg["all_joints_names"]
        container = np.full((len(poses), max_indivs * len(bpts) * 3), np.nan)
        for n, pose in enumerate(poses):
            temp = pose.flatten()
            container[n, : len(temp)] = temp

        header = pd.MultiIndex.from_product(
            [
                [DLCscorer],
                [f"individual{i}" for i in range(1, max_indivs + 1)],
                bpts,
                ["x", "y", "likelihood"],
            ],
            names=["scorer", "individuals", "bodyparts", "coords"],
        )

        df = pd.DataFrame(container, columns=header)
        df.to_hdf(
            os.path.join(evaluationfolder, f"{DLCscorer}.h5"), key="df_with_missing"
        )

        if plotting:
            foldername = os.path.join(
                str(evaluationfolder),
                "LabeledImages_" + DLCscorer + "_" + Snapshots[snapindex],
            )
            auxiliaryfunctions.attempttomakefolder(foldername)
            for imageindex, imagename in tqdm(enumerate(Data.index)):
                image_path = os.path.join(cfg["project_path"], imagename)
                image = io.imread(image_path)
                frame = img_as_ubyte(skimage.color.gray2rgb(image))
                groundtruthcoordinates = poses_gt[imageindex]
                coords_pred = poses[imageindex][:, :, :2]
                probs_pred = poses[imageindex][:, :, -1:]
                fig = visualization.make_multianimal_labeled_image(
                    frame,
                    groundtruthcoordinates,
                    coords_pred,
                    probs_pred,
                    colors,
                    cfg["dotsize"],
                    cfg["alphavalue"],
                    cfg["pcutoff"],
                )
                visualization.save_labeled_frame(
                    fig, image_path, foldername, imageindex in trainIndices
                )
Example #51
    def test_hed_rgb_roundtrip(self):
        img_rgb = img_as_ubyte(self.img_rgb)
        new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb)))
        assert_equal(new, img_rgb)
Example #52
def evaluate_multianimal_full(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=None,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    modelprefix="",
    c_engine=False,
):
    """
    WIP multi animal project.
    """

    import os

    from deeplabcut.pose_estimation_tensorflow.nnet import predict
    from deeplabcut.pose_estimation_tensorflow.nnet import (
        predict_multianimal as predictma,
    )
    from deeplabcut.utils import auxiliaryfunctions, auxfun_multianimal

    import tensorflow as tf

    if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
        del os.environ["TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

    tf.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    if gputouse is not None:  # gpu selectinon
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    start_path = os.getcwd()

    ##################################################
    # Load data...
    ##################################################
    cfg = auxiliaryfunctions.read_config(config)
    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        TrainingFractions = [cfg["TrainingFraction"][trainingsetindex]]

    # Loading human-annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )
    # Handle data previously annotated on a different platform
    sep = "/" if "/" in Data.index[0] else "\\"
    if sep != os.path.sep:
        Data.index = Data.index.str.replace(sep, os.path.sep)
    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts
    )
    colors = visualization.get_cmap(len(comparisonbodyparts), name=cfg["colormap"])
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/")
    )
    for shuffle in Shuffles:
        for trainFraction in TrainingFractions:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg
            )
            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix
                    )
                ),
            )
            path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"

            # Load meta data
            (
                data,
                trainIndices,
                testIndices,
                trainFraction,
            ) = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn)
            )

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction)
                )

            # TODO: IMPLEMENT for different batch sizes?
            dlc_cfg["batch_size"] = 1  # due to differently sized images!!!

            # Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix
                    )
                ),
            )
            auxiliaryfunctions.attempttomakefolder(evaluationfolder, recursive=True)
            # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array(
                [
                    fn.split(".")[0]
                    for fn in os.listdir(os.path.join(str(modelfolder), "train"))
                    if "index" in fn
                ]
            )
            if len(Snapshots) == 0:
                print(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction)
                )
            else:
                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots]
                )
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    print(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )

                (
                    individuals,
                    uniquebodyparts,
                    multianimalbodyparts,
                ) = auxfun_multianimal.extractindividualsandbodyparts(cfg)

                final_result = []
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split("-")[
                        -1
                    ]  # read how many training iterations that corresponds to.

                    # name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of trainingiterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )

                    if os.path.isfile(resultsfilename.split(".h5")[0] + "_full.pickle"):
                        print("Model already evaluated.", resultsfilename)
                    else:
                        if plotting:
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_"
                                + DLCscorer
                                + "_"
                                + Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)

                        # print(dlc_cfg)
                        # Specifying state of model (snapshot / training state)
                        sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)

                        PredicteData = {}
                        print("Analyzing data...")
                        for imageindex, imagename in tqdm(enumerate(Data.index)):
                            image_path = os.path.join(cfg["project_path"], imagename)
                            image = io.imread(image_path)
                            frame = img_as_ubyte(skimage.color.gray2rgb(image))

                            GT = Data.iloc[imageindex]

                            # Storing GT data as dictionary, so it can be used for calculating connection costs
                            groundtruthcoordinates = []
                            groundtruthidentity = []
                            for bptindex, bpt in enumerate(dlc_cfg["all_joints_names"]):
                                coords = np.full((len(individuals), 2), np.nan)
                                identity = []
                                for prfxindex, prefix in enumerate(individuals):
                                    if bpt in uniquebodyparts and prefix == "single":
                                        coords[prfxindex, :] = np.array(
                                            [
                                                GT[cfg["scorer"]][prefix][bpt]["x"],
                                                GT[cfg["scorer"]][prefix][bpt]["y"],
                                            ]
                                        )
                                        identity.append(prefix)
                                    elif (
                                        bpt in multianimalbodyparts
                                        and prefix != "single"
                                    ):
                                        coords[prfxindex, :] = np.array(
                                            [
                                                GT[cfg["scorer"]][prefix][bpt]["x"],
                                                GT[cfg["scorer"]][prefix][bpt]["y"],
                                            ]
                                        )
                                        identity.append(prefix)
                                    else:
                                        identity.append("nix")

                                groundtruthcoordinates.append(
                                    coords[np.isfinite(coords[:, 0]), :]
                                )
                                groundtruthidentity.append(
                                    np.array(identity)[np.isfinite(coords[:, 0])]
                                )

                            PredicteData[imagename] = {}
                            PredicteData[imagename]["index"] = imageindex

                            pred = predictma.get_detectionswithcostsandGT(
                                frame,
                                groundtruthcoordinates,
                                dlc_cfg,
                                sess,
                                inputs,
                                outputs,
                                outall=False,
                                nms_radius=dlc_cfg.nmsradius,
                                det_min_score=dlc_cfg.minconfidence,
                                c_engine=c_engine,
                            )
                            PredicteData[imagename]["prediction"] = pred
                            PredicteData[imagename]["groundtruth"] = [
                                groundtruthidentity,
                                groundtruthcoordinates,
                                GT,
                            ]

                            if plotting:
                                coords_pred = pred["coordinates"][0]
                                probs_pred = pred["confidence"]
                                fig = visualization.make_multianimal_labeled_image(
                                    frame,
                                    groundtruthcoordinates,
                                    coords_pred,
                                    probs_pred,
                                    colors,
                                    cfg["dotsize"],
                                    cfg["alphavalue"],
                                    cfg["pcutoff"],
                                )

                                visualization.save_labeled_frame(
                                    fig,
                                    image_path,
                                    foldername,
                                    imageindex in trainIndices,
                                )

                        sess.close()  # closes the current tf session
                        PredicteData["metadata"] = {
                            "nms radius": dlc_cfg.nmsradius,
                            "minimal confidence": dlc_cfg.minconfidence,
                            "PAFgraph": dlc_cfg.partaffinityfield_graph,
                            "all_joints": [[i] for i in range(len(dlc_cfg.all_joints))],
                            "all_joints_names": [
                                dlc_cfg.all_joints_names[i]
                                for i in range(len(dlc_cfg.all_joints))
                            ],
                            "stride": dlc_cfg.get("stride", 8),
                        }
                        print(
                            "Done and results stored for snapshot: ",
                            Snapshots[snapindex],
                        )

                        dictionary = {
                            "Scorer": DLCscorer,
                            "DLC-model-config file": dlc_cfg,
                            "trainIndices": trainIndices,
                            "testIndices": testIndices,
                            "trainFraction": trainFraction,
                        }
                        metadata = {"data": dictionary}
                        auxfun_multianimal.SaveFullMultiAnimalData(
                            PredicteData, metadata, resultsfilename
                        )

                        tf.reset_default_graph()

    # returning to initial folder
    os.chdir(str(start_path))
def CreateVideoSlow(videooutname,
                    clip,
                    Dataframe,
                    tmpfolder,
                    dotsize,
                    colormap,
                    alphavalue,
                    pcutoff,
                    cropping,
                    x1,
                    x2,
                    y1,
                    y2,
                    delete,
                    DLCscorer,
                    bodyparts2plot,
                    outputframerate,
                    Frames2plot,
                    behavior=None,
                    behavior_measure=None):  # AVH
    ''' Creating individual frames with labeled body parts and making a video'''
    #scorer=np.unique(Dataframe.columns.get_level_values(0))[0]
    #bodyparts2plot = list(np.unique(Dataframe.columns.get_level_values(1)))
    colors = get_cmap(len(bodyparts2plot), name=colormap)
    if cropping:
        ny, nx = y2 - y1, x2 - x1
    else:
        ny, nx = clip.height(), clip.width()

    fps = clip.fps()
    if outputframerate is None:  # by default, same as the input rate
        outputframerate = clip.fps()

    nframes = len(Dataframe.index)
    duration = nframes / fps

    print("Duration of video [s]: ", round(duration, 2), ", recorded with ",
          round(fps, 2), "fps!")
    print("Overall # of frames: ", int(nframes),
          "with cropped frame dimensions: ", nx, ny)
    print("Generating frames and creating video.")
    df_likelihood = np.empty((len(bodyparts2plot), nframes))
    df_x = np.empty((len(bodyparts2plot), nframes))
    df_y = np.empty((len(bodyparts2plot), nframes))
    for bpindex, bp in enumerate(bodyparts2plot):
        df_likelihood[
            bpindex, :] = Dataframe[DLCscorer][bp]['likelihood'].values
        df_x[bpindex, :] = Dataframe[DLCscorer][bp]['x'].values
        df_y[bpindex, :] = Dataframe[DLCscorer][bp]['y'].values

    nframes_digits = int(np.ceil(np.log10(nframes)))
    if nframes_digits > 9:
        raise Exception(
            "Your video has more than 10**9 frames, we recommend chopping it up."
        )

    if Frames2plot is None:
        Index = range(nframes)
    else:
        Index = []
        for k in Frames2plot:
            if 0 <= k < nframes:
                Index.append(int(k))

    for index in tqdm(range(nframes)):
        imagename = tmpfolder + "/file" + str(index).zfill(
            nframes_digits) + ".png"
        if os.path.isfile(imagename):
            image = img_as_ubyte(
                clip.load_frame())  #still need to read (so counter advances!)
        else:
            plt.axis('off')
            image = img_as_ubyte(clip.load_frame())
            if index in Index:  #then extract the frame!
                if cropping:
                    image = image[y1:y2, x1:x2]
                else:
                    pass
                plt.figure(frameon=False,
                           figsize=(nx * 1. / 100, ny * 1. / 100))
                plt.subplots_adjust(left=0,
                                    bottom=0,
                                    right=1,
                                    top=1,
                                    wspace=0,
                                    hspace=0)
                plt.imshow(image)

                for bpindex, bp in enumerate(bodyparts2plot):
                    if df_likelihood[bpindex, index] > pcutoff:
                        plt.scatter(df_x[bpindex, index],
                                    df_y[bpindex, index],
                                    s=dotsize**2,
                                    color=colors(bpindex),
                                    alpha=alphavalue)
                # AVH start
                if behavior is not None:
                    # green box if the behavior is present in this frame, red otherwise
                    if bool(behavior[index]):
                        color_box_behavior = "green"
                    else:
                        color_box_behavior = "red"
                    plt.text(10,
                             30,
                             "Activity",
                             bbox=dict(facecolor=color_box_behavior,
                                       alpha=0.5))  # AVH
                    plt.text(
                        10,
                        80,
                        str(round(behavior_measure[index], 4)),
                        bbox=dict(facecolor=color_box_behavior, alpha=0.5)
                    )  # AVH; it would be nice to draw a polygon here, like Tomas's
                # AVH end
                plt.xlim(0, nx)
                plt.ylim(0, ny)
                plt.axis('off')
                plt.subplots_adjust(left=0,
                                    bottom=0,
                                    right=1,
                                    top=1,
                                    wspace=0,
                                    hspace=0)
                plt.gca().invert_yaxis()
                plt.savefig(imagename)

                plt.close("all")

    start = os.getcwd()
    os.chdir(tmpfolder)
    print("All labeled frames were created, now generating video...")
    #vname=str(Path(tmpfolder).stem).split('-')[1]
    ## One can change the parameters of the video creation script below:
    # See ffmpeg user guide: http://ffmpeg.org/ffmpeg.html#Video-and-Audio-file-format-conversion
    #
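    # For instance (illustrative settings, not part of the original call), one
    # could request H.264 encoding with a constant-rate-factor quality knob:
    #
    #   subprocess.call(['ffmpeg', '-framerate', str(clip.fps()),
    #                    '-i', 'file%0' + str(nframes_digits) + 'd.png',
    #                    '-vcodec', 'libx264', '-crf', '18',
    #                    '-r', str(outputframerate), '../' + videooutname])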
    try:
        subprocess.call([
            'ffmpeg', '-framerate',
            str(clip.fps()), '-i', 'file%0' + str(nframes_digits) + 'd.png',
            '-r',
            str(outputframerate), '../' + videooutname
        ])
    except FileNotFoundError:
        print(
            "Ffmpeg not correctly installed, see https://github.com/AlexEMG/DeepLabCut/issues/45"
        )

    if delete:
        for file_name in glob.glob("*.png"):
            os.remove(file_name)
    os.chdir(start)
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull


To get started, let's load an image using ``io.imread``. Note that morphology
functions only work on gray-scale or binary images, so we set ``as_grey=True``.
"""

import matplotlib.pyplot as plt
from skimage.data import data_dir
from skimage.util import img_as_ubyte
from skimage import io

phantom = img_as_ubyte(io.imread(data_dir + '/phantom.png', as_grey=True))
fig, ax = plt.subplots()
ax.imshow(phantom, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure

Let's also define a convenience function for plotting comparisons:
"""


def plot_comparison(original, filtered, filter_name):

    fig, (ax1, ax2) = plt.subplots(ncols=2,
                                   figsize=(8, 4),
                                   sharex=True,
                                   sharey=True)
    ax1.imshow(original, cmap=plt.cm.gray)
    ax1.set_title('original')
    ax1.axis('off')
    ax2.imshow(filtered, cmap=plt.cm.gray)
    ax2.set_title(filter_name)
    ax2.axis('off')
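
"""
As a quick check of the helper, compare the phantom with an eroded copy.
This is a minimal sketch: it assumes ``erosion`` and ``disk`` from
``skimage.morphology``, which the surrounding example relies on for its
morphology sections.
"""

from skimage.morphology import erosion, disk

plot_comparison(phantom, erosion(phantom, disk(6)), 'erosion')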
Example #55
def PlottingSingleFrame(clip,
                        Dataframe,
                        bodyparts2plot,
                        tmpfolder,
                        index,
                        scorer,
                        dotsize,
                        pcutoff,
                        alphavalue,
                        colors,
                        strwidth=4):
    ''' Label frame and save under imagename '''
    from skimage import io
    imagename1 = os.path.join(tmpfolder,
                              "img" + str(index).zfill(strwidth) + ".png")
    imagename2 = os.path.join(
        tmpfolder, "img" + str(index).zfill(strwidth) + "labeled.png")

    if os.path.isfile(imagename1):
        pass
    else:
        plt.axis('off')
        image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
        io.imsave(imagename1, image)

        if np.ndim(image) > 2:
            h, w, nc = np.shape(image)
        else:
            h, w = np.shape(image)

        plt.figure(frameon=False, figsize=(w * 1. / 100, h * 1. / 100))
        plt.subplots_adjust(left=0,
                            bottom=0,
                            right=1,
                            top=1,
                            wspace=0,
                            hspace=0)
        plt.imshow(image)
        for bpindex, bp in enumerate(bodyparts2plot):
            if Dataframe[scorer][bp]['likelihood'].values[index] > pcutoff:
                plt.plot(Dataframe[scorer][bp]['x'].values[index],
                         Dataframe[scorer][bp]['y'].values[index],
                         '.',
                         color=colors(bpindex),
                         ms=dotsize,
                         alpha=alphavalue)

        plt.xlim(0, w)
        plt.ylim(0, h)
        plt.axis('off')
        plt.subplots_adjust(left=0,
                            bottom=0,
                            right=1,
                            top=1,
                            wspace=0,
                            hspace=0)
        plt.gca().invert_yaxis()
        plt.savefig(imagename2)
        plt.close("all")
Example #56
def evaluate_multianimal_full(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=None,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    modelprefix="",
    c_engine=False,
):
    from deeplabcut.pose_estimation_tensorflow.nnet import predict
    from deeplabcut.pose_estimation_tensorflow.nnet import (
        predict_multianimal as predictma, )
    from deeplabcut.utils import auxiliaryfunctions, auxfun_multianimal

    import tensorflow as tf

    if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
        del os.environ[
            "TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

    tf.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    if gputouse is not None:  # gpu selectinon
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    start_path = os.getcwd()

    ##################################################
    # Load data...
    ##################################################
    cfg = auxiliaryfunctions.read_config(config)
    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        TrainingFractions = [cfg["TrainingFraction"][trainingsetindex]]

    # Loading human-annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )
    # Handle data previously annotated on a different platform
    sep = "/" if "/" in Data.index[0] else "\\"
    if sep != os.path.sep:
        Data.index = Data.index.str.replace(sep, os.path.sep)
    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)
    all_bpts = np.asarray(
        len(cfg["individuals"]) * cfg["multianimalbodyparts"] +
        cfg["uniquebodyparts"])
    colors = visualization.get_cmap(len(comparisonbodyparts),
                                    name=cfg["colormap"])
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/"))
    for shuffle in Shuffles:
        for trainFraction in TrainingFractions:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg)
            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix)),
            )
            path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"

            # Load meta data
            (
                data,
                trainIndices,
                testIndices,
                trainFraction,
            ) = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn))

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction))

            # TODO: IMPLEMENT for different batch sizes?
            dlc_cfg["batch_size"] = 1  # due to differently sized images!!!

            joints = dlc_cfg["all_joints_names"]

            # Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix)),
            )
            auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                                   recursive=True)
            # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array([
                fn.split(".")[0]
                for fn in os.listdir(os.path.join(str(modelfolder), "train"))
                if "index" in fn
            ])
            if len(Snapshots) == 0:
                print(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction))
            else:
                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots])
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    print(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )

                final_result = []
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split(
                        "-"
                    )[-1]  # read how many training iterations that corresponds to.

                    # name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of trainingiterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )

                    if os.path.isfile(
                            resultsfilename.split(".h5")[0] + "_full.pickle"):
                        print("Model already evaluated.", resultsfilename)
                    else:
                        if plotting:
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_" + DLCscorer + "_" +
                                Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)

                        # print(dlc_cfg)
                        # Specifying state of model (snapshot / training state)
                        sess, inputs, outputs = predict.setup_pose_prediction(
                            dlc_cfg)

                        PredicteData = {}
                        dist = np.full((len(all_bpts), len(Data)), np.nan)
                        distnorm = np.full(len(Data), np.nan)
                        print("Analyzing data...")
                        for imageindex, imagename in tqdm(enumerate(
                                Data.index)):
                            image_path = os.path.join(cfg["project_path"],
                                                      imagename)
                            image = io.imread(image_path)
                            frame = img_as_ubyte(skimage.color.gray2rgb(image))

                            GT = Data.iloc[imageindex]
                            df = GT.unstack("coords").reindex(
                                joints, level='bodyparts')

                            # Evaluate PAF edge lengths to calibrate `distnorm`
                            temp = GT.unstack("bodyparts")[joints]
                            xy = temp.values.reshape(
                                (-1, 2, temp.shape[1])).swapaxes(1, 2)
                            edges = xy[:, dlc_cfg["partaffinityfield_graph"]]
                            lengths = np.sum(
                                (edges[:, :, 0] - edges[:, :, 1])**2, axis=2)
                            distnorm[imageindex] = np.nanmax(lengths)

                            # FIXME Is having an empty array vs nan really that necessary?!
                            groundtruthidentity = list(
                                df.index.get_level_values(
                                    "individuals").to_numpy().reshape((-1, 1)))
                            groundtruthcoordinates = list(
                                df.values[:, np.newaxis])
                            for i, coords in enumerate(groundtruthcoordinates):
                                if np.isnan(coords).any():
                                    groundtruthcoordinates[i] = np.empty(
                                        (0, 2), dtype=float)
                                    groundtruthidentity[i] = np.array(
                                        [], dtype=str)

                            PredicteData[imagename] = {}
                            PredicteData[imagename]["index"] = imageindex

                            pred = predictma.get_detectionswithcostsandGT(
                                frame,
                                groundtruthcoordinates,
                                dlc_cfg,
                                sess,
                                inputs,
                                outputs,
                                outall=False,
                                nms_radius=dlc_cfg.nmsradius,
                                det_min_score=dlc_cfg.minconfidence,
                                c_engine=c_engine,
                            )
                            PredicteData[imagename]["prediction"] = pred
                            PredicteData[imagename]["groundtruth"] = [
                                groundtruthidentity,
                                groundtruthcoordinates,
                                GT,
                            ]

                            coords_pred = pred["coordinates"][0]
                            probs_pred = pred["confidence"]
                            for bpt, xy_gt in df.groupby(level="bodyparts"):
                                inds_gt = np.flatnonzero(
                                    np.all(~np.isnan(xy_gt), axis=1))
                                xy = coords_pred[joints.index(bpt)]
                                if inds_gt.size and xy.size:
                                    # Pick the predictions closest to ground truth,
                                    # rather than the ones the model is most confident in
                                    d = cdist(xy_gt.iloc[inds_gt], xy)
                                    rows, cols = linear_sum_assignment(d)
                                    min_dists = d[rows, cols]
                                    inds = np.flatnonzero(all_bpts == bpt)
                                    dist[inds[inds_gt[rows]],
                                         imageindex] = min_dists

                            if plotting:
                                fig = visualization.make_multianimal_labeled_image(
                                    frame,
                                    groundtruthcoordinates,
                                    coords_pred,
                                    probs_pred,
                                    colors,
                                    cfg["dotsize"],
                                    cfg["alphavalue"],
                                    cfg["pcutoff"],
                                )

                                visualization.save_labeled_frame(
                                    fig,
                                    image_path,
                                    foldername,
                                    imageindex in trainIndices,
                                )

                        sess.close()  # closes the current tf session

                        # Compute all distance statistics
                        df_dist = pd.DataFrame(dist, index=df.index)
                        write_path = os.path.join(evaluationfolder, "dist.csv")
                        df_dist.to_csv(write_path)

                        stats_per_ind = _compute_stats(
                            df_dist.groupby("individuals"))
                        stats_per_ind.to_csv(
                            write_path.replace("dist.csv",
                                               "dist_stats_ind.csv"))
                        stats_per_bpt = _compute_stats(
                            df_dist.groupby("bodyparts"))
                        stats_per_bpt.to_csv(
                            write_path.replace("dist.csv",
                                               "dist_stats_bpt.csv"))

                        # For OKS/PCK, compute the standard deviation error across all frames
                        sd = df_dist.groupby("bodyparts").mean().std(axis=1)
                        sd["distnorm"] = np.sqrt(np.nanmax(distnorm))
                        sd.to_csv(write_path.replace("dist.csv", "sd.csv"))

                        if show_errors:
                            print(
                                "##########################################")
                            print(
                                "Euclidean distance statistics per individual (in pixels)"
                            )
                            print(
                                stats_per_ind.mean(
                                    axis=1).unstack().to_string())
                            print("##########################################")
                            print(
                                "Euclidean distance statistics per bodypart (in pixels)"
                            )
                            print(
                                stats_per_bpt.mean(
                                    axis=1).unstack().to_string())

                        PredicteData["metadata"] = {
                            "nms radius":
                            dlc_cfg.nmsradius,
                            "minimal confidence":
                            dlc_cfg.minconfidence,
                            "PAFgraph":
                            dlc_cfg.partaffinityfield_graph,
                            "all_joints":
                            [[i] for i in range(len(dlc_cfg.all_joints))],
                            "all_joints_names": [
                                dlc_cfg.all_joints_names[i]
                                for i in range(len(dlc_cfg.all_joints))
                            ],
                            "stride":
                            dlc_cfg.get("stride", 8),
                        }
                        print(
                            "Done and results stored for snapshot: ",
                            Snapshots[snapindex],
                        )

                        dictionary = {
                            "Scorer": DLCscorer,
                            "DLC-model-config file": dlc_cfg,
                            "trainIndices": trainIndices,
                            "testIndices": testIndices,
                            "trainFraction": trainFraction,
                        }
                        metadata = {"data": dictionary}
                        auxfun_multianimal.SaveFullMultiAnimalData(
                            PredicteData, metadata, resultsfilename)

                        tf.reset_default_graph()

    # returning to initial folder
    os.chdir(str(start_path))
Example #57
In this example, we will see how to filter a gray-level image using some of the
linear and non-linear filters available in skimage. We use the ``camera`` image
from `skimage.data` for all comparisons.

.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
       Recognition 35 (2002) 527-535, :DOI:`10.1016/S0031-3203(01)00047-4`
"""

import numpy as np
import matplotlib.pyplot as plt

from skimage.util import img_as_ubyte
from skimage import data
from skimage.exposure import histogram

noisy_image = img_as_ubyte(data.camera())
hist, hist_centers = histogram(noisy_image)

fig, ax = plt.subplots(ncols=2, figsize=(10, 5))

ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].axis('off')

ax[1].plot(hist_centers, hist, lw=2)
ax[1].set_title('Gray-level histogram')

plt.tight_layout()

######################################################################
#
# Noise removal
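#
# A minimal sketch of what this step might look like (assuming the ``mean``
# and ``median`` rank filters from ``skimage.filters.rank``; the disk radius
# is illustrative):

from skimage.filters.rank import mean, median
from skimage.morphology import disk

loc_mean = mean(noisy_image, disk(1))
loc_median = median(noisy_image, disk(1))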
Example #58
def CreateVideo(clip, Dataframe):
    ''' Creating individual frames with labeled body parts and making a video'''
    scorer = np.unique(Dataframe.columns.get_level_values(0))[0]
    bodyparts2plot = list(np.unique(Dataframe.columns.get_level_values(1)))
    colors = get_cmap(len(bodyparts2plot))

    nx, ny = clip.size  # dimensions of frame (width, height) in moviepy
    fps = clip.fps
    nframes = len(Dataframe.index)
    if cropping:
        # one might want to adjust
        clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)
    clip.reader.initialize()
    print("Duration of video [s]: ", clip.duration, ", recorded with ", fps,
          "fps!")
    print("Overall # of frames: ", nframes, "with cropped frame dimensions: ",
          clip.size)
    print("Generating frames")
    for index in tqdm(range(nframes)):

        imagename = tmpfolder + "/file%04d.png" % index
        if os.path.isfile(tmpfolder + "/file%04d.png" % index):
            pass
        else:
            plt.axis('off')
            image = img_as_ubyte(clip.reader.read_frame())
            #image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))

            if np.ndim(image) > 2:
                h, w, nc = np.shape(image)
            else:
                h, w = np.shape(image)

            plt.figure(frameon=False, figsize=(w * 1. / 100, h * 1. / 100))
            plt.subplots_adjust(left=0,
                                bottom=0,
                                right=1,
                                top=1,
                                wspace=0,
                                hspace=0)
            plt.imshow(image)

            for bpindex, bp in enumerate(bodyparts2plot):
                if Dataframe[scorer][bp]['likelihood'].values[index] > pcutoff:
                    plt.scatter(Dataframe[scorer][bp]['x'].values[index],
                                Dataframe[scorer][bp]['y'].values[index],
                                s=dotsize**2,
                                color=colors(bpindex),
                                alpha=alphavalue)

            plt.xlim(0, w)
            plt.ylim(0, h)
            plt.axis('off')
            plt.subplots_adjust(left=0,
                                bottom=0,
                                right=1,
                                top=1,
                                wspace=0,
                                hspace=0)
            plt.gca().invert_yaxis()
            plt.savefig(imagename)

            plt.close("all")

    os.chdir(tmpfolder)

    print("Generating video")
    subprocess.call([
        'ffmpeg', '-framerate',
        str(clip.fps), '-i', 'file%04d.png', '-r', '30',
        '../' + vname + '_DeepLabCutlabeled.mp4'
    ])
    if deleteindividualframes:
        for file_name in glob.glob("*.png"):
            os.remove(file_name)

    os.chdir("../")
Example #59
import numpy as np
from skimage.feature import greycomatrix, greycoprops
from skimage.util import img_as_ubyte


def extract_glcm_features(image: np.ndarray)->np.ndarray:
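    """Compute a dictionary of GLCM (Haralick-style) texture features.

    The nested helpers implement the textbook formulas on the normalized
    co-occurrence matrix: ``mu`` and ``sigma_sq`` are the joint average and
    joint variance; the others compute entropy, cluster shade/prominence,
    difference/sum statistics, inverse-difference measures, autocorrelation,
    and the two informational measures of correlation.
    """
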
    def mu(glcm: np.ndarray)->np.ndarray:
        m = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                m += i*glcm[i, j]
                
        return m


    def sigma_sq(glcm: np.ndarray)->np.ndarray:
        s = 0
        m = mu(glcm)

        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                s += glcm[i, j]*((i - m)**2)
                
        return s


    def A(glcm: np.ndarray, correlation: np.ndarray)->np.ndarray:
        a = 0
        m = mu(glcm)
        s = sigma_sq(glcm)
        denom = np.sqrt(s)**3 * (np.sqrt(2*(1+correlation)))**3

        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                nom = ((i+j-2*m)**3) * glcm[i, j]
                a += nom/denom
                
        return a


    def B(glcm: np.ndarray, correlation: np.ndarray)->np.ndarray:
        b = 0
        m = mu(glcm)
        s = sigma_sq(glcm)
        denom = 4*np.sqrt(s)**4 * ((1+correlation)**2)

        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                nom = ((i+j-2*m)**4) * glcm[i, j]
                b += nom/denom
                
        return b


    def entropy(glcm: np.ndarray)-> np.ndarray:
        entropy = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                if glcm[i, j] != 0:
                    entropy += -np.log(glcm[i,j])*glcm[i, j]
                    
        return entropy


    def cluster_shade(glcm: np.ndarray, correlation: np.ndarray)->np.ndarray:
        
        return np.sign(A(glcm, correlation)) * np.absolute(A(glcm, correlation))**(1/3)


    def cluster_prominence(glcm: np.ndarray, correlation: np.ndarray)->np.ndarray:
        
        return np.sign(B(glcm, correlation)) * np.absolute(B(glcm, correlation))**(1/4)


    def difference_average(glcm: np.ndarray)->np.ndarray:
        da = 0
        for k in range(glcm.shape[0]):  # |i-j| ranges over 0..N-1
            for i in range(glcm.shape[0]):
                for j in range(glcm.shape[1]):
                    if np.abs(i-j) == k:
                        da += k*glcm[i, j]

        return da


    def difference_variance(glcm: np.ndarray)->np.ndarray:
        dv = 0
        m = difference_average(glcm)  # centered on the difference average
        for k in range(glcm.shape[0]):
            for i in range(glcm.shape[0]):
                for j in range(glcm.shape[1]):
                    if np.abs(i-j) == k:
                        dv += (k-m)**2 * glcm[i, j]

        return dv


    def difference_entropy(glcm: np.ndarray)->np.ndarray:
        de = 0
        for k in range(glcm.shape[0]):
            # probability that |i-j| == k (the difference histogram)
            p_k = 0
            for i in range(glcm.shape[0]):
                for j in range(glcm.shape[1]):
                    if np.abs(i-j) == k:
                        p_k += glcm[i, j]
            if p_k != 0:
                de += -np.log(p_k) * p_k

        return de


    def sum_average(glcm: np.ndarray)->np.ndarray:
        sa = 0
        for k in range(2*glcm.shape[0] - 1):  # i+j ranges over 0..2(N-1)
            for i in range(glcm.shape[0]):
                for j in range(glcm.shape[1]):
                    if i+j == k:
                        sa += k * glcm[i, j]

        return sa


    def sum_variance(glcm: np.ndarray)->np.ndarray:
        sv = 0
        m = sum_average(glcm)  # centered on the sum average
        for k in range(2*glcm.shape[0] - 1):
            for i in range(glcm.shape[0]):
                for j in range(glcm.shape[1]):
                    if i+j == k:
                        sv += (k-m)**2 * glcm[i, j]

        return sv


    def sum_entropy(glcm: np.ndarray)->np.ndarray:
        se = 0
        for k in range(2*glcm.shape[0] - 1):
            # probability that i+j == k (the sum histogram)
            p_k = 0
            for i in range(glcm.shape[0]):
                for j in range(glcm.shape[1]):
                    if i+j == k:
                        p_k += glcm[i, j]
            if p_k != 0:
                se += np.log(p_k) * p_k

        return -se


    def inverse_difference(glcm: np.ndarray)->np.ndarray:
        inv_d = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                inv_d += glcm[i, j] / (1+np.abs(i-j)) 
                
        return inv_d


    def normalized_inverse_difference(glcm: np.ndarray)->np.ndarray:
        ninv_d = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                ninv_d += glcm[i, j] / (1+np.abs(i-j)/glcm.shape[0]) 
                
        return ninv_d


    def inverse_difference_moment(glcm: np.ndarray)->np.ndarray:
        inv_m = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                inv_m += glcm[i, j] / (1+(i-j)**2)
                
        return inv_m


    def normalized_inverse_difference_moment(glcm: np.ndarray)->np.ndarray:
        ninv_m = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                ninv_m += glcm[i, j] / (1+(i-j)**2/glcm.shape[0]**2) 
                
        return ninv_m


    def inverse_variance(glcm: np.ndarray)->np.ndarray:
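        # sums over j > i only and doubles the result, which is valid
        # because the GLCM is built with symmetric=True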
        inv_v = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                if j > i:
                    inv_v += glcm[i, j] / (i-j)**2
                    
        return 2 * inv_v


    def autocorrelation(glcm: np.ndarray)->np.ndarray:
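        # uses 0-based gray levels; some references define autocorrelation
        # with (i+1)*(j+1) weights for 1-based levels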
        acr = 0
        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                acr += i*j*glcm[i, j]
                
        return acr


    def information_correlation_1(glcm: np.ndarray)->np.ndarray:
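        # assumes a symmetric GLCM, so the row marginal np.sum(glcm[i])
        # doubles as both p_x(i) and p_y(i)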
        HXY = 0
        HX = 0
        HXY1 = 0

        for i in range(glcm.shape[0]):
            if (np.sum(glcm[i]) !=0):
                HX -= np.sum(glcm[i]) * np.log(np.sum(glcm[i]))
            for j in range(glcm.shape[1]):
                if (glcm[i, j]!=0 and np.sum(glcm[i])!=0):
                    HXY -= glcm[i, j] * np.log(glcm[i, j])
                    HXY1 -= glcm[i, j] * np.log(np.sum(glcm[i])*np.sum(glcm[j]))

        return (HXY-HXY1) / HX


    def information_correlation_2(glcm: np.ndarray)->np.ndarray:
        HXY = 0
        HXY2 = 0

        for i in range(glcm.shape[0]):
            for j in range(glcm.shape[1]):
                if glcm[i, j]!=0 and np.sum(glcm[i])*np.sum(glcm[j])!=0:
                    HXY -= glcm[i, j]*np.log(glcm[i, j])
                    HXY2 -= np.sum(glcm[i]) * np.sum(glcm[j]) * np.log(np.sum(glcm[i])*np.sum(glcm[j]))

        # note the negated exponent: HXY2 >= HXY, so the argument of the
        # square root stays in [0, 1) and the result is real
        return np.sqrt(1 - np.exp(-2 * (HXY2 - HXY)))
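
    # A minimal vectorized cross-check for a few of the loop-based helpers
    # above (a sketch; it assumes `glcm` is the normalized 4-D array from
    # greycomatrix and collapses it to 2-D first). Values should agree with
    # the loop versions up to floating-point error.
    def _vectorized_checks(glcm: np.ndarray) -> dict:
        P = glcm[:, :, 0, 0]
        i, j = np.ogrid[:P.shape[0], :P.shape[1]]
        return {
            "difference_average": np.sum(np.abs(i - j) * P),
            "autocorrelation": np.sum(i * j * P),
            "inverse_difference": np.sum(P / (1 + np.abs(i - j))),
        }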
    
    
    img_uint8 = img_as_ubyte(image)
    # one normalized, symmetric GLCM; `distances` is in pixels and `angles`
    # in radians (angles=[1] pairs pixels along a ~57 degree direction)
    glcm = greycomatrix(img_uint8, distances=[1], angles=[1], levels=256,
                        symmetric=True, normed=True
                       )
    
    contrast_ = greycoprops(glcm, 'contrast')
    dissimilarity_ = greycoprops(glcm, 'dissimilarity')
    homogeneity_ = greycoprops(glcm, 'homogeneity')
    energy_ = greycoprops(glcm, 'energy')
    correlation_ = greycoprops(glcm, 'correlation')
    asm_ = greycoprops(glcm, 'ASM')
    entropy_ = entropy(glcm)

    cluster_shade_ = cluster_shade(glcm, correlation_)
    cluster_prominence_ = cluster_prominence(glcm, correlation_)

    max_prob_ = [np.max(glcm)]

    joint_average_ = mu(glcm)
    joint_variance_ = sigma_sq(glcm)

    difference_average_ = difference_average(glcm)
    difference_variance_ = difference_variance(glcm)
    difference_entropy_ = difference_entropy(glcm)

    sum_average_ = sum_average(glcm)
    sum_variance_ = sum_variance(glcm)
    sum_entropy_ = sum_entropy(glcm)

    inverse_difference_ = inverse_difference(glcm)
    normalized_inverse_difference_ = normalized_inverse_difference(glcm)

    inverse_difference_moment_ = inverse_difference_moment(glcm)
    normalized_inverse_difference_moment_ = normalized_inverse_difference_moment(glcm)

    inverse_variance_ = inverse_variance(glcm)

    autocorrelation_ = autocorrelation(glcm)

    information_correlation_1_ = information_correlation_1(glcm)
    information_correlation_2_ = information_correlation_2(glcm)

    feature_vector = {
                      "contrast": contrast_,
                      "dissimilarity": dissimilarity_,
                      "homogeneity": homogeneity_,
                      "energy": energy_,
                      "correlation": correlation_,
                      "asm": asm_,
                      "entropy": entropy_,
                      "cluster_shade": cluster_shade_,
                      "cluster_prominence": cluster_prominence_,
                      "max_prob": max_prob_,
                      "average": joint_average_,
                      "variance": joint_variance_,
                      "difference_average": difference_average_,
                      "difference_variance": difference_variance_,
                      "difference_entropy": difference_entropy_,
                      "sum_average": sum_average_,
                      "sum_variance": sum_variance_,
                      "sum_entropy": sum_entropy_,
                      "inverse_difference": inverse_difference_,
                      "normalized_inverse_difference": normalized_inverse_difference_,
                      "inverse_difference_moment": inverse_difference_moment_,
                      "normalized_inverse_difference_moment": normalized_inverse_difference_moment_,
                      "inverse_variance": inverse_variance_,
                      "autocorrelation": autocorrelation_,
                      "information_correlation_1": information_correlation_1_,
                      "information_correlation_2": information_correlation_2_
    }
    
    return feature_vector
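
# Usage sketch (hypothetical: the enclosing feature-extraction function is
# defined above this excerpt and assumed here to be callable as
# `glcm_features(image)`):
#
#   from skimage import data
#   feats = glcm_features(data.camera())
#   print(feats["entropy"], feats["information_correlation_2"])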
Example #60
    pass

model.load_weights(args.model)

X = np.zeros((1, height, width, 5))

# a single frame generator keeps reads sequential instead of creating a
# fresh generator for every frame
frames = videodata.nextFrame()

# prime the first four grayscale channels of the 5-frame window,
# shifting intensities from [0, 1] to [-0.5, 0.5]
for c in range(4):
    X[0, :, :, c] = rgb2gray(next(frames)) - 0.5

# two placeholder frames keep the output aligned with the 5-frame input
# window (the first prediction corresponds to the window's centre frame)
writer.writeFrame(img_as_ubyte(np.zeros((height, width))))
writer.writeFrame(img_as_ubyte(np.zeros((height, width))))

try:
    for k, f in enumerate(frames):
        print(k)
        X[0, :, :, 4] = rgb2gray(f)
        X[0, :, :, 4] -= 0.5

        pred = model.predict(X)
        ff = rescale(pred[0, :, :, 0], 2, anti_aliasing=False)

        writer.writeFrame(img_as_ubyte(ff))

        # binary mask, cropped so both dimensions are exact multiples of patch_size
        frame_1 = ff[0:(height // patch_size) * patch_size,
                     0:(width // patch_size) * patch_size] > 0.15