import cv2
from scipy import ndimage

import scan_and_crop  # local helper module providing scan_and_crop.main()


def main(IMAGE_PATH, DESTINATION_PATH):
	# DESTINATION_PATH_ = DESTINATION_PATH.split(".")
	# DESTINATION_PATH_[1] = "npy"
	# DESTINATION_PATH_ = ".".join(DESTINATION_PATH_)

	im = cv2.imread(IMAGE_PATH)
	# rotate_and_save(im,0,DESTINATION_PATH)
	increment_by = 4
	n = 5
	im2 = []
	# im2 = np.load(DESTINATION_PATH_).tolist()
	# print(len(im2))

	for i in range(1, n+1):
		pos_rot_im = ndimage.rotate(im,i*increment_by)
		pos_rot_im = scan_and_crop.main(pos_rot_im)
		im2.append(pos_rot_im)
		neg_rot_im = ndimage.rotate(im,-i*increment_by)
		neg_rot_im = scan_and_crop.main(neg_rot_im)
		im2.append(neg_rot_im)
		# rotate_and_save(im,i*increment_by,DESTINATION_PATH)
		# rotate_and_save(im,-i*increment_by,DESTINATION_PATH)
		# print(len(im2))
	# print(DESTINATION_PATH_)
	# print(len(im2))
	im = scan_and_crop.main(im)
	return [im,im2]
	# cv2.imwrite(DESTINATION_PATH,im2)
Example #2
def find_field_in_stack(stack, x, y, z, yaw, pitch, roll, height, width):
    """ Get a cutout of the given height, width dimensions in the rotated stack at x, y, z.

    :param np.array stack: 3-d stack (depth, height, width)
    :param float x, y, z: Center of field measured as distance from the center in the
        original stack (before rotation).
    :param yaw, pitch, roll: Rotation angles to apply to the field.
    :param height, width: Height and width of the cutout from the stack.

    :returns: A height x width np.array at the desired location.
    """
    # Rotate stack (inverse of intrinsic yaw -> pitch -> roll)
    rotated = ndimage.rotate(stack, yaw, axes=(1, 2), order=1) # yaw(-w)
    rotated = ndimage.rotate(rotated, -pitch, axes=(0, 2), order=1) # pitch(-v)
    rotated = ndimage.rotate(rotated, roll, axes=(0, 1), order=1) # roll(-u)

    # Compute center of field in the rotated stack
    R_inv = np.linalg.inv(create_rotation_matrix(yaw, pitch, roll))
    rot_center = np.dot(R_inv, [x, y, z])
    center_ind = np.array(rotated.shape) / 2 + rot_center[::-1] # z, y, x

    # Crop field (interpolate at the desired positions)
    z_coords = [center_ind[0] - 0.5] # -0.5 is because our (0.5, 0.5) is np.map_coordinates' (0, 0)
    y_coords = np.arange(height) - height / 2 + center_ind[1] # - 0.5 is added by '- height / 2' term
    x_coords = np.arange(width)  - width / 2 + center_ind[2]
    coords = np.meshgrid(z_coords, y_coords, x_coords)
    out = ndimage.map_coordinates(rotated, [coords[0].reshape(-1), coords[1].reshape(-1),
                                            coords[2].reshape(-1)], order=1)
    field = out.reshape([height, width])

    return field
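A minimal smoke test for find_field_in_stack (a sketch added here, not part of the original source). It assumes create_rotation_matrix, defined elsewhere in the same module, returns the 3x3 identity when all angles are zero.

import numpy as np
from scipy import ndimage

stack = np.random.rand(50, 256, 256)    # (depth, height, width)
field = find_field_in_stack(stack, x=10.5, y=-4.0, z=3.2, yaw=0, pitch=0,
                            roll=0, height=128, width=128)
print(field.shape)                      # -> (128, 128)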
Example #3
def scale_image(image, angle, ratio_compression_y_over_x, interpolation_value=0):
    """Corrects for scaling distortions in the image plane, e.g. from astigmatism
    in the optical system. Returns the scaled image with the same rotation."""
    image_rot = rotate(image, angle, order=interpolation_value)
    image_norm = zoom(image_rot, [ratio_compression_y_over_x, 1], order=interpolation_value)
    image_final = rotate(image_norm, -angle, order=interpolation_value)
    return image_final
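A hypothetical call (the angle and compression ratio below are made-up values for illustration); rotate and zoom are the scipy.ndimage functions the snippet relies on.

import numpy as np
from scipy.ndimage import rotate, zoom

image = np.random.rand(256, 256)
corrected = scale_image(image, angle=30.0, ratio_compression_y_over_x=1.05,
                        interpolation_value=1)  # order=1: linear interpolation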
Example #4
    def loci_medfilter(self):
        log = self.log

        if self._file_exists('red','cube_loci_medfilter'): return
        ft = lambda i: '%d:%.2d' % ((time.time()-i)/60,(time.time()-i)%60)
        pa = self.pa
        for chan in ['red','blue']:
            cube =  self._read_fits(chan,'cube_medfilter')

            bp,cube = nt.reset_nans(cube)
            log.info('Applying loci method ...(Can take a while......)')
            ti = time.time()
            cube = loci_subtract(cube, pa)
            log.info('                             '+ft(ti))

            sz = np.shape(cube)
            for i in range(sz[0]):
                log.info('rotating cube_medfilter['+str(i+1)+'/'+str(sz[0])+'] %.2f'%-pa[i])
                cube[i,:,:] = nd.rotate (cube[i,:,:],pa[i], reshape=False)
                bp[i,:,:] = nd.rotate (bp[i,:,:],pa[i], reshape=False)
                #cube[i,:,:] = nd.rotate (cube[i,:,:],-pa[i], reshape=False)
                #bp[i,:,:] = nd.rotate (bp[i,:,:],-pa[i], reshape=False)
            cube = nt.restore_nans(cube,bp)

            self._write_fits(cube,chan,'cube_loci_medfilter')
            self._write_fits(np.median(cube,axis=0),chan,'loci_medfilter_medcrunch')
            fs = lambda cube: np.sum(cube,axis=0) / np.sum(np.isfinite(cube),axis=0)
            self._write_fits(fs(cube),chan,'loci_medfilter_sumcrunch')
            del cube,bp
Example #5
    def loci_sdi(self):
        log=self.log

        ft = lambda i: '%d:%.2d' % ((time.time()-i)/60,(time.time()-i)%60)
        if self._file_exists('','cube_loci_sdi'): return
        pa = self.pa
        cube =  self._read_fits('','cube_sdi')
        #cube = self.cube_diff
        sz = np.shape(cube)
        bp,cube = nt.reset_nans(cube)
        log.info('Applying loci_subtract...(Can take a while......)')
        ti = time.time()
        cube = loci_subtract(cube, self.pa)
        #bp = loci_subtract(bp, self.pa)
        log.info('                              '+ft(ti))

        for i in range(sz[0]):
            log.info('rotating cube_diff['+str(i+1)+'/'+str(sz[0])+'] %.2f'%-pa[i])
            cube[i,:,:] = nd.rotate (cube[i,:,:],pa[i], reshape=False)
            bp[i,:,:] = nd.rotate (bp[i,:,:],pa[i], reshape=False)
            #cube[i,:,:] = nd.rotate (cube[i,:,:],-pa[i], reshape=False)
            #bp[i,:,:] = nd.rotate (bp[i,:,:],-pa[i], reshape=False)

        cube = nt.restore_nans(cube,bp)
        self._write_fits(cube,'','cube_loci_sdi')
        self._write_fits(np.median(cube,axis=0),'','loci_sdi_medcrunch')
        fs = lambda cube: np.sum(cube,axis=0) / np.sum(np.isfinite(cube),axis=0)
        self._write_fits(fs(cube),'','loci_sdi_sumcrunch')
Example #6
    def _rotate_cube(self,cube):

        log =  self.log
        pa =   self.pa
        sz = np.shape(cube)
        # Now rotate this cube
        to=time.time()
        for k in range(sz[0]):
            ti=time.time()
            bp,cube[k,:,:] = nt.reset_nans(cube[k,:,:])
            cube[k,:,:] = nd.rotate (cube[k,:,:], pa[k], reshape=False)
            #cube[k,:,:] = nd.rotate (cube[k,:,:], -pa[k], reshape=False)

            #Now rotate the bp matrix 
            #bp = nd.rotate (bp, -pa[k], reshape=False)
            bp = nd.rotate (bp, pa[k], reshape=False)

            # And restore the Nans to cube
            cube[k,:,:] = nt.restore_nans(cube[k,:,:],bp)

            #_form_line('rotating ,k,nk,to,t1,t2,pa[k])
            t = time.time()
            line = 'rotating ['+str(k+1)+'/'+str(sz[0])+']'
            log.info(line+'%.2f %.2f %.2f' % (pa[k],t-ti,t-to))

        return cube
Example #7
 def rotate_images(self, data, angle=-1):
     if angle == -1:
         angle = self.rng.random_integers(360)
     if data.ndim <= 2:
         data = ndimage.rotate(data, angle=angle)
     else:
         for i in range(data.shape[0]):
             data[i] = ndimage.rotate(data[i], angle=angle)
     return data
Example #8
def testRigidTransformationMultiscale(dTheta, displacement, level):
    inImg=misc.imread('T2sample.png')[...,0]
    left=ndimage.rotate(inImg, -0.5*dTheta)  # rotate symmetrically to ensure both stay the same size
    right=ndimage.rotate(inImg, 0.5*dTheta)  # rotate symmetrically to ensure both stay the same size
    right=ndimage.affine_transform(right, np.eye(2), offset=-1*displacement)
    rightPyramid=[i for i in transform.pyramid_gaussian(right, level)]
    leftPyramid=[i for i in transform.pyramid_gaussian(left, level)]
    rcommon.plotPyramids(leftPyramid, rightPyramid)
    beta=estimateRigidTransformationMultiscale(leftPyramid, rightPyramid)
    print(180.0*beta[0]/np.pi, beta[1:3])
    return beta
Example #9
def testRigidTransformEstimation(inImg, level, dTheta, displacement, thr):
    left=ndimage.rotate(inImg, dTheta)
    right=ndimage.rotate(inImg, -dTheta)
    left=ndimage.affine_transform(left , np.eye(2), offset=-1*displacement)
    right=ndimage.affine_transform(right, np.eye(2), offset=displacement)
    
    rightPyramid=[i for i in transform.pyramid_gaussian(right, level)]
    leftPyramid=[i for i in transform.pyramid_gaussian(left, level)]
    sel=level
    beta=estimateRigidTransformation(leftPyramid[sel], rightPyramid[sel], 2.0*dTheta, thr)
    return beta
Example #10
def oversample(I):
    samples = []
    samples.append(I)
    samples.append(ndimage.rotate(I, 90))
    samples.append(ndimage.rotate(I, 180))
    samples.append(ndimage.rotate(I, 270))
    samples.append(np.fliplr(I))
    samples.append(np.flipud(I))
    samples.append(np.fliplr(samples[1]))
    samples.append(np.flipud(samples[1]))
    return samples
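Since 90-degree rotations need no interpolation, the same 8-fold augmentation can be built from exact, cheaper array ops; a sketch equivalent up to the rotation-direction convention, not the author's code:

import numpy as np

def oversample_fast(I):
    rots = [np.rot90(I, k) for k in range(4)]   # 0, 90, 180, 270 degrees
    return rots + [np.fliplr(I), np.flipud(I),
                   np.fliplr(rots[1]), np.flipud(rots[1])]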
Example #11
def align_image_to_ellipse(coeffs, image):
    """
    Given the coefficients of an ellipse in 2D and a binary 
    image, return the angle required to align the image to the
    principal axes of the ellipse (with the longest axis
    as the first major 'hump' on the left).
    """
    
    coeff_a, coeff_b, coeff_c = coeffs[:3]
        
    # Calculate tan(angle) for the angle of rotation of the major axis
    preangle = coeff_b / (coeff_a - coeff_c)
    
    if not np.isinf(preangle):
        # Take the arctan and convert to degrees, which is what 
        # ndimage.rotate uses.
        angle = radians_to_degrees(-0.5 * np.arctan(preangle))
        
        # Order = 0 prevents interpolation from being done and screwing 
        # with our object boundaries.
        rotated = ndimage.rotate(image, angle, order=0)
        
        # Pull out the height/width of just the object.
        try:    
            height, width = rotated[ndimage.find_objects(rotated)[0]].shape
        except IndexError:
            raise EllipseAlignmentError("Can't find object after " \
                + "initial rotation.")
    else:
        angle = 0.
        height, width = image.shape
    
    # we want the height (first axis) to be the major axis.
    if width > height:
        angle -= 90.0
        rotated = ndimage.rotate(image, angle, order=0)
    
    # Correct so that in budding cells, the "major" hump always
    # comes first.
    if np.argmax(rotated.sum(axis=1)) > rotated.shape[0] // 2:
        angle -= 180.0
        rotated = ndimage.rotate(image, angle, order=0)
    
    # Do a find_objects on the resultant array after rotation in 
    # order to _just_ get the object and not any of the extra 
    # space that's been added.
    try:
        bounds = ndimage.find_objects(rotated)[0]
    except IndexError:
        raise EllipseAlignmentError("Can't find object after final rotation.")
    
    return rotated[bounds], angle
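For reference, preangle implements the standard major-axis orientation of the conic a*x^2 + b*x*y + c*y^2 + ... = 0, namely tan(2*theta) = b / (a - c), so theta = 0.5 * arctan(b / (a - c)); the sign flip and the conversion to degrees that ndimage.rotate expects happen in radians_to_degrees(-0.5 * np.arctan(preangle)).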
Example #12
def generate_overlay_image(ct_slice, labelmap_slice, window_width=None, window_level=None, opacity=None):
    """ This function that takes as input a 2D ct slice and a labelmap and 
    returns an overlay
 
    Parameters
    ----------
    ct_image : array, shape ( N, N )
        A description
    
    labelmap_slice : array, shape ( N, N )
        A description
    
    window_width : int
    
    window_level : int
    
    opacity : float. Not yet implemented
    
    
    Returns
    -------
    overlay : array, shape ( N, N, 3)
        Array with the r,g,b, pixel values of the overlayed image
    
    """
    assert ct_slice.shape[0] == labelmap_slice.shape[0], "CT slice and label disagree in dimension"
    assert ct_slice.shape[1] == labelmap_slice.shape[1], "CT slice and label disagree in dimension"

    length = labelmap_slice.shape[0]
    width = labelmap_slice.shape[1]

    overlayed_image = np.squeeze(ndimage.rotate(labelmap_slice, -90))
    ct_image = np.squeeze(ndimage.rotate(ct_slice, -90))

    # shift all values to positive
    min_ct = float(np.min(ct_image))
    ct_image = ct_image - min_ct

    if window_width is None:
        max_ct = float(np.max(ct_image))
        window_width = max_ct
        window_level = max_ct / 2
    else:
        window_level = window_level - min_ct

    rgbArray = np.zeros((length, width, 3), "uint8")
    grey_val = (ct_image.astype(float) * 256.0 / (window_width + window_level)).astype(np.uint8)
    rgbArray[..., 0] = rgbArray[..., 1] = rgbArray[..., 2] = grey_val
    color_val = overlayed_image > 0
    rgbArray[color_val, 0] = 255

    return rgbArray
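A hypothetical call on synthetic data; when window_width is None, the window defaults are derived from the slice itself, as in the branch above.

import numpy as np
from scipy import ndimage

ct = np.random.randint(-1000, 1000, size=(64, 64)).astype(float)
labels = (np.random.rand(64, 64) > 0.9).astype(int)
rgb = generate_overlay_image(ct, labels)    # -> (64, 64, 3) uint8 overlay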
Example #13
def rotate_images(pixel_array, mask):

    param = (np.random.randint(-15,15))
    pixel_array = rotate(pixel_array, axes=(2,1), angle=param, reshape=False) 
    mask = rotate(mask, axes=(0,1), angle=param, reshape=False)

    rotated_data = {
        "pixel_array" : pixel_array,
        "mask" : mask, 
        "params" : param
    }
    
    return rotated_data
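A hedged usage sketch: the axes arguments above imply pixel_array is a 3-D volume rotated in its (H, W) plane (axes 2 and 1), while mask is a single 2-D slice (axes 0 and 1).

import numpy as np
from scipy.ndimage import rotate

volume = np.random.rand(16, 64, 64)     # (slices, H, W)
mask = np.random.rand(64, 64)           # (H, W)
out = rotate_images(volume, mask)
print(out["params"])                    # the sampled angle, in degrees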
Example #14
	def transformImage(self, image, aligndata, refimage):
		alignedimage = image
		shift = (aligndata['xshift'], aligndata['yshift'])
		alignedimage = ndimage.shift(alignedimage, shift=shift, mode='wrap', order=1)
		### due to the nature of Radon shifts, it cannot tell the difference between 0 and 180 degrees
		alignedimage1 = ndimage.rotate(alignedimage, -aligndata['rotangle'], reshape=False, order=1)
		alignedimage2 = ndimage.rotate(alignedimage, -aligndata['rotangle']+180, reshape=False, order=1)
		cc1 = self.getCCValue(alignedimage1, refimage)
		cc2 = self.getCCValue(alignedimage2, refimage)
		if cc1 > cc2:
			return alignedimage1
		else:
			return alignedimage2
Example #15
    def loci_asdi(self):
        """
          Combine sdi and adi method on the shift_medfiltered cubes
        """
        log = self.log

        if self._file_exists('','asdi_medcrunch'): return

        ft = lambda i: '%d:%.2d' % ((time.time()-i)/60,(time.time()-i)%60)
        pa = self.pa
        # in case l1,l2 are not defined.
        #self._get_filters_scaling(self.idir+self.inputs[0])
        l1 =      self.l1
        l2 =      self.l2
        
        cube_red = self._read_fits(('blue','red')[l1 < l2],'cube_shift_medfilter')
        cube_blue = self._read_fits(('red','blue')[l1 < l2],'cube_shift_medfilter')

        # Lets not use bpr because we run out of memory
        #bpr,cube_red = nt.reset_nans(cube_red)
        cube_red = np.nan_to_num(cube_red)
        log.info('Applying loci method ...(Can take a while......)')
        ti = time.time()
        # 
        cube_red = loci_subtract(cube_red, pa, np.nan_to_num(cube_blue))
        log.info('                             '+ft(ti))
        del cube_blue

        #cube_red = nt.restore_nans(cube_red,bpr)
        self._write_fits(cube_red,'','cube_asdi')

        #bpr,cube_red = nt.reset_nans(cube_red)
        line = 'rotating combined sdi,adi cube'
        cube_red2=cube_red.copy()
        sz = np.shape(cube_red2)
        for i in range(sz[0]):
            log.info(line+'['+str(i+1)+'/'+str(sz[0])+']')
            cube_red[i,:,:] = nd.rotate (cube_red[i,:,:], pa[i], reshape=False)
            #cube_red[i,:,:] = nd.rotate (cube_red[i,:,:], -pa[i], reshape=False)

        #cube_red = nt.restore_nans(cube_red,bpr)
        self._write_fits(np.median(cube_red,axis=0),'','asdi_medcrunch')
        del cube_red

        for i in range(sz[0]):
            #Do counter rotate (only noise)
            cube_red2[i,:,:] = nd.rotate (cube_red2[i,:,:], -pa[i], reshape=False)
            #cube_red2[i,:,:] = nd.rotate (cube_red2[i,:,:], pa[i], reshape=False)

        self._write_fits(np.median(cube_red2,axis=0),'','asdi_counter_medcrunch')
        del cube_red2
Example #16
def plot_overlays(atlas, b0, cmaps):
    plt.rcParams.update({'axes.labelsize': 'x-large',
                         'axes.titlesize': 'x-large'})

    if b0.shape == (182, 218, 182):
        x = [78, 90, 100]
        y = [82, 107, 142]
        z = [88, 103, 107]
    else:
        shap = b0.shape
        x = [int(shap[0]*0.35), int(shap[0]*0.51), int(shap[0]*0.65)]
        y = [int(shap[1]*0.35), int(shap[1]*0.51), int(shap[1]*0.65)]
        z = [int(shap[2]*0.35), int(shap[2]*0.51), int(shap[2]*0.65)]
    coords = (x, y, z)

    labs = ['Sagittal Slice (YZ fixed)',
            'Coronal Slice (XZ fixed)',
            'Axial Slice (XY fixed)']
    var = ['X', 'Y', 'Z']
    # create subplot for first slice
    # and customize all labels
    idx = 0
    for i, coord in enumerate(coords):
        for pos in coord:
            idx += 1
            ax = plt.subplot(3, 3, idx)
            ax.set_title(var[i] + " = " + str(pos))
            if i == 0:
                image = ndimage.rotate(b0[pos, :, :], 90)
                atl = ndimage.rotate(atlas[pos, :, :], 90)
            elif i == 1:
                image = ndimage.rotate(b0[:, pos, :], 90)
                atl = ndimage.rotate(atlas[:, pos, :], 90)
            else:
                image = b0[:, :, pos]
                atl = atlas[:, :, pos]

            if idx % 3 == 1:
                ax.set_ylabel(labs[i])
                ax.yaxis.set_ticks([0, image.shape[0]/2, image.shape[0] - 1])
                ax.xaxis.set_ticks([0, image.shape[1]/2, image.shape[1] - 1])

            min_val, max_val = get_min_max(image)
            plt.imshow(atl, interpolation='none', cmap=cmaps[0], alpha=0.5)
            plt.imshow(image, interpolation='none', cmap=cmaps[1], alpha=0.5,
                       vmin=min_val, vmax=max_val)

    fig = plt.gcf()
    fig.set_size_inches(12.5, 10.5, forward=True)
    return fig
Example #17
def warp_ellipse_to_circle(cube, a, b, pa, stop_if_huge=True):
    '''
    Warp a SpectralCube such that the given ellipse is a circle in the
    warped frame.

    Since you should **NOT** be doing this with a large cube, we're going
    to assume that the given cube is a subcube centered in the middle of the
    cube.

    This requires a rotation, then scaling. The equivalent matrix is:
        [ b cos PA   b sin PA]
        [-a sin PA   a cos PA]

    '''

    if cube._is_huge:
        if stop_if_huge:
            raise Warning("The cube has the huge flag enabled. Disable "
                          "'stop_if_huge' if you would like to continue "
                          "anyways with the warp.")
        else:
            warn("The cube has the huge flag enabled. This may use a lot "
                 "of memory!")

    # Let NaNs be 0
    data = cube.with_fill_value(0.0).filled_data[:].value

    warped_array = []

    for i in range(cube.shape[0]):
        warped_array.append(nd.zoom(nd.rotate(data[i], np.rad2deg(-pa)),
                                    (1, a / b)))

    warped_array = np.array(warped_array)

    # We want to mask outside of the original bounds
    mask = np.ones(data.shape[1:])
    warp_mask = \
        np.isclose(nd.zoom(nd.rotate(mask, np.rad2deg(-pa)),
                           (1, a / b)), 1)

    # There's probably a clever way to transform the WCS, but all the
    # solutions appear to need pyast/starlink. The output of the warp should
    # give a radius of b and the spectral dimension is unaffected.
    # Also this is hidden and users won't be able to use this weird cube
    # directly.
    warped_cube = SpectralCube(warped_array * cube.unit, cube.wcs)
    warped_cube = warped_cube.with_mask(warp_mask)

    return warped_cube
Example #18
def simple_adi(img_cube, img_params):
    median = np.median(img_cube, axis=0)
    rotation = [param[1][-1] for param in img_params]
    I = []
    I_without_sub = []
    img_cube2 = copy.deepcopy(img_cube)
    for (img, angle) in zip(img_cube, rotation):
        derotated_without_sub = ndimage.rotate(img, math.degrees(angle), reshape=False)
        derotated = ndimage.rotate(img-median, math.degrees(angle), reshape=False)
        I.append(derotated)
        I_without_sub.append(derotated_without_sub)

    #plotfast.image(np.array(img_cube))
    I = np.median(np.array(I), axis=0)
    return I
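A synthetic run of simple_adi; img_params here is a made-up structure whose param[1][-1] slot carries the parallactic angle in radians, matching how the function unpacks it above.

import copy
import math

import numpy as np
from scipy import ndimage

cube = np.random.rand(10, 64, 64)
params = [(None, (0.0, 0.0, math.radians(a))) for a in np.linspace(0, 30, 10)]
residual = simple_adi(cube, params)     # median-subtracted, derotated stack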
Example #19
def rand_aug(im,z):
    
    ang = np.float32(np.random.random(1)*10.0-5.0); ang = ang[0]
    rgb = np.float32(np.random.random(3)*0.4+0.8);
    ctrst = np.float32(np.random.random(1)+0.5); ctrst = ctrst[0]

    im = rotate(im,ang,reshape=False,mode='nearest',order=1).copy()
    z = rotate(z,ang,reshape=False,mode='nearest',order=1).copy()

      
    for j in range(3):
        im[:,:,j] = im[:,:,j] * rgb[j]

    im[...] = np.minimum(255.0,(im[...] ** ctrst) * (255.0**(1.0-ctrst)))
    return im,z
Example #20
def boxMaskStack(bmstackf, partdatas, box, xmask, ymask, falloff, imask=None, norotate=False):
        from appionlib.apSpider import operations
        from appionlib import apEMAN
        import os

        # create blank image for mask using SPIDER
        maskfile = "boxmask.spi"
        operations.createBoxMask(maskfile,box,xmask,ymask,falloff,imask)

        # convert mask to MRC
        apEMAN.executeEmanCmd("proc2d boxmask.spi boxmask.mrc",verbose=False,showcmd=False)
        os.remove("boxmask.spi")

        maskarray = mrc.read("boxmask.mrc")

        # box particles
        maskedparts = []
        for i in range(len(partdatas)):
                if norotate is True:
                        rotatemask = maskarray
                else:
                        angle = (-partdatas[i]['angle'])-90
                        rotatemask = ndimage.rotate(maskarray, angle=angle, reshape=False, order=1)
                maskedparts.append(rotatemask)

        # write to stack
        apImagicFile.writeImagic(maskedparts, bmstackf)
        os.remove("boxmask.mrc")
        return bmstackf
Example #21
def create_transformed_array(array_original, reflectz, reflecty, reflectx, swapxy, angle, rotation_order, scaling_factor=None):
    array = array_original
    if array.ndim > 3:
        try:
            array = array.reshape(array.shape[-3:])
        except ValueError:
            raise ValueError("Can't transform ndarray with more than 3 dimensions with length > 1. "
                             "This array's shape is {}".format(array.shape))
    if scaling_factor is not None:
        array = array * scaling_factor
    if reflectz:
        array = array[::-1, :, :]
    if reflecty:
        array = array[:, ::-1, :]
    if reflectx:
        array = array[:, :, ::-1]
    if swapxy:
        array = array.transpose((0, 2, 1))
    if angle > 0:
        new_array = ndimage.rotate(array.astype(np.float32),
                                   angle,
                                   axes=(1, 2),
                                   order=rotation_order,
                                   cval=0)
    else:
        new_array = array
    if new_array.dtype != array_original.dtype:
        print('dtype mismatch: new_array.dtype = {0}, array_original.dtype = {1}'
              .format(new_array.dtype, array_original.dtype
                      ))
    return new_array
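An illustrative call (all argument values are made up): reflect in z, swap the x/y axes, then rotate 15 degrees in the (y, x) plane with linear interpolation.

import numpy as np
from scipy import ndimage

volume = np.random.rand(1, 32, 64, 64).astype(np.float32)  # leading singleton is reshaped away
out = create_transformed_array(volume, reflectz=True, reflecty=False,
                               reflectx=False, swapxy=True, angle=15,
                               rotation_order=1)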
Example #22
def largest_region(imData):

    belowMeanFilter = np.where(imData > np.mean(imData), 0., 1.0)
    dilated = morphology.dilation(belowMeanFilter, np.ones((3, 3)))
    regionLabels = (belowMeanFilter * measure.label(dilated)).astype(int)

    # calculate common region properties for each region within the segmentation
    regions = measure.regionprops(regionLabels)
    areas = [(None
              if sum(belowMeanFilter[regionLabels == region.label]) * 1.0 / region.area < 0.50
              else region.filled_area)
             for region in regions]

    if len(areas) > 0:

        regionMax = regions[np.argmax(areas)]

        # trim image to the max region
        regionMaxImg = trim_image(
            np.minimum(
                imData*np.where(regionLabels == regionMax.label, 1, 255),
                255))

        # rotate
        angle = intertial_axis(regionMaxImg)[2]
        rotatedRegionMaxImg = ndimage.rotate(regionMaxImg, np.degrees(angle))
        rotatedRegionMaxImg = trim_image(trim_image(rotatedRegionMaxImg, 0), 255)

    else:
        regionMax = None
        rotatedRegionMaxImg = None
        angle = 0

    return regionMax, rotatedRegionMaxImg, angle, regionLabels, regions, areas, belowMeanFilter, dilated
Example #23
def plot_rotation(imData):

    def plot_bars(x_bar, y_bar, angle):
        def plot_bar(r, x_center, y_center, angle, pattern):
            dx = r * np.cos(angle)
            dy = r * np.sin(angle)
            plt.plot([x_center - dx, x_center, x_center + dx],
                     [y_center - dy, y_center, y_center + dy], pattern)

        plot_bar(10, x_bar, y_bar, angle + np.radians(90), 'bo-')
        plot_bar(30, x_bar, y_bar, angle, 'ro-')

    def plot_subplot(pawprint):
        x_bar, y_bar, angle = intertial_axis(pawprint)
        plt.imshow(pawprint, cmap=cm.gray)
        plot_bars(x_bar, y_bar, angle)
        return angle

    plt.figure()
    angle = plot_subplot(imData)
    plt.title('Original')

    plt.figure()
    plot_subplot(ndimage.rotate(imData, np.degrees(angle)))
    plt.title('Rotated')
Example #24
    def shared_dataset(data_xy, augment=False, borrow=True):
        data_x, data_y = data_xy

        data_x_npy = numpy.asarray(data_x, dtype=theano.config.floatX)
        data_y_npy = numpy.asarray(data_y, dtype=theano.config.floatX)

        if augment:
            data_x_npy = numpy.concatenate((data_x_npy, data_x_npy))
            data_y_npy = numpy.concatenate((data_y_npy, data_y_npy))

            for x in range(data_x_npy.shape[0] // 2):
                sqrt = int(numpy.sqrt(data_x_npy.shape[1]))
                rot = random.uniform(-5, 5)
                rotated = numpy.reshape(data_x_npy[x, :], (sqrt, sqrt))
                rotated = srot.rotate(rotated, rot)
                rotated = rotated[(rotated.shape[0] - sqrt) // 2: (rotated.shape[0] - sqrt) // 2 + sqrt,
                          (rotated.shape[1] - sqrt) // 2: (rotated.shape[1] - sqrt) // 2 + sqrt]
                data_x_npy[x, :] = numpy.reshape(rotated, (sqrt * sqrt,))

        print(data_x_npy.shape)

        # add required 1 vector for bias
        # shared_x = theano.shared(numpy.hstack((data_x_npy,
        #                                       numpy.ones((data_x_npy.shape[0], 1), dtype=tensor.dscalar()))),
        #                          borrow=borrow)
        shared_x = theano.shared(data_x_npy, borrow=borrow)

        shared_y = theano.shared(data_y_npy, borrow=borrow)

        return shared_x, tensor.cast(shared_y, 'int32')
Example #25
def clip_image(image,roi=[],rotangle=0.0,cp=False):
    """
    Clip an image given the roi

    Parameters:
    -----------
    * roi is a list [c1,r1,c2,r2] = [x1,y1,x2,y2]
      Note take x as the horizontal image axis index (col index), 
      and y as vertical axis index (row index).
      Therefore, in image indexing:
         image[y1:y2,x1:x2] ==> rows y1 to y2 and cols x1 to x2
    * rotangle is the angle to rotate the image by
    * cp is flag to indicate if a new copy of the image is generated
    
    """
    if len(roi) != 4:
        [c1,r1,c2,r2] = [0, 0, image.shape[1], image.shape[0]]
    else:
        [c1,r1,c2,r2] = _sort_roi(roi)
    # rotate
    if rotangle != 0:
        image = ndimage.rotate(image,rotangle)    

    if cp:
        return copy.copy(image[r1:r2, c1:c2])
    else:
        return image[r1:r2, c1:c2]
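A hypothetical call: rotate by 2 degrees, then clip a 100 x 80 window whose top-left corner sits at (x, y) = (20, 10); remember the roi order is [x1, y1, x2, y2]. It assumes the module's _sort_roi helper (not shown above) returns the roi with corners ordered.

import copy

import numpy as np
from scipy import ndimage

patch = clip_image(np.random.rand(256, 256), roi=[20, 10, 120, 90],
                   rotangle=2.0, cp=True)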
Example #26
  def _transform(self, images_org):
        
    if self.image_options["crop"]:
        resize_size = int(self.image_options["resize_size"])
        y  = np.random.permutation(range(resize_size//2, images_org.shape[0]-resize_size//2))
        y = int(y[0])
        y1 = int(y - resize_size/2.0)
        y2 = int(y + resize_size/2.0)
        
        x  = np.random.permutation(range(resize_size//2, images_org.shape[1]-resize_size//2))
        x = int(x[0])
        x1 = int(x - resize_size/2.0)
        x2 = int(x + resize_size/2.0)
        
        image = images_org[y1:y2, x1:x2,...]
        
    if self.image_options["resize"]:
        resize_size = int(self.image_options["resize_size"])
        image = misc.imresize(image, [resize_size, resize_size], interp='nearest')
        
    if self.image_options["flip"]:
        if(np.random.rand()<.5):
            image = image[::-1,...]
            
        if(np.random.rand()<.5):
            image = image[:,::-1,...]
            
    if self.image_options["rotate_stepwise"]:
            if(np.random.rand()>.25): # skip "0" angle rotation
                angle = int(np.random.permutation([1,2,3])[0] * 90)
                image = rotate(image, angle, reshape=False)

    return np.array(image)
Example #27
def morph_mean(src, angle):
    dst = ndimage.rotate(src, angle)
    strel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 16))
    dst = cv2.morphologyEx(dst, cv2.MORPH_OPEN, strel)
    mean = cv2.mean(dst)[0]
    print(mean)
    return mean
Example #28
def boxerRotate(imgfile, parttree, outstack, boxsize):
        """
        boxes the particles with expanded size,
        applies a rotation to particle,
        reduces boxsize to requested size,
        and saves them to a imagic file
        """
        # size needed is sqrt(2)*boxsize, using 1.5 to be extra safe
        bigboxsize = int(math.ceil(1.5*boxsize))
        imgarray = mrc.read(imgfile)
        bigboxedparticles = boxerMemory(imgarray, parttree, bigboxsize)
        
        boxedparticles = []
        boxshape = (boxsize,boxsize)
        apDisplay.printMsg("Rotating particles...")
        for i in range(len(bigboxedparticles)):
                if i % 10 == 0:
                        sys.stderr.write(".")
                bigboxpart = bigboxedparticles[i]
                partdict = parttree[i]
                ### add 90 degrees because database angle is from x-axis not y-axis
                angle = partdict['angle']+90.0
                rotatepart = ndimage.rotate(bigboxpart, angle=angle, reshape=False, order=1)
                boxpart = imagefilter.frame_cut(rotatepart, boxshape)
                boxedparticles.append(boxpart)
        sys.stderr.write("done\n")
        apImagicFile.writeImagic(boxedparticles, outstack)
        return True
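Why sqrt(2): a boxsize x boxsize square rotated by up to 45 degrees has a bounding box of boxsize*sqrt(2) on a side, so the 1.5 factor adds a safety margin on top; a quick check of the arithmetic:

import math

boxsize = 128
print(math.sqrt(2) * boxsize)           # ~181.02, the worst-case bounding box
print(int(math.ceil(1.5 * boxsize)))    # 192, the padded size actually used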
Example #29
def HOGpicture(w, bs, positive=True):
	# w = HOG feature array (rows x cols x 9 orientations), bs = glyph size in pixels

	# construct a "glyph" for each orientation: a centered vertical bar
	bim1 = np.zeros([bs, bs])
	bim1[:, round(bs/2):round(bs/2)+1] = 1

	bim = np.zeros([bim1.shape[0], bim1.shape[1], 9])
	bim[:, :, 0] = bim1
	for i in range(1, 9):
		bim[:, :, i] = nd.rotate(bim1, -i*20, reshape=False)

	# make a picture of positive (or negative) weights by adding up weighted glyphs
	shape_ = w.shape
	if positive:
		w[w < 0] = 0
	else:
		w[w > 0] = 0
	im = np.zeros([bs*shape_[1], bs*shape_[0]])
	for i in range(shape_[0]):
		for j in range(shape_[1]):
			for k in range(9):
				im[j*bs:(j+1)*bs, i*bs:(i+1)*bs] += bim[:, :, k]*w[i, j, k]

	return im
Example #30
def lena_example():
    im = rotated_lena()

    # find corners on grayscale image
    # use negative so that boundary is 0
    bw = im[:, :, 0]
    bw = 255 - bw

    corners, outline = guess_corners(bw)
    angle = mean_rotation(corners) * 180./np.pi
    im2 = clear_border(im, outline)
    im2 = ndimage.rotate(im2, angle, reshape=False, mode='nearest')
    cropped = keyboard_crop(im2)

    fig, (ax1, ax2, ax3) = plt.subplots(3)
    ax1.imshow(im)
    ax1.plot(corners[:, 1], corners[:, 0], 'o')
    ax2.imshow(im2)
    ax3.imshow(cropped)

    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    ax1.imshow(cropped[:20, :20])
    ax2.imshow(cropped[:20, -20:])
    ax3.imshow(cropped[-20:, :20])
    ax4.imshow(cropped[-20:, -20:])
    return cropped
Example #31
def rotation(Pfad, image_path, lower, upper):
    image = cv2.imread(Pfad + image_path)
    # create border
    row, col = image.shape[:2]
    bottom = image[0:row, 0:col]
    mean = cv2.mean(bottom)[0]
    bordersize = 5
    border = cv2.copyMakeBorder(image,
                                top=bordersize,
                                bottom=bordersize,
                                left=bordersize,
                                right=bordersize,
                                borderType=cv2.BORDER_CONSTANT,
                                value=[mean, mean, mean])
    ## edge detection on image with border
    gray = cv2.cvtColor(border, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)
    # find angle
    cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
    # contour with the largest area is possibly the plate
    max_area = 0
    max_cnt = None
    for cnt in cnts:
        area = cv2.contourArea(cnt)
        if (area > max_area):
            max_area = area
            max_cnt = cnt
    if max_cnt is not None:
        min_rect = cv2.minAreaRect(max_cnt)
        (midpoint, widthheight, angle) = min_rect
        # Get the image size
        # NumPy stores image matrices backwards
        image_size = (border.shape[1], border.shape[0])
        image_center = tuple(np.array(image_size) / 2)

        # Convert the OpenCV 3x2 rotation matrix to 3x3
        rot_mat = np.vstack(
            [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])
        rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
        # Shorthand for below calcs
        image_w2 = image_size[0] * 0.5
        image_h2 = image_size[1] * 0.5
        # Obtain the rotated coordinates of the image corners
        rotated_coords = [
            (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
            (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
            (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
            (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]
        ]

        # Find the size of the new image
        x_coords = [pt[0] for pt in rotated_coords]
        x_pos = [x for x in x_coords if x > 0]
        x_neg = [x for x in x_coords if x < 0]

        y_coords = [pt[1] for pt in rotated_coords]
        y_pos = [y for y in y_coords if y > 0]
        y_neg = [y for y in y_coords if y < 0]

        right_bound = max(x_pos)
        left_bound = min(x_neg)
        top_bound = max(y_pos)
        bot_bound = min(y_neg)

        new_w = int(abs(right_bound - left_bound))
        new_h = int(abs(top_bound - bot_bound))

        # translation matrix to keep the image centred
        trans_mat = np.matrix([[1, 0, int(new_w * 0.5 - image_w2)],
                               [0, 1, int(new_h * 0.5 - image_h2)], [0, 0, 1]])

        # Compute the transform for the combined rotation and translation
        affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]

        # Apply the transform (plates tilted left give angles in [-45, 0]; plates tilted right give angles in [-90, -45))
        rotation1 = cv2.warpAffine(image,
                                   affine_mat, (new_w, new_h),
                                   flags=cv2.INTER_LINEAR)
        # correction of rotation for -45 > angle >= -90
        rotation2 = ndimage.rotate(rotation1, 90)
        # if the angle falls outside the given bounds, keep the original image in the pipeline
        if (lower >= angle >= -45.0):
            rotated_image = rotation1
            rotated_image = Image.fromarray(rotated_image)
            rotated_image = rotated_image.convert('L')
            rotated_image.save(Pfad + image_path)
        elif (-45 > angle >= upper):
            rotated_image = rotation2
            rotated_image = Image.fromarray(rotated_image)
            rotated_image = rotated_image.convert('L')
            rotated_image.save(Pfad + image_path)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cv2.imwrite(Pfad + image_path, image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.imwrite(Pfad + image_path, image)
Example #32
def plot_vehicle_nice_mv(ax, predictions, dt, max_hl=10, ph=6, map=None, x_min=0, y_min=0):
    prediction_dict, histories_dict, futures_dict = prediction_output_to_trajectories(predictions,
                                                                                      dt,
                                                                                      max_hl,
                                                                                      ph,
                                                                                      map=map)
    assert (len(prediction_dict.keys()) <= 1)
    if len(prediction_dict.keys()) == 0:
        return
    ts_key = list(prediction_dict.keys())[0]

    prediction_dict = prediction_dict[ts_key]
    histories_dict = histories_dict[ts_key]
    futures_dict = futures_dict[ts_key]

    if map is not None:
        ax.imshow(map.fdata, origin='lower', alpha=0.5)

    cmap = ['k', 'b', 'y', 'g', 'r']
    line_alpha = 0.7
    line_width = 0.2
    edge_width = 2
    circle_edge_width = 0.5
    node_circle_size = 0.3
    a = []
    i = 0
    node_list = sorted(histories_dict.keys(), key=lambda x: x.id)
    for node in node_list:
        h = node.get(np.array([ts_key]), {'heading': ['°']})[0, 0]
        history_org = histories_dict[node] + np.array([x_min, y_min])
        history = histories_dict[node] + np.array([x_min, y_min]) + 5 * np.array([np.cos(h), np.sin(h)])
        future = futures_dict[node] + np.array([x_min, y_min]) + 5 * np.array([np.cos(h), np.sin(h)])
        predictions = prediction_dict[node] + np.array([x_min, y_min]) + 5 * np.array([np.cos(h), np.sin(h)])
        if node.type.name == 'VEHICLE':
            for t in range(predictions.shape[2]):
                sns.kdeplot(predictions[0, :, t, 0], predictions[0, :, t, 1],
                            ax=ax, shade=True, shade_lowest=False,
                            color=line_colors[i % len(line_colors)], zorder=600, alpha=1.0)

            r_img = rotate(cars[i % len(cars)], node.get(np.array([ts_key]), {'heading': ['°']})[0, 0] * 180 / np.pi,
                           reshape=True)
            oi = OffsetImage(r_img, zoom=0.08, zorder=700)
            veh_box = AnnotationBbox(oi, (history_org[-1, 0], history_org[-1, 1]), frameon=False)
            veh_box.zorder = 700
            ax.add_artist(veh_box)
            i += 1
        else:

            for t in range(predictions.shape[2]):
                sns.kdeplot(predictions[:, t, 0], predictions[:, t, 1],
                            ax=ax, shade=True, shade_lowest=False,
                            color='b', zorder=600, alpha=0.8)

            # Current Node Position
            circle = plt.Circle((history[-1, 0],
                                 history[-1, 1]),
                                node_circle_size,
                                facecolor='g',
                                edgecolor='k',
                                lw=circle_edge_width,
                                zorder=3)
            ax.add_artist(circle)
Example #33
"""

import scipy.ndimage as nd
import scipy.misc as misc

from imreg.models import model
from imreg.metrics import metric
from imreg.samplers import sampler

from imreg.visualize import plot

from imreg import register

# Form some test data (lena, lena rotated 20 degrees)
image = register.RegisterData(misc.lena())
template = register.RegisterData(nd.rotate(image.data, 20, reshape=False))

# Form the registrator.
affine = register.Register(model.Affine, metric.Residual,
                           sampler.CubicConvolution)

fullSearch = []

# Image pyramid registration can be executed like so:
pHat = None
for factor in [30., 20., 10., 5., 2., 1.]:

    if pHat is not None:
        scale = downImage.coords.spacing / factor
        # FIXME: Find a nicer way to do this.
        pHat = model.Affine.scale(pHat, scale)
Example #34
    def step(self, action):

        X, Y = np.meshgrid(self.moveCenters, self.moveCenters)
        coords = np.stack([np.reshape(Y, [-1]), np.reshape(X, [-1])], axis=0)
        halfSide = self.blockSize // 2

        position = action % self.num_moves
        pickplace = action // (self.num_moves * self.num_orientations)
        orientation = (action - pickplace * self.num_moves *
                       self.num_orientations) // self.num_moves
        #        orientation = action / self.num_moves # DEBUG

        # if PICK
        if pickplace == 0:

            # if not holding anything
            if self.state[1] == 0:
                ii = coords[0, position]
                jj = coords[1, position]
                jjRangeInner, iiRangeInner = np.meshgrid(
                    range(jj - halfSide + self.gap, jj + halfSide - self.gap),
                    range(ii - halfSide + 1, ii + halfSide - 1))
                jjRangeOuter, iiRangeOuter = np.meshgrid(
                    range(jj - halfSide, jj + halfSide),
                    range(ii - halfSide, ii + halfSide))

                # if there's something in this spot
                if np.sum(self.state[0][iiRangeOuter, jjRangeOuter, 0]) > 0:
                    im = self.state[0][iiRangeOuter, jjRangeOuter, 0]

                    # if it's a separate object
                    shape = np.shape(im)
                    jjGap1, iiGap1 = np.meshgrid(
                        range(0, shape[1]),
                        np.r_[range(0, 1),
                              range(shape[1] - 1, shape[1])])
                    jjGap2, iiGap2 = np.meshgrid(
                        np.r_[range(0, 1),
                              range(shape[1] - 1, shape[1])],
                        range(0, shape[1]))
                    if (np.sum(im[iiGap1, jjGap1]) < 1) and (np.sum(
                            im[iiGap2, jjGap2]) < 1):

                        if orientation == 0:
                            imRot = np.int32(im > 0.5)
#                            imRot = np.int32(ndimage.rotate(im, 45, reshape=False)>0.5)
                        elif orientation == 1:
                            #                        imRot = np.int32(im>0.5)
                            imRot = np.int32(
                                ndimage.rotate(im, 45, reshape=False) > 0.5)
                        elif orientation == 2:
                            #                        imRot = np.int32(im>0.5)
                            imRot = np.int32(
                                ndimage.rotate(im, 90, reshape=False) > 0.5)
#                            imRot = np.int32(ndimage.rotate(im, 135, reshape=False)>0.5)
                        elif orientation == 3:
                            #                        imRot = np.int32(im>0.5)
                            #                        imRot = np.int32(ndimage.rotate(im, 90, reshape=False)>0.5)
                            imRot = np.int32(
                                ndimage.rotate(im, 135, reshape=False) > 0.5)
                        else:
                            print("error in orientation 1")

                        # if there's a gap for the fingers
                        shape = np.shape(imRot)
                        jjGap, iiGap = np.meshgrid(
                            np.r_[range(0, self.gap),
                                  range(shape[1] - self.gap, shape[1])],
                            range(0, shape[0]))
                        if np.sum(imRot[iiGap, jjGap]) < 1:

                            self.holdingImage = imRot
                            self.state[
                                1] = 1  # set holding to contents of action target
                            self.state[0][iiRangeOuter, jjRangeOuter,
                                          0] = np.zeros([
                                              np.shape(iiRangeOuter)[0],
                                              np.shape(jjRangeOuter)[1]
                                          ])

        # if PLACE
        elif pickplace == 1:

            if self.state[1] != 0:

                ii = coords[0, position]
                jj = coords[1, position]
                jjRangeOuter, iiRangeOuter = np.meshgrid(
                    range(jj - halfSide, jj + halfSide),
                    range(ii - halfSide, ii + halfSide))

                if True not in (self.state[0][iiRangeOuter, jjRangeOuter, 0] >
                                0):  # if this square is empty

                    im = self.holdingImage
                    if orientation == 0:
                        imRot = im
#                        imRot = ndimage.rotate(im, -45, reshape=False)
                    elif orientation == 1:
                        #                        imRot = im
                        imRot = ndimage.rotate(im, -45, reshape=False)
                    elif orientation == 2:
                        #                        imRot = im
                        imRot = ndimage.rotate(im, -90, reshape=False)
#                        imRot = ndimage.rotate(im, -135, reshape=False)
                    elif orientation == 3:
                        #                        imRot = im
                        #                        imRot = ndimage.rotate(im, -90, reshape=False)
                        imRot = ndimage.rotate(im, -135, reshape=False)
                    else:
                        print("error in orientation 2")

#                    self.state[0][iiRangeOuter,jjRangeOuter,0] = np.copy(self.holdingImage)
                    self.state[0][iiRangeOuter, jjRangeOuter, 0] = imRot
                    self.state[1] = 0  # set holding to zero

        else:
            print("error: action out of bounds")

        # check for termination condition
        reward = 0
        done = 0

        # check for two horizontal, vertical, or diagonally adjacent blocks
        if self.state[1] == 0:  # if both objs on the grid
            bounds = self.getBoundingBox(self.state[0][:, :, 0])

            if (bounds[1] - bounds[0]) < (self.blockSize * 0.7):
                if (bounds[3] - bounds[2]) < (self.blockSize * 3.0):
                    done = 1
                    reward = 10

            if (bounds[1] - bounds[0]) < (self.blockSize * 3.0):
                if (bounds[3] - bounds[2]) < (self.blockSize * 0.7):
                    done = 1
                    reward = 10

            im = np.int32(
                ndimage.rotate(self.state[0][:, :, 0], 45, reshape=True) > 0.5)
            bounds = self.getBoundingBox(im)

            if (bounds[1] - bounds[0]) < (self.blockSize * 0.9):
                if (bounds[3] - bounds[2]) < (self.blockSize * 3.0):
                    done = 1
                    reward = 10

            if (bounds[1] - bounds[0]) < (self.blockSize * 3.0):
                if (bounds[3] - bounds[2]) < (self.blockSize * 0.9):
                    done = 1
                    reward = 10

        if self.episode_timer > self.max_episode:
            self.episode_timer = 0
            done = 1
        self.episode_timer += 1

        return self.state, reward, done, {}
Example #35
def plot_vehicle_nice(ax, predictions, dt, max_hl=10, ph=6, map=None, x_min=0, y_min=0):
    prediction_dict, histories_dict, futures_dict = prediction_output_to_trajectories(predictions,
                                                                                      dt,
                                                                                      max_hl,
                                                                                      ph,
                                                                                      map=map)
    assert (len(prediction_dict.keys()) <= 1)
    if len(prediction_dict.keys()) == 0:
        return
    ts_key = list(prediction_dict.keys())[0]

    prediction_dict = prediction_dict[ts_key]
    histories_dict = histories_dict[ts_key]
    futures_dict = futures_dict[ts_key]

    if map is not None:
        ax.imshow(map.fdata, origin='lower', alpha=0.5)

    cmap = ['k', 'b', 'y', 'g', 'r']
    line_alpha = 0.7
    line_width = 0.2
    edge_width = 2
    circle_edge_width = 0.5
    node_circle_size = 0.3
    a = []
    i = 0
    node_list = sorted(histories_dict.keys(), key=lambda x: x.id)
    for node in node_list:
        history = histories_dict[node] + np.array([x_min, y_min])
        future = futures_dict[node] + np.array([x_min, y_min])
        predictions = prediction_dict[node] + np.array([x_min, y_min])
        if node.type.name == 'VEHICLE':
            # ax.plot(history[:, 0], history[:, 1], 'ko-', linewidth=1)

            ax.plot(future[:, 0],
                    future[:, 1],
                    'w--o',
                    linewidth=4,
                    markersize=3,
                    zorder=650,
                    path_effects=[pe.Stroke(linewidth=5, foreground='k'), pe.Normal()])

            #print(node.id, future.shape)
            for t in range(predictions.shape[2]):
                sns.kdeplot(predictions[0, :, t, 0], predictions[0, :, t, 1],
                            ax=ax, shade=True, shade_lowest=False,
                            color=line_colors[i % len(line_colors)], zorder=600, alpha=0.8)

            vel = node.get(np.array([ts_key]), {'velocity': ['x', 'y']})
            h = np.arctan2(vel[0, 1], vel[0, 0])
            r_img = rotate(cars[i % len(cars)], node.get(np.array([ts_key]), {'heading': ['°']})[0, 0] * 180 / np.pi, reshape=True)
            oi = OffsetImage(r_img, zoom=0.025, zorder=700)
            veh_box = AnnotationBbox(oi, (history[-1, 0], history[-1, 1]), frameon=False)
            veh_box.zorder = 700
            ax.add_artist(veh_box)
            i += 1
        else:
            # ax.plot(history[:, 0], history[:, 1], 'k--')

            for t in range(predictions.shape[2]):
                sns.kdeplot(predictions[0, :, t, 0], predictions[0, :, t, 1],
                            ax=ax, shade=True, shade_lowest=False,
                            color='b', zorder=600, alpha=0.8)

            ax.plot(future[:, 0],
                    future[:, 1],
                    'w--',
                    zorder=650,
                    path_effects=[pe.Stroke(linewidth=edge_width, foreground='k'), pe.Normal()])
            # Current Node Position
            circle = plt.Circle((history[-1, 0],
                                 history[-1, 1]),
                                node_circle_size,
                                facecolor='g',
                                edgecolor='k',
                                lw=circle_edge_width,
                                zorder=3)
            ax.add_artist(circle)
Example #36
    key = cv2.waitKey(1)
    # exit on 'q', 'esc', or 'Q'
    if key in [27, ord('Q'), ord('q')]:
        break
    # resize the captured frame for face detection to increase processing speed
    resized_frame = cv2.resize(frame, frame_scale)

    processed_frame = resized_frame
    # Skip a frame if no face was found in the last frame

    if frame_skip_rate == 0:
        faceFound = False
        for rotation in current_rotation_map:

            rotated_frame = ndimage.rotate(resized_frame, rotation)

            gray = cv2.cvtColor(rotated_frame, cv2.COLOR_BGR2GRAY)

            # detectMultiScale returns an empty tuple if nothing is found, an ndarray if a face is detected
            faces = face_cascade.detectMultiScale(
                gray,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE)  # was cv2.cv.CV_HAAR_SCALE_IMAGE (OpenCV 2 API)

            # If the frontal face detector failed, use the profile face detector
            faces = faces if len(faces) else sideFace_cascade.detectMultiScale(
                gray,
Example #37
    slika1 = cv2.addWeighted(slika1, 0.6, prazna_slika, 1, 0.0)
    return slika1


##CONVERT THE VIDEO INTO FRAMES
vidcap = cv2.VideoCapture('detection.mp4')
success, image = vidcap.read()
count = 0
alpha = 2  # Contrast control (1.0-3.0)
beta = -140  # Brightness control (0-100)
img_array = []
while success:
    if count % 2 == 0:
        try:
            image = rotate(image, -90)
            image = image[500:1000, 0:720]
            slika_ceste = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            slika_ceste = cv2.convertScaleAbs(slika_ceste,
                                              alpha=alpha,
                                              beta=beta)

            for x in range(500):
                for y in range(720):

                    if (slika_ceste[x, y][0] <
                            130) or (slika_ceste[x, y][1] <
                                     130) or (slika_ceste[x, y][2] < 130):
                        slika_ceste[x, y][0] = 0
                        slika_ceste[x, y][1] = 0
                        slika_ceste[x, y][2] = 0
Example #38
upper = int(min(255, (1.0 + sigma) * v))

edges = cv2.Canny(new_img, lower, upper, apertureSize=3)

cv2.imwrite(sys.argv[1] + "_edges.jpg", edges)
#cv2.imshow('canny_edge', img_edges)
key = cv2.waitKey(0)
angles = []

lines_image = img_before.copy()

lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 80, minLineLength=30, maxLineGap=10)
for k in range(0, len(lines)):
    for x1, y1, x2, y2 in lines[k]:
        angle = math.degrees(math.atan2(y2 - y1, x2 - x1))
        if angle != 0 and angle != 90 and angle != -90:
            print("Angle is %f" % angle)
            angles.append(angle)
            cv2.line(lines_image, (x1, y1), (x2, y2), (0, 255, 0), 10)

cv2.imwrite(sys.argv[1] + "_houghlines.jpg", lines_image)

#cv2.imshow('After', img_before)
key = cv2.waitKey(0)

median_angle = np.median(angles)
print("Median angle is: %f" % median_angle)
img_rotated = ndimage.rotate(img_before, median_angle)

cv2.imwrite(sys.argv[2], img_rotated)
Example #39
            yStartPos = (shape[0] - ysize)
            finalY = True

    return slices


def rename_files():
    for file in glob.glob('{}{}/*_.bmp'.format(inputDirectory, 'images')):
        os.rename(file, file.replace("_.bmp", ".bmp"))


if __name__ == '__main__':

    files = glob.glob('{}{}/*.bmp'.format(inputDirectory, 'images'))
    print("Found {} files".format(len(files)))
    file_count = 1
    for file in files:
        fileName = os.path.basename(file)
        image = plt.imread(file)
        csv_values = []
        dataArrays = []
        read_csv(fileName)
        if not csv_values:
            continue
        imageSlices = slice_image(image)
        imageSliceRotated = slice_image(ndimage.rotate(image, 90))
        plot_image_rotate(imageSlices, imageSliceRotated, image)
        if file_count % 10 == 0:
            print("Processed {} files".format(file_count))
        file_count += 1
Example #40
def fix_frame(frame):
    frame = ndimage.rotate(frame, -120, reshape=False)
    frame = images.crop_and_mask(frame, crop_result.bbox, crop_result.mask)
    return frame
Example #41
def get_crop_result(vid):
    frame = vid.read_next_frame()
    frame = ndimage.rotate(frame, -120, reshape=False)
    crop_result = images.crop_rectangle(frame)
    vid.set_frame(0)
    return crop_result
Example #42
import cv2
import numpy as np
import math
import pytesseract
from scipy import ndimage

## (1) read
img = cv2.imread(r"C:\Users\Internship004\Desktop\rot3.jpg")

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

img_edges = cv2.Canny(gray, 100, 100, apertureSize=3)
# Use HoughLinesP to detect lines
lines = cv2.HoughLinesP(img_edges, 1, math.pi / 180.0, 100, minLineLength=100, maxLineGap=5)

angles = []
for x1, y1, x2, y2 in lines[0]:
    angle = math.degrees(math.atan2(y2 - y1, x2 - x1))
    angles.append(angle)

# Getting the median angle
median_angle = np.median(angles)

# Rotating the image with this median angle
img_rotated = ndimage.rotate(img, median_angle)

cv2.imwrite(r"C:\Users\Internship004\Desktop\orientation_corrected.jpg", img_rotated)

print(pytesseract.image_to_string(img_rotated))
Example #43
def video(
    *,
    scene,
    resolution_ratio,
    frame_rate,
    exp_time,
    drift_angle,
    drift_velocity,
    angle_velocity,
    ccd_size,
    start,
):
    """
    Get video frames from input scene, applying appropriate motion blur

    Args:
        scene (ndarray): high resolution input scene
        resolution_ratio (float): downsample factor to low resolution images
        frame_rate (float): video frame rate
        exp_time (float): experiment duration
        drift_angle (float): linear drift direction (deg)
        drift_velocity (float): linear drift velocity (pix / s)
        angle_velocity (float): camera rotation rate (deg / s)
        ccd_size (int): size of square detector ccd (pixels)
        start (tuple): start location of detector in scene
    """

    num_frames = int(exp_time * frame_rate)

    def coord(k):
        return np.array(
            (start[0] - k * drift_velocity * np.sin(np.deg2rad(drift_angle)) *
             resolution_ratio / (frame_rate),
             start[1] + k * drift_velocity * np.cos(np.deg2rad(drift_angle)) *
             resolution_ratio / (frame_rate))).astype(int).T

    # FIXME check box bounds correctly, need to account for rotation
    assert (0 <= coord(0)[0] < scene.shape[0]
            and 0 <= coord(0)[1] < scene.shape[1]
            and 0 <= coord(num_frames)[0] < scene.shape[0]
            and 0 <= coord(num_frames)[1] < scene.shape[1]
            ), f"Frames drift outside of scene bounds \
    ({coord(0)[0]}, {coord(0)[1]}) -> ({coord(num_frames)[0]}, {coord(num_frames)[1]})"

    # calculate the middle points for all frames
    mid = coord(np.arange(num_frames + 1))

    # initialize frame images
    frames = np.zeros((num_frames, ccd_size[0], ccd_size[1]))

    # calculate each frame by integrating high resolution image along the drift
    # direction
    for frame in tqdm(range(num_frames), desc='Frames', leave=None,
                      position=1):
        hr_size = np.array(ccd_size) * resolution_ratio
        hr_frame = np.zeros(hr_size)
        # calculate middle coordinates for the shortest line connecting the
        # middle coordinates of the consecutive frames
        path_rows, path_cols = line(mid[frame][0], mid[frame][1],
                                    mid[frame + 1][0], mid[frame + 1][1])
        total_rotation = exp_time * angle_velocity
        angles = total_rotation * np.sqrt(
            (path_rows - mid[0][0])**2 +
            (path_cols - mid[0][1])**2) / np.linalg.norm(mid[-1] - mid[0])
        if len(path_rows) > 1:
            path_rows, path_cols = path_rows[:-1], path_cols[:-1]
        for row, col, angle in zip(path_rows, path_cols, angles):
            # accelerate algorithm by not rotating if angle_velocity is 0
            if angle_velocity == 0:
                slice_x = slice(row - hr_size[0] // 2,
                                row + (hr_size[0] + 1) // 2)
                slice_y = slice(col - hr_size[1] // 2,
                                col + (hr_size[1] + 1) // 2)
                hr_frame += scene[slice_x, slice_y]
            else:
                # diameter of circumscribing circle
                circum_diam = int(np.ceil(np.linalg.norm(hr_size)))
                slice_x = slice(row - circum_diam // 2,
                                row + (circum_diam + 1) // 2)
                slice_y = slice(col - circum_diam // 2,
                                col + (circum_diam + 1) // 2)
                unrotated = scene[slice_x, slice_y]
                hr_frame += crop(rotate(unrotated, angle, reshape='largest'),
                                 width=hr_size)
        # scale collected energy of subframes
        hr_frame /= frame_rate * len(path_rows)
        frames[frame] = rescale(hr_frame,
                                1 / resolution_ratio,
                                anti_aliasing=False)

    return frames, mid
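The subframe loop above walks a rasterized segment between consecutive frame centers; line here is presumably skimage.draw.line, which returns the integer row and column indices along the segment:

from skimage.draw import line

rr, cc = line(0, 0, 3, 5)  # endpoints are (row0, col0) and (row1, col1), inclusive
print(rr)  # [0 1 1 2 2 3]
print(cc)  # [0 1 2 3 4 5]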
Example #44
    return outputs.index(max(outputs))


'*****************************************************************'

imgFull = cv2.imread(
    "C:/New Folder/PCB inspection/Inspecting  SMD/123clahe.jpg", 0)
template = cv2.imread(
    "C:/New Folder/PCB inspection/Inspecting  SMD/template12.jpg", 0)
GoldenImg = cv2.imread("C:/New Folder/PCB inspection/Inspecting  SMD/123.jpg")
template1 = cv2.imread(
    "C:/New Folder/PCB inspection/Inspecting  SMD/template2NewCol.jpg", 0)

data = TemplateMatching(imgFull, template, 100)
data1 = TemplateMatching(imgFull, template1, 45)
rotated = ndimage.rotate(template, 90)
data2 = TemplateMatching(imgFull, rotated, 10)
fourier_desc = FourierDescriptor(data, GoldenImg, template, 0)
fourier_desc1 = FourierDescriptor(data1, GoldenImg, template1, 1)
fourier_desc2 = FourierDescriptor(data2, GoldenImg, rotated, 2)
database = np.vstack((fourier_desc, fourier_desc1, fourier_desc2))
normalizedFourier = np.zeros(database.shape, float)  # np.float was removed in NumPy 1.24
for i in range(0, MIN_DESCRIPTOR):
    normalizedFourier[:, i] = NormalizeFourier(database[:, i])
normalizedFourier[:, MIN_DESCRIPTOR] = database[:, MIN_DESCRIPTOR]
dataset = normalizedFourier
n_inputs = len(dataset[0]) - 1
n_outputs = len(set([row[-1] for row in dataset]))
network = initialize_network(n_inputs, 30, n_outputs)
train_network(network, dataset, 0.4, 5000, n_outputs)
Example #45
HRpQCT_Files = [
    File for File in os.listdir(HRpQCT_Path) if File.endswith('.mhd')
]
HRpQCT_Files.sort()

## Load uCT mask and rotate and segment it
uCT_Mask = sitk.ReadImage(uCT_Path + uCT_Mask_File)
Spacing = uCT_Mask.GetSpacing()
Origin = uCT_Mask.GetOrigin()
Direction = uCT_Mask.GetDirection()
uCT_Mask_Array = sitk.GetArrayFromImage(uCT_Mask)

DSCs = pd.read_csv(Results_Path + 'DSCs.csv')
BestAngle = int(DSCs.loc[DSCs['DSC'].idxmax(), 'Angle'])
uCT_Mask_Array = ndimage.rotate(uCT_Mask_Array,
                                -BestAngle, (1, 2),
                                reshape=True)
uCT_Mask_Array = np.rot90(uCT_Mask_Array, 2, (0, 1))

Otsu_Filter = sitk.OtsuThresholdImageFilter()
Otsu_Filter.SetInsideValue(0)
Otsu_Filter.SetOutsideValue(1)
Segmentation = Otsu_Filter.Execute(uCT_Mask)
R_Threshold = Otsu_Filter.GetThreshold()
uCT_Mask_Array[uCT_Mask_Array <= R_Threshold] = 0
uCT_Mask_Array[uCT_Mask_Array > 0] = 1

## Shift uCT mask according to initial translation
ParameterMapFile = open(Results_Path + 'InitialTranslation.txt', 'r')
Text = ParameterMapFile.read()
Start = Text.find('TransformParameters')
Example #46
def get_text_lines_from_image(src):
    '''
    Split the image into single text lines
    :param src: source image
    :return: list of single-line images
    '''
    # adjust size
    src = adjust_size(src)

    temp = src.copy()
    # correct the horizontal tilt
    src = cv2.Canny(src, 100, 200)
    src, slope = adjust_slope(src)

    #src = cv2.erode(src,cv2.getStructuringElement(cv2.MORPH_CROSS,(1, 3)) )
    #src = cv2.dilate(src,cv2.getStructuringElement(cv2.MORPH_CROSS,(1, 3)) )

    src = cv2.dilate(src, cv2.getStructuringElement(cv2.MORPH_RECT, (40, 3)))
    src = cv2.erode(src, cv2.getStructuringElement(cv2.MORPH_RECT, (40, 3)))

    src = cv2.erode(src, cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)))
    src = cv2.dilate(src, cv2.getStructuringElement(cv2.MORPH_RECT, (6, 5)))

    src = 1 * (src > 128)

    labels_open, nbr_objects_open = measurements.label(src)

    # correct the horizontal tilt of the binarized copy
    block_size = 40
    temp = threshold_adaptive(temp, block_size, offset=10)  # pre-0.15 skimage API (threshold_local today)
    temp = numpy.array(temp, 'uint8') * 255
    temp = cv2.bitwise_not(temp)

    if slope != 0:
        temp = ndimage.rotate(temp, slope)
        # rotation smooths the pixels, so re-binarize
        temp = cv2.bitwise_not(temp)
        temp = threshold_adaptive(temp, block_size, offset=20)
        temp = numpy.array(temp, 'uint8') * 255
        temp = cv2.bitwise_not(temp)

    lines = []

    image = numpy.zeros(numpy.array(src).shape)
    count = 0
    for i in range(1, nbr_objects_open + 1):

        test = temp.copy()
        test[labels_open != i] = 0
        box = bounding_box(test)
        x, y, w, h = box
        if h < 10 or w < 3:
            continue
        # ignore regions close to the top/bottom edges
        '''if y<2:
            continue
        if y+h> len(temp)-2:
            continue'''
        data = test[y:y + h, x:x + w]
        lines.append(data)

        copy = src.copy() * 255.
        copy[labels_open != i] = 0
        box = bounding_box(copy)
        x, y, w, h = box

        toerode = w // 3  # structuring element size must be an int
        if toerode <= 1:
            continue

        copy = cv2.erode(
            copy, cv2.getStructuringElement(cv2.MORPH_RECT, (toerode, 1)))
        copy = cv2.dilate(
            copy, cv2.getStructuringElement(cv2.MORPH_RECT, (toerode, 1)))
        copy = 1 * (copy > 128)

        sub_labels_open, sub_nbr_objects_open = measurements.label(copy)
        if (sub_nbr_objects_open > 1):
            for j in range(1, sub_nbr_objects_open + 1):
                test = temp.copy()
                test[sub_labels_open != j] = 0
                box = bounding_box(test)
                #count+=1
                #image[sub_labels_open == j] = count
                x, y, w, h = box
                if h < 10 or w < 3:
                    continue
                # ignore regions close to the top/bottom edges
                if y < 2:
                    continue
                if y + h > len(temp) - 2:
                    continue

                data = test[y:y + h, x:x + w]
                lines.append(data)
    '''figure()
    subplot(221)
    imshow(temp)
    subplot(222)
    imshow(image)
    subplot(223)
    imshow(labels_open)
    show()'''
    return lines
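A minimal, hypothetical driver for get_text_lines_from_image, assuming an OpenCV-readable file named document.jpg exists; each returned element is a cropped, binarized single-line image:

img = cv2.imread('document.jpg')  # hypothetical input file
for i, line_img in enumerate(get_text_lines_from_image(img)):
    cv2.imwrite('line_%d.png' % i, line_img)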
def main():
    # Ground Truth Paths
    json_path = r'/nfs/masi/nathv/miccai_2020/micro_methods_hcp_mini/data_list_compart_dti.json'
    json_path = os.path.normpath(json_path)

    all_data = json.load(open(json_path))
    #tr_data = all_data["train"]
    #val_data = all_data["validation"]
    test_data = all_data["test"]
    gt_paths_dict = test_data['output']
    method_list = ['BS_2003', 'IVIM', 'MC_SMT', 'NODDI_WATSON', 'DTI']

    # Predicted Data Paths
    base_pred_path = r'/nfs/masi/nathv/miccai_2020/bottleneck_compart_dti_test/bn_15/predicted_volumes'
    base_pred_path = os.path.normpath(base_pred_path)

    # Load the Mask Data
    mask_data = load_nifty(test_data['mask'], data_type='float32')
    mask_bool = np.array(mask_data, dtype='bool')

    # TODO Slice number is stored here
    slice_num = 72

    # DTI Analysis
    pred_dti_path = os.path.join(base_pred_path, 'dti.nii.gz')
    pred_dti = load_nifty(pred_dti_path, 'float32')

    plt.figure(1, figsize=(8, 12))
    metric_nums = len(gt_paths_dict['DTI'])
    plot_counter = 1
    for idx, each_vol_path in enumerate(gt_paths_dict['DTI']):

        gt_vol = load_nifty(each_vol_path, 'float32')
        plt.subplot(3, metric_nums, plot_counter)
        plt.imshow(rotate(np.squeeze(gt_vol[:, :, slice_num]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('GT')

        plt.subplot(3, metric_nums, plot_counter + metric_nums)
        plt.imshow(rotate(np.squeeze(pred_dti[:, :, slice_num, idx]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('Predicted')

        diff_img = np.abs(
            np.squeeze(gt_vol[:, :, slice_num]) -
            np.squeeze(pred_dti[:, :, slice_num, idx]))
        plt.subplot(3, metric_nums, plot_counter + metric_nums * 2)
        plt.imshow(rotate(diff_img, 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 0.1)
        plt.title('Abs Difference')

        plot_counter = plot_counter + 1
    plt.clf()
    plt.close(1)

    # IVIM Analysis
    pred_dti_path = os.path.join(base_pred_path, 'ivim.nii.gz')
    pred_dti = load_nifty(pred_dti_path, 'float32')

    plt.figure(1, figsize=(12, 12))
    metric_nums = len(gt_paths_dict['IVIM'])
    plot_counter = 1
    for idx, each_vol_path in enumerate(gt_paths_dict['IVIM']):

        gt_vol = load_nifty(each_vol_path, 'float32')
        plt.subplot(3, metric_nums, plot_counter)
        plt.imshow(rotate(np.squeeze(gt_vol[:, :, slice_num]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('GT')

        plt.subplot(3, metric_nums, plot_counter + metric_nums)
        plt.imshow(rotate(np.squeeze(pred_dti[:, :, slice_num, idx]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('Predicted')

        diff_img = np.abs(
            np.squeeze(gt_vol[:, :, slice_num]) -
            np.squeeze(pred_dti[:, :, slice_num, idx]))
        plt.subplot(3, metric_nums, plot_counter + metric_nums * 2)
        plt.imshow(rotate(diff_img, 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 0.1)
        plt.title('Abs Difference')

        plot_counter = plot_counter + 1
    plt.clf()
    plt.close(1)

    # MC SMT Analysis
    pred_dti_path = os.path.join(base_pred_path, 'mc_smt.nii.gz')
    pred_dti = load_nifty(pred_dti_path, 'float32')

    plt.figure(1, figsize=(8, 12))
    metric_nums = len(gt_paths_dict['MC_SMT'])
    plot_counter = 1
    for idx, each_vol_path in enumerate(gt_paths_dict['MC_SMT']):

        gt_vol = load_nifty(each_vol_path, 'float32')
        plt.subplot(3, metric_nums, plot_counter)
        plt.imshow(rotate(np.squeeze(gt_vol[:, :, slice_num]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('GT')

        plt.subplot(3, metric_nums, plot_counter + metric_nums)
        plt.imshow(rotate(np.squeeze(pred_dti[:, :, slice_num, idx]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('Predicted')

        diff_img = np.abs(
            np.squeeze(gt_vol[:, :, slice_num]) -
            np.squeeze(pred_dti[:, :, slice_num, idx]))
        plt.subplot(3, metric_nums, plot_counter + metric_nums * 2)
        plt.imshow(rotate(diff_img, 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 0.2)
        plt.title('Abs Difference')

        plot_counter = plot_counter + 1
    plt.clf()
    plt.close(1)

    # Ball Stick Analysis
    pred_dti_path = os.path.join(base_pred_path, 'ball_stick.nii.gz')
    pred_dti = load_nifty(pred_dti_path, 'float32')

    plt.figure(1, figsize=(20, 12))
    metric_nums = len(gt_paths_dict['BS_2003'])
    plot_counter = 1
    for idx, each_vol_path in enumerate(gt_paths_dict['BS_2003']):

        gt_vol = load_nifty(each_vol_path, 'float32')
        plt.subplot(3, metric_nums, plot_counter)
        plt.imshow(rotate(np.squeeze(gt_vol[:, :, slice_num]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('GT')

        plt.subplot(3, metric_nums, plot_counter + metric_nums)
        plt.imshow(rotate(np.squeeze(pred_dti[:, :, slice_num, idx]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('Predicted')

        diff_img = np.abs(
            np.squeeze(gt_vol[:, :, slice_num]) -
            np.squeeze(pred_dti[:, :, slice_num, idx]))
        plt.subplot(3, metric_nums, plot_counter + metric_nums * 2)
        plt.imshow(rotate(diff_img, 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 0.2)
        plt.title('Abs Difference')

        plot_counter = plot_counter + 1

    plt.clf()
    plt.close(1)

    # NODDI Analysis
    pred_dti_path = os.path.join(base_pred_path, 'noddi.nii.gz')
    pred_dti = load_nifty(pred_dti_path, 'float32')

    plt.figure(1, figsize=(18, 12))
    metric_nums = len(gt_paths_dict['NODDI_WATSON'])
    plot_counter = 1
    for idx, each_vol_path in enumerate(gt_paths_dict['NODDI_WATSON']):

        gt_vol = load_nifty(each_vol_path, 'float32')
        plt.subplot(3, metric_nums, plot_counter)
        plt.imshow(rotate(np.squeeze(gt_vol[:, :, slice_num]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('GT')

        plt.subplot(3, metric_nums, plot_counter + metric_nums)
        plt.imshow(rotate(np.squeeze(pred_dti[:, :, slice_num, idx]), 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 1)
        plt.title('Predicted')

        diff_img = np.abs(
            np.squeeze(gt_vol[:, :, slice_num]) -
            np.squeeze(pred_dti[:, :, slice_num, idx]))
        plt.subplot(3, metric_nums, plot_counter + metric_nums * 2)
        plt.imshow(rotate(diff_img, 90))
        plt.axis('equal')
        plt.colorbar()
        plt.clim(0, 0.2)
        plt.title('Abs Difference')

        plot_counter = plot_counter + 1

    plt.show()
    print('Debug here')
    return None
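The five analysis blocks above repeat the same GT / Predicted / Abs Difference layout and differ only in the method key, the prediction file, the figure size, and the clim of the difference row. A possible consolidation, offered as a sketch (not part of the original code) that reuses the same load_nifty and rotate helpers:

def plot_method(gt_paths, pred_path, figsize, diff_clim, slice_num=72):
    # one GT / Predicted / Abs Difference column per metric volume
    pred = load_nifty(pred_path, 'float32')
    n = len(gt_paths)
    plt.figure(1, figsize=figsize)
    for idx, gt_path in enumerate(gt_paths):
        gt_slice = np.squeeze(load_nifty(gt_path, 'float32')[:, :, slice_num])
        pred_slice = np.squeeze(pred[:, :, slice_num, idx])
        panels = [(gt_slice, 'GT', 1), (pred_slice, 'Predicted', 1),
                  (np.abs(gt_slice - pred_slice), 'Abs Difference', diff_clim)]
        for row, (img, title, clim) in enumerate(panels):
            plt.subplot(3, n, idx + 1 + row * n)
            plt.imshow(rotate(img, 90))
            plt.axis('equal')
            plt.colorbar()
            plt.clim(0, clim)
            plt.title(title)
    plt.clf()
    plt.close(1)

# e.g. plot_method(gt_paths_dict['IVIM'], os.path.join(base_pred_path, 'ivim.nii.gz'), (12, 12), 0.1)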
Example #48
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from scipy import ndimage
im = Image.open('../data/grenouille.jpg')
rouge, vert, bleu = im.split()
z = np.array(rouge)
zrr = ndimage.rotate(z, 30.)
zrn = ndimage.rotate(z, 30., reshape=False)

ny, nx = z.shape
nyrr, nxrr = zrr.shape
nyrn, nxrn = zrn.shape

fig = plt.figure(0)  # create a figure
plt.clf()
ax1 = fig.add_subplot(3, 1, 1)
plt.imshow(z, origin="upper")
plt.xticks([0, nx])
plt.yticks([0, ny])
ax2 = fig.add_subplot(3, 1, 2)
plt.imshow(zrr, origin="upper")
plt.xticks([0, nxrr])
plt.yticks([0, nyrr])
ax2 = fig.add_subplot(3, 1, 3)
plt.imshow(zrn, origin="upper")
plt.xticks([0, nxrn])
plt.yticks([0, nyrn])
plt.show()
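The point of this example is the shape bookkeeping: with the default reshape=True the output canvas grows to contain the rotated image, while reshape=False keeps the input shape and clips the corners. A minimal check of the two behaviors:

import numpy as np
from scipy import ndimage

z = np.zeros((100, 200))
print(ndimage.rotate(z, 30.).shape)                 # grows, roughly (187, 224)
print(ndimage.rotate(z, 30., reshape=False).shape)  # (100, 200), corners clipped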
# diff = np.abs(pp-ll)
# sort_idxs = np.argsort(-diff)
# sd_test = sd_test[sort_idxs]
# stest = stest[sort_idxs]

ppp = st[:, 0]
lll = st[:, 1]
mistakes = ppp != lll
sd_test = sd_test[mistakes]
stest = stest[mistakes]
# for i, dat, pred, label in zip(range(100), sd_test[100:200], stest[100:200, 0], stest[100:200, 1]):
for i, dat, pred, label in zip(range(100), sd_test[:100], stest[:100, 0],
                               stest[:100, 1]):
    f_guess = f'image_{i}.jpg'
    f_label = f'image_{i}_pred_{pred}_label_{label}.jpg'
    dat = ndimage.rotate(dat, 90)
    # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is the usual replacement
    imageio.imwrite(os.path.join(path_guess, f_guess), dat)
    imageio.imwrite(os.path.join(path_labeled, f_label), dat)
    a_pred = int(pred > 1.11)
    a_label = int(label > 1.11)
    squared_error = (pred - label)**2
    writer.write_row(i=i,
                     prediction=pred,
                     label=label,
                     prediction_amyloid=a_pred,
                     label_amyloid=a_label,
                     correct=int(a_pred == a_label),
                     squared_error=squared_error)

    print(i, pred, label)
Example #50
    plt.show()


if __name__ == "__main__":

    #Img = Image.readFrom("person_toy/00000001.jpg")
    #Img45 = Image.readFrom("person_toy/00000001.jpg")
    #Img90 = Image.readFrom("person_toy/00000001.jpg")
    #Img45.im = rotate(Img45.im, 45)
    #Img90.im = rotate(Img90.im, 90)

    Img = Image.readFrom("pingpong/0000.jpeg")
    Img45 = Image.readFrom("pingpong/0000.jpeg")
    Img90 = Image.readFrom("pingpong/0000.jpeg")
    Img45.im = rotate(Img45.im, 45)
    Img90.im = rotate(Img90.im, 90)

    CornerAlgo1 = HarrisCornerDetector(3, 1e6)
    CornerAlgo2 = HarrisCornerDetector(3, 1e5)
    CornerAlgo3 = HarrisCornerDetector(3, 1e4)
    CornerAlgo4 = HarrisCornerDetector(3, 1e7)

    H1 = CornerAlgo1.get_Hvalues(Img, sigma=1)
    H2 = CornerAlgo2.get_Hvalues(Img, sigma=1)
    H3 = CornerAlgo3.get_Hvalues(Img, sigma=1)
    H4 = CornerAlgo4.get_Hvalues(Img, sigma=1)
    Hr45 = CornerAlgo1.get_Hvalues(Img45, sigma=1)
    Hr90 = CornerAlgo1.get_Hvalues(Img90, sigma=1)
    mask1 = CornerAlgo1.evaluate(H1)
    mask2 = CornerAlgo2.evaluate(H2)
def image(imagename, filtertype):
	flag = 0
	log = db.log
	ts = datetime.datetime.now()

	filename='assets/'+imagename+'.jpg'
	img = imageio.imread(filename)  # scipy.misc.imread was removed; imageio.imread is the drop-in
	img[:] = np.max(img,axis=-1,keepdims=1)/2+np.min(img,axis=-1,keepdims=1)/2

	if filtertype == "greyscale":
		plt.imsave('test.jpg',img)
		flag=1

	elif filtertype == "lowpass" and request.args.get('value'):
		blurred = gaussian_filter(img, sigma = float(request.args.get('value')))
		plt.imsave('test.jpg',blurred)
		flag=1

	if filtertype == "crop":
		def crop_center(img,cropx,cropy):
			y,x,c = img.shape
			startx = x//2 - cropx//2
			starty = y//2 - cropy//2    
			return img[starty:starty+cropy, startx:startx+cropx]
		y,x,z = img.shape
		if y>x:
			cropped = crop_center(img,x,x)
		else:
			cropped = crop_center(img,y,y)
		plt.imsave('test.jpg',cropped)
		flag=1

	elif filtertype == "dx":
		sobelx = cv2.Sobel(img,cv2.cv2.CV_8U,1,0,ksize=5)
		plt.imsave('test.jpg',sobelx)
		flag=1

	if filtertype == "dy":
		sobely = cv2.Sobel(img,cv2.cv2.CV_8U,0,1,ksize=5)
		plt.imsave('test.jpg',sobely)
		flag=1

	elif filtertype == "downsample" and request.args.get('value'):
		downsampled = rescale(img, 1.0 / float(request.args.get('value')), anti_aliasing=False)
		plt.imsave('test.jpg',downsampled)
		flag=1

	if filtertype == "rotate" and request.args.get('value'):
		rotated = ndimage.rotate(img, int(request.args.get('value')))
		plt.imsave('test.jpg',rotated)
		flag=1

	te = datetime.datetime.now()
	if request.args.get('value') and flag == 1:
		log.insert({'imagename':imagename, 'cmd':filtertype, 'value':float(request.args.get('value')),'request_timestamp':str(ts),'processing_time':str(te-ts)})
	elif flag == 1:
		log.insert({'imagename':imagename, 'cmd':filtertype, 'value':'null','request_timestamp':str(ts),'processing_time':str(te-ts)})
	
	if flag == 1:
		return send_file('test.jpg',mimetype='image/gif')
	else:
		return 'Error Processing File'
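The function reads like the body of a Flask view (it uses request and send_file); a purely illustrative way to route it, assuming a standard Flask app:

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/image/<imagename>/<filtertype>', view_func=image)
# e.g. GET /image/cat/rotate?value=90 rotates assets/cat.jpg by 90 degrees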
Example #52
def main():
    global X_train, y_train, X_valid, y_valid, X_test, y_test, EPOCHS, BATCH_SIZE, learning_rate, feed_forward,\
        fc_keep_prob_value, conv_keep_prob_value, saver, CONTINUE, conv1_depth_val, conv2_depth_val, conv_filter_size,\
    available_training_data_set, image_depth, acc_list, train_acc_list, loss_list, TRAIN, TEST, max_accuracy, max_accuracy_epoch


    global accuracy_operation, x, y, conv_keep_prob, fc_keep_prob

    # TODO: Fill this in based on where you saved the training and testing data

    training_file = "./traffic_signs_data/train.p"
    validation_file = "./traffic_signs_data/valid.p"
    testing_file = "./traffic_signs_data/test.p"

    with open(training_file, mode='rb') as f:
        train = pickle.load(f)
    with open(validation_file, mode='rb') as f:
        valid = pickle.load(f)
    with open(testing_file, mode='rb') as f:
        test = pickle.load(f)

    X_train, y_train = train['features'], train['labels']
    X_valid, y_valid = valid['features'], valid['labels']
    X_test, y_test = test['features'], test['labels']
    # TODO: Number of training examples
    n_train = len(y_train)

    # TODO: Number of validation examples
    n_validation = len(y_valid)

    # TODO: Number of testing examples.
    n_test = len(y_test)

    # TODO: What's the shape of an traffic sign image?
    image_shape = X_train[0].shape

    # TODO: How many unique classes/labels there are in the dataset.
    n_classes = max(np.concatenate((y_train, y_valid, y_test), axis=0)) + 1

    print("Number of training examples =", n_train)
    print("Number of testing examples =", n_test)
    print("Image data shape =", image_shape)
    print("Number of classes =", n_classes)

    #DATA analysis
    if DISPLAY_DATA_SET_HISTOGRAM:
        n, bins, patches = plt.hist(y_train, n_classes, facecolor='blue', alpha=0.5)
        plt.show()

    random_index = random.randint(0, len(X_train) - 1)  # randint is inclusive on both ends
    rgb_image = X_train[random_index].squeeze()

    #convert to grayscale
    if image_depth == 1:
        X_train = np.sum(X_train/3, axis=3, keepdims=True)
        X_valid = np.sum(X_valid/3, axis=3, keepdims=True)
        X_test = np.sum(X_test/3, axis=3, keepdims=True)
        #Histograms Equalization in OpenCV
        if Data_Augmentation_EqualizeHist:
            if True:
                X_EqualizeHist = np.zeros_like(X_train)
                for i in range(X_train.shape[0]):
                    X_EqualizeHist[i, :, :, 0] = cv2.equalizeHist(X_train[i].astype(np.uint8))
                #add_images(X_EqualizeHist)
                X_train = X_EqualizeHist

                for i in range(X_valid.shape[0]):
                    X_valid[i, :, :, 0] = cv2.equalizeHist(X_valid[i].astype(np.uint8))
                for i in range(X_test.shape[0]):
                    X_test[i, :, :, 0] = cv2.equalizeHist(X_test[i].astype(np.uint8))
            else:
                clahe = cv2.createCLAHE(tileGridSize=(2, 2), clipLimit=clipLimit)
                X_EqualizeHist = np.zeros_like(X_train)
                for i in range(X_train.shape[0]):
                    X_EqualizeHist[i, :, :, 0] = clahe.apply(X_train[i].astype(np.uint16))
                #add_images(X_EqualizeHist)
                X_train = X_EqualizeHist

                for i in range(X_valid.shape[0]):
                    X_valid[i, :, :, 0] = clahe.apply(X_valid[i].astype(np.uint16))
                for i in range(X_test.shape[0]):
                    X_test[i, :, :, 0] = clahe.apply(X_test[i].astype(np.uint16))

    x = tf.placeholder(tf.float32, (None, 32, 32, image_depth))
    y = tf.placeholder(tf.int32, (None, 43))




    if Data_Augmentation_Rotate:
        print("Data Augmentation %s" %"Rotate")
        random_degree = random.uniform(-10, 10)
        x_rotate = rotate(X_train, random_degree, axes=(1, 2), reshape=False)  # rotate in the image plane, keep 32x32
        X_train = np.concatenate((X_train, x_rotate), axis=0)
        y_train = np.concatenate((y_train, y_train), axis=0)
        available_training_data_set += 1

    image_gray = X_train[random_index].squeeze()

    if Data_Augmentation_Noise:
        print("Data Augmentation %s" %"Noise")

        x_noise = RandomNoise(X_train/255, mode='pepper')*255
        #x_noise = x_noise / 2
        image_noise = x_noise[random_index].squeeze()
        #image_noise = RandomNoise(image_gray)
        plt.figure(figsize=(1,3))
        _ ,ax = plt.subplots(1, 3)
        ax[0].imshow(rgb_image)
        ax[0].set_title("rgb_image", fontsize=20)
        ax[1].imshow(image_gray, "gray")
        ax[1].set_title("gray", fontsize=20)
        ax[2].imshow(image_noise, "gray")
        ax[2].set_title("image_noise", fontsize=20)
        #print(image_noise)
        #print(np.max(image_gray), np.max(image_noise))
        plt.show()
        X_train = np.concatenate((X_train, x_noise), axis=0)
        y_train = np.concatenate((y_train, y_train), axis=0)
        available_training_data_set += 1

    if Data_Augmentation_Blur:
        print("Data Augmentation %s" %"Blur")
        x_blur = Blur(X_train, size=3)
        image_blur = x_blur[random_index].squeeze()
        plt.figure(figsize=(1,3))
        _, ax = plt.subplots(1, 3)
        ax[0].imshow(rgb_image)
        ax[0].set_title("rgb_image", fontsize=20)
        ax[1].imshow(image_gray, "gray")
        ax[1].set_title("gray", fontsize=20)
        ax[2].imshow(image_blur, "gray")
        ax[2].set_title("image_blur", fontsize=20)

        plt.show()
        X_train = np.concatenate((X_train, x_blur), axis=0)
        y_train = np.concatenate((y_train, y_train), axis=0)
        available_training_data_set += 1


    X_train, y_train = shuffle(X_train, y_train)
    n_train *= available_training_data_set

    # Normalize  data
    X_train = (X_train - 128)/128
    X_valid = (X_valid - 128)/128
    X_test = (X_test - 128)/128


    encoder = LabelBinarizer()
    encoder.fit(y_train)
    y_train = encoder.transform(y_train)
    y_valid = encoder.transform(y_valid)
    y_test = encoder.transform(y_test)

    if False:
        plt.figure(figsize=(1,1))
        plt.imshow(image)
        print(y_train_num[random_index])
        print(y_train[random_index])
    print('Labels One-Hot Encoded')

    plt.show()

    fc_keep_prob = tf.placeholder(tf.float32)
    conv_keep_prob = tf.placeholder(tf.float32)

    logits = feed_forward(x, conv_keep_prob, fc_keep_prob, depth=image_depth, conv1_depth=conv1_depth_val, conv2_depth=conv2_depth_val, filter_size=conv_filter_size)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss_operation = tf.reduce_mean(cross_entropy)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate )

    training_operation = optimizer.minimize(loss_operation)

    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()

    with tf.Session() as sess:
        if Evaluate_NW:
            com_sess = sess
            com_x = x
            #outputFeatureMap(image_gray, conv1)
            pass
        if CONTINUE == True:
            saver.restore(sess, './00_out/2019_01_13-13_52_52/LeNet')
        else:
            sess.run(tf.global_variables_initializer())

        num_examples = int(n_train/BATCH_SIZE)*BATCH_SIZE

        if TRAIN:
            print("Training...")
            for i in range(EPOCHS):
                #print(type(y_train))
                X_train, y_train = shuffle(X_train, y_train)
                for offset in range(0, num_examples, BATCH_SIZE):
                    end = offset + BATCH_SIZE
                    if end >= n_train:
                        end = n_train-1
                    batch_x, batch_y = X_train[offset:end], y_train[offset:end]

                    _, l = sess.run([training_operation, loss_operation], feed_dict={x: batch_x, y: batch_y, conv_keep_prob : conv_keep_prob_value, fc_keep_prob : fc_keep_prob_value})
                loss_list.append(l)

                validation_accuracy = evaluate(X_valid, y_valid)
                training_accuracy = evaluate(X_train, y_train)
                print("EPOCH {} ...".format(i + 1))
                print("Validation Accuracy = {:.3f}".format(validation_accuracy))
                print("Training Accuracy = {:.3f}".format(training_accuracy))
                acc_list.append(validation_accuracy)
                train_acc_list.append(training_accuracy)
                print()


            sub_folder_name = get_date_time()
            fig0 = plt.figure(0)
            fig0.clf()
            plt.plot(acc_list, label='validation_accuracy')
            plt.plot(train_acc_list, label='training_accuracy')
            plt.grid(True)
            plt.legend()
            annot_max(range(len(acc_list)),acc_list)
            max_accuracy = max(acc_list)
            max_accuracy_epoch = range(len(acc_list))[np.argmax(acc_list)]
            print("Max accuracy %.3f at Epoch %d" %(max_accuracy, max_accuracy_epoch))
            arr = convert_figure_to_array(fig0)
            store_image(arr, "validation_accuracy", out_dir + "/" + sub_folder_name)

            fig0 = plt.figure(0)
            fig0.clf()
            plt.plot(loss_list, label='loss')
            plt.grid(True)
            plt.legend(loc=2)
            annot_min(range(len(loss_list)),loss_list)

            arr = convert_figure_to_array(fig0)
            store_image(arr, "loss", out_dir + "/" + sub_folder_name)

            global test_accuracy
            test_accuracy = 0

            data_list = ["EPOCHS", "BATCH_SIZE", "learning_rate", "feed_forward.__name__",
                         "fc_keep_prob_value", "conv_keep_prob_value", "CONTINUE", "image_depth",
                         "conv1_depth_val", "conv2_depth_val", "conv_filter_size","test_accuracy",
                         "max_accuracy", "max_accuracy_epoch", "clipLimit",
                         "acc_list", "train_acc_list", "loss_list"]
            file_name =  out_dir + "/" + sub_folder_name + "/" + "configs"
            save_variables(file_name, data_list)

            saver.save(sess, "./" + out_dir + "/" + sub_folder_name + '/' + feed_forward.__name__)
            print("Model saved")
            if TEST:
                print("Testing ...")
                test_accuracy = evaluate(X_test, y_test)
                print("Testing Accuracy = {:.3f}".format(test_accuracy))
Example #53

# following code displays images of failure cases
# (loop header reconstructed from the parallel correct-prediction loop below)
counter = 0
imgArr = []
pred = []
gnd = []
for i in range(len(y_test)):
    if (y_test[i] != y_pred[i] and counter < 10):
        imgArr.append(x_test[i])
        pred.append(y_pred[i])
        gnd.append(y_test[i])
        counter += 1
w = 10
h = 10
fig = plt.figure(figsize=(12, 12))
columns = 4
rows = 1
print("\n\nFailure cases: ")
print("=================")
for i in range(1, columns * rows + 1):
    img = imgArr[i]
    img = img.reshape((64, 64))
    fig.add_subplot(rows, columns, i)
    rotated_img = ndimage.rotate(img, 270)
    plt.imshow(rotated_img, interpolation='nearest')
    plt.title("\nPred: {}  Actual o/p: {}".format(pred[i], gnd[i]),
              fontsize=11)
    plt.axis('off')
plt.show()

# following code displays images of correct prediction.
counter = 0
imgArr = []
pred = []
gnd = []
for i in range(len(y_test)):
    if (y_test[i] == y_pred[i] and counter < 10):
        imgArr.append(x_test[i])
        pred.append(y_pred[i])
                cv2.putText(img_new, label, (x, y - 10), font, 2, [255, 0, 0],
                            d)
            if (label == 'red insulator'):  # draw in red
                cv2.rectangle(img_new, (x, y), (x + w, y + h), [0, 0, 255], d)
                cv2.putText(img_new, label, (x, y - 10), font, 2, [0, 0, 255],
                            d)

h1 = int(h / 10)
w1 = int(w / 10)
img_crop = img[y - h1:y + h + h1, x - w1:x + w + w1]

#img_crop = cv2.resize(img_crop,(img_crop.shape[1]*5,img_crop.shape[0]*5))

#Rotate the image to landscape if it is portrait
if (h > w):
    img_crop = ndimage.rotate(img_crop, 90)

#Display using cv2
cv2.imshow("Image", img_new)
key = cv2.waitKey(1)

cv2.imshow("Image_crop", img_crop)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
'''
#Display using plt, showing everything at once in one figure
# Note: remember to convert the color space before displaying

titles = ['Original', 'Detect', 'Crop']
images = [img, img_new, img_crop]
Example #55
def test_export_array():

    try:
        from scipy.ndimage import rotate
    except ImportError:
        rotate = False

    namfile = 'freyberg.nam'
    model_ws = '../examples/data/freyberg_multilayer_transient/'
    m = flopy.modflow.Modflow.load(namfile,
                                   model_ws=model_ws,
                                   verbose=False,
                                   load_only=['DIS', 'BAS6'])
    m.sr.rotation = 45.
    nodata = -9999
    m.sr.export_array(os.path.join(tpth, 'fb.asc'),
                      m.dis.top.array,
                      nodata=nodata)
    arr = np.loadtxt(os.path.join(tpth, 'fb.asc'), skiprows=6)

    m.sr.write_shapefile(os.path.join(tpth, 'grid.shp'))
    # check bounds
    with open(os.path.join(tpth, 'fb.asc')) as src:
        for line in src:
            if 'xllcorner' in line.lower():
                val = float(line.strip().split()[-1])
                # ascii grid origin will differ if it was unrotated
                if rotate:
                    assert np.abs(val - m.sr.bounds[0]) < 1e-6
                else:
                    assert np.abs(val - m.sr.xll) < 1e-6
            if 'yllcorner' in line.lower():
                val = float(line.strip().split()[-1])
                if rotate:
                    assert np.abs(val - m.sr.bounds[1]) < 1e-6
                else:
                    assert np.abs(val - m.sr.yll) < 1e-6
            if 'cellsize' in line.lower():
                val = float(line.strip().split()[-1])
                rot_cellsize = np.cos(np.radians(
                    m.sr.rotation)) * m.sr.delr[0] * m.sr.length_multiplier
                #assert np.abs(val - rot_cellsize) < 1e-6
                break
    rotate = False
    rasterio = False
    if rotate:
        rotated = rotate(m.dis.top.array, m.sr.rotation, cval=nodata)

    if rotate:
        assert rotated.shape == arr.shape

    try:
        # test GeoTIFF export
        import rasterio
        m.sr.export_array(os.path.join(tpth, 'fb.tif'),
                          m.dis.top.array,
                          nodata=nodata)
        with rasterio.open(os.path.join(tpth, 'fb.tif')) as src:
            arr = src.read(1)
    except:
        pass
    if rasterio:
        assert src.shape == (m.nrow, m.ncol)
        assert np.abs(src.bounds[0] - m.sr.bounds[0]) < 1e-6
        assert np.abs(src.bounds[1] - m.sr.bounds[1]) < 1e-6
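The cval=nodata passed to rotate above is what marks the corner cells exposed by the rotation. A minimal check of that behavior (order=0 avoids blending real data with the fill value):

import numpy as np
from scipy.ndimage import rotate

a = np.ones((4, 4))
r = rotate(a, 45, cval=-9999, order=0)
print(r.shape, r.min())  # canvas grows to hold the rotated square; corners are -9999.0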
Example #56
def plotsklearnresult(configini, videoSetting, frameSetting):
    config = ConfigParser()
    configFile = str(configini)
    try:
        config.read(configFile)
    except MissingSectionHeaderError:
        print(
            'ERROR:  Not a valid project_config file. Please check the project_config.ini path.'
        )
    csv_dir = config.get('General settings', 'csv_path')
    csv_dir_in = os.path.join(csv_dir, "machine_results")
    animalsNo = config.getint('General settings', 'animal_no')
    projectPath = config.get('General settings', 'project_path')
    frames_dir_out = config.get('Frame settings', 'frames_dir_out')
    frames_dir_out = os.path.join(frames_dir_out, 'sklearn_results')
    if not os.path.exists(frames_dir_out):
        os.makedirs(frames_dir_out)
    counters_no = config.getint('SML settings', 'No_targets')
    vidInfPath = config.get('General settings', 'project_path')
    vidInfPath = os.path.join(vidInfPath, 'logs', 'video_info.csv')
    vidinfDf = pd.read_csv(vidInfPath)
    target_names = []
    loopy = 0
    Xcols, Ycols, Pcols = getBpNames(configini)
    colorList = []
    for color in range(len(Xcols)):
        r, g, b = (random.randint(0, 255), random.randint(0, 255),
                   random.randint(0, 255))
        colorTuple = (r, g, b)
        colorList.append(colorTuple)
    filesFound = glob.glob(csv_dir_in + '/*.csv')
    print('Processing ' + str(len(filesFound)) + ' videos ...')
    ########### GET MODEL NAMES ###########
    for i in range(counters_no):
        currentModelNames = 'target_name_' + str(i + 1)
        currentModelNames = config.get('SML settings', currentModelNames)
        target_names.append(currentModelNames)

    ########### FIND PREDICTION COLUMNS ###########
    for i in filesFound:
        target_counters, target_timers = ([0] * counters_no, [0] * counters_no)
        currentVideo = i
        loopy += 1
        CurrentVideoName = os.path.basename(currentVideo)
        if frameSetting == 1:
            videoFrameDir = os.path.join(frames_dir_out,
                                         CurrentVideoName.replace('.csv', ''))
            if not os.path.exists(videoFrameDir):
                os.makedirs(videoFrameDir)
        CurrentVideoRow = vidinfDf.loc[vidinfDf['Video'] == str(
            CurrentVideoName.replace('.csv', ''))]
        try:
            fps = int(CurrentVideoRow['fps'])
        except TypeError:
            print(
                'Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file'
            )
        currentDf = pd.read_csv(currentVideo)
        currentDf = currentDf.fillna(0)
        currentDf = currentDf.astype(int)
        currentDf = currentDf.loc[:,
                                  ~currentDf.columns.str.contains('^Unnamed')]
        currentDf = currentDf.reset_index()
        animalBpHeaderList, animalBpHeaderListY, animalBpHeaderListX = ([], [],
                                                                        [])
        animal1_BPsX, animal1_BPsY = (currentDf[Xcols], currentDf[Ycols])
        for bp in range(len(animal1_BPsX.columns)):
            animalBpHeaderListX.append(animal1_BPsX.columns[bp])
            animalBpHeaderListY.append(animal1_BPsY.columns[bp])
            animalBpHeaderList.append(animal1_BPsX.columns[bp])
            animalBpHeaderList.append(animal1_BPsY.columns[bp])
        animalBpHeaderListX, animalBpHeaderListY, animalBpHeaderList = ([
            x for x in animalBpHeaderListX if "Tail_end" not in x
        ], [x for x in animalBpHeaderListY if "Tail_end" not in x
            ], [x for x in animalBpHeaderList if "Tail_end" not in x])
        if animalsNo == 2:
            animal_1_BpHeaderList = [
                s for s in animalBpHeaderList if "_1_" in s
            ]
            animal_2_BpHeaderList = [
                s for s in animalBpHeaderList if "_2_" in s
            ]
        if os.path.exists(
                os.path.join(projectPath, 'videos',
                             CurrentVideoName.replace('.csv', '.mp4'))):
            videoPathName = os.path.join(
                projectPath, 'videos',
                CurrentVideoName.replace('.csv', '.mp4'))
        elif os.path.exists(
                os.path.join(projectPath, 'videos',
                             CurrentVideoName.replace('.csv', '.avi'))):
            videoPathName = os.path.join(
                projectPath, 'videos',
                CurrentVideoName.replace('.csv', '.avi'))
        else:
            print('Cannot locate video ' +
                  str(CurrentVideoName.replace('.csv', '')) +
                  'in mp4 or avi format')
            break
        cap = cv2.VideoCapture(videoPathName)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        outputFileName = os.path.join(frames_dir_out, CurrentVideoName)
        if height < width:
            videoHeight, videoWidth = width, height
        if height >= width:
            videoHeight, videoWidth = height, width
        writer = cv2.VideoWriter(outputFileName.replace('.csv', '.mp4'),
                                 fourcc, fps, (videoWidth, videoHeight))
        mySpaceScale, myRadius, myResolution, myFontScale = 60, 20, 1500, 1.5
        maxResDimension = max(width, height)
        circleScale = int(myRadius / (myResolution / maxResDimension))
        fontScale = float(myFontScale / (myResolution / maxResDimension))
        spacingScale = int(mySpaceScale / (myResolution / maxResDimension))
        currRow = 0
        while (cap.isOpened()):
            ret, frame = cap.read()
            if ret == True:
                if animalsNo == 1:
                    currAnimal1 = currentDf.loc[currentDf.index[currRow],
                                                animalBpHeaderList]
                    currAnimal1 = np.array(currAnimal1).astype(int)
                    currAnimal1 = np.reshape(currAnimal1, (-1, 2))
                    M1polyglon_array_hull = cv2.convexHull(
                        (currAnimal1.astype(int)))
                    cv2.drawContours(frame,
                                     [M1polyglon_array_hull.astype(int)], 0,
                                     (255, 255, 255), 2)
                if animalsNo == 2:
                    currAnimal1, currAnimal2 = (
                        currentDf.loc[currentDf.index[currRow],
                                      animal_1_BpHeaderList],
                        currentDf.loc[currentDf.index[currRow],
                                      animal_2_BpHeaderList])
                    currAnimal1, currAnimal2 = (
                        np.array(currAnimal1).astype(int),
                        np.array(currAnimal2).astype(int))
                    currAnimal1, currAnimal2 = (np.reshape(
                        currAnimal1,
                        (-1, 2)), np.reshape(currAnimal2, (-1, 2)))
                    M1polyglon_array_hull, M2polyglon_array_hull = (
                        cv2.convexHull((currAnimal1.astype(int))),
                        cv2.convexHull((currAnimal2.astype(int))))
                    cv2.drawContours(frame,
                                     [M1polyglon_array_hull.astype(int)], 0,
                                     (255, 255, 255), 2)
                    cv2.drawContours(frame,
                                     [M2polyglon_array_hull.astype(int)], 0,
                                     (255, 255, 255), 2)
                for cords in range(len(animalBpHeaderListX)):
                    currXval = animal1_BPsX.loc[animal1_BPsX.index[currRow],
                                                animalBpHeaderListX[cords]]
                    currYval = animal1_BPsY.loc[animal1_BPsY.index[currRow],
                                                animalBpHeaderListY[cords]]
                    cv2.circle(frame, (int(currXval), int(currYval)),
                               circleScale,
                               colorList[cords],
                               -1,
                               lineType=cv2.LINE_AA)
                if height < width:
                    frame = ndimage.rotate(frame, 90)
                # draw event timers
                for b in range(counters_no):
                    target_timers[b] = (1 / fps) * target_counters[b]
                    target_timers[b] = round(target_timers[b], 2)

                cv2.putText(frame, str('Timers'),
                            (10, ((height - height) + spacingScale)),
                            cv2.FONT_HERSHEY_COMPLEX, fontScale, (0, 255, 0),
                            2)
                addSpacer = 2
                for k in range(counters_no):
                    cv2.putText(frame, (str(target_names[k]) + ' ' +
                                        str(target_timers[k]) + str('s')),
                                (10,
                                 (height - height) + spacingScale * addSpacer),
                                cv2.FONT_HERSHEY_SIMPLEX, fontScale,
                                (0, 0, 255), 2)
                    addSpacer += 1

                cv2.putText(frame, str('ensemble prediction'),
                            (10, (height - height) + spacingScale * addSpacer),
                            cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 0),
                            2)

                addSpacer += 1
                colors = [(2, 166, 249), (47, 255, 173), (0, 165, 255),
                          (60, 20, 220), (193, 182, 255), (238, 130, 238),
                          (144, 128, 112), (32, 165, 218), (0, 0, 128),
                          (209, 206, 0)]
                for p in range(counters_no):
                    TargetVal = int(currentDf.loc[currRow, [target_names[p]]])
                    if TargetVal == 1:
                        cv2.putText(
                            frame, str(target_names[p]),
                            (10, (height - height) + spacingScale * addSpacer),
                            cv2.FONT_HERSHEY_TRIPLEX, int(fontScale * 2),
                            colors[p], 2)
                        target_counters[p] += 1
                        addSpacer += 1
                if videoSetting == 1:
                    writer.write(frame)
                if frameSetting == 1:
                    frameName = os.path.join(videoFrameDir,
                                             str(currRow) + '.png')
                    cv2.imwrite(frameName, frame)
                if (videoSetting == 0) and (frameSetting == 0):
                    print('Error: Please choose video and/or frames.')
                    break
                currRow += 1
                print('Frame ' + str(currRow) + '/' + str(frames) +
                      '. Video ' + str(loopy) + '/' + str(len(filesFound)))
            if frame is None:
                print('Video ' + str(
                    os.path.basename(CurrentVideoName.replace('.csv', '.mp4')))
                      + ' saved.')
                cap.release()
                break
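The orientation handling above leans on the fact that ndimage.rotate by 90 degrees swaps the first two axes of an image array (the color channel axis is untouched); a minimal check:

import numpy as np
from scipy import ndimage

frame = np.zeros((480, 640, 3), dtype=np.uint8)
print(ndimage.rotate(frame, 90).shape)  # (640, 480, 3)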
Example #57
def get_process_params(folder, wallcut_file):
   
    """
    Function to get the rotation angle and crop range

    Parameters
    ----------
    folder : string
        Input folder where the images are located.
    wallcut_file : 2d array
        Run names in the first column, left and right wallcuts in the remaining columns.

    Returns
    -------
    crop_final : 2d array of int32
        Array containing the 4 crop parameters at the end.
    crop_rot : 2d array of int32
        Array containing the 4 crop parameters after the rotation.
    angle : float64
        Angle of tilt for the images in radians.
    name_sliced : string
        Name of the images without the number.
    img_amount : int
        Amount of images in the folder.
    idx0 : int
        Index of the first image in the folder.
    images_exist : boolean
        True if there are images in the folder, otherwise false

    """
    run_names = wallcut_file[:,0].astype(str)
    wallcuts = wallcut_file[:,1:].astype(float)
    # get a list of all the files in the folder
    file_list = os.listdir(folder)
    # get the amount of images by taking away the labview files
    img_amount = len(file_list)-5
    # check wether there actually are images in the folder
    if(img_amount < 1):
        # print error message and return
        MEX = 'No images in folder %s' %folder
        print(MEX)
        return 0, 0, 0, 0, 0, 0, False
    # get the name of the first frame
    frame0 = file_list[5]
    # get the '.' in the file name
    indices = [i for i, a in enumerate(frame0) if a == '.']
    # extract the index of the first frame
    idx0 = int(frame0[indices[0]+1:indices[1]])
    # cut of the index of the file to get the sliced name
    name_sliced = frame0[:(indices[0])]
    # search for the right wallcut (stays NaN when no run name matches)
    wallcut_left, wallcut_right = np.nan, np.nan
    for j in range(0, len(run_names)):
        if(name_sliced == run_names[j]):
            wallcut_left, wallcut_right = wallcuts[j,:]
    # check whether a wallcut was found
    if np.isnan(wallcut_left):
        MEX = 'No wallcut was found'
        print(MEX)
        return 0, 0, 0, 0, 0, 0, False
    # calculate the average image
    avg = get_avg_image(folder, name_sliced, idx0)
    # calculate the angle from the average image
    angle = get_angle(avg)
    # rotate the image
    rotated = ndimage.rotate(avg, angle*180/np.pi)
    # get the new image shape
    hr, wr = rotated.shape
    # crop the edges due to the rotation
    crop_rot =(0+int(np.rint(wr*np.tan(angle))), hr-int(np.rint(wr*np.tan(angle))),\
               0+int(np.rint(hr*np.tan(angle))), wr-int(np.rint(hr*np.tan(angle))))
    cropped = rotated[crop_rot[0]:crop_rot[1],crop_rot[2]:crop_rot[3]]
    # get the edges of the rotated image
    new_peak_left, new_peak_right = find_edges(cropped) 
    x_low =  get_most_common_element(new_peak_left)
    x_high = get_most_common_element(new_peak_right)
    # arange coordinates of the final crop
    crop_final = (0,cropped.shape[0],x_low+int(wallcut_left),x_high+1-int(wallcut_right)) 
    return crop_final, crop_rot, angle, name_sliced, img_amount, idx0, True
    def __init__(self):

        #        mnist = tf.contrib.learn.datasets.load_dataset("mnist")
        #        train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
        #        blocksOfInterestIdx = np.nonzero(train_labels==2)[0]
        #        self.blocks = mnist.train.images[blocksOfInterestIdx]

        self.blockSize = 28  # edge size of one block
        self.stride = 28  # num grid cells between adjacent move destinations
        #        self.stride = 24 # num grid cells between adjacent move destinations
        #        self.stride = 20 # num grid cells between adjacent move destinations
        #        self.stride = 14 # num grid cells between adjacent move destinations
        #        self.stride = 7 # num grid cells between adjacent move destinations
        #        self.stride = 6 # num grid cells between adjacent move destinations
        #        self.stride = 3 # num grid cells between adjacent move destinations
        #        self.numBlocksWide = 8 # number of blocks that can fit end-to-end on one edge of board
        self.initStride = self.stride
        self.gridSize = 28 * 8
        self.num_blocks = 2
        self.num_orientations = 4

        self.observation_space = spaces.Tuple([
            spaces.Box(np.zeros([self.gridSize, self.gridSize, 1]),
                       np.ones([self.gridSize, self.gridSize, 1])),
            spaces.Discrete(2)
        ])
        self.holdingImage = []
        self.state = None
        self.max_episode = 10
        self.gap = 3  # num pixels that need to be clear around a block in order ot move it.
        #        self.gap = 2 # num pixels that need to be clear around a block in order ot move it.
        self.numBlocksInRowGoal = 2
        #        self.blockType = 'Numerals'
        #        self.blockType = 'Disks'
        self.blockType = 'Blocks'

        if self.blockType == 'Numerals':  # load MNIST numerals
            mnist = tf.contrib.learn.datasets.load_dataset("mnist")
            train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
            blocksOfInterestIdx = np.nonzero(train_labels == 2)[0]
            self.blocks = mnist.train.images[blocksOfInterestIdx]

        elif self.blockType == 'Disks':  # load random radius disks instead
            numBlocks = 10
            minRadius = 8
            maxRadius = 10
            self.blocks = []
            for i in range(numBlocks):
                X, Y = np.mgrid[0:self.blockSize, 0:self.blockSize]
                halfBlock = int(self.blockSize / 2)
                dist2 = (X - halfBlock)**2 + (Y - halfBlock)**2
                radius = np.random.randint(minRadius, maxRadius)
                im = np.int32(dist2 < radius**2)
                self.blocks.append(np.reshape(im, int(self.blockSize**2)))

        elif self.blockType == 'Blocks':  # load random blocks instead
            numBlocks = 10
            minRadius = 8
            maxRadius = 10
            self.blocks = []
            for i in range(numBlocks):
                Y, X = np.mgrid[0:self.blockSize, 0:self.blockSize]
                halfBlock = int(self.blockSize / 2)
                dist2 = (Y - halfBlock)**2  # vertical rectangle
                randNum = np.random.rand()
                if randNum < 0.25:
                    dist2 = dist2 + 0.01
#                    dist2 = ndimage.rotate(dist2+0.01,45,reshape=False)
                elif randNum < 0.5:
                    #                    dist2 = dist2+0.01
                    dist2 = ndimage.rotate(dist2 + 0.01, 45, reshape=False)
                elif randNum < 0.75:
                    #                    dist2 = dist2+0.01
                    dist2 = ndimage.rotate(dist2 + 0.01, 90, reshape=False)
#                    dist2 = ndimage.rotate(dist2+0.01,135,reshape=False)
                elif randNum < 1.01:
                    #                    dist2 = dist2+0.01
                    #                    dist2 = ndimage.rotate(dist2+0.01,90,reshape=False)
                    dist2 = ndimage.rotate(dist2 + 0.01, 135, reshape=False)
                else:
                    print("error!")

                dist2 = dist2 + (dist2 == 0) * np.max(dist2)
                radius = np.random.randint(minRadius, maxRadius)
                im = np.int32(dist2 < radius**2)

                # Create a boundary of zeros around the object. Needed to break
                # up adjacent objects.
                im[0, :] = 0
                im[:, 0] = 0
                im[-1, :] = 0
                im[:, -1] = 0

                self.blocks.append(np.reshape(im, int(self.blockSize**2)))

        self.reset()
def balanced_dataset(input_dir, gt_dir):

    positive_list = []
    negative_list = []

    # input = os.listdir(input_dir)
    # gt = os.listdir(gt_dir)

    input = input_dir
    gt = gt_dir

    input.sort()
    gt.sort()

    file_count = 0

    for x, y in zip(input, gt):
        file_count += 1
        print('Executing folder ', file_count)
        input_sub_dir = os.path.join(input_dir, x)
        gt_sub_dir = os.path.join(gt_dir, y)

        input_sub_dir_patches = os.listdir(input_sub_dir)
        gt_sub_dir_patches = os.listdir(gt_sub_dir)

        input_sub_dir_patches.sort()
        gt_sub_dir_patches.sort()

        for in_patch, gt_patch in zip(input_sub_dir_patches,
                                      gt_sub_dir_patches):
            input_image = os.path.join(input_sub_dir, in_patch)
            gt_image = os.path.join(gt_sub_dir, gt_patch)

            FLAIR_image_in = nib.load(input_image).get_fdata()  # get_data() was removed in nibabel 5
            FLAIR_image_in = np.array(FLAIR_image_in)

            FLAIR_image_gt = nib.load(gt_image).get_fdata()
            FLAIR_image_gt = np.array(FLAIR_image_gt)

            if FLAIR_image_gt.max() == 1.0:

                positive_list.append((FLAIR_image_in, 1.0))

                FLAIR_image_in_fliped = FLAIR_image_in[::-1, :, :]
                positive_list.append((FLAIR_image_in_fliped, 1.0))

                FLAIR_image_in_rotated_1 = sind.rotate(FLAIR_image_in,
                                                       -12,
                                                       reshape=False)
                positive_list.append((FLAIR_image_in_rotated_1, 1.0))

                FLAIR_image_in_rotated_2 = sind.rotate(FLAIR_image_in,
                                                       12,
                                                       reshape=False)
                positive_list.append((FLAIR_image_in_rotated_2, 1.0))

                FLAIR_image_shifted_1 = np.roll(FLAIR_image_in, 10, 0)
                positive_list.append((FLAIR_image_shifted_1, 1.0))

                FLAIR_image_shifted_2 = np.roll(FLAIR_image_in, -10, 0)
                positive_list.append((FLAIR_image_shifted_2, 1.0))

            else:
                negative_list.append((FLAIR_image_in, 0.0))

    print('Executed all folders')

    positive_count = len(positive_list)
    negative_list_1 = random.sample(negative_list, positive_count)

    balanced_list = positive_list + negative_list_1
    print(len(positive_list))
    print(len(negative_list_1))

    random.shuffle(balanced_list)

    print(len(balanced_list))

    return balanced_list
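A compact sketch (not part of the original) of the five positive-class augmentations used above, applied to a random stand-in volume; the patch shape is made up for illustration:

import numpy as np
from scipy import ndimage as sind

vol = np.random.rand(32, 32, 8)            # stand-in for a FLAIR patch
augmented = [
    vol[::-1, :, :],                        # flip along the first axis
    sind.rotate(vol, -12, reshape=False),   # small rotations, shape preserved
    sind.rotate(vol, 12, reshape=False),
    np.roll(vol, 10, 0),                    # shifts along the first axis
    np.roll(vol, -10, 0),
]
print([a.shape for a in augmented])         # all (32, 32, 8)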
Example #60
            transition_probs = np.outer(r_next_prior_uncorr(r[1], r_range, sigma_z),
                                        r_next_prior_uncorr(r[0], r_range, sigma_z))
            transition_probs = transition_probs / np.sum(transition_probs)

            # Weight the V at next t by the transition probabilities
            weighted_vals = V_cube[:, :, -(index - 1)] * transition_probs
            # Use mean of above minus opportunity cost in addition to V_d_r to find max V at curr r
            V_exp_cube[i, j, -index] = np.sum(weighted_vals) - (c + rho) * t_w * t
            V_cube[i, j, -index] = np.max((np.sum(weighted_vals) - t * (c + rho) * t_w, V_d_r))
            # Store decision identity in cube
            decision_cube[i, j, -index] = np.argmax((np.sum(weighted_vals) - (c + rho) * t_w * t,
                                                     V_d_r))
            if decision_cube[i, j, -index] == 1:
                decision_cube[i, j, -index] = np.argmax(r) + 1

    rotmat = rotate(decision_cube, -45, axes=(1, 0))  # scipy's rotate takes exactly two axes
    center_slice = rotmat[int(rotmat.shape[0] / 2), :, :]

    # Little movie of decision boundary change
    fig = plt.figure(figsize=(6, 9))
    ax1 = fig.add_subplot(311)
    ax2 = fig.add_subplot(312)
    ax3 = fig.add_subplot(313)

    ax3.pcolor(center_slice)
    ax3.set_xticks([])
    ax3.set_yticks([])
    ax3.set_xlabel('Time')
    ax3.set_ylabel('Estimated Reward Difference')

    def anim_update(i):