Example #1
    def zoom_rot(ii,dd):
        """ Rotate and zoom an image around a given angle"""
        a = np.random.randint(-10,10)
        ddr = rotate(dd,a, order=0, prefilter=False)
        iir = rotate(ii.transpose((1,2,0)),a, order=0, prefilter=False)
        
        f = np.random.randint(10000,15100) / 10000.
        
        h = int(dd.shape[0] / f)
        w = int(dd.shape[1] / f)
        
        s_fh = float(dd.shape[0]) / float(h)
        s_fw = float(dd.shape[1]) / float(w)

        s_f = (s_fh + s_fw) / 2.
        
        offset  = 0
        cy = np.random.randint(offset,dd.shape[0] - h - offset + 1)
        cx = np.random.randint(offset,dd.shape[1] - w - offset + 1)

        ddc = ddr[cy:cy+h, cx:cx+w]
        iic = iir[cy:cy+h,cx:cx+w,:]

        dd_s = zoom(ddc,(s_fh, s_fw),order=0, prefilter=False)
        dd_s /= s_f
        ii_s = iic.transpose((2,0,1))
        
        ii_s = zoom(ii_s,(1,s_fh,s_fw),order=0, prefilter=False)
        
        return ii_s.astype(np.float32), dd_s.astype(np.float32)
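A minimal usage sketch for the function above, assuming numpy plus rotate and zoom from scipy.ndimage are imported as in the example and that zoom_rot is available at module level; the array shapes (a channel-first image paired with a 2-D map) are illustrative assumptions, not part of the original code.

import numpy as np
from scipy.ndimage import rotate, zoom

ii = np.random.rand(3, 240, 320).astype(np.float32)   # channel-first image (C, H, W)
dd = np.random.rand(240, 320).astype(np.float32)      # paired 2-D map (e.g. depth or label)
ii_aug, dd_aug = zoom_rot(ii, dd)                      # randomly rotated and zoomed pair
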
    def setUpClass(cls):
        img = imread(get_path('AS15-M-0298_SML.png'), flatten=True)
        img_coord = (482.09783936, 652.40679932)

        cls.template = sp.clip_roi(img, img_coord, 5)
        cls.template = rotate(cls.template, 90)
        cls.template = imresize(cls.template, 1.)

        cls.search = sp.clip_roi(img, img_coord, 21)
        cls.search = rotate(cls.search, 0)
        cls.search = imresize(cls.search, 1.)

        cls.offset = (1, 1)

        cls.offset_template = sp.clip_roi(img, np.add(img_coord, cls.offset), 5)
        cls.offset_template = rotate(cls.offset_template, 0)
        cls.offset_template = imresize(cls.offset_template, 1.)

        cls.search_center = [math.floor(cls.search.shape[0]/2),
                             math.floor(cls.search.shape[1]/2)]

        cls.upsampling = 10
        cls.alpha = math.pi/2
        cls.cifi_thresh = 90
        cls.rafi_thresh = 90
        cls.tefi_thresh = 100
        cls.use_percentile = True
        cls.radii = list(range(1, 3))

        cls.cifi_number_of_warnings = 2
        cls.rafi_number_of_warnings = 2
Example #3
    def outputShiftedImage(self):
        '''
        Outputs a FITS file in which the slits have been shifted
        to the best fitted positions.
        '''
        outfile1 = 'fittedSlitImage.fits'
        outfile2 = 'fittedSlitImageFullFrame.fits'

        zeros = np.zeros(self.slitImage.shape)

        r = []
        for res in self.result['minimaPosition'].values():
            r.append(res[0])
            x = res[1]
            y = res[2]
            n = res[5]
            d = self.slits[n]['values']
            xmin = self.slits[n]['xminSky'] + x
            xmax = self.slits[n]['xmaxSky'] + x
            ymin = self.slits[n]['yminSky'] + y
            ymax = self.slits[n]['ymaxSky'] + y
            zeros[ymin:ymax + 1, xmin:xmax + 1] = d

        rot = np.median(np.asarray(r))
        #note: -rot, because during the fit the direct image was rotated, not the slits
        img = interpolation.rotate(zeros, -rot, reshape=False)

        if self.debug:
            print('\n{0:.2f} degree rotation to the fits file'.format(-rot))

        #output to a fits file
        hdu = PF.PrimaryHDU(img)
        if os.path.isfile(outfile1):
            os.remove(outfile1)
        hdu.writeto(outfile1)

        #output a second image
        zeros = np.zeros((3096, 3096))
        for slit in self.slits:
            xmin = self.slits[slit]['xminFitted']
            xmax = self.slits[slit]['xmaxFitted']
            ymin = self.slits[slit]['yminFitted']
            ymax = self.slits[slit]['ymaxFitted']

            zeros[ymin - self.slits[slit]['tolerance']:ymax + 1 + self.slits[slit]['tolerance'],\
                  xmin - self.slits[slit]['tolerance']:xmax + 1 + self.slits[slit]['tolerance']] = \
                  self.slits[slit]['valuesLarge']

        #note: -rot, because during the fit the direct image was rotated, not the slits
        img = interpolation.rotate(zeros, -rot, reshape=False)

        #output to a fits file
        hdu = PF.PrimaryHDU(img)
        if os.path.isfile(outfile2):
            os.remove(outfile2)
        hdu.writeto(outfile2)
def rotate_n_crop(ff_bin, ff_path, Flat_frame, Flat_frame_scalar):
    """ Function for rotating the maxframe from bin file and cropping the meteor part based on the FTP_detectinfo detection data.
    """

    if not ff_path[-1] == os.sep:
        ff_path += os.sep

    if not os.path.exists(ff_path):
        print(ff_path + " does not exist!")
        return False

    FTPdetect_file = ""
    for line in os.listdir(ff_path):
        if ("FTPdetectinfo_" in line) and (".txt" in line) and (not "_original" in line):
            FTPdetect_file = line
            break

    ff_bin_path = ff_path+ff_bin

    max_nomean_array = max_nomean(ff_bin_path, Flat_frame, Flat_frame_scalar)
    saveImage(max_nomean_array, ff_bin_path+"_max_nomean.bmp", print_name = False)

    max_bg_mean = int(np.mean(max_nomean_array))
    print(max_bg_mean)

    ###MUST MAKE SOME SORT OF IMAGE MASKING HERE!!!!!!!!!!!!! Problem is when meteor is in the corner, then the lightcurve will be calculated with dark corners during rotation

    coord_list, rot_angle = get_FTPdetect_coordinates(ff_path+FTPdetect_file, ff_bin)

    nrows = len(max_nomean_array)
    ncols = len(max_nomean_array[0])
    crop_array = np.zeros(shape=(nrows, ncols), dtype=int) #Make a temporary 2D array which helps determine the crop coordinates after rotation

    for coord in coord_list:
        x = coord[0]
        y = coord[1]
        crop_array[x][y] = 255

    crop_array = rotate(crop_array, -rot_angle+90, order = 0)

    first_x, first_y, last_x, last_y = find_crop_size(crop_array)

    saveImage(crop_array, 'test_croptest.bmp', print_name = False)

    rotated_img = rotate(max_nomean_array, -rot_angle+90, order = 0)

    rotated_img[rotated_img < 3] = max_bg_mean #Polish out the black edges
    #print rotated_img

    max_nomean_croped = rotated_img[first_y:last_y, first_x:last_x] #Crop out the image array

    saveImage(max_nomean_array, 'test_raw.bmp', print_name = False)
    saveImage(rotated_img, 'test_rotated.bmp', print_name = False)
    saveImage(max_nomean_croped, 'test_meteor_croped.bmp', print_name = False)

    return max_nomean_croped
Example #5
def tiltaxisalign(im_series,tilt_angles,shift_and_tilt=('hold','hold')):
    series_shape = np.shape(im_series)
    
    new_series = im_series.copy()
    final_series = im_series.copy()
    
    #deg0_int = input('Which image is the 0 degree image? ')
    midy = int(series_shape[1]/2)
    
    axis_shift = shift_and_tilt[0]
    axis_tilt = shift_and_tilt[1]
    
    if axis_shift == 'hold':
        shift_continue = 1
    
        while shift_continue == 1:
            plt.imshow(iradon(np.rot90(new_series[:,midy,:]),  # np.rot90 rotates anti-clockwise
            theta = tilt_angles,output_size = series_shape[2]))
            plt.show()
            
            axis_shift = float(input('By how many pixels from the original mid-point should the tilt axis be shifted? '))
            
            for i in range(series_shape[0]):
                new_series[i,:,:] = interpolation.shift(im_series.copy()[i,:,:],(0,axis_shift)) # shift along np x-axis
                
            shift_continue = int(input('Would you like to apply further image shifts (1 for yes, 0 for no)? '))
                
    for i in range(series_shape[0]):
        final_series[i,:,:] = interpolation.shift(final_series[i,:,:],(0,axis_shift))
        
    topy = int(3*series_shape[1]/8)
    bottomy = int(5*series_shape[1]/8)

    if axis_tilt == 'hold':
        tilt_series = final_series.copy()
        tilt_continue = 1
        while tilt_continue == 1:
            plt.imshow(iradon(np.rot90(new_series[:,topy,:]), theta = tilt_angles,output_size = series_shape[2]))
            plt.show()
            plt.imshow(iradon(np.rot90(new_series[:,bottomy,:]), theta = tilt_angles,output_size = series_shape[2]))
            plt.show()
            
            axis_tilt = float(input('By what angle from the original y axis (in degrees) should the tilt axis be rotated? '))
            
            for i in range(series_shape[0]):
                new_series[i,:,:] = interpolation.rotate(tilt_series.copy()[i,:,:],axis_tilt,reshape=False)
                    
            tilt_continue = int(input('Would you like to try another tilt angle (1 for yes, 0 for no)? '))
                    
    for i in range(series_shape[0]):
        final_series[i,:,:] = interpolation.rotate(final_series[i,:,:],axis_tilt,reshape=False)
            
    shift_and_tilt = (axis_shift,axis_tilt)
            
    return(final_series, shift_and_tilt)
Example #6
  def scatter(self,move_pix=0,scale=1):
    '''
    Generate the scattered image which is stored in the ``iss`` member.

    :param move_pix: (optional) int 
      Number of pixels to roll the screen (for time evolution).
    :param scale: (optional) scalar
      Scale factor for gradient.  To simulate the scattering effect at another 
      wavelength this is (lambda_new/lambda_old)**2
    '''

    M = self.model.shape[-1]       # size of original image array
    N = self.nx                    # size of output image array

    #if not self.live_dangerously: self._checkSanity()

    # calculate phase gradient
    dphi_x,dphi_y = self._calculate_dphi(move_pix=move_pix)

    if scale != 1:
        dphi_x *= scale/sqrt(2.)
        dphi_y *= scale/sqrt(2.)

    xx_,yy = np.meshgrid((np.arange(N) - 0.5*(N-1)),\
                         (np.arange(N) - 0.5*(N-1)),indexing='xy')

    # check whether we care about PA of scattering kernel
    if self.pa is not None:
      f_model = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                    self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                    self.model)

      # apply rotation
      theta = -(90 * pi / 180) + np.radians(self.pa)     # rotate CW 90 deg, then CCW by PA
      xx_ += dphi_x
      yy  += dphi_y
      xx = cos(theta)*xx_ - sin(theta)*yy
      yy = sin(theta)*xx_ + cos(theta)*yy
      self.iss  = f_model.ev(yy.flatten(),xx.flatten()).reshape((self.nx,self.nx))

      # rotate back and clip for positive values for I
      if self.think_positive:
          self.iss  = clip(rotate(self.iss,-1*theta/np.pi*180,reshape=False),a_min=0,a_max=1e30) * (self.dx/self.model_dx)**2
      else:
          self.iss  = rotate(self.iss,-1*theta/np.pi*180,reshape=False) * (self.dx/self.model_dx)**2

    # otherwise do a faster lookup rather than the expensive interpolation.
    else:
      yyi = np.rint((yy+dphi_y+self.nx/2)).astype(int) % self.nx
      xxi = np.rint((xx_+dphi_x+self.nx/2)).astype(int) % self.nx
      if self.think_positive:
        self.iss = clip(self.isrc[yyi,xxi],a_min=0,a_max=1e30)
      else:
        self.iss = self.isrc[yyi,xxi]
Example #7
 def computeOne(self, n):
     for v in range(self._projections.views):
         currentSm = self._systemMatrix.data[v]
         angle = v*180/float(self._projections.views)
         rotatedEstimate = interpolation.rotate(self._estimate, -angle, reshape=False)
         backprojection = np.sum(currentSm * rotatedEstimate, axis=0)
         normalization = np.sum(currentSm * currentSm, axis=0)
         normalization[normalization == 0] = 1.0 #avoid division by zero 
         update = currentSm * (self._projections.data[v] - backprojection) / normalization 
         self._estimate += interpolation.rotate(update, angle, reshape=False)
     self._result.append(self._estimate.copy())
def rotation_generator(generator, angle_range=(-180, 180)):
    '''
    yields rotated data and seg (rotated around center with a uniformly distributed angle between angle_range[0] and angle_range[1])
    '''
    for data, seg, labels in generator:
        seg_min = np.min(seg)
        seg_max = np.max(seg)
        for sample_id in range(data.shape[0]):
            angle = np.random.uniform(angle_range[0], angle_range[1])
            data[sample_id] = interpolation.rotate(data[sample_id], angle, (1, 2), reshape=False, mode='nearest', order=3)
            seg[sample_id] = np.round(interpolation.rotate(seg[sample_id], angle, (1, 2), reshape=False)).astype(np.int32)
        seg[seg > seg_max] = seg_max
        seg[seg < seg_min] = seg_min
        yield data, seg, labels
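A hedged usage sketch for rotation_generator, assuming its own numpy/scipy imports are in place; toy_batches and the array shapes below are hypothetical stand-ins for a real data loader.

import numpy as np

def toy_batches(n_batches=2, batch_size=4):
    # hypothetical source generator yielding (data, seg, labels) batches
    for _ in range(n_batches):
        data = np.random.rand(batch_size, 1, 64, 64).astype(np.float32)
        seg = np.random.randint(0, 3, size=(batch_size, 1, 64, 64))
        labels = np.random.randint(0, 2, size=batch_size)
        yield data, seg, labels

for data, seg, labels in rotation_generator(toy_batches(), angle_range=(-30, 30)):
    print(data.shape, int(seg.min()), int(seg.max()))
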
def slitFoV(c, l, w, pa, instrument='vimos', os=20):
	if instrument=='vimos':
		frac = np.zeros((40,40))
	elif instrument=='muse':
		frac = np.zeros((150,150))
	# pa = np.radians(pa)

	s = frac.shape

	# sampler = np.array(np.meshgrid(np.arange(s[0]*100), np.arange(s[1]*100))
	# 	).T/100.

	sampler = np.zeros((s[0]*os,s[1]*os))
	sampler[int(np.round(c[0] - w/2))*os: int(np.round(c[0] + w/2))*os, 
		int(np.round(c[1] - l/2))*os: int(np.round(c[1] + l/2))*os] = 1

	sampler = rotate(sampler, pa, reshape=False)

	i = np.arange(s[0])
	j = np.arange(s[1])
	i, j = np.meshgrid(i,j)
	i, j = i.flatten(), j.flatten()
	frac[j, i] = rebin(sampler, i*os+os/2, j*os+os/2, statistic='mean')
	
	return frac
Example #10
def _augment_data(data):
    image, rotations = data
    augmented_images = [image]  # Add the original image to the list
    # Rotate and pad with 'nearest' pixels
    augmented_images += [rotate(image.T, r, reshape=False, mode='nearest').T for r in rotations]
    augmented_images = np.array(augmented_images)
    return augmented_images
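A quick illustrative call, assuming rotate comes from scipy.ndimage as in the surrounding examples; the image size and rotation list are arbitrary.

import numpy as np
from scipy.ndimage import rotate

image = np.random.rand(32, 32)
augmented = _augment_data((image, [90, 180, 270]))   # original image plus three rotated copies
print(augmented.shape)                               # (4, 32, 32)
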
Example #11
def Reconstruct(data, geom_params,size):
        reconstructed = np.zeros((size,size))
        
        for params in geom_params.values():
                min_fs, max_fs, min_ss, max_ss = [int(p) for p in params[0:4]]
                x = float(params[4].split('x')[0])
                y = float(params[4].split('y')[0].split('x')[1])
                data_tile = data[min_ss:max_ss+1,min_fs:max_fs+1][::-1,:]
                delta_x = float(params[-2])
                delta_y = float(params[-1])
                if x > 0 and y > 0:
                    alpha = np.arctan(y/x) * 180. / np.pi
                if x > 0 and y <  0:
                    alpha =  np.arctan(y/x) * 180. / np.pi
                if x < 0 and y >  0:
                    if np.abs(x) > 0.5:alpha = 180 - np.arctan(y/x) * 180. / np.pi
                    else: alpha = 180 + np.arctan(y/x) * 180. / np.pi
                if x < 0 and  y <  0:
                    alpha = 180 - np.arctan(y/np.abs(x)) * 180. / np.pi

                rot = rotate(data_tile, alpha)

                if int(alpha) in range(80,100):
                    reconstructed[(size / 2)-delta_y-rot.shape[0]:(size / 2)-delta_y,(size / 2)+delta_x-rot.shape[1]:(size / 2)+delta_x] = rot

                elif int(alpha) in range(175,185):
                    reconstructed[(size / 2)-delta_y:(size / 2)-delta_y+rot.shape[0],(size / 2)+delta_x-rot.shape[1]:(size / 2)+delta_x] = rot

                elif int(alpha) in range(-95,-85) or int(alpha) in range(265,275):
                    reconstructed[(size / 2)-delta_y:(size / 2)-delta_y+rot.shape[0],(size / 2)+delta_x:(size / 2)+delta_x+rot.shape[1]] = rot

                else:
                    reconstructed[(size / 2)-delta_y-rot.shape[0]:(size / 2)-delta_y,(size / 2)+delta_x:(size / 2)+delta_x+rot.shape[1]] = rot
        return reconstructed
Example #12
def cellGridnessScore(rateMap, arenaDiam, h, corr_cutRmin):
    '''
    Compute a cell gridness score by taking the autocorrelation of the
    firing rate map, rotating it, and correlating each rotated map with the
    original. The gridness score is the minimum correlation at 60 and 120 deg.
    minus the maximum correlation at 30, 90 and 150 deg.

    The center of the auto correlation map (given by corr_cutRmin) is removed
    from the map
    '''
    rateMap_mean = rateMap - np.mean(np.reshape(rateMap, (1, rateMap.size)))
    autoCorr, autoC_xedges, autoC_yedges = SNAutoCorr(rateMap_mean, arenaDiam, h)
    
    # Remove the center point and
    X, Y = np.meshgrid(autoC_xedges, autoC_yedges)
    autoCorr[np.sqrt(X**2 + Y**2) < corr_cutRmin] = 0
    
    da = 3
    angles = list(range(0, 180+da, da))
    crossCorr = []
    # Rotate and compute correlation coefficient
    for angle in angles:
        autoCorrRot = rotate(autoCorr, angle, reshape=False)
        C = np.corrcoef(np.reshape(autoCorr, (1, autoCorr.size)),
            np.reshape(autoCorrRot, (1, autoCorrRot.size)))
        crossCorr.append(C[0, 1])

    max_angles_i = np.array([30, 90, 150]) // da
    min_angles_i = np.array([60, 120]) // da

    maxima = np.max(np.array(crossCorr)[max_angles_i])
    minima = np.min(np.array(crossCorr)[min_angles_i])
    G = minima - maxima

    return G, np.array(crossCorr), angles
Example #13
 def drawRobot(self, mapObject, pos, val):
   robotMat = rotate(self.robotSprite, pos[2]+180)
   hgt = (robotMat.shape[0]-1)//2 # indices of center of robot
   wid = (robotMat.shape[1]-1)//2
   x = slice(pos[0]-wid, pos[0]+wid+1, 1) # columns
   y = slice(pos[1]-hgt, pos[1]+hgt+1, 1) # rows
   mapObject[y,x][robotMat.astype(bool)] = val
Example #14
def rotate(parangle,hdu):
    """Rotates fits file to match the rotation of the data cube.

    Parameters
    ----------
    parangle : float
        Parallactic angle of the cube.
    hdu : fits file

    Returns
    -------
    hdu : fits file
        Rotated fits file.
    """
    mat = [[hdu[0].header['CD1_1'],hdu[0].header['CD1_2']],[hdu[0].header['CD2_1'],hdu[0].header['CD2_2']]]
    CDELT1 = pow(abs(np.linalg.det(mat)),0.5) #Coordinate increment per pixel in DEGREES/PIXEL
    sdssangle = np.arcsin(hdu[0].header['CD1_1']/CDELT1)*180./np.pi

    hdu[0].data = interpolation.rotate(hdu[0].data,(parangle-sdssangle))
    hdu[0].header['CD1_1']= np.sin(parangle*np.pi/180.)*CDELT1
    hdu[0].header['CD1_2']= np.cos(parangle*np.pi/180.)*CDELT1
    hdu[0].header['CD2_1']= np.cos(parangle*np.pi/180.)*CDELT1
    hdu[0].header['CD2_2']= -np.sin(parangle*np.pi/180.)*CDELT1
    hdu[0].header['CRPIX1']= hdu[0].data.shape[1]/2+0.5
    hdu[0].header['CRPIX2']= hdu[0].data.shape[0]/2+0.5
    hdu.writeto('imaging/LsdssDR12g.fits', overwrite=True)

    return hdu
def clip(ar, points, clipSize, if_Label):
    '''
    Clips the input array to the given points. Because of how the images are saved, a black border is created, which can be removed by the function crop_around_center.
    '''
      
    img = Image.new('F', (ar.shape[1], ar.shape[0]), 0)
    #ImageDraw.Draw(img).polygon([X1, Y1, X2, Y2, X3, Y3, X4, Y4], outline=1, fill=1)
    ImageDraw.Draw(img).polygon(points[0], outline=1, fill=1)
    mask = np.array(img)
    #create new image
    newImArray = np.empty(ar.shape, dtype=np.float32)
    #newImArray = ar * mask[:,:, np.newaxis]
    if len(ar.shape) == 3:
        newImArray = ar * mask[:,:,np.newaxis]
    else:
        newImArray = ar * mask[:,:]
    croped_image = crop(newImArray, points[0], ar.shape)
    newImArray = rotate(croped_image, points[1][0],\
                 reshape=False, order=0, mode='constant', cval=0.0)
    if if_Label:
        final = crop_around_center(newImArray, clipSize, points[1][0])
    else:
        final = crop_around_center(newImArray, clipSize, points[1][0])
    #counter for fileIndex
    #save_2_HDF5(inputFile, numberOfFiles, prefix, outputDir, channel, width, height, batch):
    return (final)
Example #16
    def rotate(self,angle=0,transpose=False):
        print('Rotating data by %f degrees...' % angle)
        self.rotated = rotate(self.data,angle)
        if transpose:
            self.rotated = self.rotated.transpose()

        return self.rotated
Example #17
def apply_rotation(hdulist_or_filename=None, rotate_value=None, crop=True):
    """
    Apply the detector's rotation to the PSF. This is for NIRCam, NIRISS, and FGS.
    MIRI and NIRSpec's large rotation is already added inside WebbPSF's calculations.

    Parameters
    ----------
    hdulist_or_filename :
        A PSF from WebbPSF, either as an HDUlist object or as a filename
    rotate_value : float
        Rotation in degrees that PSF needs to be. If set to None, function
        will pull the most up to date SIAF value. Default = None.
    crop : bool
        True or False to crop the PSF so it matches the size of the input
        PSF (e.g. so they could be more easily compared).

    Returns
    -------
    psf : HDUlist object
        PSF with rotation applied from SIAF values
    """

    # Read in input PSF
    if isinstance(hdulist_or_filename, str):
        hdu_list = fits.open(hdulist_or_filename)
    elif isinstance(hdulist_or_filename, fits.HDUList):
        hdu_list = hdulist_or_filename
    else:
        raise ValueError("input must be a filename or HDUlist")

    # Create a copy of the PSF
    psf = copy.deepcopy(hdu_list)

    # Log instrument and detector names
    instrument = hdu_list[0].header["INSTRUME"].upper()
    aper_name = hdu_list[0].header["APERNAME"].upper()

    if instrument in ["MIRI", "NIRSPEC"]:
        raise ValueError("{}'s rotation is already included in WebbPSF and "
                         "shouldn't be added again.".format(instrument))

    # Set rotation value if not already set by a keyword argument
    if rotate_value is None:
        aper = _get_default_siaf(instrument, aper_name)
        rotate_value = getattr(aper, "V3IdlYAngle")  # the angle to rotate the PSF in degrees

    # If crop = True, then reshape must be False - so invert this keyword
    reshape = np.invert(crop)

    ext = 1  # edit the oversampled PSF (OVERDIST extension)

    psf_new = rotate(psf[ext].data, rotate_value, reshape=reshape)

    # Apply data to correct extensions
    psf[ext].data = psf_new

    # Set new header keyword
    psf[ext].header["ROTATION"] = (rotate_value, "PSF rotated to match detector rotation")

    return psf
Example #18
def getGeomTransformations(geom_params):
    #reconstructed = np.zeros((size, size))
    size = 1800
    reconstructed = {}
    min_fs, max_fs, min_ss, max_ss = [int(p) for p in geom_params[list(geom_params.keys())[0]][0:4]]
    data = np.empty((max_ss - min_ss + 1, max_fs - min_fs + 1))

    for key, params in geom_params.items():

        #min_fs, max_fs, min_ss, max_ss = [int(p) for p in params[0:4]]
        #print max_fs - min_fs + 1
        #print max_ss - min_ss + 1
        x = float(params[4].split('x')[0])
        y = float(params[4].split('y')[0].split('x')[1])
        #data_tile = data[ min_ss:max_ss + 1, min_fs:max_fs + 1][::-1,:]  # , indices[1,::][min_ss:max_ss+1,min_fs:max_fs+1][::-1,:]
        delta_x = float(params[-2])
        delta_y = float(params[-1])
        if x > 0 and y > 0:
            alpha = np.arctan(y / x) * 180. / np.pi
        if x > 0 and y < 0:
            alpha = np.arctan(y / x) * 180. / np.pi
        if x < 0 and y > 0:
            if np.abs(x) > 0.5:
                alpha = 180 - np.arctan(y / x) * 180. / np.pi
            else:
                alpha = 180 + np.arctan(y / x) * 180. / np.pi
        if x < 0 and y < 0:
            alpha = 180 - np.arctan(y / np.abs(x)) * 180. / np.pi

        rot = rotate(data, alpha)

        if int(alpha) in range(80, 100):
            xmin = int(round((size / 2) - delta_y - rot.shape[0]))
            xmax = int(round((size / 2) - delta_y))
            ymin = int(round((size / 2) + delta_x - rot.shape[1]))
            ymax = int(round((size / 2) + delta_x))

        elif int(alpha) in range(175, 185):
            xmin = int(round((size / 2) - delta_y))
            xmax = int(round((size / 2) - delta_y + rot.shape[0]))
            ymin = int(round((size / 2) + delta_x - rot.shape[1]))
            ymax = int(round((size / 2) + delta_x))

        elif int(alpha) in range(-95, -85) or int(alpha) in range(265, 275):
            xmin = int(round((size / 2) - delta_y))
            xmax = int(round((size / 2) - delta_y + rot.shape[0]))
            ymin = int(round((size / 2) + delta_x))
            ymax = int(round((size / 2) + delta_x + rot.shape[1]))

        else:

            xmin = int(round((size / 2) - delta_y - rot.shape[0]))
            xmax = int(round((size / 2) - delta_y))
            ymin = int(round((size / 2) + delta_x))
            ymax = int(round((size / 2) + delta_x + rot.shape[1]))

        reconstructed[key] = (alpha, xmin, xmax, ymin, ymax, delta_x, delta_y)

    return reconstructed
Example #19
def evaluate_rotate(rotations=[-5.0, -2.5, -1.0, 1, 2.5, 5.0], index=4, output_file='rotations.pdf'):
    from scipy.ndimage import interpolation
    import algorithms

    original = face(index)
    other = face(1)
    faces = []

    for rotation in rotations:
        f = face(index)

        f.abs_file.data['X'] = interpolation.rotate(f.abs_file.data['X'], rotation, mode='nearest', prefilter=False, reshape=False)
        f.abs_file.data['Y'] = interpolation.rotate(f.abs_file.data['Y'], rotation, mode='nearest', prefilter=False, reshape=False)
        f.abs_file.data['Z'] = interpolation.rotate(f.abs_file.data['Z'], rotation, mode='nearest', prefilter=False, reshape=False)

        algorithms.features_histogram(f)
        faces.append(f)

    pyplot.figure()

    subplot = pyplot.subplot(1, 2+len(rotations), 1)

    subplot.imshow(original.Z)
    subplot.title.set_text("Original")
    subplot.title.set_fontsize(10)
    subplot.xaxis.set_visible(False)
    subplot.yaxis.set_visible(False)

    for rotation, f, i in zip(rotations, faces, range(len(rotations))):
        subplot = pyplot.subplot(1, 2+len(rotations), 2 + i)
        subplot.imshow(f.Z)
        subplot.title.set_text("%.1f deg" % rotation)
        subplot.title.set_fontsize(10)
        subplot.xaxis.set_visible(False)
        subplot.yaxis.set_visible(False)

    subplot = pyplot.subplot(1, 2+len(rotations), len(rotations) + 2)
    subplot.imshow(other.Z)
    subplot.title.set_text("Other")
    subplot.title.set_fontsize(10)
    subplot.xaxis.set_visible(False)
    subplot.yaxis.set_visible(False)

    pyplot.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches="tight")

    return algorithms.similarity_matrix([original] + faces + [other], methods=[algorithms.distance_histogram_euclidean, algorithms.distance_histogram_city_block, algorithms.distance_histogram_correlation], normalizers=[False, False, False])
Example #20
def estimate_skew_angle(image, angles):
    estimates = []
    for a in angles:
        v = mean(interpolation.rotate(image, a, order=0, mode="constant"), axis=1)
        v = var(v)
        estimates.append((v, a))
    _, a = max(estimates)
    return a
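A small deskewing sketch built around the function above, assuming mean and var are the numpy functions the example uses unqualified and that interpolation is scipy.ndimage.interpolation; the page array is a random stand-in for a scanned page.

import numpy as np
from numpy import mean, var
from scipy.ndimage import interpolation

page = np.random.rand(200, 300)                               # stand-in for a page image
angle = estimate_skew_angle(page, np.linspace(-5, 5, 21))     # best candidate angle
deskewed = interpolation.rotate(page, angle, mode='constant', reshape=False)
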
Example #21
    def rotate_image(image, angle):
        """

        :param image:
        :param angle:
        :return:
        """
        return rotate(image, angle=angle, reshape=False)
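Usage is straightforward; a sketch assuming rotate is scipy.ndimage.rotate as elsewhere on this page and that the method is callable as a plain function.

import numpy as np
from scipy.ndimage import rotate

img = np.random.rand(50, 50)
tilted = rotate_image(img, 15)    # same shape as the input because reshape=False
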
def rotate_col(b_t, pre_theta, theta_t, n_theta, patch_size):
    m, sparsity = b_t.shape
    
    b_t_new = np.zeros_like(b_t)
    for j in range(sparsity):
         b_t_new[:, j] = rotate(b_t[:,j].reshape(patch_size), (theta_t[j]-pre_theta[j])*360./n_theta, 
                        axes=(1, 0), reshape=False, order=3, mode='nearest').flatten()
    return b_t_new
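A hedged example of rotating dictionary atoms with rotate_col, assuming scipy.ndimage.rotate is imported as in the snippet; the patch size, sparsity and angle bins are made-up values.

import numpy as np
from scipy.ndimage import rotate

patch_size = (8, 8)
b_t = np.random.rand(64, 5)              # 5 vectorised 8x8 patches as columns
pre_theta = np.zeros(5, dtype=int)       # previous angle bins
theta_t = np.array([0, 4, 8, 12, 16])    # new angle bins out of n_theta
b_rot = rotate_col(b_t, pre_theta, theta_t, n_theta=32, patch_size=patch_size)
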
Example #23
def plot_examples(X, y, y_angles, y_predicted_angles,
                  num_test_images=5, number=None, angle=None, only_errors=False):

    if only_errors:
        mask = np.where(y_predicted_angles != y_angles)[0]
        X = X[mask]
        y = y[mask]
        y_predicted_angles = y_predicted_angles[mask]
        y_angles = y_angles[mask]

    if angle is not None and number is not None:
        indices = np.intersect1d(np.where(y_angles == angle)[0], np.where(y == number)[0])
    elif angle is not None:
        indices = np.where(y_angles == angle)[0]
    elif number is not None:
        indices = np.where(y == number)[0]
    else:
        indices = len(y_angles)

    mask = np.random.choice(indices, num_test_images)
    true_angles = y_angles[mask]
    predicted_angles = y_predicted_angles[mask]

    plt.rcParams['figure.figsize'] = (10.0, 2 * num_test_images)
    fig_number = 0
    for i in range(num_test_images):
        rotated_image = X[mask[i]][0]
        original_image = rotate(rotated_image, -true_angles[i])
        corrected_image = rotate(rotated_image, -predicted_angles[i])

        fig_number += 1
        plt.subplot(num_test_images, 3, fig_number)
        plt.imshow(original_image)

        fig_number += 1
        plt.subplot(num_test_images, 3, fig_number)
        plt.title('Angle: {0}'.format(true_angles[i]))
        plt.imshow(rotated_image)

        fig_number += 1
        plt.subplot(num_test_images, 3, fig_number)
        reconstructed_angle = angle_difference(predicted_angles[i], true_angles[i])
        plt.title('Angle: {0}'.format(reconstructed_angle))
        plt.imshow(corrected_image)

    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
Example #24
def rotate90():
	import matplotlib.pyplot as plt
	from scipy.ndimage.interpolation import rotate
	import scipy.misc
	
	image = io.imread(out_path + 'bibme0_contours.png')
	rotated_image = rotate(image, -90, reshape=True)
	toprint(rotated_image,"bibme0_contours.png")
Example #25
  def drawInset(self):
    x, y = self.robot_pix[0:2] # indices of center of robot in main map
    raw = int(self.insetSize_pix*0.75) # half the size of the main map segment to capture
    rad = self.insetSize_pix//2 # half the size of the final segment

    mapChunk = self.mapMatrix[y-raw:y+raw, x-raw:x+raw]
    s = slice(raw-rad, raw+rad, 1) # region of rotated chunk that we want
    self.insetMatrix = rotate(mapChunk, self.robot_rel[2], output=np.uint8, order=1, reshape=False)[s,s]
Example #26
def detect_skew(img, min_angle=-20, max_angle=20, quality='low'):
    img = sp.atleast_2d(img)
    rows, cols = img.shape
    min_min_angle = min_angle
    max_max_angle = max_angle

    if quality == 'low':
        resolution = sp.arctan2(2.0, cols) * 180.0 / sp.pi
        min_target_size = 100
        resize_order = 1
    elif quality == 'high':
        resolution = sp.arctan2(1.0, cols) * 180.0 / sp.pi
        min_target_size = 300
        resize_order = 3
    else:
        resolution = sp.arctan2(1.0, cols) * 180.0 / sp.pi
        min_target_size = 200
        resize_order = 2

    # resize the image so it's faster to work with
    min_size = min(rows, cols)
    target_size = min_target_size if min_size > min_target_size else min_size
    resize_ratio = float(target_size) / min_size
    img = imresize(img, resize_ratio)
    rows, cols = img.shape

    # pad the image and invert the colors
    img *= -1
    img += 255
    padded_img = sp.zeros((rows*2, cols*2))
    padded_img[rows//2:rows//2+rows, cols//2:cols//2+cols] = img
    img = padded_img

    # keep dividing the interval in half to achieve O(log(n))
    while True:
        current_resolution = (max_angle - min_angle) / 30.0
        best_angle = None
        best_variance = 0.0

        # rotate the image, sum the pixel values in each row for each rotation
        # then find the variance of all the sums, pick the highest variance
        for i in range(31):
            angle = min_angle + i * current_resolution
            rotated_img = rotate(img, angle, reshape=False, order=resize_order)
            num_black_pixels = sp.sum(rotated_img, axis=1)
            variance = sp.var(num_black_pixels)
            if variance > best_variance:
                best_angle = angle
                best_variance = variance

        if current_resolution < resolution:
            break

        # update the angle range
        min_angle = max(best_angle - current_resolution, min_min_angle)
        max_angle = min(best_angle + current_resolution, max_max_angle)

    return best_angle
Example #27
    def makeIMat(self, callback=None):
        '''
        Makes DM Interation Matrix

        Initially, the DM influence functions are created using the method
        ``makeIMatShapes'', then if a rotation is specified these are rotated.
        Each of the influence functions is passed to the specified ``WFS'' and
        wfs measurements recorded.

        Parameters:
            callback (function): Function to be called on each WFS run

        Returns:
            ndarray: 2-dimensional interaction matrix
        '''
        logger.info("Making DM Influence Functions...")
        self.makeIMatShapes()

        # Imat value is in microns
        # self.iMatShapes *= (self.dmConfig.iMatValue*1e-6)

        if self.dmConfig.rotation:
           self.iMatShapes = rotate(
                   self.iMatShapes, self.dmConfig.rotation,
                   order=self.dmConfig.interpOrder, axes=(-2,-1)
                   )
           rotShape = self.iMatShapes.shape
           self.iMatShapes = self.iMatShapes[:,
                   int(rotShape[1]/2. - self.simConfig.simSize/2.):
                   int(rotShape[1]/2. + self.simConfig.simSize/2.),
                   int(rotShape[2]/2. - self.simConfig.simSize/2.):
                   int(rotShape[2]/2. + self.simConfig.simSize/2.)
                   ]

        iMat = numpy.zeros(
                (self.iMatShapes.shape[0], self.totalWfsMeasurements) )
        for i in range(self.iMatShapes.shape[0]):
            subap=0
            for nWfs in range(len(self.wfss)):

                logger.debug("subap: {}".format(subap))
                iMat[i, subap: subap + (2*self.wfss[nWfs].activeSubaps)] = (
                       self.wfss[nWfs].frame(
                                self.iMatShapes[i], iMatFrame=True
                       ))

                self.dmShape = self.iMatShapes[i]

                if callback is not None:
                    callback()

                logger.statusMessage(i, self.iMatShapes.shape[0],
                        "Generating {} Actuator DM iMat".format(self.acts))

                subap += 2*self.wfss[nWfs].activeSubaps

        self.iMat = iMat
        return iMat
Example #28
 def __init__(self, filename, scale, flip_lr=False, rot_angle=None):
     self.fn=filename[:8]
     d=plt.imread(filename)
     if flip_lr is True:
         d=np.fliplr(d)
     if rot_angle is not None:
         d=rotate(d, rot_angle)
     self.data=d
     self.scale=scale
Example #29
 def __init__(self, flip_lr, rot_angle, multiply_by, scale):
     if flip_lr is True:
         self.d=np.fliplr(self.d)
     if rot_angle is not None:
         self.d=rotate(self.d, rot_angle)
     self.rot_angle=rot_angle
     self.data=self.d*multiply_by
     self.scale=scale
     self.s_name=os.path.basename(self.fn)[:8]
Example #30
        def find_scores(arr, angles):
            scores = np.zeros_like(angles)

            for i, a in enumerate(angles):
                data = inter.rotate(arr, a, reshape=0, order=0)
                hist = np.sum(data, axis=1)
                scores[i] = np.sum((hist[1:] - hist[:-1]) ** 2)

            return scores
    def process(self):
        for (n, input_file) in enumerate(self.input_files):
            pcgts = page_from_file(self.workspace.download_file(input_file))
            page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID
            page = pcgts.get_Page()

            # why does it save the image ??
            page_image, page_xywh, _ = self.workspace.image_from_page(
                page, page_id)

            if self.parameter['parallel'] < 2:
                LOG.info("INPUT FILE %s ", input_file.pageId or input_file.ID)
            raw = ocrolib.read_image_gray(page_image.filename)

            flat = raw
            #flat = np.array(binImg)
            # estimate skew angle and rotate
            if self.parameter['maxskew'] > 0:
                if self.parameter['parallel'] < 2:
                    LOG.info("Estimating Skew Angle")
                d0, d1 = flat.shape
                o0, o1 = int(self.parameter['bignore'] * d0), int(
                    self.parameter['bignore'] * d1)
                flat = amax(flat) - flat
                flat -= amin(flat)
                est = flat[o0:d0 - o0, o1:d1 - o1]
                ma = self.parameter['maxskew']
                ms = int(2 * self.parameter['maxskew'] *
                         self.parameter['skewsteps'])
                angle = self.estimate_skew_angle(est,
                                                 linspace(-ma, ma, ms + 1))
                flat = interpolation.rotate(flat,
                                            angle,
                                            mode='constant',
                                            reshape=0)
                flat = amax(flat) - flat
            else:
                angle = 0

            # self.write_angles_to_pageXML(base,angle)
            # estimate low and high thresholds
            if self.parameter['parallel'] < 2:
                LOG.info("Estimating Thresholds")
            d0, d1 = flat.shape
            o0, o1 = int(self.parameter['bignore'] * d0), int(
                self.parameter['bignore'] * d1)
            est = flat[o0:d0 - o0, o1:d1 - o1]
            if self.parameter['escale'] > 0:
                # by default, we use only regions that contain
                # significant variance; this makes the percentile
                # based low and high estimates more reliable
                e = self.parameter['escale']
                v = est - filters.gaussian_filter(est, e * 20.0)
                v = filters.gaussian_filter(v**2, e * 20.0)**0.5
                v = (v > 0.3 * amax(v))
                v = morphology.binary_dilation(v,
                                               structure=ones(
                                                   (int(e * 50), 1)))
                v = morphology.binary_dilation(v,
                                               structure=ones(
                                                   (1, int(e * 50))))
                if self.parameter['debug'] > 0:
                    imshow(v)
                    ginput(1, self.parameter['debug'])
                est = est[v]
            lo = stats.scoreatpercentile(est.ravel(), self.parameter['lo'])
            hi = stats.scoreatpercentile(est.ravel(), self.parameter['hi'])
            # rescale the image to get the gray scale image
            if self.parameter['parallel'] < 2:
                LOG.info("Rescaling")
            flat -= lo
            flat /= (hi - lo)
            flat = clip(flat, 0, 1)
            if self.parameter['debug'] > 0:
                imshow(flat, vmin=0, vmax=1)
                ginput(1, self.parameter['debug'])
            deskewed = 1 * (flat > self.parameter['threshold'])

            # output the normalized grayscale and the thresholded images
            LOG.info("%s lo-hi (%.2f %.2f) angle %4.1f" %
                     (pcgts.get_Page().imageFilename, lo, hi, angle))
            if self.parameter['parallel'] < 2:
                LOG.info("Writing")
            #ocrolib.write_image_binary(base+".ds.png", deskewed)

            #TODO: Need some clarification as the results affect the following pre-processing steps.
            #orientation = -angle
            #orientation = 180 - ((180 - orientation) % 360)
            pcgts.get_Page().set_orientation(angle)
            #print(orientation, angle)

            file_id = input_file.ID.replace(self.input_file_grp,
                                            self.output_file_grp)
            if file_id == input_file.ID:
                file_id = concat_padded(self.output_file_grp, n)

            self.workspace.add_file(ID=file_id,
                                    file_grp=self.output_file_grp,
                                    pageId=input_file.pageId,
                                    mimetype=MIMETYPE_PAGE,
                                    local_filename=os.path.join(
                                        self.output_file_grp,
                                        file_id + '.xml'),
                                    content=to_xml(pcgts).encode('utf-8'))
Example #32
def rotate_image(image):
    image=image.reshape((28, 28))
    rotated_image=rotate(image, angle=180, cval=0, mode="constant")
    
    return rotated_image.reshape((1, 28, 28, 1))
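A short illustrative call, assuming rotate is scipy.ndimage.rotate; the input is a random stand-in for a flattened 28x28 MNIST digit.

import numpy as np
from scipy.ndimage import rotate

digit = np.random.rand(784).astype(np.float32)   # flattened 28x28 image
flipped = rotate_image(digit)
print(flipped.shape)                             # (1, 28, 28, 1)
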
Example #33
def level_leaf(array):
    """
    Takes the extrema of the leaf, finds the hypotenuse between
    them, finds the angle between the base and the hypotenuse,
    rotates the image by that angle, and returns the levelled
    leaf image.
    """

    edges_old = canny(array, 2.5)

    thresh = threshold_otsu(array)

    # binary = -(array > thresh)
    binary = array <= thresh
    binary = binary_erosion(
        binary_closing(binary_dilation(binary_dilation(binary))))

    edges = binary

    left_extrema = []  # stem
    right_extrema = []  # tip of leaf

    # Create an array of index values
    array_length = len(edges[0])
    left_to_right = range(0, array_length)
    for i in left_to_right:
        column = edges[:, i]
        # xvalue = np.argmax(self.edges[:, i])
        if column.any():
            left_extrema = [i, np.argmax(column)]
            break
    right_to_left = reversed(left_to_right)
    for i in right_to_left:
        column = edges[:, i]
        if column.any():
            right_extrema = [i, np.argmax(column)]
            break

    endpoints = [left_extrema, right_extrema]

    left_endpoint, right_endpoint = endpoints

    # find the distance (length) between the two points (hypotenuse)
    diff_x = right_endpoint[0] - left_endpoint[0]
    diff_y = right_endpoint[1] - left_endpoint[1]
    hypot = sqrt((diff_x)**2 + (diff_y)**2)

    # get the angle between the endpoints to rotate the image by
    angle_radians = acos(diff_x / hypot)
    angle_degrees = degrees(angle_radians)
    array = array.copy()

    # rotate the image, preserving size
    if diff_y < 0:
        array = rotate(array, -angle_degrees, reshape=True, mode='nearest')
    else:
        array = rotate(array, angle_degrees, reshape=True, mode='nearest')

    # reinitialize the image again
    return array
Example #34
 def find_score(arr, angle):
     data = inter.rotate(arr, angle, reshape=False, order=0)
     hist = np.sum(data, axis=1)
     score = np.sum((hist[1:] - hist[:-1])**2)
     return hist, score
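This helper is typically used to scan candidate angles and keep the best-scoring one; a sketch, assuming inter is scipy.ndimage.interpolation as in the snippet and using a random stand-in for a binarized text image.

import numpy as np
from scipy.ndimage import interpolation as inter

binary = (np.random.rand(100, 200) > 0.9).astype(float)
angles = np.arange(-5, 5.5, 0.5)
scores = [find_score(binary, a)[1] for a in angles]
best_angle = angles[int(np.argmax(scores))]
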
Example #35
def yieldImages_ImgLabel(myobj):
    # All images are subtracted by the mean and divided by the std for each RGB channel, respectively.
    #mask_process_f = get(myobj.ImageGenerator_Identifier)
    #allDictList = getfileinfo(myobj.datadir, myobj.labelSuffix,myobj.dataExt,myobj.labelExt[0])
    ImgList, ImgNameList = getfilelist(os.path.join(myobj.datadir, myobj.img),
                                       myobj.dataExt)
    #LabelList, LabelNameList =  getfilelist(os.path.join(Imagefolder,'gt'), myobj.labelExt):

    index_list = list(range(0, len(ImgList)))
    randshuffle(index_list)
    for imgindx, thisindex in enumerate(index_list):
        print('processing image {s}'.format(s=imgindx))
        if imgindx == myobj.maximg:
            break

        thisimgfile = ImgList[thisindex]
        thisimgname = ImgNameList[thisindex]

        thismatfile = os.path.join(
            myobj.datadir, myobj.gt,
            thisimgname + myobj.labelSuffix[0] + myobj.labelExt[0])

        #absmatfilelist.append(thismatfile)
        #absfilelist.append(thisimgfile)

        img_org = imread(thisimgfile)  #np.asarray(Image.open(thisimgfile))
        if os.path.isfile(thismatfile):
            mask_org = imread(thismatfile)  #loaded_mt = loadmat(thismatfile)
            mask_org = (mask_org / np.max(mask_org))
        else:
            print(str(thismatfile) + ' does not exist!')
            continue
        filled_img = mask_org

        for resizeratio in myobj.resizeratio:
            #We may only interested in the region inside one region.
            img_res = imresize(img_org, resizeratio)
            mask_res = imresize(mask_org, resizeratio)
            filled_img = imresize(mask_org, resizeratio)
            #crop the boarder image
            [rowsize, colsize] = [img_res.shape[0], img_res.shape[1]]
            row_start = col_start = myobj.boarder * resizeratio
            row_end = rowsize - myobj.boarder * resizeratio
            col_end = colsize - myobj.boarder * resizeratio

            img_res = img_res[row_start:row_end, col_start:col_end, ...]
            mask_res = mask_res[row_start:row_end, col_start:col_end, ...]
            filled_img = filled_img[row_start:row_end, col_start:col_end, ...]
            for rotate_id in myobj.rotatepool:
                if rotate_id == 0:
                    img_rot = img_res
                    mask_rot = mask_res
                else:
                    img_rot = rotate(img_res, rotate_id, mode='nearest')
                    mask_rot = rotate(mask_res, rotate_id, mode='nearest')
                mask = mask_rot
                #assert np.max(mask.flatten()) <= 1, 'please normalize the mask to o, and 1 image'
                img_rgb = img_rot.copy()
                img = pre_process_img(img_rgb, yuv=False)
                # if using special small patch, then crop
                if myobj.crop_patch_size is None:
                    yield img, mask, filled_img
                    #break
                else:
                    if myobj.crop_patch_size[0] <= 1:
                        crop_patch_size = (int(myobj.crop_patch_size[0] * mask.shape[0]), \
                                           int(myobj.crop_patch_size[1] * mask.shape[1]))
                    else:
                        crop_patch_size = myobj.crop_patch_size
                    allcandidates = shuffle(find(mask != np.nan))
                    total_num = len(allcandidates)
                    selected_num = min(myobj.selected_num,
                                       int(myobj.selected_portion * total_num))
                    selected_ind = allcandidates[0:selected_num]

                    Allpatches_vec_img = Points2Patches(
                        selected_ind, img, crop_patch_size)
                    Allpatches_vec_mask = Points2Patches(
                        selected_ind, mask, crop_patch_size)
                    Allpatches_vec_filled_img = Points2Patches(
                        selected_ind, filled_img, crop_patch_size)

                    AllPatches_img = np.reshape(
                        Allpatches_vec_img,
                        (selected_num, ) + crop_patch_size + (img.shape[2], ))
                    AllPatches_mask = np.reshape(
                        Allpatches_vec_mask,
                        (selected_num, ) + crop_patch_size + (mask.shape[2], ))
                    AllPatches_filled_img = np.reshape(
                        Allpatches_vec_filled_img,
                        (selected_num, ) + crop_patch_size + (mask.shape[2], ))
                    # imshow to check if the selected pts is correct.

                    for patch_idx in range(selected_num):
                        #imshow(AllPatches_img[patch_idx, ...])

                        yield AllPatches_img[patch_idx, ...], AllPatches_mask[
                            patch_idx, ...], AllPatches_filled_img[patch_idx,
                                                                   ...]
Example #36
def random_rotation(image, angle_range=(0, 180)):
    h, w, _ = image.shape
    angle = np.random.randint(*angle_range)
    image = rotate(image, angle)
    image = resize(image, (h, w))
    return image
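A hedged usage sketch, assuming rotate is scipy.ndimage.rotate and resize is skimage.transform.resize, which the example appears to rely on; the image is a random placeholder.

import numpy as np
from scipy.ndimage import rotate
from skimage.transform import resize

image = np.random.rand(64, 64, 3)
out = random_rotation(image, angle_range=(0, 90))
print(out.shape)    # (64, 64, 3), resized back to the original height and width
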
def get_boxes(lows,data,size,lat,lon,landmask):
    """
    box, indlist = get_boxes(lows, data, size, lat, lon, landmask)

    Clips a square of length (2 x size) + 1 around each low
    pressure center in lows, rotated so that north is up, and
    returns an array with all the boxes.

    Parameters:
    --------------------
    lows: binary matrix where 1 = low pressure center
    data: numpy array, land masked with 0
    size: numeric, half the length of the 2D subset box
    lat, lon: latitude and longitude grids matching data
    landmask: land mask used to buffer noisy coastal grid cells

    Returns:
    --------------------
    box: numpy array of data around low pressure centers
    indlist: indices of the lows that were successfully boxed
    """
    lon[lon < 0.] = lon[lon < 0.] + 360.

    long_size = ((size *2) + 1)
    mylow = np.where(lows == 1)
    nlows = mylow[0].shape[0]
    data_box = np.zeros((nlows,long_size,long_size))
    lat_box = np.zeros(data_box.shape)
    lon_box = np.zeros(data_box.shape)
    (tmax, ymax, xmax) = data.shape
    if len(landmask.shape) == 3:
        landmask = landmask[0,:,:]
    # get lon where north is up
    lon0 = lon[0,(int(xmax/2))-1]
    count = 0
    indlist = np.zeros((nlows))
    for ind in range(0,nlows):
        time = mylow[0][ind]
        lowrow = mylow[1][ind]
        lowcol = mylow[2][ind]
        # -----------------
        # rotation to north
        # -----------------
        mylon = lon[lowrow,lowcol]
        low_mask = np.zeros((ymax,xmax))
        low_mask[lowrow,lowcol] = 1
        if lon0 < mylon:
            deg = mylon - lon0
        elif lon0 >= mylon:
            deg = (360 + mylon) - lon0
        low_rotated = interpolation.rotate(low_mask, deg, order = 2)
        # because of interpolation, lows != 1
        ynew,xnew = np.where(low_rotated == low_rotated.max())
        if len(ynew.shape) > 1:
            print("get_boxes: problem with rotation: too many indices for max")
            print("get_boxes: exiting script")
            #return
        data_rotated = interpolation.rotate(data[time,:,:], deg, order =2)
        # try to ignore data outside map
        data_rotated[data_rotated == 0.0] = np.nan
        # take out noisy grid cells near coast
        landmask_rot = interpolation.rotate(landmask, deg, order = 2)
        landmask_rot[landmask_rot < 0.5] = 0
        landmask_rot[landmask_rot >= 0.5] = 1
        data_rotated = buffer_coast(data_rotated, buf = 1, mask = landmask_rot)
        # -----------------
        # extracting box
        # -----------------
        y1 = int(ynew - size)
        y2 = int(ynew + size + 1)
        x1 = int(xnew - size)
        x2 = int(xnew + size + 1)
        if (y1 < 0) | (x1 < 0) | (y2 > ymax) | (x2 > xmax):
            # too close to edge of map
            continue
        else:
            data_box[count,:,:] = data_rotated[y1:y2,x1:x2]
            #data_box[count,:,:] = data[ind,y1:y2,x1:x2]
            #lat_box[count,:,:] = lat[y1:y2,x1:x2]
            #lon_box[count,:,:] = lon[y1:y2,x1:x2]
            indlist[count] = ind
            count += 1
    return data_box[0:count,:,:], indlist[0:count] #, lon_box[0:count,:,:]
Example #38
def register(fn1, fn2, warpband, dims1=None, outfile=None):                  
    gdal.AllRegister()    
    print('--------------------------------')
    print('        Register')
    print('---------------------------------')
    print(time.asctime())
    print('reference image: ' + fn1)
    print('warp image: ' + fn2)
    print('warp band: %i' % warpband)
    
    start =  time.time()              
    try:
        if outfile is None:
            path2 = os.path.dirname(fn2)
            basename2 = os.path.basename(fn2)
            root2, ext2 = os.path.splitext(basename2)
            outfile = path2 + '/' + root2 + '_warp' + ext2
        inDataset1 = gdal.Open(fn1,GA_ReadOnly)     
        inDataset2 = gdal.Open(fn2,GA_ReadOnly)
        try:
            cols1 = inDataset1.RasterXSize
            rows1 = inDataset1.RasterYSize    
            cols2 = inDataset2.RasterXSize
            rows2 = inDataset2.RasterYSize    
            bands2 = inDataset2.RasterCount   
        except Exception as e:
            print('Error %s  --Image could not be read in' % e)
            sys.exit(1)     
        if dims1 is None:
            x0 = 0
            y0 = 0
        else:
            x0,y0,cols1,rows1 = dims1    
        
        band = inDataset1.GetRasterBand(warpband)
        refband = band.ReadAsArray(x0,y0,cols1,rows1).astype(np.float32)
        band = inDataset2.GetRasterBand(warpband)
        warpband = band.ReadAsArray(x0,y0,cols1,rows1).astype(np.float32)
        
    #  similarity transform parameters for reference band number            
        scale, angle, shift = similarity(refband, warpband)
    
        driver = inDataset2.GetDriver()
        outDataset = driver.Create(outfile,cols1,rows1,bands2,GDT_Float32)
        projection = inDataset1.GetProjection()
        geotransform = inDataset1.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform)
            gt[0] = gt[0] + x0*gt[1]
            gt[3] = gt[3] + y0*gt[5]
            outDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            outDataset.SetProjection(projection) 
    
    #  warp 
        for k in range(bands2):       
            inband = inDataset2.GetRasterBand(k+1)      
            outBand = outDataset.GetRasterBand(k+1)
            bn1 = inband.ReadAsArray(0,0,cols2,rows2).astype(np.float32)
            bn2 = ndii.zoom(bn1, 1.0 / scale)
            bn2 = ndii.rotate(bn2, angle)
            bn2 = ndii.shift(bn2, shift)       
            outBand.WriteArray(bn2[y0:y0+rows1, x0:x0+cols1]) 
            outBand.FlushCache() 
        inDataset1 = None
        inDataset2 = None
        outDataset = None    
        print('Warped image written to: %s' % outfile)
        print('elapsed time: %s' % str(time.time() - start))
        return outfile
    except Exception as e:
        print('registersms failed: %s' % e)
        return None   
        flat = np.load(
            '/scratch/dw1519/galex/data/star_photon/super_iterate{0}/flat.npy'.
            format(i + 4))
        profile = convert1(flat) / 0.6 * 0.75
        mask2 = profile > 0
        #profile[~mask2] = 1.
        plt.plot(profile, '-', label='{0}-{1}'.format(bins[i], bins[i + 1]))
    plt.legend()
    #plt.ylim(0.5,1.)
    plt.xlabel('x [pixel]')
    plt.ylabel('Average sensitivity')
    plt.savefig('/scratch/dw1519/galex/data/star_photon/profile_x.png',
                dpi=190)
    plt.clf()

    flat0 = rotate(flat0, -90., reshape=False, order=1, prefilter=False)
    profile0 = convert1(flat0)
    mask = profile0 > 0
    #profile0[~mask] = 1.
    ratio = profile0 / profile0
    plt.plot(profile0, '-', label='pipeline')

    for i in range(6):
        flat = np.load(
            '/scratch/dw1519/galex/data/star_photon/super_iterate{0}/flat.npy'.
            format(i + 4))
        flat = rotate(flat, -90., reshape=False, order=1, prefilter=False)
        profile = convert1(flat) / 0.6 * 0.72
        mask2 = profile > 0
        #profile[~mask2] = 1.
        plt.plot(profile, '-', label='{0}-{1}'.format(bins[i], bins[i + 1]))
Example #40
def decode_cv_prfs(n_pix, rsq_threshold, use_median, n_folds, data_file, extent, screen_distance, screen_width, TR, mask_name, **kwargs):
    
    # for key, value in kwargs.iteritems():
    #         key = value


    # set up results variables
    cv_rotated_recon, cv_reshrot_recon, cv_reshrot_recon_m, \
    cv_omega, cv_estimated_tau_matrix, \
    cv_estimated_rho, cv_estimated_sigma, cv_estimated_alpha = [], [], [], [], [], [], [], []

    for i in tqdm(range(n_folds)):
        # get the data
     
        
        (prf_cv_fold_data, W, 
         all_residuals_css, all_residual_covariance_css, test_data, mask) = setup_data_from_h5(
                        data_file = data_file, 
                        n_pix=n_pix, 
                        extent=extent, 
                        screen_distance=screen_distance, 
                        screen_width=screen_width, 
                        rsq_threshold=rsq_threshold,
                        TR=TR,
                        cv_fold=i,
                        n_folds=n_folds,
                        use_median=False,
                        mask_name=mask_name)

        # estimate the covariance structure, which outputs all parameters
        (estimated_tau_matrix, estimated_rho, 
         estimated_sigma, estimated_alpha, omega, omega_inv, logdet) = fit_model_omega(observed_residual_covariance=all_residual_covariance_css, 
                                        WWT=np.dot(W,W.T),
                                        verbose=0,
                                 #       infile='../data/omega.npy'
                                        )

        # set up result array:
        dm_pixel_logl_ratio = np.zeros((mask.sum(),test_data.shape[1]))

        # and loop across timepoints
        for t, bold in enumerate(test_data.T):
            dm_pixel_logl_ratio[:,t] = firstpass_decoder_independent_channels(
                                        W=W,
                                        bold=bold, 
                                        logdet=logdet,
                                        omega_inv=omega_inv,                                        
                                        mapping_relation=['power_law','linear'],
                                        mapping_parameters=[prf_cv_fold_data[:, 3],prf_cv_fold_data[:,4:6]]
                                        #mapping_relation='power_law',
                                        #mapping_parameters=prf_cv_fold_data[:, 3]
                                        )
            
            
        decoded_image = np.zeros((mask.sum(),test_data.shape[1]))  
        for t, bold in enumerate(tqdm(test_data.T)):
    

            logl, decoded_image[:,t] = maximize_loglikelihood( starting_value=dm_pixel_logl_ratio[:,t],
                            W=W,                           
                            bold=bold,
                            logdet=logdet,
                            omega_inv=omega_inv,                            
                            mapping_relation = ['power_law', 'linear'],
                            mapping_parameters = [prf_cv_fold_data[:, 3],prf_cv_fold_data[:,4:6]]
                            #mapping_relation='power_law',
                            #mapping_parameters=prf_cv_fold_data[:, 3]
                                                     )    

     

        # fill in the mask
        recon = np.zeros([decoded_image.shape[1]]+list(mask.shape) )
        for t in range(decoded_image.shape[1]):
            recon[t,mask] = decoded_image[:,t]

        # rotate reconstructions to bar orientation
        thetas = [-1, 0, -1, 45, 270, -1,  315,  180, -1,  135,   90, -1,  225, -1]
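        # entries of -1 in thetas mark blocks with no bar pass; the loop below skips them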
        rotated_recon = np.copy(recon).T

        hrf_delay = 0
        block_delimiters = np.r_[np.arange(2, 462, 34) + hrf_delay, 462]
        reshrot_recon = np.zeros((8, rotated_recon.shape[0], rotated_recon.shape[1], 38))
        bar_counter = 0
        for i in range(len(block_delimiters) - 1):
            if thetas[i] != -1:
                rotated_recon[:, :, block_delimiters[i]:block_delimiters[i + 1] + 4] = rotate(rotated_recon[:, :, block_delimiters[i]:block_delimiters[i + 1] + 4],
                                                                                              axes=(
                    0, 1),
                    angle=thetas[i],
                    reshape=False,
                    mode='nearest')
                reshrot_recon[bar_counter] = rotated_recon[:, :,
                                                           block_delimiters[i]:block_delimiters[i + 1] + 4]
                bar_counter += 1

        reshrot_recon_m = np.median(reshrot_recon, axis=0)
        rotated_recon_m = np.median(rotated_recon, axis=0)
        pl.figure(figsize=(24,7))
        pl.imshow(rotated_recon_m);
        pl.figure(figsize=(12,6))
        pl.imshow(np.median(reshrot_recon_m, axis = 0), aspect = 0.5);
        ##############################
        #   Save out results
        ##############################

        cv_rotated_recon.append(rotated_recon_m)
        cv_reshrot_recon.append(reshrot_recon)
        cv_reshrot_recon_m.append(reshrot_recon_m)
        cv_omega.append(omega)
        cv_estimated_tau_matrix.append(estimated_tau_matrix)
        cv_estimated_rho.append(estimated_rho)
        cv_estimated_sigma.append(estimated_sigma)
        cv_estimated_alpha.append(estimated_alpha)


    cv_rotated_recon = np.array(cv_rotated_recon)
    cv_reshrot_recon = np.array(cv_reshrot_recon)
    cv_reshrot_recon_m = np.array(cv_reshrot_recon_m)
    cv_omega = np.array(cv_omega)
    cv_estimated_tau_matrix = np.array(cv_estimated_tau_matrix)
    cv_estimated_rho = np.array(cv_estimated_rho)
    cv_estimated_sigma = np.array(cv_estimated_sigma)
    cv_estimated_alpha = np.array(cv_estimated_alpha)

    return cv_rotated_recon, cv_reshrot_recon, cv_reshrot_recon_m, cv_omega, cv_estimated_tau_matrix, cv_estimated_rho, cv_estimated_sigma, cv_estimated_alpha


#   0%|          | 0/6 [00:00<?, ?it/s]
# data file found, returning local file ../data/V1.h5
# max tau: 140.139935703 min tau: 2.24584848951
# sigma: 5.75604779908 rho: 0.0140699975513
# summed squared distance: 444618382.496
#  17%|█▋        | 1/6 [02:01<10:05, 121.07s/it]
# data file found, returning local file ../data/V1.h5
# max tau: 43.8768334409 min tau: 0.830690536232
# sigma: 8.72147338743 rho: 0.111600068051
# summed squared distance: 3433452.07076
#  33%|███▎      | 2/6 [04:01<08:04, 121.02s/it]
# data file found, returning local file ../data/V1.h5
# max tau: 72.5664452049 min tau: 1.14343146174
# sigma: 5.92838085616 rho: 0.0351495921648
# summed squared distance: 35334222.2567
#  50%|█████     | 3/6 [05:55<05:56, 118.72s/it]
# data file found, returning local file ../data/V1.h5
# max tau: 41.4005198983 min tau: 0.790744766647
# sigma: -5.14436990217 rho: 0.125429150112
# summed squared distance: 2501341.73854
#  67%|██████▋   | 4/6 [07:47<03:53, 116.83s/it]
# data file found, returning local file ../data/V1.h5
# max tau: 39.3126611088 min tau: 2.16457366969
# sigma: 10.4218693161 rho: 0.0367255035114
# summed squared distance: 7811951.39835
#  83%|████████▎ | 5/6 [09:46<01:57, 117.32s/it]
# data file found, returning local file ../data/V1.h5
# max tau: 18.611556683 min tau: -0.360151297827
# sigma: 13.6759073136 rho: 0.232756200748
# summed squared distance: 209931.292302
# 100%|██████████| 6/6 [11:47<00:00, 118.61s/it]
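The alignment loop above hinges on scipy.ndimage's rotate acting only on the two spatial axes of a 3-D array (axes=(0, 1), reshape=False) while leaving the time axis untouched. A minimal toy sketch of that call, on a hypothetical array that is not part of the original data:

import numpy as np
from scipy.ndimage import rotate

# hypothetical toy stack: 21 x 21 spatial pixels, 10 "timepoints"
stack = np.zeros((21, 21, 10))
stack[:, 10, :] = 1.0  # a vertical bar in every frame

# rotate every frame by 45 degrees in the (row, column) plane, keeping the shape
aligned = rotate(stack, angle=45, axes=(0, 1), reshape=False, mode='nearest')
print(aligned.shape)  # (21, 21, 10)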
Example #41
0
def clip_whitespace(img):
    """Trim rows and columns that are entirely white (255)."""
    short = img[~np.all(img == 255, axis=1)]
    small = short.T[~np.all(short == 255, axis=0)].T
    return small


img = clip_whitespace(img)

# To find a reasonable orientation for the QR code:
# 1. rotate a bit, trim whitespace, check the clipped size
# 2. the minimum size gives a reasonable rotation (nice_angle)

# NOTE: this is an iterative search; consider a binary or gradient-like one
# NOTE: this is exact to 1 degree; consider making it more (or less) exact
size_sums = []
for angle in range(360):
    rotated = interpolation.rotate(img, angle, cval=255)
    clipped = clip_whitespace(rotated)
    size_sums.append(clipped.shape[0])

nice_angle = np.array(size_sums).argmin()
rotated = interpolation.rotate(img, nice_angle, cval=255)
clipped = clip_whitespace(rotated)
qr = clipped

# Convert to a format known to the qr scanner and decode
#   NOTE: the scanner is unfortunately too powerful and works for any orientation
#   (in fact, it even works for the original image).
image = Image.fromarray(qr)
codes = zbarlight.scan_codes(['qrcode'], image)
code = str(codes[0], "utf-8")
print("Decoded: " + code)
Example #42
0
def jacobi(a):
    """
    Compute all eigenvalues and eigenvectors of a real symmetric matrix a of side length n. Outputs
    d[0...n-1], the eigenvalues of a sorted into descending order, and v[0...n-1][0...n-1], a
    matrix whose columns contain the corresponding normalized eigenvectors. nrot is the number of Jacobi
    rotations that were required. Only the upper triangle of a is accessed.
    """
    eps = 1e-9  # (epsilon) accuracy
    theta = np.radians(30)
    nrot = 0
    if np.size(a, 0) != np.size(a, 1):
        print("Error: matrix must be symmertic. E.g., of shape nxn")
        return
    n = np.size(a, 0)
    v = np.zeros((n, n))
    np.fill_diagonal(v, 1.0)
    b = np.diagonal(a).copy()  # b and d start as copies of the diagonal of a
    d = np.diagonal(a).copy()
    z = np.zeros(n)
    for i in range(1, 51):
        sm = 0  # sum the magnitude of off-diagonal elements
        for ip in range(0, n):
            for iq in range(ip + 1, n):
                sm += abs(a[ip][iq])
        if sm == 0:
            eigsrt(d, v)
            return v
        if i < 4:
            tresh = 0.2 * sm / (n ** 2)
        else:
            tresh = 0
        for ip in range(0, n - 1):
            for iq in range(ip + 1, n):
                g = 100 * abs(a[ip][iq])
                if i > 4 and g <= eps * abs(d[ip]) and g <= eps * abs(
                        d[iq]):  # after 4 sweeps, skip rotation if the off-diagonal element is small.
                    a[ip][iq] = 0
                elif abs(a[ip][iq]) > tresh:
                    h = d[iq] - d[ip]
                    if g <= eps * abs(h):
                        t = a[ip][iq] / h
                    else:
                        theta = .5 * h / (a[ip][iq])
                        t = 1.0 / (abs(theta) + np.sqrt(1 + theta**2))
                        if theta < 0:  # reverse direction
                            t = -t
                    c = 1.0 / np.sqrt(1 + t**2)
                    s = t * c
                    tau = s / (1.0 + c)
                    h = t * a[ip][iq]
                    z[ip] -= h
                    z[iq] += h
                    d[ip] -= h
                    d[iq] += h
                    a[ip][iq] = 0
                    # perform the Jacobi rotations on the upper triangle of a
                    # and accumulate the corresponding rotations in v
                    for j in range(0, ip):
                        g1, h1 = a[j][ip], a[j][iq]
                        a[j][ip] = g1 - s * (h1 + g1 * tau)
                        a[j][iq] = h1 + s * (g1 - h1 * tau)
                    for j in range(ip + 1, iq):
                        g1, h1 = a[ip][j], a[j][iq]
                        a[ip][j] = g1 - s * (h1 + g1 * tau)
                        a[j][iq] = h1 + s * (g1 - h1 * tau)
                    for j in range(iq + 1, n):
                        g1, h1 = a[ip][j], a[iq][j]
                        a[ip][j] = g1 - s * (h1 + g1 * tau)
                        a[iq][j] = h1 + s * (g1 - h1 * tau)
                    for j in range(0, n):
                        g1, h1 = v[j][ip], v[j][iq]
                        v[j][ip] = g1 - s * (h1 + g1 * tau)
                        v[j][iq] = h1 + s * (g1 - h1 * tau)
                    nrot += 1
        for ip in range(0, n):
            b[ip] += z[ip]
            d[ip] = b[ip]
            z[ip] = 0

    return v, nrot
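A routine like the one above is easiest to sanity-check against NumPy's symmetric eigensolver, which produces the same quantities directly. A small reference sketch, with eigenvalues sorted into descending order as the docstring describes:

import numpy as np

def jacobi_reference(a):
    """Eigen-decomposition of a real symmetric matrix, eigenvalues in descending order."""
    d, v = np.linalg.eigh(a)       # ascending eigenvalues, orthonormal eigenvector columns
    order = np.argsort(d)[::-1]    # reorder to descending
    return d[order], v[:, order]

a = np.array([[2.0, 1.0], [1.0, 2.0]])
d, v = jacobi_reference(a)
print(d)  # [3. 1.]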
Example #43
0
def kepprf(infile,
           plotfile,
           rownum,
           columns,
           rows,
           fluxes,
           border,
           background,
           focus,
           prfdir,
           xtol,
           ftol,
           imscale,
           colmap,
           labcol,
           apercol,
           plt,
           verbose,
           logfile,
           status,
           cmdLine=False):

    # input arguments

    status = 0
    seterr(all="ignore")

    # log the call

    hashline = '----------------------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = 'KEPPRF -- '
    call += 'infile=' + infile + ' '
    call += 'plotfile=' + plotfile + ' '
    call += 'rownum=' + str(rownum) + ' '
    call += 'columns=' + columns + ' '
    call += 'rows=' + rows + ' '
    call += 'fluxes=' + fluxes + ' '
    call += 'border=' + str(border) + ' '
    bground = 'n'
    if (background): bground = 'y'
    call += 'background=' + bground + ' '
    focs = 'n'
    if (focus): focs = 'y'
    call += 'focus=' + focs + ' '
    call += 'prfdir=' + prfdir + ' '
    call += 'xtol=' + str(xtol) + ' '
    call += 'ftol=' + str(ftol) + ' '
    call += 'imscale=' + imscale + ' '
    call += 'colmap=' + colmap + ' '
    call += 'labcol=' + labcol + ' '
    call += 'apercol=' + apercol + ' '
    plotit = 'n'
    if (plt): plotit = 'y'
    call += 'plot=' + plotit + ' '
    chatter = 'n'
    if (verbose): chatter = 'y'
    call += 'verbose=' + chatter + ' '
    call += 'logfile=' + logfile
    kepmsg.log(logfile, call + '\n', verbose)

    # test log file

    logfile = kepmsg.test(logfile)

    # start time

    kepmsg.clock('KEPPRF started at', logfile, verbose)

    # reference color map

    if colmap == 'browse':
        status = cmap_plot(cmdLine)

# construct initial guess vector for fit

    if status == 0:
        guess = []
        try:
            f = fluxes.strip().split(',')
            x = columns.strip().split(',')
            y = rows.strip().split(',')
            for i in range(len(f)):
                f[i] = float(f[i])
        except:
            f = fluxes
            x = columns
            y = rows
        nsrc = len(f)
        for i in range(nsrc):
            try:
                guess.append(float(f[i]))
            except:
                message = 'ERROR -- KEPPRF: Fluxes must be floating point numbers'
                status = kepmsg.err(logfile, message, verbose)
        if status == 0:
            if len(x) != nsrc or len(y) != nsrc:
                message = 'ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and '
                message += 'fluxes must have the same number of sources'
                status = kepmsg.err(logfile, message, verbose)
        if status == 0:
            for i in range(nsrc):
                try:
                    guess.append(float(x[i]))
                except:
                    message = 'ERROR -- KEPPRF: Columns must be floating point numbers'
                    status = kepmsg.err(logfile, message, verbose)
        if status == 0:
            for i in range(nsrc):
                try:
                    guess.append(float(y[i]))
                except:
                    message = 'ERROR -- KEPPRF: Rows must be floating point numbers'
                    status = kepmsg.err(logfile, message, verbose)
        if status == 0 and background:
            if border == 0:
                guess.append(0.0)
            else:
                for i in range((border + 1) * 2):
                    guess.append(0.0)
        if status == 0 and focus:
            guess.append(1.0)
            guess.append(1.0)
            guess.append(0.0)

# open TPF FITS file

    if status == 0:
        try:
            kepid, channel, skygroup, module, output, quarter, season, \
                ra, dec, column, row, kepmag, xdim, ydim, barytime, status = \
                kepio.readTPF(infile,'TIME',logfile,verbose)
        except:
            message = 'ERROR -- KEPPRF: is %s a Target Pixel File? ' % infile
            status = kepmsg.err(logfile, message, verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, tcorr, status = \
            kepio.readTPF(infile,'TIMECORR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, cadno, status = \
            kepio.readTPF(infile,'CADENCENO',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, fluxpixels, status = \
            kepio.readTPF(infile,'FLUX',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, errpixels, status = \
            kepio.readTPF(infile,'FLUX_ERR',logfile,verbose)
    if status == 0:
        kepid, channel, skygroup, module, output, quarter, season, \
            ra, dec, column, row, kepmag, xdim, ydim, qual, status = \
            kepio.readTPF(infile,'QUALITY',logfile,verbose)

# read mask definition data from TPF file

    if status == 0:
        maskimg, pixcoord1, pixcoord2, status = kepio.readMaskDefinition(
            infile, logfile, verbose)
        npix = numpy.size(numpy.nonzero(maskimg)[0])

# print target data

    if status == 0 and verbose:
        print('')
        print('      KepID: %s' % kepid)
        print('        BJD: %.2f' % (barytime[rownum - 1] + 2454833.0))
        print(' RA (J2000): %s' % ra)
        print('Dec (J2000):  %s' % dec)
        print('     KepMag:  %s' % kepmag)
        print('   SkyGroup:   %2s' % skygroup)
        print('     Season:   %2s' % str(season))
        print('    Channel:   %2s' % channel)
        print('     Module:   %2s' % module)
        print('     Output:    %1s' % output)
        print('')

# is this a good row with finite timestamp and pixels?

    if status == 0:
        if not numpy.isfinite(barytime[rownum - 1]) or numpy.isnan(
                numpy.nansum(fluxpixels[rownum - 1, :])):
            message = 'ERROR -- KEPFIELD: Row ' + str(
                rownum) + ' is a bad quality timestamp'
            status = kepmsg.err(logfile, message, verbose)

# construct input pixel image

    if status == 0:
        flux = fluxpixels[rownum - 1, :]
        ferr = errpixels[rownum - 1, :]
        DATx = arange(column, column + xdim)
        DATy = arange(row, row + ydim)
#        if numpy.nanmin > 420000.0: flux -= 420000.0

# image scale and intensity limits of pixel data

    if status == 0:
        n = 0
        DATimg = empty((ydim, xdim))
        ERRimg = empty((ydim, xdim))
        for i in range(ydim):
            for j in range(xdim):
                DATimg[i, j] = flux[n]
                ERRimg[i, j] = ferr[n]
                n += 1

# determine suitable PRF calibration file

    if status == 0:
        if int(module) < 10:
            prefix = 'kplr0'
        else:
            prefix = 'kplr'
        prfglob = prfdir + '/' + prefix + str(module) + '.' + str(
            output) + '*' + '_prf.fits'
        try:
            prffile = glob.glob(prfglob)[0]
        except:
            message = 'ERROR -- KEPPRF: No PRF file found in ' + prfdir
            status = kepmsg.err(logfile, message, verbose)

# read PRF images

    if status == 0:
        prfn = [0, 0, 0, 0, 0]
        crpix1p = numpy.zeros((5), dtype='float32')
        crpix2p = numpy.zeros((5), dtype='float32')
        crval1p = numpy.zeros((5), dtype='float32')
        crval2p = numpy.zeros((5), dtype='float32')
        cdelt1p = numpy.zeros((5), dtype='float32')
        cdelt2p = numpy.zeros((5), dtype='float32')
        for i in range(5):
            prfn[i], crpix1p[i], crpix2p[i], crval1p[i], crval2p[i], cdelt1p[i], cdelt2p[i], status \
                = kepio.readPRFimage(prffile,i+1,logfile,verbose)
        prfn = array(prfn)
        PRFx = arange(0.5, shape(prfn[0])[1] + 0.5)
        PRFy = arange(0.5, shape(prfn[0])[0] + 0.5)
        PRFx = (PRFx - size(PRFx) / 2) * cdelt1p[0]
        PRFy = (PRFy - size(PRFy) / 2) * cdelt2p[0]

# interpolate the calibrated PRF shape to the target position

    if status == 0:
        prf = zeros(shape(prfn[0]), dtype='float32')
        prfWeight = zeros((5), dtype='float32')
        for i in range(5):
            prfWeight[i] = sqrt((column - crval1p[i])**2 +
                                (row - crval2p[i])**2)
            if prfWeight[i] == 0.0:
                prfWeight[i] = 1.0e-6
            prf = prf + prfn[i] / prfWeight[i]
        prf = prf / nansum(prf) / cdelt1p[0] / cdelt2p[0]

# interpolate the calibrated PRF shape to the target position

#    if status == 0:
#        prf = zeros(shape(prfn[0,:,:]),dtype='float32')
#        px = crval1p + len(PRFx) / 2 * cdelt1p[0]
#        py = crval2p + len(PRFy) / 2 * cdelt2p[0]
#        pp = [[px[0],py[0]],
#              [px[1],py[1]],
#              [px[2],py[2]],
#              [px[3],py[3]],
#              [px[4],py[4]]]
#        for index,value in ndenumerate(prf):
#            pz = prfn[:,index[0],index[1]]
#            prf[index] = griddata(pp, pz, ([column], [row]), method='linear')
#        print shape(prf)

# location of the data image centered on the PRF image (in PRF pixel units)

    if status == 0:
        prfDimY = int(ydim / cdelt1p[0])
        prfDimX = int(xdim / cdelt2p[0])
        PRFy0 = (shape(prf)[0] - prfDimY) / 2
        PRFx0 = (shape(prf)[1] - prfDimX) / 2

# interpolation function over the PRF

    if status == 0:
        splineInterpolation = scipy.interpolate.RectBivariateSpline(
            PRFx, PRFy, prf)

# construct mesh for background model

    if status == 0 and background:
        bx = numpy.arange(1., float(xdim + 1))
        by = numpy.arange(1., float(ydim + 1))
        xx, yy = numpy.meshgrid(numpy.linspace(bx.min(), bx.max(), xdim),
                                numpy.linspace(by.min(), by.max(), ydim))

# fit PRF model to pixel data

    if status == 0:
        start = time.time()
        if focus and background:
            args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy,
                    splineInterpolation, float(x[0]), float(y[0]))
            ans = fmin_powell(kepfunc.PRFwithFocusAndBackground,
                              guess,
                              args=args,
                              xtol=xtol,
                              ftol=ftol,
                              disp=False)
        elif focus and not background:
            args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation,
                    float(x[0]), float(y[0]))
            ans = fmin_powell(kepfunc.PRFwithFocus,
                              guess,
                              args=args,
                              xtol=xtol,
                              ftol=ftol,
                              disp=False)
        elif background and not focus:
            args = (DATx, DATy, DATimg, ERRimg, nsrc, border, xx, yy,
                    splineInterpolation, float(x[0]), float(y[0]))
            ans = fmin_powell(kepfunc.PRFwithBackground,
                              guess,
                              args=args,
                              xtol=xtol,
                              ftol=ftol,
                              disp=False)
        else:
            args = (DATx, DATy, DATimg, ERRimg, nsrc, splineInterpolation,
                    float(x[0]), float(y[0]))
            ans = fmin_powell(kepfunc.PRF,
                              guess,
                              args=args,
                              xtol=xtol,
                              ftol=ftol,
                              disp=False)
        print('Convergence time = %.2fs\n' % (time.time() - start))

# pad the PRF data if the PRF array is smaller than the data array

    if status == 0:
        flux = []
        OBJx = []
        OBJy = []
        PRFmod = numpy.zeros((prfDimY, prfDimX))
        if PRFy0 < 0 or PRFx0 < 0.0:
            PRFmod = numpy.zeros((prfDimY, prfDimX))
            superPRF = zeros((prfDimY + 1, prfDimX + 1))
            superPRF[abs(PRFy0):abs(PRFy0) + shape(prf)[0],
                     abs(PRFx0):abs(PRFx0) + shape(prf)[1]] = prf
            prf = superPRF * 1.0
            PRFy0 = 0
            PRFx0 = 0

# rotate the PRF model around its center

        if focus:
            angle = ans[-1]
            prf = rotate(prf, -angle, reshape=False, mode='nearest')

# iterate through the sources in the best fit PSF model

        for i in range(nsrc):
            flux.append(ans[i])
            OBJx.append(ans[nsrc + i])
            OBJy.append(ans[nsrc * 2 + i])

            # calculate best-fit model

            y = (OBJy[i] - mean(DATy)) / cdelt1p[0]
            x = (OBJx[i] - mean(DATx)) / cdelt2p[0]
            prfTmp = shift(prf, [y, x], order=3, mode='constant')
            prfTmp = prfTmp[PRFy0:PRFy0 + prfDimY, PRFx0:PRFx0 + prfDimX]
            PRFmod = PRFmod + prfTmp * flux[i]
            wx = 1.0
            wy = 1.0
            angle = 0
            b = 0.0

            # write out best fit parameters

            if verbose:
                txt = 'Flux = %10.2f e-/s ' % flux[i]
                txt += 'X = %9.4f pix ' % OBJx[i]
                txt += 'Y = %9.4f pix ' % OBJy[i]
                kepmsg.log(logfile, txt, True)
#
#        params = {'backend': 'png',
#                  'axes.linewidth': 2.5,
#                  'axes.labelsize': 24,
#                  'axes.font': 'sans-serif',
#                  'axes.fontweight' : 'bold',
#                  'text.fontsize': 12,
#                  'legend.fontsize': 12,
#                  'xtick.labelsize': 24,
#                  'ytick.labelsize': 24}
#        pylab.rcParams.update(params)
#
#        pylab.figure(figsize=[20,10])
#        ax = pylab.axes([0.05,0.08,0.46,0.9])
#        xxx = numpy.arange(397.5,402.5,0.02)
#        yyy = numpy.sum(PRFmod,axis=0) / numpy.max(numpy.sum(PRFmod,axis=0))
#        pylab.plot(xxx,yyy,color='b',linewidth=3.0)
#        xxx = numpy.append(numpy.insert(xxx,[0],[xxx[0]]),xxx[-1])
#        yyy = numpy.append(numpy.insert(yyy,[0],[0.0]),yyy[-1])
#        pylab.fill(xxx,yyy,fc='y',linewidth=0.0,alpha=0.3)
#        pylab.xlabel('Pixel Column Number')
#        pylab.xlim(397.5,402.5)
#        pylab.ylim(1.0e-30,1.02)
#        for xmaj in numpy.arange(397.5,402.5,1.0):
#            pylab.plot([xmaj,xmaj],[0.0,1.1],color='k',linewidth=0.5,linestyle=':')
#        for xmaj in numpy.arange(0.2,1.2,0.2):
#            pylab.plot([0.0,2000.0],[xmaj,xmaj],color='k',linewidth=0.5,linestyle=':')
#
#
#        ax = pylab.axes([0.51,0.08,0.46,0.9])
#        xxx = numpy.arange(32.5,37.5,0.02)
#        yyy = numpy.sum(PRFmod,axis=1) / numpy.max(numpy.sum(PRFmod,axis=1))
#        pylab.plot(xxx,yyy,color='b',linewidth=3.0)
#        xxx = numpy.append(numpy.insert(xxx,[0],[xxx[0]]),xxx[-1])
#        yyy = numpy.append(numpy.insert(yyy,[0],[0.0]),yyy[-1])
#        pylab.fill(xxx,yyy,fc='y',linewidth=0.0,alpha=0.3)
#        pylab.setp(pylab.gca(),yticklabels=[])
#        pylab.xlabel('Pixel Row Number')
#        pylab.xlim(32.5,37.5)
#        pylab.ylim(1.0e-30,1.02)
#        for xmaj in numpy.arange(32.5,37.5,1.0):
#            pylab.plot([xmaj,xmaj],[0.0,1.1],color='k',linewidth=0.5,linestyle=':')
#        for xmaj in numpy.arange(0.2,1.2,0.2):
#            pylab.plot([0.0,2000.0],[xmaj,xmaj],color='k',linewidth=0.5,linestyle=':')
#        pylab.ion()
#        pylab.plot([])
#        pylab.ioff()

        if verbose and background:
            bterms = border + 1
            if bterms == 1:
                b = ans[nsrc * 3]
            else:
                bcoeff = array([
                    ans[nsrc * 3:nsrc * 3 + bterms],
                    ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2]
                ])
                bkg = kepfunc.polyval2d(xx, yy, bcoeff)
                b = nanmean(bkg.reshape(bkg.size))
            txt = '\n   Mean background = %.2f e-/s' % b
            kepmsg.log(logfile, txt, True)
        if focus:
            wx = ans[-3]
            wy = ans[-2]
            angle = ans[-1]
        if verbose and focus:
            if not background: kepmsg.log(logfile, '', True)
            kepmsg.log(logfile, ' X/Y focus factors = %.3f/%.3f' % (wx, wy),
                       True)
            kepmsg.log(logfile, 'PRF rotation angle = %.2f deg' % angle, True)

# measure flux fraction and contamination

# LUGER: This looks horribly bugged. ``PRFall`` is certainly NOT the sum of all the sources.

    if status == 0:
        PRFall = kepfunc.PRF2DET(flux, OBJx, OBJy, DATx, DATy, wx, wy, angle,
                                 splineInterpolation)
        PRFone = kepfunc.PRF2DET([flux[0]], [OBJx[0]], [OBJy[0]], DATx, DATy,
                                 wx, wy, angle, splineInterpolation)

        # LUGER: Add up contaminant fluxes
        PRFcont = np.zeros_like(PRFone)
        for ncont in range(1, len(flux)):
            PRFcont += kepfunc.PRF2DET([flux[ncont]], [OBJx[ncont]],
                                       [OBJy[ncont]], DATx, DATy, wx, wy,
                                       angle, splineInterpolation)
        PRFcont[np.where(PRFcont < 0)] = 0

        FluxInMaskAll = numpy.nansum(PRFall)
        FluxInMaskOne = numpy.nansum(PRFone)
        FluxInAperAll = 0.0
        FluxInAperOne = 0.0
        FluxInAperAllTrue = 0.0

        for i in range(1, ydim):
            for j in range(1, xdim):
                if kepstat.bitInBitmap(maskimg[i, j], 2):
                    FluxInAperAll += PRFall[i, j]
                    FluxInAperOne += PRFone[i, j]
                    FluxInAperAllTrue += PRFone[i, j] + PRFcont[i, j]
        FluxFraction = FluxInAperOne / flux[0]
        try:
            Contamination = (FluxInAperAll - FluxInAperOne) / FluxInAperAll
        except:
            Contamination = 0.0

        # LUGER: Pixel crowding metrics
        Crowding = PRFone / (PRFone + PRFcont)

        # LUGER: Optimal aperture crowding metric
        CrowdAper = FluxInAperOne / FluxInAperAllTrue

        kepmsg.log(
            logfile,
            '\n                Total flux in mask = %.2f e-/s' % FluxInMaskAll,
            True)
        kepmsg.log(
            logfile,
            '               Target flux in mask = %.2f e-/s' % FluxInMaskOne,
            True)
        kepmsg.log(
            logfile,
            '            Total flux in aperture = %.2f e-/s' % FluxInAperAll,
            True)
        kepmsg.log(
            logfile,
            '           Target flux in aperture = %.2f e-/s' % FluxInAperOne,
            True)
        kepmsg.log(
            logfile, '  Target flux fraction in aperture = %.2f%%' %
            (FluxFraction * 100.0), True)
        kepmsg.log(
            logfile, 'Contamination fraction in aperture = %.2f%%' %
            (Contamination * 100.0), True)
        kepmsg.log(logfile,
                   '       Crowding metric in aperture = %.4f' % (CrowdAper),
                   True)

# construct model PRF in detector coordinates

    if status == 0:
        PRFfit = PRFall + 0.0
        if background and bterms == 1:
            PRFfit = PRFall + b
        if background and bterms > 1:
            PRFfit = PRFall + bkg

# calculate residual of DATA - FIT

    if status == 0:
        PRFres = DATimg - PRFfit
        FLUXres = numpy.nansum(PRFres) / npix

# calculate the sum squared difference between data and model

    if status == 0:
        Pearson = abs(numpy.nansum(numpy.square(DATimg - PRFfit) / PRFfit))
        Chi2 = numpy.nansum(
            numpy.square(DATimg - PRFfit) / numpy.square(ERRimg))
        DegOfFreedom = npix - len(guess) - 1
        try:
            kepmsg.log(logfile, '\n       Residual flux = %.2f e-/s' % FLUXres,
                       True)
            kepmsg.log(
                logfile, 'Pearson\'s chi^2 test = %d for %d dof' %
                (Pearson, DegOfFreedom), True)
        except:
            pass
        kepmsg.log(
            logfile,
            '          Chi^2 test = %d for %d dof' % (Chi2, DegOfFreedom),
            True)

# image scale and intensity limits for plotting images

    if status == 0:
        imgdat_pl, zminfl, zmaxfl = kepplot.intScale2D(DATimg, imscale)
        imgprf_pl, zminpr, zmaxpr = kepplot.intScale2D(PRFmod, imscale)
        imgfit_pl, zminfi, zmaxfi = kepplot.intScale2D(PRFfit, imscale)
        imgres_pl, zminre, zmaxre = kepplot.intScale2D(PRFres, 'linear')
        if imscale == 'linear':
            zmaxpr *= 0.9
        elif imscale == 'logarithmic':
            zmaxpr = numpy.max(zmaxpr)
            zminpr = zmaxpr / 2

# plot style

    if status == 0:
        pylab.figure(figsize=[12, 10])
        pylab.clf()
        plotimage(imgdat_pl, zminfl, zmaxfl, 1, row, column, xdim, ydim, 0.07,
                  0.53, 'observation', colmap, labcol)
        #        pylab.text(830.0,242.1,'A',horizontalalignment='center',verticalalignment='center',
        #                   fontsize=28,fontweight=500,color='white')
        #        pylab.text(831.1,240.62,'B',horizontalalignment='center',verticalalignment='center',
        #                   fontsize=28,fontweight=500,color='white')
        #        plotimage(imgprf_pl,0.0,zmaxpr/0.5,2,row,column,xdim,ydim,0.52,0.52,'model',colmap)
        plotimage(imgprf_pl, zminpr, zmaxpr, 2, row, column, xdim, ydim, 0.44,
                  0.53, 'model', colmap, labcol)
        kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 1, apercol,
                        '--', 0.5)
        kepplot.borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, 2, apercol,
                        '-', 3.0)
        plotimage(imgfit_pl,
                  zminfl,
                  zmaxfl,
                  3,
                  row,
                  column,
                  xdim,
                  ydim,
                  0.07,
                  0.08,
                  'fit',
                  colmap,
                  labcol,
                  crowd=Crowding)
        #        plotimage(imgres_pl,-zmaxre,zmaxre,4,row,column,xdim,ydim,0.44,0.08,'residual',colmap,'k')
        plotimage(imgres_pl, zminfl, zmaxfl, 4, row, column, xdim, ydim, 0.44,
                  0.08, 'residual', colmap, labcol)

# plot data color bar

#    barwin = pylab.axes([0.84,0.53,0.06,0.45])
    barwin = pylab.axes([0.84, 0.08, 0.06, 0.9])
    if imscale == 'linear':
        brange = numpy.arange(zminfl, zmaxfl, (zmaxfl - zminfl) / 1000)
    elif imscale == 'logarithmic':
        brange = numpy.arange(10.0**zminfl, 10.0**zmaxfl,
                              (10.0**zmaxfl - 10.0**zminfl) / 1000)
    elif imscale == 'squareroot':
        brange = numpy.arange(zminfl**2, zmaxfl**2,
                              (zmaxfl**2 - zminfl**2) / 1000)
    if imscale == 'linear':
        barimg = numpy.resize(brange, (1000, 1))
    elif imscale == 'logarithmic':
        barimg = numpy.log10(numpy.resize(brange, (1000, 1)))
    elif imscale == 'squareroot':
        barimg = numpy.sqrt(numpy.resize(brange, (1000, 1)))
    try:
        nrm = len(str(int(numpy.nanmax(brange)))) - 1
    except:
        nrm = 0
    brange = brange / 10**nrm
    pylab.imshow(barimg,
                 aspect='auto',
                 interpolation='nearest',
                 origin='lower',
                 vmin=numpy.nanmin(barimg),
                 vmax=numpy.nanmax(barimg),
                 extent=(0.0, 1.0, brange[0], brange[-1]),
                 cmap=colmap)
    barwin.yaxis.tick_right()
    barwin.yaxis.set_label_position('right')
    barwin.yaxis.set_major_locator(MaxNLocator(7))
    pylab.gca().yaxis.set_major_formatter(
        pylab.ScalarFormatter(useOffset=False))
    pylab.gca().set_autoscale_on(False)
    pylab.setp(pylab.gca(), xticklabels=[], xticks=[])
    pylab.ylabel('Flux (10$^%d$ e$^-$ s$^{-1}$)' % nrm)
    setp(barwin.get_yticklabels(), 'rotation', 90)
    barwin.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))

    # plot residual color bar

    #    barwin = pylab.axes([0.84,0.08,0.06,0.45])
    #    Brange = numpy.arange(-zmaxre,zmaxre,(zmaxre+zmaxre)/1000)
    #    try:
    #        nrm = len(str(int(numpy.nanmax(brange))))-1
    #    except:
    #        nrm = 0
    #    brange = brange / 10**nrm
    #    barimg = numpy.resize(brange,(1000,1))
    #    pylab.imshow(barimg,aspect='auto',interpolation='nearest',origin='lower',
    #           vmin=brange[0],vmax=brange[-1],extent=(0.0,1.0,brange[0],brange[-1]),cmap=colmap)
    #    barwin.yaxis.tick_right()
    #    barwin.yaxis.set_label_position('right')
    #    barwin.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
    #    barwin.yaxis.set_major_locator(MaxNLocator(7))
    #    pylab.gca().yaxis.set_major_formatter(pylab.ScalarFormatter(useOffset=False))
    #    pylab.gca().set_autoscale_on(False)
    #    pylab.setp(pylab.gca(),xticklabels=[],xticks=[])
    #    pylab.ylabel('Residual (10$^%d$ e$^-$ s$^{-1}$)' % nrm)
    #    setp(barwin.get_yticklabels(), 'rotation', 90)

    # render plot

    if status == 0 and len(plotfile) > 0 and plotfile.lower() != 'none':
        pylab.savefig(plotfile)
    if status == 0 and plt:
        if cmdLine:
            pylab.show(block=True)
        else:
            pylab.ion()
            pylab.plot([])
            pylab.ioff()

# stop time

    kepmsg.clock('\nKEPPRF ended at', logfile, verbose)

    return
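The PRF interpolation step above combines the five calibration PRFs by inverse-distance weighting about the target position (the cdelt normalization used in the function is omitted here). A minimal sketch of that weighting scheme, with hypothetical argument names:

import numpy as np

def interp_prf(prfn, crval1p, crval2p, column, row, eps=1.0e-6):
    """Inverse-distance-weighted combination of calibration PRF images."""
    prf = np.zeros_like(prfn[0], dtype='float64')
    for img, c, r in zip(prfn, crval1p, crval2p):
        w = max(np.hypot(column - c, row - r), eps)  # avoid division by zero
        prf += img / w
    return prf / np.nansum(prf)  # normalize to unit sum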
Example #44
0
    #plt.imshow(bin_img, cmap='gray')
    #plt.savefig('binary.png')

    delta = 1
    limit = 5
    angles = np.arange(-limit, limit + delta, delta)
    scores = []

    for angle in angles:
        hist, score = find_score(bin_img, angle)
        scores.append(score)
    best_score = max(scores)
    best_angle = angles[scores.index(best_score)]
    print('Best angle for image ' + str(i) + ': {}'.format(best_angle))
    # correct skew
    data = inter.rotate(bin_img, best_angle, reshape=False, order=0)
    img_s = im.fromarray(255 - (255 * data).astype("uint8")).convert("RGB")
    os.makedirs("../BDRP_Project/skew_corrected_images", exist_ok=True)
    img_s.save(r'skew_corrected_images\\skew_corrected_img' + str(i) + '.jpg')

    # img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img_c = 'skew_corrected_images\\skew_corrected_img' + str(i) + '.jpg'
    img1 = cv2.imread(img_c, cv2.IMREAD_COLOR)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    # rescaling image
    img1 = cv2.resize(img1, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)

    #img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

    # noise removal
    img2 = cv2.GaussianBlur(img1, (5, 5), 0)
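
The skew-scoring loop above calls a find_score helper that is not shown in this snippet. A plausible version, assuming the usual projection-profile scoring (row sums of the rotated binary image, scored by squared differences between neighbouring rows, essentially the same scorer shown in a later example):

import numpy as np
from scipy.ndimage import interpolation as inter

def find_score(arr, angle):
    """Rotate a binary image and score how pronounced its horizontal text lines are."""
    data = inter.rotate(arr, angle, reshape=False, order=0)
    hist = np.sum(data, axis=1)                  # projection profile (one value per row)
    score = np.sum((hist[1:] - hist[:-1]) ** 2)  # sharp line boundaries give large jumps
    return hist, score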
def correctSkews(angle1, angle2, array):
    from scipy.ndimage.interpolation import rotate
    rotated1 = rotate(array, angle1, mode='nearest', axes=(0, 1))
    angle1rad = angle1 / 360 * 2 * np.pi
    rotated2 = rotate(rotated1, angle2, mode='nearest', axes=(2, 0))
    return rotated1, rotated2
Example #46
0
def main():
    gdal.AllRegister()
    path = auxil.select_directory('Choose working directory')
    if path:
        os.chdir(path)
#  MS image
    file1 = auxil.select_infile(title='Choose MS image')
    if file1:
        inDataset1 = gdal.Open(file1, GA_ReadOnly)
        cols = inDataset1.RasterXSize
        rows = inDataset1.RasterYSize
        bands = inDataset1.RasterCount
    else:
        return
    pos1 = auxil.select_pos(bands)
    if not pos1:
        return
    num_bands = len(pos1)
    dims = auxil.select_dims([0, 0, cols, rows])
    if dims:
        x10, y10, cols1, rows1 = dims
    else:
        return
#  PAN image
    file2 = auxil.select_infile(title='Choose PAN image')
    if file2:
        inDataset2 = gdal.Open(file2, GA_ReadOnly)
        bands = inDataset2.RasterCount
    else:
        return
    if bands > 1:
        print 'Must be a single band (panchromatic) image'
        return
    geotransform1 = inDataset1.GetGeoTransform()
    geotransform2 = inDataset2.GetGeoTransform()
    #  outfile
    outfile, fmt = auxil.select_outfilefmt()
    if not outfile:
        return
#  resolution ratio
    ratio = auxil.select_integer(4, 'Resolution ratio (2 or 4)')
    if not ratio:
        return
#  MS registration band
    k1 = auxil.select_integer(1, 'MS band for registration')
    if not k1:
        return
    print '========================='
    print '   ATWT Pansharpening'
    print '========================='
    print time.asctime()
    print 'MS  file: ' + file1
    print 'PAN file: ' + file2
    #  read in MS image
    band = inDataset1.GetRasterBand(1)
    tmp = band.ReadAsArray(0, 0, 1, 1)
    dt = tmp.dtype
    MS = np.asarray(np.zeros((num_bands, rows1, cols1)), dtype=dt)
    k = 0
    for b in pos1:
        band = inDataset1.GetRasterBand(b)
        MS[k, :, :] = band.ReadAsArray(x10, y10, cols1, rows1)
        k += 1
#  if integer assume 11-bit quantization, otherwise must be byte
    if MS.dtype == np.int16:
        fact = 8.0
        MS = auxil.byteStretch(MS, (0, 2**11))
    else:
        fact = 1.0
#  read in corresponding spatial subset of PAN image
    if (geotransform1 is None) or (geotransform2 is None):
        print 'Image not georeferenced, aborting'
        return
#  upper left corner pixel in PAN
    gt1 = list(geotransform1)
    gt2 = list(geotransform2)
    ulx1 = gt1[0] + x10 * gt1[1]
    uly1 = gt1[3] + y10 * gt1[5]
    x20 = int(round(((ulx1 - gt2[0]) / gt2[1])))
    y20 = int(round(((uly1 - gt2[3]) / gt2[5])))
    cols2 = cols1 * ratio
    rows2 = rows1 * ratio
    band = inDataset2.GetRasterBand(1)
    PAN = band.ReadAsArray(x20, y20, cols2, rows2)
    #  if integer assume 11-bit quantization, otherwise must be byte
    if PAN.dtype == np.int16:
        PAN = auxil.byteStretch(PAN, (0, 2**11))
#  out array
    sharpened = np.zeros((num_bands, rows2, cols2), dtype=np.float32)
    #  compress PAN to resolution of MS image using DWT
    panDWT = auxil.DWTArray(PAN, cols2, rows2)
    r = ratio
    while r > 1:
        panDWT.filter()
        r /= 2
    bn0 = panDWT.get_quadrant(0)
    #  register (and subset) MS image to compressed PAN image using selected MSband
    lines0, samples0 = bn0.shape
    bn1 = MS[k1 - 1, :, :]
    #  register (and subset) MS image to compressed PAN image
    (scale, angle, shift) = auxil.similarity(bn0, bn1)
    tmp = np.zeros((num_bands, lines0, samples0))
    for k in range(num_bands):
        bn1 = MS[k, :, :]
        bn2 = ndii.zoom(bn1, 1.0 / scale)
        bn2 = ndii.rotate(bn2, angle)
        bn2 = ndii.shift(bn2, shift)
        tmp[k, :, :] = bn2[0:lines0, 0:samples0]
    MS = tmp
    smpl = np.random.randint(cols2 * rows2, size=100000)
    print 'Wavelet correlations:'
    #  loop over MS bands
    for k in range(num_bands):
        msATWT = auxil.ATWTArray(PAN)
        r = ratio
        while r > 1:
            msATWT.filter()
            r /= 2


#      sample PAN wavelet details
        X = msATWT.get_band(msATWT.num_iter)
        X = X.ravel()[smpl]
        #      resize the ms band to scale of the pan image
        ms_band = ndii.zoom(MS[k, :, :], ratio)
        #      sample details of MS band
        tmpATWT = auxil.ATWTArray(ms_band)
        r = ratio
        while r > 1:
            tmpATWT.filter()
            r /= 2
        Y = tmpATWT.get_band(msATWT.num_iter)
        Y = Y.ravel()[smpl]
        #      get band for injection
        bnd = tmpATWT.get_band(0)
        tmpATWT = None
        aa, bb, R = auxil.orthoregress(X, Y)
        print 'Band ' + str(k + 1) + ': %8.3f' % R
        #      inject the filtered MS band
        msATWT.inject(bnd)
        #      normalize wavelet components and expand
        msATWT.normalize(aa, bb)
        r = ratio
        while r > 1:
            msATWT.invert()
            r /= 2
        sharpened[k, :, :] = msATWT.get_band(0)
    sharpened *= fact  # rescale dynamic range
    msATWT = None
    #  write to disk

    driver = gdal.GetDriverByName(fmt)
    outDataset = driver.Create(outfile, cols2, rows2, num_bands, GDT_Float32)
    gt1[0] += x10 * ratio
    gt1[3] -= y10 * ratio
    gt1[1] = gt2[1]
    gt1[2] = gt2[2]
    gt1[4] = gt2[4]
    gt1[5] = gt2[5]
    outDataset.SetGeoTransform(tuple(gt1))
    projection1 = inDataset1.GetProjection()
    if projection1 is not None:
        outDataset.SetProjection(projection1)
    for k in range(num_bands):
        outBand = outDataset.GetRasterBand(k + 1)
        outBand.WriteArray(sharpened[k, :, :], 0, 0)
        outBand.FlushCache()
    outDataset = None
    print 'Result written to %s' % outfile
    inDataset1 = None
    inDataset2 = None
Example #47
0
 def return_score(arr, angle):
     data = interpolation.rotate(arr, angle, reshape=False, order=0)
     histogram = np.sum(data, axis=1)
     score = np.sum((histogram[1:] - histogram[:-1])**2)
     return histogram, score
Example #48
0
def yieldImages(myobj):
    # all images are mean-subtracted and divided by the std for each RGB channel, respectively.
    mask_process_f = get(myobj.ImageGenerator_Identifier)
    if hasattr(myobj, 'allDictList'):
        allDictList = myobj.allDictList
    else:
        allDictList = getfileinfo(myobj.datadir, myobj.labelSuffix,
                                  myobj.dataExt, myobj.labelExt[0])

    index_list = range(0, len(allDictList))
    rotationlist = myobj.resizeratio
    randshuffle(index_list)
    randshuffle(rotationlist)
    for imgindx, thisindex in enumerate(index_list):
        if imgindx == myobj.maximg:
            break
        returnDict = allDictList[thisindex]
        thismatfile = returnDict['thismatfile']
        thisimgfile = returnDict['thisfile']

        print(thisimgfile)
        img_org = np.asarray(Image.open(thisimgfile))
        loaded_mt = loadmat(thismatfile)
        if type(myobj.contourname) is not list:
            myobj.contourname = [myobj.contourname]
        contour_mat = None
        for contourname in myobj.contourname:
            if contourname in loaded_mt.keys():
                contour_mat = loaded_mt[contourname].tolist()[0]
                break
        if not contour_mat:
            contour_mat = loaded_mt.values()[0].tolist()[0]
            print('check the mat keys, we use the first key by default: ' +
                  loaded_mt.keys()[0])
        outputs_dict = dict()

        for resizeratio in rotationlist:
            img_res = imresize(img_org, resizeratio)
            #print('start mask')
            process_dict = mask_process_f(myobj,
                                          contour_mat,
                                          img_org.shape[0:2],
                                          resizeratio=resizeratio)
            #print('end mask')
            mask_res = process_dict['mask']
            filled_img_res = process_dict[
                'filled_img'] if 'filled_img' in process_dict.keys(
                ) else mask_org
            shed_mask_res = process_dict['shed']

            # We may only be interested in the area inside one region.
            # since mask and filled_img are already resized versions, only the image needs to be resized
            #mask_res = mask_org #imresize(mask_org, resizeratio)
            # crop the border of the image
            [rowsize, colsize] = [img_res.shape[0], img_res.shape[1]]
            if myobj.boarder < 1:
                row_board = myobj.boarder * rowsize
                col_board = myobj.boarder * colsize
            elif len(myobj.boarder) == 2:
                row_board, col_board = myobj.boarder
            else:
                row_board = col_board = myobj.boarder

            row_start, col_start = row_board * resizeratio, col_board * resizeratio
            row_end = rowsize - row_board * resizeratio
            col_end = colsize - col_board * resizeratio

            img_res = img_res[row_start:row_end, col_start:col_end, ...]
            mask_res = mask_res[row_start:row_end, col_start:col_end, ...]
            filled_img_res = filled_img_res[row_start:row_end,
                                            col_start:col_end, ...]
            shed_mask_res = shed_mask_res[row_start:row_end, col_start:col_end,
                                          ...]

            if myobj.get_validation == True:
                outputs_dict['img'] = pre_process_img(img_res.copy(),
                                                      yuv=False)
                outputs_dict['mask'] = mask_res
                outputs_dict['filled_img'] = filled_img_res
                outputs_dict['shed_mask'] = shed_mask_res
                yield outputs_dict
                break
            for rotate_id in myobj.rotatepool:
                if rotate_id == 0:
                    img_rot = img_res.copy()
                    mask_rot = mask_res.copy()
                    shed_rot = shed_mask_res.copy()
                    filled_img_rot = filled_img_res.copy()
                else:

                    img_rot = rotate(img_res.copy(),
                                     rotate_id,
                                     mode='reflect',
                                     reshape=False)
                    mask_rot = rotate(mask_res.copy(),
                                      rotate_id,
                                      mode='reflect',
                                      reshape=False)
                    shed_rot = rotate(shed_mask_res.copy(),
                                      rotate_id,
                                      mode='reflect',
                                      reshape=False)
                    filled_img_rot = rotate(filled_img_res.copy(),
                                            rotate_id,
                                            mode='reflect',
                                            reshape=False)

                #assert np.max(mask.flatten()) <= 1, 'please normalize the mask to o, and 1 image'
                img_rot = pre_process_img(img_rot, yuv=False)

                # if using special small patch, then crop
                if myobj.crop_patch_size is None:
                    outputs_dict['img'] = img_rot
                    outputs_dict['mask'] = mask_rot
                    outputs_dict['filled_img'] = filled_img_rot
                    outputs_dict['shed_mask'] = shed_rot
                    for item in first_flip_channel(myobj, outputs_dict):
                        yield item
                #break
                else:
                    if myobj.crop_patch_size[0] <= 1:
                        crop_patch_size = (int(myobj.crop_patch_size[0] * mask_rot.shape[0]), \
                                           int(myobj.crop_patch_size[1] * mask_rot.shape[1]))
                    else:
                        crop_patch_size = myobj.crop_patch_size
                    allcandidates = shuffle(find(mask_rot != np.nan))
                    total_num = len(allcandidates)
                    selected_num = min(myobj.selected_num,
                                       int(myobj.selected_portion * total_num))
                    selected_ind = allcandidates[0:selected_num]

                    Allpatches_vec_img = Points2Patches(
                        selected_ind, img_rot, crop_patch_size)
                    Allpatches_vec_mask = Points2Patches(
                        selected_ind, mask_rot, crop_patch_size)
                    Allpatches_vec_filled_img = Points2Patches(
                        selected_ind, filled_img_rot, crop_patch_size)
                    Allpatches_vec_shed_mask = Points2Patches(
                        selected_ind, shed_rot, crop_patch_size)

                    AllPatches_img = np.reshape(
                        Allpatches_vec_img, (selected_num, ) +
                        crop_patch_size + (img_rot.shape[2], ))
                    AllPatches_mask = np.reshape(
                        Allpatches_vec_mask, (selected_num, ) +
                        crop_patch_size + (mask_rot.shape[2], ))
                    AllPatches_filled_img = np.reshape(
                        Allpatches_vec_filled_img, (selected_num, ) +
                        crop_patch_size + (mask_rot.shape[2], ))
                    AllPatches_shed_mask = np.reshape(
                        Allpatches_vec_shed_mask, (selected_num, ) +
                        crop_patch_size + (mask_rot.shape[2], ))

                    # imshow to check if the selected pts is correct.
                    for patch_idx in range(selected_num):
                        #imshow(AllPatches_img[patch_idx, ...])
                        outputs_dict['img'] = AllPatches_img[patch_idx, ...]
                        outputs_dict['mask'] = AllPatches_mask[patch_idx, ...]
                        outputs_dict['filled_img'] = AllPatches_filled_img[
                            patch_idx, ...]
                        outputs_dict['shed_mask'] = AllPatches_shed_mask[
                            patch_idx, ...]

                        for item in first_flip_channel(myobj, outputs_dict):
                            yield item
Example #49
0
 def batch_function(self, image):
     angle = self.get_random_variable('angle')
     return rotate(image[0], angle, axes=self.axes, reshape=False), \
            rotate(image[1], angle, axes=self.axes, order=0, cval=self.ml, reshape=False)
Example #50
0
 def RotateTopAxis(self, data, angle):
     return rotate(data, angle, axes=(1, 2), reshape=False)
Example #51
0
def similarity(im0, im1):
    """Return similarity transformed image im1 and transformation parameters.

    Transformation parameters are: isotropic scale factor, rotation angle (in
    degrees), and translation vector.

    A similarity transformation is an affine transformation with isotropic
    scale and without shear.

    Limitations:
    Image shapes must be equal and square.
    All image areas must have same scale, rotation, and shift.
    Scale change must be less than 1.8.
    No subpixel precision.

    """
    if im0.shape != im1.shape:
        raise ValueError("Images must have same shapes.")
    elif len(im0.shape) != 2:
        raise ValueError("Images must be 2 dimensional.")

    f0 = fftshift(abs(fft2(im0)))
    f1 = fftshift(abs(fft2(im1)))

    h = highpass(f0.shape)
    f0 *= h
    f1 *= h
    del h

    f0, log_base = logpolar(f0)
    f1, log_base = logpolar(f1)

    f0 = fft2(f0)
    f1 = fft2(f1)
    r0 = abs(f0) * abs(f1)
    ir = abs(ifft2((f0 * f1.conjugate()) / r0))

    i0, i1 = np.unravel_index(np.argmax(ir), ir.shape)
    angle = 180.0 * i0 / ir.shape[0]
    scale = log_base ** i1

    if scale > 1.8:
        ir = abs(ifft2((f1 * f0.conjugate()) / r0))
        i0, i1 = np.unravel_index(np.argmax(ir), ir.shape)
        angle = -180.0 * i0 / ir.shape[0]
        scale = 1.0 / (log_base ** i1)
        if scale > 1.8:
            raise ValueError("Images are not compatible. Scale change > 1.8")

    if angle < -90.0:
        angle += 180.0
    elif angle > 90.0:
        angle -= 180.0

    im2 = ndii.zoom(im1, 1.0/scale)
    im2 = ndii.rotate(im2, angle)

    if im2.shape < im0.shape:
        t = np.zeros_like(im0)
        t[:im2.shape[0], :im2.shape[1]] = im2
        im2 = t
    elif im2.shape > im0.shape:
        im2 = im2[:im0.shape[0], :im0.shape[1]]

    f0 = fft2(im0)
    f1 = fft2(im2)
    ir = abs(ifft2((f0 * f1.conjugate()) / (abs(f0) * abs(f1))))
    t0, t1 = np.unravel_index(np.argmax(ir), ir.shape)

    if t0 > f0.shape[0] // 2:
        t0 -= f0.shape[0]
    if t1 > f0.shape[1] // 2:
        t1 -= f0.shape[1]

    im2 = ndii.shift(im2, [t0, t1])

    # correct parameters for ndimage's internal processing
    if angle > 0.0:
        d = int((int(im1.shape[1] / scale) * math.sin(math.radians(angle))))
        t0, t1 = t1, d+t0
    elif angle < 0.0:
        d = int((int(im1.shape[0] / scale) * math.sin(math.radians(angle))))
        t0, t1 = d+t1, d+t0
    scale = (im1.shape[1] - 1) / (int(im1.shape[1] / scale) - 1)

    return im2, scale, angle, [-t0, -t1]
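A minimal usage sketch for similarity() above, on a hypothetical square test pattern, assuming the module's other helpers (highpass, logpolar) are available as in the original source; the recovered scale, angle and translation describe how im1 maps back onto im0:

import numpy as np
import scipy.ndimage.interpolation as ndii

# hypothetical test pattern (shapes must be equal and square)
im0 = np.zeros((128, 128))
im0[40:90, 55:75] = 1.0

# transformed copy: rotate by 10 degrees about the centre, then shift by a few pixels
im1 = ndii.shift(ndii.rotate(im0, 10.0, reshape=False), [3, -5])

im2, scale, angle, shift = similarity(im0, im1)
# expect roughly unit scale, a rotation of about 10 degrees (sign per the function's
# convention), and a few-pixel shift
print(scale, angle, shift)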
RawData = sorted(glob.glob(folderMean + '20110410/*.fits'))

dateObs = (RawData[0])[-36:-21]
t1 = time.time()

# Rotating HMI continuum images
RawByH = RawData[0:80]
dcn1 = "ImaRot_"
print "Reading and rotating images..."
for i in RawByH:
    print "Reading and rotating image -->", i[-36:-21]
    data = (fits.open(i, checksum=True))[1].data
    hdr = (fits.open(i, checksum=True))[1].header
    rot_angle = hdr["crota2"]
    im = rotate(data, rot_angle, mode="nearest", prefilter=False)
    np.save(folderRot + dcn1 + i[-36:-21], im)
print 50 * "*"
print "End rotation section.. \n"
print "Time elapsed rotating images -->", time.time() - t1
print 50 * "*"
del (data)
del (hdr)
del (im)

# Loading rotated images to create submaps

RotData = sorted(glob.glob(folderRot + "*.npy"))

t2 = time.time()
Example #53
0
def tefi(template,
         search_image,
         candidate_pixels,
         best_scales,
         best_angles,
         scales=[0.5, 0.57, 0.66, 0.76, 0.87, 1.0],
         upsampling=1,
         thresh=100,
         alpha=math.pi / 16,
         use_percentile=True,
         verbose=False):
    """
    Template Matching Filter (Tefi) is the third and final filter for ciratefi.

    For every candidate pixel, tefi rotates and scales the template image by the list
    of scales and angles passed in (which, ideally, are the outputs of cifi and rafi
    respectively) and performs template matching around the candidate pixel at the
    appropriate scale and rotation angle. The scales, angles and candidate
    points should be parallel arrays.

    Any points with correlation strength over the threshold are returned as
    the strongest candidates for the image location. If the point is known to
    exist in exactly one location, thresh should be 100 and use_percentile=True.

    parameters
    ----------
    template : ndarray
               The input search template used to 'query' the destination
               image

    image : ndarray
            The image or sub-image to be searched

    candidate_pixels : ndarray
                       array of candidate pixels as tuples (y,x), best if
                       the pixels are the output of Cifi

    best_scales : ndarray
                  The list of best fit scales for each candidate point, the length
                  should equal the length of the candidate point list

    best_angles : ndarray
                  The list of best fit rotation for each candidate point in radians,
                  the length should equal the length of the candidate point list

    upsampling : int
                 upsample degree

    thresh : float
             The correlation threshold above which a point will
             be a first-grade candidate point. If use_percentile=True
             this acts as a percentile; for example, passing 90 means
             keep values in the top 90th percentile

    use_percentile : bool
                     If True (default), thresh is a percentile instead of a hard
                     strength value

    alpha : float
            A float between 0 & 2*pi, alpha list = np.arange(0, 2*pi, alpha)

    verbose : bool
              Set to True in order to output images and text describing the outputs. Can
              cause a serious decrease in performance. False by default.

    returns
    -------

    results : ndarray
              array of pixel tuples (y,x) which score over the threshold
    """

    # check all inputs for validity, probably a better way to do this

    if search_image.shape < template.shape:
        raise ValueError(
            'Search image is smaller than the template, for template of '
            'size: {} and search image of size: {}'.format(
                template.shape, search_image.shape))

    candidate_pixels = np.asarray(candidate_pixels)
    if not candidate_pixels.size or not np.any(candidate_pixels):
        raise ValueError('candidate pixel list is empty')

    best_scales = np.asarray(best_scales, dtype=np.float32)
    if not best_scales.size or not np.any(best_scales):
        raise ValueError('best_scale list is empty')

    if best_scales.shape != search_image.shape:
        raise ValueError(
            'Search image and scales must be of the same shape '
            'got: best scales shape: {}, search image shape: {}'.format(
                best_scales.shape, search_image.shape))

    best_angles = np.asarray(best_angles, dtype=np.float32)
    if not best_angles.size or not np.any(best_angles):
        raise ValueError('Input best angle list is empty')

    best_scales = np.asarray(best_scales, dtype=float)
    if not best_scales.size or not np.any(best_scales):
        raise ValueError('Input best_scales list is empty')

    if (thresh < -1. or thresh > 1.) and not use_percentile:
        raise ValueError(
            'Thresholds must be in range [-1,1] when not using percentiles. Got: {}'
            .format(thresh))

    # Check inputs
    if upsampling < 1:
        raise ValueError('Upsampling must be >= 1, got {}'.format(upsampling))

    tefi_coeffs = np.zeros(candidate_pixels.shape[0])

    # if verbose, preallocate pixel data
    if verbose:  # pragma: no cover
        image_pixels = np.zeros((search_image.shape[0], search_image.shape[1]))

    # upsample the template and search image if requested; otherwise use them as-is
    if upsampling > 1:
        u_template = zoom(template, upsampling, order=3)
        u_search_image = zoom(search_image, upsampling, order=3)
    else:
        u_template = template
        u_search_image = search_image

    alpha_list = np.arange(0, 2 * math.pi, alpha)
    candidate_pixels *= int(upsampling)

    # Tefi -- Template Matching Filter
    for i in range(len(candidate_pixels)):
        y, x = candidate_pixels[i]

        try:
            best_scale_idx = (np.where(
                scales == best_scales[y // upsampling, x // upsampling]))[0][0]
            best_alpha_idx = (np.where(
                np.isclose(alpha_list, best_angles[i], atol=.01)))[0][0]
        except IndexError:
            tefi_coeffs[i] = 0
            continue

        tefi_scales = np.array(scales).take(range(best_scale_idx - 1,
                                                  best_scale_idx + 2),
                                            mode='wrap')
        tefi_alphas = alpha_list.take(range(best_alpha_idx - 1,
                                            best_alpha_idx + 2),
                                      mode='wrap')

        scalesxalphas = util.cartesian([tefi_scales, tefi_alphas])

        max_coeff = -math.inf
        for j in range(scalesxalphas.shape[0]):
            transformed_template = imresize(u_template, scalesxalphas[j][0])
            transformed_template = rotate(transformed_template,
                                          scalesxalphas[j][1])

            y_window, x_window = (math.floor(
                transformed_template.shape[0] /
                2), math.floor(transformed_template.shape[1] / 2))

            cropped_search = u_search_image[y - y_window:y + y_window + 1,
                                            x - x_window:x + x_window + 1]

            if (y < y_window or x < x_window
                    or cropped_search.shape < transformed_template.shape
                    or cropped_search.shape != transformed_template.shape):
                score = -1
            else:
                result = cv2.matchTemplate(transformed_template.astype(
                    np.float32),
                                           cropped_search.astype(np.float32),
                                           method=cv2.TM_CCORR_NORMED)
                score = np.average(result)

            if score > max_coeff:
                max_coeff = score

        tefi_coeffs[i] = max_coeff

        if verbose:  # pragma: no cover
            image_pixels[y // upsampling, x // upsampling] = max_coeff

    if use_percentile:
        thresh = np.percentile(tefi_coeffs, int(thresh))

    result_points = candidate_pixels[np.where(tefi_coeffs >= thresh)]
    result_coeffs = tefi_coeffs[np.where(tefi_coeffs >= thresh)]

    x = result_points[0][1]
    y = result_points[0][0]

    ideal_y = u_search_image.shape[0] / 2
    ideal_x = u_search_image.shape[1] / 2

    if verbose:  # pragma: no cover
        plt.imshow(image_pixels, interpolation='none')
        plt.scatter(y=y / upsampling, x=x / upsampling, c='w', s=80)
        plt.show()

    x = (ideal_x - x) / upsampling
    y = (ideal_y - y) / upsampling

    return x, y, result_coeffs[0]
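A hypothetical call sketch for tefi. The template and search arrays, and the per-pixel scales and per-candidate angles, are stand-ins for what the cifi and rafi stages would normally produce.

import math
import numpy as np

# Stand-in inputs; in practice these come from the cifi and rafi stages.
template = np.random.rand(11, 11).astype(np.float32)
search_image = np.random.rand(43, 43).astype(np.float32)
candidates = np.array([[21, 21], [20, 22]])         # (y, x) candidate pixels
scale_map = np.full(search_image.shape, 1.0)        # best scale per search pixel
angles = np.array([0.0, math.pi / 8])               # best angle per candidate

x_off, y_off, strength = tefi(template, search_image, candidates,
                              scale_map, angles,
                              upsampling=10, thresh=100,
                              alpha=math.pi / 2, use_percentile=True)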
Example #54
0
def load_series(pn, crop_to, sort_by='rot', blur=0, verbose=False):
    ''' 
    loads all ptycho reconstructions in a folder and sorts by a parameter
    
    Parameters
    ----------
    
    pn : String pathname of folder

    crop_to: Int size of cropped object reconstruction

    sort_by: 'rot' or 'step', parameter by which to sort the data

    blur: Int to pass as sigma to gaussian-blur the object phase. 0 = no blur

    verbose: Bool to print detailed output

    Returns
    -------

    d_s: hyperspy Signal2D object function

    p_s: hyperspy Signal2D probe function

    d_s_fft: hyperspy Signal2D Fourier transform of object function

    rad_fft: hyperspy Signal1D radial profile of d_s_fft

    r_s: hyperspy Signal1D scan rotation

    s_s: hyperspy Signal1D probe step size

    e_s: hyperspy Signal1D final error value
    
    
    Example usage
    -------------
    from epsic_tools.toolbox.ptychography.load_pycho_series import load_series
    pn = r'Y:\2020\cm26481-1\processing\Merlin\20200130_80kV_graphene_600C_pty\cluster\processing\pycho'
    d_s, p_s, d_s_fft, rad_fft, r_s, s_s, e_s = load_series(pn,sort_by = 'rot', crop_to = 80)
    hs.plot.plot_signals([d_s,p_s,d_s_fft, rad_fft], navigator_list=[r_s,s_s, e_s,None])

    To do
    -----
    Break loading from the hdf file into separate functions
    '''
    pn = pn + '/*.hdf'
    #build list of files
    fp_list = glob.glob(pn)
    len_dat = len(fp_list)
    n = 0  # counter
    # iterate through files
    print(pn)
    if verbose:
        print(fp_list)
    for this_fp in fp_list:
        fj = this_fp[:-4] + '.json'
        #open json file
        with open(fj) as r:
            params = json.load(r)
        with h5py.File(this_fp, 'r') as d5:
            #get phase data
            dat = d5['entry_1']['process_1']['output_1']['object_phase']
            dat = dat[0, 0, 0, 0, 0, :, :]
            #get modulus data
            dat_m = d5['entry_1']['process_1']['output_1']['object_modulus']

            dat_m = dat_m[0, 0, 0, 0, 0, :, :]
            #rotate
            rot_angle = 90 - params['process']['common']['scan']['rotation']
            dat = rotate(dat, rot_angle)
            dat_m = rotate(dat_m, rot_angle)

            #get probe
            probe = np.array(
                d5['entry_1']['process_1']['output_1']['probe_phase'])
            #sum separate coherent modes
            probe = probe[:, :, 0, 0, 0, :, :].sum(axis=(0, 1))

            probe_m = np.array(
                d5['entry_1']['process_1']['output_1']['probe_modulus'])
            probe_m = probe_m[:, :, 0, 0, 0, :, :].sum(axis=(0, 1))

            #get complex probe
            probe_c = np.array(d5['entry_1']['process_1']['output_1']['probe'])
            probe_c = probe_c[:, :, 0, 0, 0, :, :].sum(axis=(0, 1))

            #get error plot
            error = np.array(d5['entry_1']['process_1']['output_1']['error'])
            error = error[error != 0]
            d5.close()  # probably not necessary but just in case!
        if n == 0:
            #initialize arrays on first iteration - make bigger than first data to account for change in size
            shape_dat = int(10 * (np.ceil(dat.shape[0] / 10) + 1))
            shape_probe = int(10 * (np.ceil(probe.shape[0] / 10) + 1))

            if verbose:
                print(n, len_dat, shape_dat, shape_probe)
            dat_arr = np.empty(shape=(2, len_dat, shape_dat, shape_dat))
            probe_arr = np.empty(shape=(2, len_dat, shape_probe, shape_probe))
            rot_arr = np.empty(shape=len_dat)
            step_arr = np.empty(shape=len_dat)
            err_arr = np.empty(shape=len_dat)
            #full_err_arr = []#np.empty( (len_dat, len(error),))

        # calculate parameters to pad loaded data to same size as initiated array if necessary
        dat_diff = shape_dat - dat.shape[0]
        pad_dat = int(np.ceil(dat_diff / 2))
        del_dat = int(pad_dat - np.floor(dat_diff / 2))

        probe_diff = shape_probe - probe.shape[0]
        pad_probe = int(np.ceil(probe_diff / 2))
        del_probe = int(pad_probe - np.floor(probe_diff / 2))

        # populate rotation, step and error array
        rot_arr[n] = float(params['process']['common']['scan']['rotation'])
        step_arr[n] = float(params['process']['common']['scan']['dR'][0])

        err_arr[n] = error[-1]
        #full_err_arr.append(error)

        if verbose:
            print(n, ' step : ', step_arr[n], ', rot : ', rot_arr[n],
                  ', err : ', err_arr[n])
        # load data into arrays (padded if necessary)
        if pad_dat > 0:
            dat_arr[0, n, :, :] = np.pad(dat[del_dat:, del_dat:], pad_dat,
                                         'edge')  #object phase
            dat_arr[1, n, :, :] = np.pad(dat_m[del_dat:, del_dat:], pad_dat,
                                         'edge')  #object mod
        else:
            start_ind = int(-np.floor(dat_diff / 2))
            end_ind = int(np.ceil(dat_diff / 2))
            if end_ind == 0:
                end_ind = dat.shape[0]
            dat_arr[0, n, :, :] = dat[start_ind:end_ind,
                                      start_ind:end_ind]  #object phase
            dat_arr[1, n, :, :] = dat_m[start_ind:end_ind,
                                        start_ind:end_ind]  #object mod

        if pad_probe > 0:
            probe_arr[0, n, :, :] = np.pad(probe[del_probe:, del_probe:],
                                           pad_probe, 'edge')  #probe phase
            probe_arr[1, n, :, :] = np.pad(probe_m[del_probe:, del_probe:],
                                           pad_probe, 'edge')  #probe mod
        else:
            probe_start_ind = int(-np.floor(probe_diff / 2))
            probe_end_ind = int(np.ceil(probe_diff / 2))
            if probe_end_ind == 0:
                probe_end_ind = probe.shape[0]
            probe_arr[0, n, :, :] = probe[probe_start_ind:probe_end_ind,
                                          probe_start_ind:
                                          probe_end_ind]  #probe phase
            probe_arr[1, n, :, :] = probe_m[probe_start_ind:probe_end_ind,
                                            probe_start_ind:
                                            probe_end_ind]  #probe mod

        n = n + 1

    # define structured array and sort
    w_type = np.dtype([('rot', 'float'), ('step', 'float')])
    w = np.empty(len(rot_arr), dtype=w_type)
    w['rot'] = rot_arr
    w['step'] = step_arr

    if sort_by == 'rot':
        sort_ind = np.argsort(w, order=('rot', 'step'))
    elif sort_by == 'step':
        sort_ind = np.argsort(w, order=('step', 'rot'))
    else:
        raise ValueError("sort_by must be 'rot' or 'step'")
    print(sort_ind)
    # re-order arrays
    rot_sort = rot_arr[sort_ind]
    step_sort = step_arr[sort_ind]
    err_sort = err_arr[sort_ind]
    dat_sort = dat_arr[:, sort_ind, :, :]
    probe_sort = probe_arr[:, sort_ind, :, :]
    #full_err_arr from list to padded array
    #temp_arr = np.zeros([len(full_err_arr),len(max(full_err_arr,key = lambda x: len(x)))])
    #for i,j in enumerate(full_err_arr):
    #    temp_arr[i][0:len(j)] = j
    #full_err_arr = temp_arr

    #full_err_sort = full_err_arr[sort_ind, :]
    # unsorted to hs signals
    '''
    d = hs.signals.Signal2D(data = dat_arr)
    p = hs.signals.Signal2D(data = probe_arr)
    r = hs.signals.Signal1D(data = rot_arr)
    s = hs.signals.Signal1D(data = step_arr)
    e = hs.signals.Signal1D(data = err_arr)
    '''

    #sorted to hs signals
    d_s = hs.signals.Signal2D(data=dat_sort)
    p_s = hs.signals.Signal2D(data=probe_sort)
    r_s = hs.signals.Signal1D(data=rot_sort)
    s_s = hs.signals.Signal1D(data=step_sort)
    e_s = hs.signals.Signal1D(data=err_sort)
    #fe_s = hs.signals.Signal1D(data = full_err_sort)
    #crop
    '''
    d.crop(axis = (2), start =int((shape_dat / 2) - (crop_to/2)), end = int((shape_dat / 2) + (crop_to/2) ))
    d.crop(axis = (3), start =int((shape_dat / 2) - (crop_to/2)), end = int((shape_dat / 2) + (crop_to/2) ))
    '''

    d_s.crop(axis=(2),
             start=int((shape_dat / 2) - (crop_to / 2)),
             end=int((shape_dat / 2) + (crop_to / 2)))
    d_s.crop(axis=(3),
             start=int((shape_dat / 2) - (crop_to / 2)),
             end=int((shape_dat / 2) + (crop_to / 2)))

    #gaussian blur
    print(d_s.data.shape)
    print(type(d_s.data))
    d_s.map(gaussian, sigma=blur)

    # fft
    '''
    dat_fft = np.fft.fft2(d.data)
    dat_fft = np.fft.fftshift(dat_fft)
    
    d_fft = hs.signals.Signal2D(data = np.log10(np.abs(dat_fft)**2))
    d_fft.data = np.flip(d_fft.data, axis = 0)
    '''

    dat_sort_fft = np.fft.fft2(d_s.data)
    dat_sort_fft = np.fft.fftshift(dat_sort_fft)
    d_s_fft = hs.signals.Signal2D(data=np.log10(np.abs(dat_sort_fft)**2))
    d_s_fft.data = np.flip(d_s_fft.data, axis=0)
    fft_mask = np.zeros_like(d_s_fft.data, dtype='bool')
    fft_shape = fft_mask.shape

    d_s_fft.inav[:, 0].data[:, int(fft_shape[-1] / 2), :] = 0
    d_s_fft.inav[:, 0].data[:, :, int(fft_shape[-2] / 2)] = 0

    rad_fft = radial_profile.radial_profile_stack(d_s_fft)
    print(n, ' files loaded successfully')
    return d_s, p_s, d_s_fft, rad_fft, r_s, s_s, e_s  #, fe_s
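The rot/step ordering inside load_series relies on NumPy structured-array sorting; a standalone sketch of that trick with made-up values:

import numpy as np

w = np.empty(3, dtype=[('rot', 'float'), ('step', 'float')])
w['rot'] = [30.0, 10.0, 10.0]
w['step'] = [0.2, 0.5, 0.1]

order = np.argsort(w, order=('rot', 'step'))   # sort by rot, tie-break on step
print(order)                                   # -> [2 1 0]

Example #55
0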
def data_augentation(X, Y, data_gen_args, data_path_file_name):

    if "horizontal_flip" in data_gen_args:
        """ Flip image and groundtruth horizontally by a chance of 33% 
        # Arguments
            X: tensor, the input image
            Y: tensor, the groundtruth
            returns: two tensors, X, Y, which have the same dimensions as the input 
        """
        if data_gen_args["horizontal_flip"] == True & random.choice(
            [True, False, False]) == True:
            X = np.flip(X, len(np.shape(X)) - 1)
            Y = np.flip(Y, len(np.shape(Y)) - 1)

    if "vertical_flip" in data_gen_args:
        """ Flip image and groundtruth vertically by a 33% chance
        # Arguments
            X: tensor, the input image
            Y: tensor, the groundtruth
            returns: two tensors, X, Y, which have the same dimensions as the input
        """
        if data_gen_args["vertical_flip"] == True & random.choice(
            [True, False, False]) == True:
            X = np.flip(X, len(np.shape(X)) - 2)
            Y = np.flip(Y, len(np.shape(Y)) - 2)

    if "rotation_range" in data_gen_args and data_gen_args[
            "rotation_range"] > 0:
        """ Rotate image and groundtruth by a random angle within the rotation range 
        # Arguments
            X: tensor, the input image
            Y: tensor, the groundtruth
            returns: two tensors, X, Y, which have the same dimensions as the input
        """
        angle = np.random.choice(int(
            data_gen_args["rotation_range"] * 100)) / 100
        if len(np.shape(X)) - 1 == 3:
            X = np.nan_to_num(
                rotate(X, angle, mode='nearest', axes=(2, 3), reshape=False))
            Y = np.nan_to_num(
                rotate(Y, angle, mode='nearest', axes=(2, 3), reshape=False))
        else:
            X = np.nan_to_num(
                rotate(X, angle, mode='nearest', axes=(1, 2), reshape=False))
            Y = np.nan_to_num(
                rotate(Y, angle, mode='nearest', axes=(1, 2), reshape=False))
        # Zoom so that there are no empty edges
        shape_X = np.shape(X)
        shape_Y = np.shape(Y)
        length_X = np.max(shape_X)
        zoom_length_X = length_X / (np.cos(math.radians(angle)) +
                                    np.sin(math.radians(angle)))
        zoom_length_Y = length_X / (np.cos(math.radians(angle)) +
                                    np.sin(math.radians(angle)))
        X = X[...,
              int((length_X - zoom_length_X) /
                  2):int(length_X - ((length_X - zoom_length_X) / 2)),
              int((length_X - zoom_length_X) /
                  2):int(length_X - ((length_X - zoom_length_X) / 2)), :]
        X = np.nan_to_num(sk.transform.resize(X, shape_X))
        Y = Y[...,
              int((length_X - zoom_length_Y) /
                  2):int(length_X - ((length_X - zoom_length_Y) / 2)),
              int((length_X - zoom_length_Y) /
                  2):int(length_X - ((length_X - zoom_length_Y) / 2)), :]
        Y = np.nan_to_num(sk.transform.resize(Y, shape_Y))

    if "width_shift_range" in data_gen_args and data_gen_args[
            "width_shift_range"] > 0 & random.choice([True, False]) == True:
        """ Shift the image and groundtruth width by a number within the width shift range by a 50% chance
        # Arguments
            X: tensor, the input image
            Y: tensor, the groundtruth
            width shift range: float, amount of width shift
            returns: two tensors, X, Y, which have the same dimensions as the input
        """
        width_shift = np.random.choice(
            int(data_gen_args["width_shift_range"] * 100))
        shape_X = np.shape(X)
        shape_Y = np.shape(Y)
        if len(np.shape(X)) - 1 == 3:
            size = np.size(X[0, 0, :, 0, 0])
        else:
            size = np.size(X[0, 0, :, 0, ])
        start_width_shift = np.random.choice(
            int(size - (size - width_shift) + 1))

        X = X[:, :,
              start_width_shift:int(size - int(size * width_shift / 100)), ...]
        X = np.nan_to_num(sk.transform.resize(X, shape_X))
        Y = Y[:, :,
              start_width_shift:int(size - int(size * width_shift / 100)), ...]
        Y = np.nan_to_num(sk.transform.resize(Y, shape_Y))

    if "height_shift_range" in data_gen_args and data_gen_args[
            "height_shift_range"] > 0 & random.choice([True, False]) == True:
        """ Shift the image and groundtruth height by a number within the width shift range by a 50% chance
        # Arguments
            X: tensor, the input image
            Y: tensor, the groundtruth
            height shift range: float, amount of height shift
            returns: two tensors, X, Y, which have the same dimensions as the input
        """
        height_shift = np.random.choice(
            int(data_gen_args["height_shift_range"] * 100))
        shape_X = np.shape(X)
        shape_Y = np.shape(Y)
        if len(np.shape(X)) - 1 == 3:
            size = np.size(X[0, 0, :, 0, 0])
        else:
            size = np.size(X[0, 0, :, 0, ])
        start_heigth_shift = np.random.choice(
            int(size - (size - height_shift) + 1))
        X = X[...,
              start_heigth_shift:int(size - int(size * height_shift / 100)), :]
        X = np.nan_to_num(sk.transform.resize(X, shape_X))
        Y = Y[...,
              start_heigth_shift:int(size - int(size * height_shift / 100)), :]
        Y = np.nan_to_num(sk.transform.resize(Y, shape_Y))

    if "zoom_range" in data_gen_args and data_gen_args[
            "zoom_range"] > 0 & random.choice([True, False]) == True:
        """ Zooms the image and groundtruth to a random magnification in the zoom range and to a random position in the image by a 50% chance
        # Arguments
            X: tensor, the input image
            Y: tensor, the groundtruth
            zoom range: float, amount of zoom
            returns: two tensors, X, Y, which have the same dimensions as the input
        """
        zoom = np.random.choice(int(data_gen_args["zoom_range"] * 100)) + 1
        shape_X = np.shape(X)
        shape_Y = np.shape(Y)
        if len(np.shape(X)) - 1 == 3:
            size = np.size(X[0, 0, :, 0, 0])
        else:
            size = np.size(X[0, 0, :, 0, ])
        x_position = np.random.choice(zoom)
        y_position = np.random.choice(zoom)
        X = X[..., x_position:int(x_position + size - zoom),
              y_position:int(y_position + size - zoom), :]
        X = np.nan_to_num(sk.transform.resize(X, shape_X))
        Y = Y[..., x_position:int(x_position + size - zoom),
              y_position:int(y_position + size - zoom), :]
        Y = np.nan_to_num(sk.transform.resize(Y, shape_Y))

    if "gaussian_noise" in data_gen_args and data_gen_args[
            "gaussian_noise"] > 0 and random.choice([True, False, False
                                                     ]) == True:
        """ Adds gaussian noise to the image by a one-third chance
        # Arguments
            X: tensor, the input image
            gaussian_noise: float, amount of gaussian noise added
            returns: one tensor, X, which has the same dimensions as the input
        """
        value = data_gen_args["gaussian_noise"]
        X = X + np.random.normal(0, value)

    if "gaussian_blur_image" in data_gen_args and data_gen_args[
            "gaussian_blur_image"] > 0 and random.choice([True, False, False
                                                          ]) == True:
        """ Blurs the input image in gaussian fashin by a 33% chance
        # Arguments
            X: tensor, the input image
            gaussian_blur: float, amound of gaussian blur added
            returns: one tensor, X, which have the same dimensions as the input
        """
        value = data_gen_args["gaussian_blur_image"]
        X = gaussian_filter(X, sigma=value)

    if "gaussian_blur_label" in data_gen_args and data_gen_args[
            "gaussian_blur_label"] > 0 and random.choice([True, False, False
                                                          ]) == True:
        """ Blurs the groundtruth image in gaussian fashin by a 33% chance
        # Arguments
            Y: tensor, the groundtruth image
            gaussian_blur: float, amound of gaussian blur added
            returns: one tensor, Y, which have the same dimensions as the input
        """
        value = data_gen_args["gaussian_blur_label"]
        Y = gaussian_filter(Y, sigma=value)

    if "contrast_range" in data_gen_args and random.choice(
        [True, False, False]) == True:
        """ Increases or decreases the contrast of the input in by the range given by a 33% chance
        # Arguments
            X: tensor, the groundtruth image
            contrast_range: float, amound of contrast added or removed
            returns: one tensor, X, which have the same dimensions as the input
        """
        range = np.random.uniform(-1, 1) * data_gen_args["contrast_range"] + 1
        min_X = np.min(X)
        X = (X - min_X) * range + min_X

    if "brightness_range" in data_gen_args and random.choice([True, False
                                                              ]) == True:
        """ Increases or decreases the brightness of the input in by the range given by a 50% chance
        # Arguments
            X: tensor, the groundtruth image
            brightness_range: float, amound of gaussian noise added
            returns: one tensor, X, which have the same dimensions as the input
        """
        range = np.random.uniform(-1, 1) * data_gen_args["brightness_range"]
        X = X + range * X

    if "threshold_background_image" in data_gen_args and random.choice(
        [True, False, False]) == True:
        """ Thresholds the input image at the mean and sets every pixelvalue below the mean to zero by a 33% chance
        # Arguments
            X: tensor, the groundtruth image
            threshold_background_image: True or False
            returns: one tensor, X, which have the same dimensions as the input
        """
        mean_X = np.mean(X)
        X[X < mean_X] = 0

    if "threshold_background_groundtruth" in data_gen_args and random.choice(
        [True, False, False]) == True:
        """ Thresholds the groundtruth image at the mean and sets every pixelvalue below the mean to zero by a 33% chance
        # Arguments
            Y: tensor, the groundtruth image
            threshold_background_groundtruth: True or False
            returns: one tensor, Y, which have the same dimensions as the input
        """
        mean_Y = np.mean(Y) * 0.8
        Y[Y < mean_Y] = 0

    if "binarize_mask" in data_gen_args and data_gen_args[
            "binarize_mask"] == True:
        """ Binarize the groundtruth image at the mean and sets every pixelvalue below the mean to zero and every above to one
        # Arguments
            Y: tensor, the groundtruth image
            binarize_mask: True or False
            returns: one tensor, Y, which have the same dimensions as the input
        """
        mean_Y = np.mean(Y)
        Y[Y < mean_Y] = 0
        Y[Y >= mean_Y] = 1

    if "save_augmented_images" in data_gen_args and data_gen_args[
            "save_augmented_images"] == True:
        """ save all augmented images to a folder named "Augmentations" in the project folder to make augmentations trackable
        # Arguments
            X: tensor, the input image
            Y: tensor, the groundtruth image
            save_augmented_images: True or False
            returns: Saves the image pair as .png to the Augmentations folder
        """
        data_path, file_name = os.path.split(data_path_file_name)
        Aug_path = (data_path + '/Augmentations/')
        os.makedirs(Aug_path, exist_ok=True)
        #TODO: Check if the folder and saving really works with the path
        title = file_name.split("'")[1]
        title = os.path.splitext(title)[0]
        #datacomb = np.concatenate((X, Y), axis=2).astype("uint8")
        #sk.io.imsave(Aug_path + title, datacomb[0,...])
        if len(np.shape(X)) == 5:
            plot2images(X[0, 10, ..., 0], Y[0, 10, :, :, 0], Aug_path, title)
        elif len(np.shape(X)) == 4 and np.shape(X)[-1] == 3:
            plot2images(X[0, ...], Y[0, ...], Aug_path, title)
        elif len(np.shape(X)) == 4 and np.shape(X)[-1] == 1:
            plot2images(X[0, ..., 0], Y[0, :, :, 0], Aug_path, title)
    #logging.info("Augmented Dimensions:", np.shape(X), np.shape(Y))
    return X, Y
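A hypothetical configuration and call for data_augentation; the key names follow the checks inside the function, while the arrays and the file path below are placeholders.

import numpy as np

# Hypothetical augmentation settings; only a subset of the supported keys is used.
data_gen_args = {
    "horizontal_flip": True,
    "vertical_flip": True,
    "gaussian_noise": 0.05,
    "brightness_range": 0.2,
    "binarize_mask": True,
    "save_augmented_images": False,
}

# Placeholder batch of shape (batch, height, width, channels).
X = np.random.rand(1, 128, 128, 1)
Y = (np.random.rand(1, 128, 128, 1) > 0.5).astype(float)

X_aug, Y_aug = data_augentation(X, Y, data_gen_args, "/data/example_0001.tif")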
Example #56
0
def main():
    usage = '''
Usage:
-----------------------------------------------------------------------
python %s [-d spatialDimensions] [-p bandPositions [-r resolution ratio]
[-b registration band]  msfilename panfilename 
-----------------------------------------------------------------------
bandPositions and spatialDimensions are lists, 
e.g., -p [1,2,3] -d [0,0,400,400]

Outfile name is msfilename_pan_dwt with same format as msfilename    

Note: PAN image must completely overlap MS image subset chosen  
-----------------------------------------------------''' %sys.argv[0]
    options, args = getopt.getopt(sys.argv[1:],'hd:p:r:b:')
    ratio = 4
    dims1 = None
    pos1 = None  
    k1 = 0          
    for option, value in options:
        if option == '-h':
            print usage
            return 
        elif option == '-r':
            ratio = eval(value)
        elif option == '-d':
            dims1 = eval(value) 
        elif option == '-p':
            pos1 = eval(value)    
        elif option == '-b':
            k1 = eval(value)-1
    if len(args) != 2:
        print 'Incorrect number of arguments'
        print usage
        sys.exit(1)                         
    gdal.AllRegister()
    file1 = args[0]
    file2 = args[1]   
    path = os.path.dirname(file1)
    basename1 = os.path.basename(file1)
    root1, ext1 = os.path.splitext(basename1)
    outfile = '%s/%s_pan_dwt%s'%(path,root1,ext1)       
#  MS image    
    inDataset1 = gdal.Open(file1,GA_ReadOnly)     
    try:    
        cols = inDataset1.RasterXSize
        rows = inDataset1.RasterYSize    
        bands = inDataset1.RasterCount
    except Exception as e:
        print 'Error: %s --Image could not be read'%e
        sys.exit(1)    
    if pos1 is None:
        pos1 = range(1,bands+1)
    num_bands = len(pos1)    
    if dims1 is None:
        dims1 = [0,0,cols,rows]
    x10,y10,cols1,rows1 = dims1    
#  PAN image    
    inDataset2 = gdal.Open(file2,GA_ReadOnly)     
    try:  
        bands = inDataset2.RasterCount
    except Exception as e:
        print 'Error: %s --Image could not be read'%e  
        sys.exit(1)   
    if bands>1:
        print 'PAN image must be a single band'
        sys.exit(1)     
    geotransform1 = inDataset1.GetGeoTransform()
    geotransform2 = inDataset2.GetGeoTransform()   
    if (geotransform1 is None) or (geotransform2 is None):
        print 'Image not georeferenced, aborting' 
        sys.exit(1)      
    print '========================='
    print '   DWT Pansharpening'
    print '========================='
    print time.asctime()     
    print 'MS  file: '+file1
    print 'PAN file: '+file2       
#  image arrays
    band = inDataset1.GetRasterBand(1)
    tmp = band.ReadAsArray(0,0,1,1)
    dt = tmp.dtype
    MS = np.asarray(np.zeros((num_bands,rows1,cols1)),dtype=dt) 
    k = 0                                   
    for b in pos1:
        band = inDataset1.GetRasterBand(b)
        MS[k,:,:] = band.ReadAsArray(x10,y10,cols1,rows1)
        k += 1
#  if integer assume 11bit quantization otherwise must be byte   
    if MS.dtype == np.int16:
        fact = 8.0
        MS = auxil.byteStretch(MS,(0,2**11)) 
    else:
        fact = 1.0
#  read in corresponding spatial subset of PAN image    
    if (geotransform1 is None) or (geotransform2 is None):
        print 'Image not georeferenced, aborting' 
        return
#  upper left corner pixel in PAN    
    gt1 = list(geotransform1)               
    gt2 = list(geotransform2)
    ulx1 = gt1[0] + x10*gt1[1]
    uly1 = gt1[3] + y10*gt1[5]
    x20 = int(round(((ulx1 - gt2[0])/gt2[1])))
    y20 = int(round(((uly1 - gt2[3])/gt2[5])))
    cols2 = cols1*ratio
    rows2 = rows1*ratio
    band = inDataset2.GetRasterBand(1)
    PAN = band.ReadAsArray(x20,y20,cols2,rows2)        
#  if integer assume 11-bit quantization, otherwise must be byte    
    if PAN.dtype == np.int16:
        PAN = auxil.byteStretch(PAN,(0,2**11))                                   
#  compress PAN to resolution of MS image  
    panDWT = auxil.DWTArray(PAN,cols2,rows2)          
    r = ratio
    while r > 1:
        panDWT.filter()
        r /= 2
    bn0 = panDWT.get_quadrant(0) 
    lines0,samples0 = bn0.shape    
    bn1 = MS[k1,:,:]  
#  register (and subset) MS image to compressed PAN image 
    (scale,angle,shift) = auxil.similarity(bn0,bn1)
    tmp = np.zeros((num_bands,lines0,samples0))
    for k in range(num_bands): 
        bn1 = MS[k,:,:]                    
        bn2 = ndii.zoom(bn1, 1.0/scale)
        bn2 = ndii.rotate(bn2, angle)
        bn2 = ndii.shift(bn2, shift)
        tmp[k,:,:] = bn2[0:lines0,0:samples0]        
    MS = tmp            
#  compress pan once more, extract wavelet quadrants, and restore
    panDWT.filter()  
    fgpan = panDWT.get_quadrant(1)
    gfpan = panDWT.get_quadrant(2)
    ggpan = panDWT.get_quadrant(3)    
    panDWT.invert()       
#  output array            
    sharpened = np.zeros((num_bands,rows2,cols2),dtype=np.float32)     
    aa = np.zeros(3)
    bb = np.zeros(3)       
    print 'Wavelet correlations:'                                   
    for i in range(num_bands):
#      make copy of panDWT and inject ith ms band                
        msDWT = copy.deepcopy(panDWT)
        msDWT.put_quadrant(MS[i,:,:],0)
#      compress once more                 
        msDWT.filter()
#      determine wavelet normalization coefficents                
        ms = msDWT.get_quadrant(1)    
        aa[0],bb[0],R = auxil.orthoregress(fgpan.ravel(), ms.ravel())
        Rs = 'Band '+str(i+1)+': %8.3f'%R
        ms = msDWT.get_quadrant(2)
        aa[1],bb[1],R = auxil.orthoregress(gfpan.ravel(), ms.ravel())
        Rs += '%8.3f'%R                     
        ms = msDWT.get_quadrant(3)
        aa[2],bb[2],R = auxil.orthoregress(ggpan.ravel(), ms.ravel()) 
        Rs += '%8.3f'%R    
        print Rs         
#      restore once and normalize wavelet coefficients
        msDWT.invert() 
        msDWT.normalize(aa,bb)   
#      restore completely and collect result
        r = 1
        while r < ratio:
            msDWT.invert()
            r *= 2                            
        sharpened[i,:,:] = msDWT.get_quadrant(0)      
    sharpened *= fact    
#  write to disk       
    driver = inDataset1.GetDriver()
    outDataset = driver.Create(outfile,cols2,rows2,num_bands,GDT_Float32)
    projection1 = inDataset1.GetProjection()
    if projection1 is not None:
        outDataset.SetProjection(projection1)        
    gt1 = list(geotransform1)
    gt1[0] += x10*ratio  
    gt1[3] -= y10*ratio
    gt1[1] = gt2[1]
    gt1[2] = gt2[2]
    gt1[4] = gt2[4]
    gt1[5] = gt2[5]
    outDataset.SetGeoTransform(tuple(gt1))   
    for k in range(num_bands):        
        outBand = outDataset.GetRasterBand(k+1)
        outBand.WriteArray(sharpened[k,:,:],0,0) 
        outBand.FlushCache() 
    outDataset = None    
    print 'Result written to %s'%outfile    
    inDataset1 = None
    inDataset2 = None                      
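The MS-to-PAN registration above undoes a similarity transform band by band; a minimal standalone sketch with scipy.ndimage, assuming the scale, angle and shift were estimated by some matching routine such as auxil.similarity.

import numpy as np
from scipy.ndimage import interpolation as ndii

# Assumed transform parameters (in the script they come from auxil.similarity).
scale, angle, shift = 1.02, 1.5, (3.0, -2.0)

band = np.random.rand(256, 256).astype(np.float32)
registered = ndii.zoom(band, 1.0 / scale)     # undo the scale difference
registered = ndii.rotate(registered, angle)   # undo the rotation (degrees)
registered = ndii.shift(registered, shift)    # undo the translation (rows, cols)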
Example #57
0
def cnn_training(X_train, y_train, X_val, y_val, 
             fc_units=[500,200,100], conv_featmap=[6,16,32],
             l2_norm=0.01,
             seed=235,
             learning_rate=1e-2,
             epoch=20,
             batch_size=245,
             verbose=False,
             pre_trained_model=None,imglen=164):
    print("Building Network Parameters: ")
    print("fc_units={}".format(fc_units))
    print("l2_norm={}".format(l2_norm))
    print("seed={}".format(seed))
    print("learning_rate={}".format(learning_rate))

    # define the variables and parameter needed during training
    with tf.name_scope('inputs'):
        xs = tf.placeholder(shape=[None,imglen,imglen,4], dtype=tf.float32)
        ys = tf.placeholder(shape=[None, ], dtype=tf.int64)
        
    #xs,hmm=augment(xs,ys,horizontal_flip=True,rotate=3,crop_probability=0.4) 

    
    output,loss=CNN(xs,ys,img_len=imglen, channel_num=4, output_size=2,conv_featmap=conv_featmap,fc_units=fc_units,conv_kernel_size=[3,3], pooling_size=[2,2],l2_norm=l2_norm, seed=seed)

    iters = int(X_train.shape[0] / batch_size)
    print('number of batches for training: {}'.format(iters))

    step = train_step(loss)
    eve,pred = evaluate(output, ys)

    iter_total = 0
    best_acc = 0
    cur_model_name = 'cnn_{}'.format(int(time.time()))

    with tf.Session() as sess:
        merge = tf.summary.merge_all()

        writer = tf.summary.FileWriter("log/{}".format(cur_model_name), sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        # try to restore the pre_trained
        if pre_trained_model is not None:
            try:
                print("Load the model from: {}".format(pre_trained_model))
                saver.restore(sess, 'model/{}'.format(pre_trained_model))
            except Exception:
                print("Load model Failed!")
                pass
        
        
        store_trainacc=[]
        store_valacc=[]
        for epc in range(epoch):
            print("epoch {} ".format(epc + 1))

            for itr in range(iters):
                iter_total += 1
                rng=randint(1,100)

                training_batch_x = X_train[itr * batch_size: (1 + itr) * batch_size]
                training_batch_y = y_train[itr * batch_size: (1 + itr) * batch_size]
                
                # apply at most one random augmentation per batch
                if 60 < rng <= 75:
                    training_batch_x = translate(training_batch_x, 2, 2)
                elif 75 < rng <= 90:
                    training_batch_x = rotate(training_batch_x, angle=3)
                elif 90 < rng <= 95:
                    training_batch_x = flip(training_batch_x, mode='h')
                elif 95 < rng <= 100:
                    training_batch_x = flip(training_batch_x, mode='v')
                else:
                    pass
                _, cur_loss = sess.run([step, loss], feed_dict={xs: training_batch_x, ys: training_batch_y})

                if itr == iters-1:
                    # do validation
                    train_eve=sess.run(eve,feed_dict={xs:X_train,ys:y_train})
                    train_acc=100-train_eve*100/y_train.shape[0]
                    store_trainacc.append(train_acc)
                    valid_eve, merge_result = sess.run([eve, merge], feed_dict={xs: X_val, ys: y_val})
                    valid_acc = 100 - valid_eve * 100 / y_val.shape[0]
                    store_valacc.append(valid_acc)
                    
                    #y=graph.get_tensor_by_name("evaluate/ArgMax:0")
                    
                    result=sess.run(pred,feed_dict={xs:X_val})
                    percentage=sess.run(output,feed_dict={xs:X_val,ys: y_val})
                    
                    if verbose:
                        print('loss: {} validation accuracy : {}%'.format(
                            cur_loss,
                            valid_acc))

                    # save the merge result summary
                    writer.add_summary(merge_result, iter_total)

                    # when achieve the best validation accuracy, we store the model paramters
                    if valid_acc > best_acc:
                        print('Best validation accuracy! iteration:{} accuracy: {}%'.format(iter_total, valid_acc))
                        best_acc = valid_acc
                        saver.save(sess, 'model/{}'.format(cur_model_name))
                        store_pred=result
                        store_truelabel=y_val

    print("Traning ends. The best valid accuracy is {}. Model named {}.".format(best_acc, cur_model_name))
    return best_acc,store_trainacc,store_valacc,store_pred,store_truelabel,percentage
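A hypothetical invocation of cnn_training; the arrays are placeholders for 164x164, 4-channel images with binary labels, and the CNN / train_step / evaluate helpers are assumed to be defined as in the surrounding code.

import numpy as np

# Placeholder data standing in for a real dataset.
X_train = np.random.rand(490, 164, 164, 4).astype(np.float32)
y_train = np.random.randint(0, 2, size=490)
X_val = np.random.rand(100, 164, 164, 4).astype(np.float32)
y_val = np.random.randint(0, 2, size=100)

best_acc, train_hist, val_hist, preds, truth, probs = cnn_training(
    X_train, y_train, X_val, y_val,
    fc_units=[500, 200, 100], conv_featmap=[6, 16, 32],
    learning_rate=1e-3, epoch=2, batch_size=245, verbose=True)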
Example #58
0
def gridness(rate_map, box_xlen, box_ylen, return_acorr=False,
             step_size=0.1*pq.m):
    r'''Calculates gridness of a rate map. Calculates the normalized
    autocorrelation (A) of a rate map B where A is given as
    A = \frac{1}{n}\sum_{x,y}(B - \bar{B})^{2}/\sigma_{B}^{2}. Further, Pearson's
    product-moment correlation coefficient is calculated between A and A_{rot},
    A rotated by 30 and 60 degrees. Finally the gridness is calculated as the
    difference between the minimum of coefficients at 60 degrees and the
    maximum of coefficients at 30 degrees, i.e. gridness = min(r60) - max(r30).
    To focus the analysis on the symmetry of A, the central and the
    outer parts of A are masked out at increasing radii in steps of
    ``step_size`` and the gridness is maximized over these masks. This function
    is inspired by Lukas Solanka's gridcells package from Matt Nolan's lab.

    Parameters
    ----------
    rate_map : numpy.ndarray
    box_xlen : quantities scalar in m
        side length of quadratic box along x
    box_ylen : quantities scalar in m
        side length of quadratic box along y
    step_size : quantities scalar in m
        step size in masking
    return_acorr : bool
        return autocorrelation map or not

    Returns
    -------
    out : gridness, (autocorrelation map)
    '''
    from scipy.ndimage.interpolation import rotate
    import numpy.ma as ma
    from exana.misc.tools import (is_quantities, fftcorrelate2d,
                                            masked_corrcoef2d)
    is_quantities([box_xlen, box_ylen, step_size], 'scalar')
    box_xlen = box_xlen.rescale('m').magnitude
    box_ylen = box_ylen.rescale('m').magnitude
    step_size = step_size.rescale('m').magnitude
    tmp_map = rate_map.copy()
    tmp_map[~np.isfinite(tmp_map)] = 0
    acorr = fftcorrelate2d(tmp_map, tmp_map, mode='full', normalize=True)
    rows, cols = acorr.shape
    b_x = np.linspace(-box_xlen/2., box_xlen/2., rows)
    b_y = np.linspace(-box_ylen/2., box_ylen/2., cols)
    B_x, B_y = np.meshgrid(b_x, b_y)
    grids = []
    acorrs = []
    # TODO find size of middle gaussian and exclude
    for outer in np.arange(box_xlen/4, box_xlen/2, step_size):
        m_acorr = ma.masked_array(acorr, mask=np.sqrt(B_x**2 + B_y**2) > outer)
        for inner in np.arange(0, box_xlen/4, step_size):
            m_acorr = \
                ma.masked_array(m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < inner)
            angles = range(30, 180+30, 30)
            corr = []
            # Rotate and compute correlation coefficient
            for angle in angles:
                rot_acorr = rotate(m_acorr, angle, reshape=False)
                corr.append(masked_corrcoef2d(rot_acorr, m_acorr)[0, 1])
            r60 = corr[1::2]
            r30 = corr[::2]
            grids.append(np.min(r60) - np.max(r30))
            acorrs.append(m_acorr)
    if return_acorr:
        return max(grids), acorr,  # acorrs[grids.index(max(grids))]
    else:
        return max(grids)
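A small usage sketch for gridness; the rate map is a toy array, the quantities package provides the metre-valued scalars the function expects, and the exana helpers imported inside gridness must be available.

import numpy as np
import quantities as pq

rate_map = np.random.rand(50, 50)           # toy rate map standing in for real data
score, acorr = gridness(rate_map,
                        box_xlen=1.0 * pq.m, box_ylen=1.0 * pq.m,
                        return_acorr=True, step_size=0.1 * pq.m)
print('gridness score: {:.3f}'.format(score))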
Example #59
0
    def rotateImage(self, angle):
        self.image = rotate(self.image, angle=angle, reshape=False, cval=-1000)
        self.dicomRotation = angle
Example #60
0
def rotate2d(img, angle, axes=(0,1), reshape=False, interpolation=1, border_mode='constant', value=0):
    return sci.rotate(img, angle, axes, reshape=reshape, order=interpolation, mode=border_mode, cval=value)
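A brief usage sketch for rotate2d, assuming `sci` in the wrapper refers to scipy.ndimage.interpolation; the toy image below is a placeholder.

import numpy as np

img = np.zeros((64, 64), dtype=np.float32)
img[24:40, 24:40] = 1.0                       # a bright square to make the rotation visible

# 30-degree rotation, bilinear interpolation, constant padding with 0.
rotated = rotate2d(img, 30, interpolation=1, border_mode='constant', value=0)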