def spiderTransform(a, rot=0, shift=(0, 0), mirror=False, order=2):
    """
    Rotates (in degrees) about an off-center pixel, then shifts (in pixels),
    and finally mirrors an array.
    FROM http://www.wadsworth.org/spider_doc/spider/docs/man/apmq.html
    UNTESTED
    """
    ### make a copy
    b = a.copy()
    ### rotation is positive, but shifted by a half pixel
    b = ndimage.shift(b, shift=(-0.5, -0.5), mode='wrap', order=order)
    b = ndimage.rotate(b, angle=rot, reshape=False, mode='reflect', order=order)
    b = ndimage.shift(b, shift=(0.5, 0.5), mode='wrap', order=order)
    # shift is in rows/columns, not x,y
    rowcol = (shift[1], shift[0])
    b = ndimage.shift(b, shift=rowcol, mode='reflect', order=order)
    # mirror the image about the y-axis, i.e. flip left-right
    if mirror is True:
        b = numpy.fliplr(b)
    return b
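# A minimal usage sketch for spiderTransform (my addition, not from the SPIDER
# docs): a synthetic square, rotated, shifted and mirrored. The output shape is
# preserved because the rotation uses reshape=False.
import numpy
from scipy import ndimage

demo = numpy.zeros((16, 16))
demo[4:8, 6:10] = 1.0
out = spiderTransform(demo, rot=90, shift=(2, -1), mirror=True)
print(out.shape)  # (16, 16)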
def fluorescence(params, roi):
    """Finds a neuron from images using thresholding in a region of interest."""
    images = read_sequentially(params)
    values, locations = [], []
    try:
        cnt = 0
        imgs = ndimage.shift(next(images), roi[cnt], mode="wrap")
        cms_old = [params['y0'], params['x0']]
        #print "cms_old is ", cms_old
        val_old = []
        y0, x0 = cms_old
        height, width = imgs.shape
        cnt += 1
        while True:
            y1, x1, fluor, bg = similarity3(imgs, cms_old, [y0, x0], params)
            # implement a short memory of neuron position
            val_old.append([y1, x1])
            y0 = np.average([v[0] for v in val_old[-10:]])
            x0 = np.average([v[1] for v in val_old[-10:]])
            values.append([fluor, bg])
            locations.append([y1 - roi[cnt - 1][0],
                              x1 + params["cropx"][0] - roi[cnt - 1][1]])
            imgs = ndimage.shift(next(images), roi[cnt], mode="wrap")
            cnt += 1
    except StopIteration:
        pass
    finally:
        del images
    return np.array(values), np.array(locations)
def outlines(self):
    tops = []
    bottoms = []
    isGlass = 1.0 != np.array([s.n(None) for s in self.surfaces])
    from scipy.ndimage import shift
    drawEdge = np.logical_or(isGlass, shift(isGlass, 1, order=0))
    for pt, surface, drawFrom, drawTo in zip(self.surfaceVertices[-2:].T,
                                             self._surfaces,
                                             isGlass,
                                             shift(isGlass, 1, order=0)):
        ctr = pt[::-1][:, None]  # y then z components in a column.
        outline = ctr + surface.outline()
        if drawFrom or drawTo:
            tops.append(outline[:, -1])
            bottoms.append(outline[:, 0])
            if not drawFrom:
                tops.append([np.nan, np.nan])
                bottoms.append([np.nan, np.nan])
        if surface is self.apertureStop:
            y = surface.semidiam
            sz = y * 0.1
            yield ctr + (np.array([0, sz, -sz, 0, np.nan, 0, sz, -sz, 0]),
                         np.array([y, y + sz, y + sz, y, np.nan,
                                   -y, -y - sz, -y - sz, -y]))
            if drawFrom or drawTo:
                yield outline  # Outline the stop only if it's got power.
        else:
            yield outline
    yield np.transpose(tops)
    yield np.transpose(bottoms)
def RigidRegistrationXY_guess(self, img, ximg):
    '''
    @author: Christian Rossmann, PhD
    @license: Public Domain
    @blog: http://scientificcomputingco.blogspot.com/
    '''
    # Perform initial guess rotation & translation
    v_range = np.array(range(-10, 10))
    err = np.array([self.MeasureErr(img, ndimage.shift(ximg, (v, 0)))
                    for v in v_range])
    x = v_range[np.where(err == err.min())[0]]
    # Need to check what is this where from pylab. It gives the correct
    # result different from np.where
    err = np.array([self.MeasureErr(img, ndimage.shift(ximg, (0, v)))
                    for v in v_range])
    y = v_range[np.where(err == err.min())[0]]
    # List contains displacement in x and y and rotation
    param = [x, y]

    # Scipy optimize
    def _ErrFunc(param, img=img, ximg=ximg):
        # Perform rotational and translational transformation
        _img = ximg.copy()
        _img = ndimage.shift(_img, param)
        return self.MeasureErr(img, _img)

    param = optimize.fmin(_ErrFunc, param, maxiter=100)
    return param, err.min()
def pumping(params, roi):
    """Finds periodic motion from images using entropy-based difference
    detection in a region of interest. Also calculates a kymograph."""
    images = read_sequentially(params)
    sim = []
    kymo = []
    try:
        img0 = ndimage.shift(next(images), roi[0], mode="wrap")
        img1 = ndimage.shift(next(images), roi[1], mode="wrap")
        cnt = 2
        sim = []
        cms_old = [params['y0'], params['x0']]
        # normalize images
        min1, max1 = 1.0 * np.min(img0), 1.0 * np.max(img0)
        img0 = (img0 - min1) / (max1 - min1)
        min1, max1 = 1.0 * np.min(img1), 1.0 * np.max(img1)
        img1 = (img1 - min1) / (max1 - min1)
        while True:
            entr_small, area_small = difference_entropy(img0, img1, cms_old, params)
            sim.append([entr_small, area_small, cms_old[0]])
            img0 = img1
            img1 = ndimage.shift(next(images), roi[cnt], mode="wrap")
            # normalize image
            min1, max1 = 1.0 * np.min(img1), 1.0 * np.max(img1)
            img1 = (img1 - min1) / (max1 - min1)
            cnt += 1
            kymo.append(np.sum(img0, axis=1))
    except StopIteration:
        pass
    finally:
        del images
    return np.array(sim), kymo
def RigidRegistrationXY(img, ximg):
    # Perform initial guess rotation & translation
    v_range = np.array(range(-10, 10))
    err = np.array([MeasureErr(img, ndimage.shift(ximg, (v, 0))) for v in v_range])
    x = v_range[where(err == err.min())[0]]
    err = np.array([MeasureErr(img, ndimage.shift(ximg, (0, v))) for v in v_range])
    y = v_range[where(err == err.min())[0]]
    # List contains displacement in x and y and rotation
    param = [x, y]

    def ErrFunc(param, img=img, ximg=ximg):
        # Perform rotational and translational transformation
        _img = ximg.copy()
        _img = ndimage.shift(_img, param)
        return MeasureErr(img, _img)

    param = optimize.fmin(ErrFunc, param, maxiter=100)
    # Final transformation
    _img = ximg.copy()
    # _img = ndimage.rotate(_img, param[2], reshape=0)
    _img = ndimage.shift(_img, param)
    return (_img, param)
def shiftRGB(redF, greenF, blueF, blueshiftr=0, blueshiftc=0, greenshiftr=0,
             greenshiftc=0, redshiftr=0, redshiftc=0, ext=None):
    """
    Shifts the pixels of three r, g, b images, using the g image as reference
    and shifting the other two images. Returns the shifted r, g, b images.
    Each row goes along the ra direction, each col goes along the dec direction.
    CRVAL1: ra direction
    CRVAL2: dec direction
    """
    blueHdr = pf.getheader(blueF, ext)
    greenHdr = pf.getheader(greenF, ext)
    redHdr = pf.getheader(redF, ext)
    bluerow = blueHdr['crval1'] * 3600. / 0.27
    bluecol = blueHdr['crval2'] * 3600. / 0.27
    greenrow = greenHdr['crval1'] * 3600. / 0.27
    greencol = greenHdr['crval2'] * 3600. / 0.27
    redrow = redHdr['crval1'] * 3600. / 0.27
    redcol = redHdr['crval2'] * 3600. / 0.27
    """
    col0=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[0].split(':')[0])-1
    col1=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[0].split(':')[1])
    row0=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[1].split(':')[0])-1
    row1=int(blueHdr['datasec'].split('[')[1].split(']')[0].split(',')[1].split(':')[1])
    """
    blue = pf.getdata(blueF, ext)
    green = pf.getdata(greenF, ext)
    red = pf.getdata(redF, ext)
    ctgreenrow = (bluerow + greenrow + redrow) / 3.
    ctgreencol = (bluecol + greencol + redcol) / 3.
    blue = nd.shift(blue, [bluerow - ctgreenrow + blueshiftr,
                           bluecol - ctgreencol + blueshiftc],
                    mode='nearest', order=1)
    green = nd.shift(green, [greenrow - ctgreenrow + greenshiftr,
                             greencol - ctgreencol + greenshiftc],
                     mode='nearest', order=1)
    red = nd.shift(red, [redrow - ctgreenrow + redshiftr,
                         redcol - ctgreencol + redshiftc],
                   mode='nearest', order=1)
    return red, green, blue
def superresEdge(edgeProfiles, n=4, returnBins=False):
    """
    Given a bunch of edge profiles, create an average profile that is n times
    the size, based on the edge positions.
    """
    edgePositions = np.array(list(map(findStepEdgeSubpix, edgeProfiles)))
    p = np.polyfit(range(len(edgePositions)), edgePositions, 2)
    fitPositions = np.polyval(p, range(len(edgePositions)))
    meanPos = np.floor(np.mean(fitPositions))
    shifts = (np.floor(fitPositions) - meanPos)
    bins = np.cast[int](np.modf(fitPositions)[0] * n)
    edgeProfiles = [ndimage.shift(profile, -round(shft), cval=float('nan'), order=0)
                    for profile, shft in zip(edgeProfiles, shifts)]
    toAverage = [[] for i in range(n)]
    for bin_i, profile in zip(bins, edgeProfiles):
        toAverage[bin_i].append(profile)
    result = np.zeros(len(edgeProfiles[0]) * n)
    for i in range(n):
        if not toAverage[i]:
            continue
        x = np.mean(toAverage[i], 0)
        #print 'mean', i, x
        # The bins are sorted by increasing order of edge position; we want
        # the right-most edge to go in the first bin.
        result[n - i - 1::n] = x
    # Trim ends -- shifts were marked with nan.
    result = result[~np.isnan(result)]
    if returnBins:
        return result, edgeProfiles, toAverage
    return result, edgeProfiles
def crossCorrelateFft(imagefft, templatefft, imshape, tmplshape):
    # CALCULATE BIGGER MAP SIZE
    oversized = numarray.array(imshape) + numarray.array(tmplshape)
    # MULTIPLY FFTs TOGETHER
    newfft = (templatefft * numarray.conjugate(imagefft)).copy()
    del templatefft
    # INVERSE TRANSFORM TO GET RESULT
    corr = fft.inverse_real_fft2d(newfft, s=oversized)
    # corr = fft.inverse_fft2d(newfft, s=oversized)
    # corr = corr.astype(numarray.Float64)
    del newfft
    # ROTATION AND SHIFT
    # ROTATE 180 DEGREES, NEIL STYLE
    # corr = numarray.transpose(corr)
    # corr = corr[(corr.shape)[0]::-1, :]
    # corr = numarray.transpose(corr)
    # corr = corr[(corr.shape)[0]::-1, :]
    # ROTATE 180 DEGREES, CRAIG STYLE
    corrshape = corr.shape
    corr = numarray.ravel(corr)
    corr = numarray.reshape(corr[(corr.shape)[0]::-1], corrshape)
    corr = nd_image.shift(corr, tmplshape[0], mode="wrap", order=0)
    # print " ... ... rot time %.2f sec" % float(time.time()-t1)
    # RETURN CENTRAL PART OF IMAGE (SIDES ARE JUNK)
    return corr[tmplshape[0] - 1: imshape[0] + tmplshape[0] - 1,
                tmplshape[1] - 1: imshape[1] + tmplshape[1] - 1]
def test_map_coordinates_dts():
    # check that ndimage accepts different data types for interpolation
    data = np.array([[4, 1, 3, 2],
                     [7, 6, 8, 5],
                     [3, 5, 3, 6]])
    shifted_data = np.array([[0, 0, 0, 0],
                             [0, 4, 1, 3],
                             [0, 7, 6, 8]])
    idx = np.indices(data.shape)
    dts = (np.uint8, np.uint16, np.uint32, np.uint64,
           np.int8, np.int16, np.int32, np.int64,
           np.intp, np.uintp, np.float32, np.float64)
    for order in range(0, 6):
        for data_dt in dts:
            these_data = data.astype(data_dt)
            for coord_dt in dts:
                # affine mapping
                mat = np.eye(2, dtype=coord_dt)
                off = np.zeros((2,), dtype=coord_dt)
                out = ndimage.affine_transform(these_data, mat, off)
                assert_array_almost_equal(these_data, out)
                # map coordinates
                coords_m1 = idx.astype(coord_dt) - 1
                coords_p10 = idx.astype(coord_dt) + 10
                out = ndimage.map_coordinates(these_data, coords_m1, order=order)
                assert_array_almost_equal(out, shifted_data)
                # check constant fill works
                out = ndimage.map_coordinates(these_data, coords_p10, order=order)
                assert_array_almost_equal(out, np.zeros((3, 4)))
            # check shift and zoom
            out = ndimage.shift(these_data, 1)
            assert_array_almost_equal(out, shifted_data)
            out = ndimage.zoom(these_data, 1)
            assert_array_almost_equal(these_data, out)
def test_Circular_Aperture_PTP_short(display=False, npix=512, display_proper=False):
    """ Tests plane-to-plane propagation at short distances, by comparison
    of the results from propagate_ptp and propagate_direct calculations
    """
    # test short distance propagation, as discussed in issue #194
    # (https://github.com/mperrin/poppy/issues/194)
    wf = fresnel.FresnelWavefront(2 * u.um, wavelength=10e-9 * u.m,
                                  npix=npix, oversample=4)
    wf *= optics.CircularAperture(radius=800 * 1e-9 * u.m)
    wf_2 = wf.copy()
    z = 12. * u.um
    # Calculate same result using 2 different algorithms:
    wf.propagate_direct(z)
    wf_2.propagate_fresnel(z)
    # The results have different pixel scale so we need to resize
    # in order to compare them
    zoomed = (zoom(wf.intensity,
                   (wf.pixelscale / wf_2.pixelscale).decompose().value))
    n = zoomed.shape[0]
    crop_2 = wf_2.intensity[int(1023 - n / 2):int(1023 + n / 2),
                            int(1023 - n / 2):int(1023 + n / 2)]
    # zooming shifted the centroids, find new centers
    cent = fwcentroid.fwcentroid(zoomed, halfwidth=8)
    cent2 = fwcentroid.fwcentroid(crop_2, halfwidth=8)
    shifted = shift(crop_2, [cent[1] - cent2[1], cent[0] - cent2[0]])
    diff = shifted / shifted.max() - zoomed / zoomed.max()
    assert (diff.max() < 1e-3)
def ngc5256():
    research.pab.align.coarse_align('NGC5256-39-138-F673N_drc_sci.fits',
                                    ref='NGC5256-14-170-F110W_drz_sci.fits')
    research.pab.align.coarse_align('NGC5256-48-152-F814W_drc_sci.fits',
                                    ref='NGC5256-14-170-F110W_drz_sci.fits')
    shifts = {'F110W': [0, 0],
              'F132N': [0, 0],
              'F130N': [0, 0],
              'F673N': [-1.137, 5.2527],
              'F435W': [8.51, 1.35],
              'F814W': [8.51, 1.35]}
    #
    files = glob.glob('*sci.fits')
    ims = {}
    for file in files:
        print(file)
        im = pyfits.open(file)
        filter = file.split('-')[-1][:5]
        pix = np.sqrt(im[0].header['CD1_1']**2 + im[0].header['CD1_2']**2) * 3600
        scl = im[0].header['PHOTFLAM'] / 1.e-19 * (0.1 / pix)**2
        im[0].data *= scl
        shifted = nd.shift(im[0].data, shifts[filter][::-1])
        ims[filter] = shifted
    pab = ims['F132N'] - ims['F130N']
    ha = ims['F673N'] - ims['F814W']
def shift(self, int_shift, in_place=False):
    """
    Shift image by an integer number of pixels without interpolation.
    `int_shift` is a tuple, numpy array or list of integers (floats will
    be rounded).
    If `in_place` is true the image is shifted in place, and the wcs
    reference pixel is updated appropriately.
    """
    from scipy import ndimage
    if (np.int32(np.array(int_shift)) != np.array(int_shift)).any():
        raise ValueError('Shift must be integer amount!')
    res = ndimage.shift(self.data, int_shift, order=0)
    if in_place:
        self._applyArray(None, res)
        self.header['crpix1'] += int_shift[0]
        self.header['crpix2'] += int_shift[1]
        self._wcs = pywcs.WCS(self.header)
        # raise RuntimeWarning('self.data and fitsfile data are now inconsistent.')
        res = None
    return res
def ShiftCenterFrame(f):
    cofm = nd.center_of_mass(f)
    V, H = f.shape
    dv = V / 2 - cofm[0]
    dh = H / 2 - cofm[1]
    dv = int(dv)
    dh = int(dh)
    fshift = nd.shift(f, (dv, dh))
    return fshift, (dv, dh)
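# Usage sketch (my addition): center a synthetic off-center spot. Assumes the
# module imports scipy.ndimage as `nd`, as ShiftCenterFrame itself does.
import numpy as np
from scipy import ndimage as nd

frame = np.zeros((32, 32))
frame[5, 7] = 1.0
centered, (dv, dh) = ShiftCenterFrame(frame)
print(dv, dh)                       # (11, 9): the integer shift applied
print(nd.center_of_mass(centered))  # now close to the frame center, (16, 16)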
def ErrFunc(param, img=img, ximg=ximg):
    # Perform rotational and translational transformation
    _img = ximg.copy()
    _img = ndimage.shift(_img, param)
    return MeasureErr(img, _img)
def find_center_pc(proj1, proj2, tol=0.5, rotc_guess=None):
    """
    Find rotation axis location by finding the offset between the first
    projection and a mirrored projection 180 degrees apart using
    phase correlation in Fourier space.
    The ``register_translation`` function uses cross-correlation in Fourier
    space, optionally employing an upsampled matrix-multiplication DFT to
    achieve arbitrary subpixel precision. :cite:`Guizar:08`.

    Parameters
    ----------
    proj1 : ndarray
        2D projection data.
    proj2 : ndarray
        2D projection data.
    tol : scalar, optional
        Subpixel accuracy.
    rotc_guess : float, optional
        Initial guess value for the rotation center.

    Returns
    -------
    float
        Rotation axis location.
    """
    imgshift = 0.0 if rotc_guess is None else rotc_guess - (proj1.shape[1] - 1.0) / 2.0
    proj1 = ndimage.shift(proj1, [0, -imgshift], mode='constant', cval=0)
    proj2 = ndimage.shift(proj2, [0, -imgshift], mode='constant', cval=0)
    # create reflection of second projection
    proj2 = np.fliplr(proj2)
    # Determine shift between images using scikit-image pcm
    shift = register_translation(proj1, proj2, upsample_factor=1.0 / tol)
    # Compute center of rotation as the center of first image and the
    # registered translation with the second image
    center = (proj1.shape[1] + shift[0][1] - 1.0) / 2.0
    return center + imgshift
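# Sanity-check sketch (my addition; assumes `register_translation` resolves,
# e.g. from scikit-image < 0.19 -- newer releases renamed it
# phase_cross_correlation): passing two projections that are exact mirrors of
# each other should put the rotation axis at the geometric center.
import numpy as np
from scipy import ndimage

proj = np.random.rand(64, 64)
center = find_center_pc(proj, np.fliplr(proj))
print(center)  # ~31.5, i.e. (ncol - 1) / 2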
def registerStack(refImg, testImg, myROI, upsamplingFactor):
    refImgROI = setROIregistration(myROI, refImg)
    # refImgROI = setDefaultROIregistration(refImg)
    testImgROI = setROIregistration(myROI, testImg)
    # testImgROI = setDefaultROIregistration(testImg)
    resReg = register_translation(refImgROI, testImgROI,
                                  upsample_factor=upsamplingFactor, space='real')
    theShifts = resReg[0]
    regImage = ndimage.shift(testImg, (theShifts[0], theShifts[1]))
    return theShifts, regImage
def calculate_slice_geometries(science_set, shift_bounds=[-20, 20],
                               shift_samples=100, fit_sample=5):
    science_frame = science_set.science
    mdf_table = table.Table(science_frame.mask.fits.fits_data[1].data)
    # calculating the slit size and slit position from the MDF and
    # instrument information
    naxis1 = science_frame.prepared.fits.fits_data[1].header['naxis1'] * units.pix
    naxis2 = science_frame.prepared.fits.fits_data[1].header['naxis2'] * units.pix
    slit_pos_x = mdf_table['slitpos_mx'].astype(np.float64) * units.mm
    slit_pos_x *= science_frame.instrument_setup.x_pix_per_mm
    slit_pos_x += (naxis1 / 2) - 1 * units.pix
    slit_pos_y = np.polyval(science_frame.instrument_setup.y_distortion_coefficients,
                            mdf_table['slitpos_my'].astype(np.float64)) * units.mm
    slit_pos_y *= science_frame.instrument_setup.y_pix_per_mm
    slit_pos_y += (naxis2 / 2) - 1 * units.pix
    slit_size_x = (mdf_table['slitsize_mx'].astype(np.float64) * units.mm
                   * science_frame.instrument_setup.x_pix_per_mm)
    slit_size_y = (mdf_table['slitsize_my'].astype(np.float64) * units.mm
                   * science_frame.instrument_setup.y_pix_per_mm)
    slice_lower_edge = (slit_pos_y - slit_size_y / 2
                        + science_frame.instrument_setup.y_offset).value
    slice_upper_edge = slice_lower_edge + slit_size_y.value
    if science_set.flat is None:
        raise ValueError('science_frame does not have flat associated with it')
    flat_slices = np.median(science_set.flat.fits.fits_data[2].data, axis=1)
    slice_model = np.zeros_like(flat_slices)
    for slice_lower, slice_upper in zip(slice_lower_edge, slice_upper_edge):
        lower_idx = int(np.round(slice_lower))
        upper_idx = int(np.round(slice_upper))
        slice_model[lower_idx:upper_idx] = 1.0
    slice_model *= np.median(flat_slices)
    rms_space = []
    pixel_shifts = np.linspace(shift_bounds[0], shift_bounds[1], shift_samples)
    for shift in pixel_shifts:
        rms_space.append(((ndimage.shift(slice_model, shift) - flat_slices)**2).sum())
    rms_space = np.array(rms_space)
    rms_slice = slice(rms_space.argmin() - 5, rms_space.argmin() + 5)
    a, b, c = np.polyfit(pixel_shifts[rms_slice], rms_space[rms_slice], 2)
    fitted_shift = -b / (2 * a)
    mdf_table['slice_lower_edge'] = slice_lower_edge + fitted_shift
    mdf_table['slice_upper_edge'] = slice_upper_edge + fitted_shift
    return mdf_table
def derotate(array):
    rows, cols = array.shape
    polar_array = get_logpolar(array, 0)
    rows_sum = polar_array.sum(1)
    maxcol = -rows_sum.argmax()
    rows_sum = ndimage.shift(rows_sum, maxcol, order=0, mode="wrap")
    rows_shift = maxcol
    angle = (-360. * rows_shift) / rows
    derotated = ndimage.rotate(array, angle, reshape=False)
    return derotated
def shift_augmentation(X, h_range, w_range):
    X_shift = np.copy(X)
    size = X.shape[2:]
    h_random = np.random.rand() * h_range * 2. - h_range
    w_random = np.random.rand() * w_range * 2. - w_range
    h_shift = int(h_random * size[0])
    w_shift = int(w_random * size[1])
    for j in range(X.shape[0]):
        X_shift[j, 0] = ndimage.shift(X[j, 0], (h_shift, w_shift), order=0)
    return X_shift
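# Usage sketch (my addition): the indexing X[j, 0] above implies a batch laid
# out as (N, 1, H, W); synthetic data below.
import numpy as np
from scipy import ndimage

X = np.random.rand(4, 1, 28, 28)
X_aug = shift_augmentation(X, h_range=0.1, w_range=0.1)
print(X_aug.shape)  # (4, 1, 28, 28); one random shift shared by all samples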
def sino_center(sinogram):
    """
    Finds the rotation axis of a sinogram using the first and last
    projections, which are assumed to be 180 degrees apart. The last
    projection is reversed and correlated with the first, and the sinogram
    shifted so that the rotation axis is centered is returned.
    """
    proj1 = sinogram[0, ...]
    proj2 = sinogram[-1, ::-1]
    shift = image._correlate_projections(proj1, proj2)
    return ndimage.shift(sinogram, (0, -shift), mode='nearest',
                         cval=np.median(sinogram))
def test_uint64_max():
    # Test interpolation respects uint64 max
    big = 2**64 - 1
    arr = np.array([big, big, big], dtype=np.uint64)
    # Tests geometric transform (map_coordinates, affine_transform)
    inds = np.indices(arr.shape) - 0.1
    x = ndimage.map_coordinates(arr, inds)
    assert_true(x[1] > (2**63))
    # Tests zoom / shift
    x = ndimage.shift(arr, 0.1)
    assert_true(x[1] > (2**63))
def RigidRegistrationXY_guess(img, ximg):
    # Perform initial guess rotation & translation
    v_range = np.array(range(-10, 10))
    err = np.array([MeasureErr(img, ndimage.shift(ximg, (v, 0))) for v in v_range])
    x = v_range[where(err == err.min())[0]]
    err = np.array([MeasureErr(img, ndimage.shift(ximg, (0, v))) for v in v_range])
    y = v_range[where(err == err.min())[0]]
    # List contains displacement in x and y and rotation
    param = [x, y]
    # Final transformation
    _img = ximg.copy()
    _img = ndimage.shift(_img, param)
    return (_img, param)
def sino_deinterlace(sinogram):
    sino_deinterlaced = sinogram.copy()
    sino_even = sinogram[::2, ...]
    sino_odd = sinogram[1::2, ...]
    if sino_even.shape > sino_odd.shape:
        shift = image._correlate_images(sino_even[:-1, ...], sino_odd)
    else:
        shift = image._correlate_images(sino_even, sino_odd)
    sino_deinterlaced[1::2, ...] = ndimage.shift(sinogram[1::2, ...],
                                                 (0, shift), mode='nearest')
    return sino_deinterlaced
def FixGantryTilt(matrix, spacing, tilt):
    """
    Fix gantry tilt given a volume (numpy array), its spacing and the tilt
    value. The slices are shifted in place.
    """
    angle = numpy.radians(tilt)
    spacing = spacing[0], spacing[1], spacing[2]
    gntan = math.tan(angle)
    for n, slice_ in enumerate(matrix):
        offset = gntan * n * spacing[2]
        matrix[n] = shift(slice_, (-offset / spacing[1], 0), cval=matrix.min())
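# Usage sketch (my addition): FixGantryTilt shears a small synthetic volume in
# place. Assumes the module-level imports the function relies on (numpy, math,
# and scipy.ndimage's shift).
import math
import numpy
from scipy.ndimage import shift

vol = numpy.random.rand(4, 16, 16).astype(numpy.float32)
FixGantryTilt(vol, spacing=(1.0, 1.0, 2.0), tilt=5.0)  # modifies vol in place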
def nudge_images(X, y):
    # Having a larger dataset shows more clearly the behavior of the
    # methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods are strongly
    # super-linear in n_samples
    shift = lambda x: ndimage.shift(x.reshape((8, 8)),
                                    .3 * np.random.normal(size=2),
                                    mode='constant',
                                    ).ravel()
    X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
    Y = np.concatenate([y, y], axis=0)
    return X, Y
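# Usage sketch (my addition): the hard-coded reshape((8, 8)) matches sklearn's
# digits images, so that dataset is a natural fit (scikit-learn assumed
# available).
import numpy as np
from scipy import ndimage
from sklearn.datasets import load_digits

digits = load_digits()
X2, y2 = nudge_images(digits.data, digits.target)
print(X2.shape)  # twice the original sample count, same 64 features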
def rigid_transform(img, zoom=None, rotation=None, offset=None,
                    outputShape=None, mode='constant', cval=0.0):
    """
    Rigid transformation of a 2d image or 3d matrix by using scipy.

    :param img: input image/matrix
    :param zoom:
    :param rotation: in degree, counterclockwise
    :param offset: tuple (xoffset, yoffset), pixel value of starting point
        of output image
    :param outputShape: the shape of output image, (height, width)
    :return: new image or matrix after transformation
    """
    if len(img.shape) != 2 and len(img.shape) != 3:
        raise LookupError('Input image is not a 2d or 3d array!')
    newImg = img.astype(np.float32)
    if zoom:
        if len(img.shape) == 2:
            newZoom = (zoom, zoom)
        elif len(img.shape) == 3:
            newZoom = (1, zoom, zoom)
        newImg = ni.zoom(newImg, zoom=newZoom, mode=mode, cval=cval)
    if rotation:
        newImg = expand_image(newImg)
        if len(img.shape) == 2:
            newImg = ni.rotate(newImg, angle=rotation, reshape=False,
                               mode=mode, cval=cval)
        elif len(img.shape) == 3:
            newImg = ni.rotate(newImg, angle=rotation, axes=(1, 2),
                               reshape=False, mode=mode, cval=cval)
    if offset:
        if len(img.shape) == 2:
            newImg = ni.shift(newImg, (offset[1], offset[0]), mode=mode, cval=cval)
        if len(img.shape) == 3:
            newImg = ni.shift(newImg, (0, offset[1], offset[0]), mode=mode, cval=cval)
    if outputShape:
        newImg = resize_image(newImg, outputShape)
    return newImg.astype(img.dtype)
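# Usage sketch (my addition), exercising only the zoom and offset branches: the
# rotation branch calls helpers (expand_image, resize_image) not shown in this
# snippet.
import numpy as np
from scipy import ndimage as ni

img = np.random.rand(64, 64).astype(np.float32)
out = rigid_transform(img, zoom=0.5, offset=(5, -3))
print(out.shape, out.dtype)  # roughly (32, 32), dtype restored to float32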
def centerParticles(self, oldstack, centerstack, badstack):
    maxshift = self.params['maxshift']
    centerparts = []
    badparts = []
    keeplist = []
    i = 0
    partnum = 0
    while partnum < numparts:
        ### if need more particles
        ### read 4000 parts from oldstack
        ### write centerparts to centerstack
        ### write badparts to badstack
        ### set current image
        oldpart = oldparts[i]
        ### mirror about x
        xmirror = numpy.flipud(oldpart)
        ### cross-correlate
        xcc = correlator.cross_correlate(oldpart, xmirror)
        ### find peak
        peakdict = peakfinder.findSubpixelPeak(xcc)
        xpeak = correlator.wrap_coord(peakdict['pixel peak'], xcc.shape)
        ### mirror about y
        ymirror = numpy.fliplr(oldpart)
        ### cross-correlate
        ycc = correlator.cross_correlate(oldpart, ymirror)
        ### find peak
        peakdict = peakfinder.findSubpixelPeak(ycc)
        ypeak = correlator.wrap_coord(peakdict['pixel peak'], ycc.shape)
        ### mirror about y then x
        xymirror = numpy.flipud(ymirror)
        ### cross-correlate
        xycc = correlator.cross_correlate(oldpart, xymirror)
        ### find peak
        peakdict = peakfinder.findSubpixelPeak(xycc)
        xypeak = correlator.wrap_coord(peakdict['pixel peak'], xycc.shape)
        ### do some math to get shift
        xshift = (ypeak[0] + xypeak[0]) / 4.0
        yshift = (xpeak[0] + xypeak[0]) / 4.0
        ### shift particle, by integers only
        if xshift < maxshift and yshift < maxshift:
            xyshift = (xshift, yshift)
            centerpart = ndimage.shift(oldpart, shift=xyshift, mode='wrap', order=0)
            centerparts.append(centerpart)
            keeplist.append(partnum)
        else:
            badparts.append(oldpart)
        partnum += 1
        i += 1
    return keeplist
def test_uint64_max():
    # Test interpolation respects uint64 max. Reported to fail at least on
    # win32 (due to the 32 bit visual C compiler using signed int64 when
    # converting between uint64 to double) and Debian on s390x.
    big = 2**64 - 1
    arr = np.array([big, big, big], dtype=np.uint64)
    # Tests geometric transform (map_coordinates, affine_transform)
    inds = np.indices(arr.shape) - 0.1
    x = ndimage.map_coordinates(arr, inds)
    assert_true(x[1] > (2**63))
    # Tests zoom / shift
    x = ndimage.shift(arr, 0.1)
    assert_true(x[1] > (2**63))
def merge3(a, b, c, ROI=None):
    """
    @param a, b, c: 3 2D-datasets
    @param ROI: tuple of slices, i.e. (slice(1, 513), slice(700, 700 + 512))
    """
    from scipy import ndimage
    out = numpy.zeros(a.shape, dtype="float32")
    out += a
    if ROI is not None:
        ac = a[ROI]
        bc = b[ROI]
        cc = c[ROI]
    else:
        ac = a
        bc = b
        cc = c
    shab = measure_offset(ac, bc)
    out += ndimage.shift(b, shab, order=1, cval=b.mean(dtype=float))
    shac = measure_offset(ac, cc)
    out += ndimage.shift(c, shac, order=1, cval=c.mean(dtype=float))
    print(shab, shac)
    return out / 3.0
def _shift_single_frame(im, shift_x, shift_y, interpolation_order=1):
    im_shifted = shift(im, (-shift_y, -shift_x), order=interpolation_order)
    return im_shifted
def _shift_interp_builtin(array, shift_value, mode='constant', cval=0):
    shifted = ndimage.shift(array, np.flipud(shift_value), order=3,
                            mode=mode, cval=cval)
    return shifted
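# Sketch (my addition) contrasting the two helpers' conventions on a synthetic
# spike: _shift_single_frame negates an (x, y) shift, while
# _shift_interp_builtin flips an (x, y) pair into numpy's (row, col) order.
import numpy as np
from scipy import ndimage
from scipy.ndimage import shift

a = np.zeros((8, 8))
a[4, 4] = 1.0
moved1 = _shift_single_frame(a, shift_x=1, shift_y=2)
moved2 = _shift_interp_builtin(a, np.array([1, 2]))
print(np.unravel_index(np.argmax(moved1), a.shape))  # (2, 3)
print(np.unravel_index(np.argmax(moved2), a.shape))  # (6, 5)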
def dfi_reconstruction(sinogram, center, angles=None, ratio=1.0,
                       filter_name="hann", pad_rate=0.25, pad_mode="edge",
                       apply_log=True):
    """
    Apply the DFI (direct Fourier inversion) reconstruction method to a
    sinogram image (Ref. [1]_). The method is a practical and direct
    implementation of the Fourier slice theorem (Ref. [2]_).

    Parameters
    ----------
    sinogram : array_like
        2D array. Sinogram image.
    center : float
        Center of rotation.
    angles : array_like
        1D array. List of angles (in radian) corresponding to the sinogram.
    ratio : float
        To apply a circle mask to the reconstructed image.
    filter_name : {None, "hann", "bartlett", "blackman", "hamming",\\
                  "nuttall", "parzen", "triang"}
        Apply a smoothing filter.
    pad_rate : float
        To apply padding before the FFT. The padding width equals to
        (pad_rate * image_width).
    pad_mode : str
        Padding method. Full list can be found at numpy.pad documentation.
    apply_log : bool
        Apply the logarithm function to the sinogram before reconstruction.

    Returns
    -------
    array_like
        Square array. Reconstructed image.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.418448
    .. [2] https://doi.org/10.1071/PH560198
    """
    if apply_log is True:
        sinogram = -np.log(sinogram)
    (nrow, ncol) = sinogram.shape
    if ncol % 2 == 0:
        sinogram = np.pad(sinogram, ((0, 0), (0, 1)), mode="edge")
    ncol1 = sinogram.shape[1]
    xshift = (ncol1 - 1) / 2.0 - center
    sinogram = shift(sinogram, (0, xshift), mode='nearest')
    if angles is not None:
        t_ang = np.sum(np.abs(np.diff(angles * 180.0 / np.pi)))
        if abs(t_ang - 360) < 10:
            nrow = nrow // 2 + 1
            sinogram = (sinogram[:nrow] + np.fliplr(sinogram[-nrow:])) / 2
        step = np.mean(np.abs(np.diff(angles)))
        b_ang = angles[0] - (angles[0] // (2 * np.pi)) * (2 * np.pi)
        sino_360 = np.vstack((sinogram[:nrow - 1], np.fliplr(sinogram)))
        sinogram = shift(sino_360, (b_ang / step, 0), mode='wrap')[:nrow]
        if angles[-1] < angles[0]:
            sinogram = np.flipud(np.fliplr(sinogram))
    num_pad = int(pad_rate * ncol1)
    sinogram = np.pad(sinogram, ((0, 0), (num_pad, num_pad)), mode=pad_mode)
    ncol2 = sinogram.shape[1]
    mask = util.make_circle_mask(ncol2, 1.0)
    (r_mat, theta_mat) = generate_mapping_coordinate(ncol2, nrow, ncol2, ncol2)
    sino_fft = fft.fftshift(fft.fft(fft.ifftshift(sinogram, axes=1)), axes=1)
    if filter_name is not None:
        window = make_smoothing_window(filter_name, ncol2)
        sino_fft = sino_fft * np.tile(window, (nrow, 1))
    mat_real = np.real(sino_fft)
    mat_imag = np.imag(sino_fft)
    reg_real = util.mapping(mat_real, r_mat, theta_mat, order=5,
                            mode="reflect") * mask
    reg_imag = util.mapping(mat_imag, r_mat, theta_mat, order=5,
                            mode="reflect") * mask
    recon = np.real(fft.fftshift(fft.ifft2(
        fft.ifftshift(reg_real + 1j * reg_imag))))[num_pad:ncol + num_pad,
                                                   num_pad:ncol + num_pad]
    if ratio is not None:
        if ratio == 0.0:
            ratio = min(center, ncol - center) / (0.5 * ncol)
        mask = util.make_circle_mask(ncol, ratio)
        recon = recon * mask
    return recon
def main(args):
    log = logging.getLogger(__name__)
    log.setLevel(logging.INFO)
    hdlr = logging.StreamHandler(sys.stdout)
    if args.quiet:
        hdlr.setLevel(logging.ERROR)
    elif args.verbose:
        hdlr.setLevel(logging.INFO)
    else:
        hdlr.setLevel(logging.WARN)
    log.addHandler(hdlr)
    data, hdr = read(args.input, inc_header=True)
    final = None
    box = np.array([hdr[a] for a in ["nx", "ny", "nz"]])
    center = box // 2
    if args.fft:
        data_ft = vol_ft(data.T, threads=args.threads)
        np.save(args.output, data_ft)
        return 0
    if args.transpose is not None:
        try:
            tax = [np.int64(a) for a in args.transpose.split(",")]
            data = np.transpose(data, axes=tax)
        except:
            log.error("Transpose axes must be comma-separated list of three integers")
            return 1
    if args.normalize:
        if args.reference is not None:
            ref, refhdr = read(args.reference, inc_header=True)
            sigma = np.std(ref)
        else:
            sigma = np.std(data)
        mu = np.mean(data)
        final = (data - mu) / sigma
        if args.verbose:
            log.info("Mean: %f, Standard deviation: %f" % (mu, sigma))
    if args.apix is None:
        args.apix = hdr["xlen"] / hdr["nx"]
        log.info("Using computed pixel size of %f Angstroms" % args.apix)
    if args.target and args.matrix:
        log.warn("Target pose transformation will be applied after explicit matrix")
    if args.euler is not None and (args.target is not None or args.matrix is not None):
        log.warn("Euler transformation will be applied after target pose transformation")
    if args.translate is not None and (args.euler is not None or
                                       args.target is not None or
                                       args.matrix is not None):
        log.warn("Translation will be applied after other transformations")
    if args.origin is not None:
        try:
            args.origin = np.array([np.double(tok) for tok in args.origin.split(",")]) / args.apix
            assert np.all(args.origin < box)
        except:
            log.error("Origin must be comma-separated list of x,y,z coordinates and lie within the box")
            return 1
    else:
        args.origin = center
        log.info("Origin set to box center, %s" % (args.origin * args.apix))
    if not (args.target is None and args.euler is None and args.matrix is None) \
            and ismask(data) and args.spline_order != 0:
        log.warn("Input looks like a mask, --spline-order 0 (nearest neighbor) is recommended")
    if args.matrix is not None:
        try:
            r = np.array(json.loads(args.matrix))
        except:
            log.error("Matrix format is incorrect")
            return 1
        data = resample_volume(data, r=r, t=None, ori=None, order=args.spline_order)
    if args.target is not None:
        try:
            args.target = np.array([np.double(tok) for tok in args.target.split(",")]) / args.apix
        except:
            log.error("Standard pose target must be comma-separated list of x,y,z coordinates")
            return 1
        args.target -= args.origin
        args.target = np.where(np.abs(args.target) < 1, 0, args.target)
        ori = None if args.origin is center else args.origin - center
        r = vec2rot(args.target)
        t = np.linalg.norm(args.target)
        log.info("Euler angles are %s deg and shift is %f px" %
                 (np.rad2deg(rot2euler(r)), t))
        data = resample_volume(data, r=r, t=args.target, ori=ori,
                               order=args.spline_order, invert=args.target_invert)
    if args.euler is not None:
        try:
            args.euler = np.deg2rad(np.array([np.double(tok) for tok in args.euler.split(",")]))
        except:
            log.error("Eulers must be comma-separated list of phi,theta,psi angles")
            return 1
        r = euler2rot(*args.euler)
        offset = args.origin - 0.5
        offset = offset - r.T.dot(offset)
        data = affine_transform(data, r.T, offset=offset, order=args.spline_order)
    if args.translate is not None:
        try:
            args.translate = np.array([np.double(tok) for tok in args.translate.split(",")]) / args.apix
        except:
            log.error("Translation vector must be comma-separated list of x,y,z coordinates")
            return 1
        args.translate -= args.origin
        data = shift(data, -args.translate, order=args.spline_order)
    if final is None:
        final = data
    write(args.output, final, psz=args.apix)
    return 0
def transform_image(image, shiftx, shifty, angle, order=1):
    """
    Apply shift and rotation to the image. The translation is applied first,
    then the rotation.

    If no rotation is requested (``angle=0``), then ``scipy.ndimage.shift()``
    is called to perform a translation. Otherwise,
    ``scipy.ndimage.affine_transform()`` is called. In both cases the settings
    ``mode='wrap', prefilter=False`` are used. Prefilter *must* be turned off
    because it applies lossy image sharpening leading to artifacts.

    Parameters
    ----------
    image : numpy.ndarray
        2D image input.
    shiftx : float
        Shift in the x-axis in pixels.
    shifty : float
        Shift in the y-axis in pixels.
    angle : float
        Rotation angle in radians (positive is clockwise).
    order : int
        (Optional, default: 1) Spline interpolation order. 1 for bilinear,
        3 for bicubic (bilinear is the original behavior).

    Returns
    -------
    numpy.ndarray
        Transformed image.

    Notes
    -----
    The transformation is implemented as a sequence of affine transformations.
    The ``scipy`` module takes a matrix of the form (ndim + 1, ndim + 1),
    where it assumes that the transformation is specified using homogeneous
    coordinates. This matrix has the 2x2 rotation matrix in the top left
    corner, and the linear shifts in the top right. They are applied in this
    order:

    .. code-block:: text

        1    0    shiftx
        0    1    shifty
        0    0    1         (translation by shift amounts)

        1    0    -(X-1)/2
        0    1    -(Y-1)/2
        0    0    1         (translation to center rotation on the IDL rot center)

        cos  sin  0
        -sin cos  0
        0    0    1         (clockwise rotation)

        1    0    +(X-1)/2
        0    1    +(Y-1)/2
        0    0    1         (undo translation for center of rotation)
    """
    if shiftx == 0 and shifty == 0 and angle == 0:
        return image
    elif angle == 0:
        return shift(image, [shifty, shiftx], order=order, mode='wrap',
                     prefilter=False)
    else:
        # The original IDL implementation performs the linear translation
        # first (wrapping at borders), then rotates the image clockwise by an
        # angle in degrees. The center of the rotation is (X-1)/2, (Y-1)/2. In
        # both steps, bilinear interpolation is used.
        # Numpy array coordinates are (y, x). This swaps dx and dy in the
        # translation part, and the position of the -sin element in the
        # rotation part, compared to the standard version for (x, y, 1).
        # Beware that the coordinate transforms are calculated in the
        # conventional (x, y) sense, but are written in numpy's (y, x) order
        # when implementing them in the transformation matrix.
        cx, sx = np.cos(angle), np.sin(angle)
        # Center of rotation
        rot_x = 0.5 * (image.shape[1] - 1)
        rot_y = 0.5 * (image.shape[0] - 1)
        dx = cx * (shiftx - rot_x) + sx * (shifty - rot_y) + rot_x
        dy = -sx * (shiftx - rot_x) + cx * (shifty - rot_y) + rot_y
        tx = np.array([[cx, -sx, dy], [sx, cx, dx], [0, 0, 1]],
                      dtype=np.float64)
        # print(cx, sx)
        # print(tx)
        # The prefilter option, which is turned on by default, applies an
        # image sharpening. It must not be applied. The mode is set to 'wrap'
        # to emulate the behavior of the original implementation of image
        # shift. The spline interpolation order is 1 for bilinear, 3 for
        # bicubic (bilinear is the original behavior).
        return affine_transform(image, inv(tx), order=order, mode='wrap',
                                prefilter=False)
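# Usage sketch (my addition; assumes the module-level imports transform_image
# relies on: scipy.ndimage's shift/affine_transform and numpy.linalg's inv):
import numpy as np

img = np.random.rand(32, 32)
moved = transform_image(img, shiftx=3.0, shifty=-2.0, angle=0.0)  # shift path
rotated = transform_image(img, shiftx=0.0, shifty=0.0,
                          angle=np.deg2rad(30.0))                 # affine path
print(moved.shape, rotated.shape)  # both (32, 32); borders wrap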
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import data
from skimage.registration import phase_cross_correlation

#############################################
# Define areas of the image which are invalid.
# Probability of an invalid pixel is 25%.
# This could be due to a faulty detector, or edges that
# are not affected by translation (e.g. moving object in a window).
# See reference paper for more examples
image = data.camera()
shift = (-22, 13)

corrupted_pixels = np.random.choice([False, True], size=image.shape,
                                    p=[0.25, 0.75])

# The shift corresponds to the pixel offset relative to the reference image
offset_image = ndi.shift(image, shift)
offset_image *= corrupted_pixels
print(f"Known offset (row, col): {shift}")

# Determine what the mask is based on which pixels are invalid
# In this case, we know what the mask should be since we corrupted
# the pixels ourselves
mask = corrupted_pixels

detected_shift = phase_cross_correlation(image, offset_image,
                                         reference_mask=mask)

print(f"Detected pixel offset (row, col): {-detected_shift}")

fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True,
                                    figsize=(8, 3))
def shift_random(img, shift):
    """Translate image in x and y direction"""
    x_shift = np.random.uniform(-shift, shift)
    # y_shift = np.random.uniform(-shift, shift)
    return ndimage.shift(img, (x_shift, 0, 0), mode='nearest')
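# Usage sketch (my addition): the (x_shift, 0, 0) tuple above means only the
# first axis moves, so the input must be 3-D, e.g. (H, W, C).
import numpy as np
from scipy import ndimage

img = np.random.rand(64, 64, 3)
out = shift_random(img, shift=5.0)
print(out.shape)  # (64, 64, 3); rows shifted by a random sub-pixel amount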
def _refine(raw_image, image, radius, coords, max_iterations,
            characterize, walkthrough):
    SHIFT_THRESH = 0.6
    GOOD_ENOUGH_THRESH = 0.005

    ndim = image.ndim
    mask = binary_mask(radius, ndim)
    slices = [[slice(c - radius, c + radius + 1) for c in coord]
              for coord in coords]

    # Declare arrays that we will fill iteratively through loop.
    N = coords.shape[0]
    final_coords = np.empty_like(coords, dtype=np.float64)
    mass = np.empty(N, dtype=np.float64)
    Rg = np.empty(N, dtype=np.float64)
    ecc = np.empty(N, dtype=np.float64)
    signal = np.empty(N, dtype=np.float64)

    for feat in range(N):
        coord = coords[feat]

        # Define the circular neighborhood of (x, y).
        square = slices[feat]
        neighborhood = mask * image[square]
        cm_n = _safe_center_of_mass(neighborhood, radius)
        cm_i = cm_n - radius + coord  # image coords
        allow_moves = True
        for iteration in range(max_iterations):
            off_center = cm_n - radius
            if walkthrough:
                print(off_center)
            if np.all(np.abs(off_center) < GOOD_ENOUGH_THRESH):
                break  # Accurate enough.
            # If we're off by more than half a pixel in any direction, move.
            elif np.any(np.abs(off_center) > SHIFT_THRESH) & allow_moves:
                # In here, coord is an integer.
                new_coord = coord
                new_coord[off_center > SHIFT_THRESH] += 1
                new_coord[off_center < -SHIFT_THRESH] -= 1
                # Don't move outside the image!
                upper_bound = np.array(image.shape) - 1 - radius
                new_coord = np.clip(new_coord, radius, upper_bound).astype(int)
                # Update slice to shifted position.
                square = [slice(c - radius, c + radius + 1) for c in new_coord]
                neighborhood = mask * image[square]
            # If we're off by less than half a pixel, interpolate.
            else:
                # Here, coord is a float. We are off the grid.
                neighborhood = ndimage.shift(neighborhood, -off_center,
                                             order=2, mode='constant', cval=0)
                new_coord = coord + off_center
                # Disallow any whole-pixel moves on future iterations.
                allow_moves = False

            cm_n = _safe_center_of_mass(neighborhood, radius)  # neighborhood
            cm_i = cm_n - radius + new_coord  # image coords
            coord = new_coord

        # matplotlib and ndimage have opposite conventions for xy <-> yx.
        final_coords[feat] = cm_i[..., ::-1]

        if walkthrough:
            import matplotlib.pyplot as plt
            plt.imshow(neighborhood)

        # Characterize the neighborhood of our final centroid.
        mass[feat] = neighborhood.sum()
        if not characterize:
            continue  # short-circuit loop
        Rg[feat] = np.sqrt(np.sum(r_squared_mask(radius, ndim) *
                                  neighborhood) / mass[feat])
        # I only know how to measure eccentricity in 2D.
        if ndim == 2:
            ecc[feat] = np.sqrt(np.sum(neighborhood * cosmask(radius))**2 +
                                np.sum(neighborhood * sinmask(radius))**2)
            ecc[feat] /= (mass[feat] - neighborhood[radius, radius] + 1e-6)
        else:
            ecc[feat] = np.nan
        raw_neighborhood = mask * raw_image[square]
        signal[feat] = raw_neighborhood.max()  # black_level subtracted later

    if not characterize:
        result = np.column_stack([final_coords, mass])
    else:
        result = np.column_stack([final_coords, mass, Rg, ecc, signal])
    return result
def mains(self):
    slice = self.Preview_slice.value()  # Preview Slice  # Whats wrong here?
    # double_grid_size = 0  # =1 doubles the saved reconstructed area;
    # essential for full extended field of view scans (half sided 360 degree scans)
    if self.checkBox_save_normalized == True:
        save_normalized = 1  # save normalized projections?
    else:
        save_normalized = 0
    if self.checkBox_classic_order == True:
        # sorts the normalized projections to steadily rising angles
        # !!! SET THIS 0 FOR CLASSIC-CT !!!
        save_normalized_classic_order = 1
    else:
        save_normalized_classic_order = 0
    preview_frequency = 10  # preview reconstruction on idle and every 5th, 10th, 20th or 50th projection
    volume_begin = 0  # reconstruct these slices in the end

    # DEFINING PATHS # ========================================== # DEFINING PATHS #
    root = Tkinter.Tk()
    path_klick = self.path_klick
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + path_klick)
    htap = self.path_klick[::-1]
    path_in = self.path_klick[0:len(htap) - htap.find('/') - 1:1]
    namepart = self.path_klick[len(htap) - htap.find('/') - 1:
                               len(htap) - htap.find('.') - 5:1]
    counter = self.path_klick[len(htap) - htap.find('.') - 5:
                              len(htap) - htap.find('.') - 1:1]
    filetype = self.path_klick[len(htap) - htap.find('.') - 1:len(htap):1]
    path_out = path_in
    print(path_out)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + path_out)
    path_lists = path_in
    print(path_lists)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + path_lists)
    root.withdraw()
    path_out_reconstructed = path_out + '/Reconstructed_Preview'
    path_out_reconstructed_full = path_out + '/Reconstructed_Volume'
    path_out_normalized = path_out + '/Normalized_Projections'
    path_out_changes = path_out + '/Changes'
    print(path_out_reconstructed)
    print(path_out_reconstructed_full)
    print(path_out_normalized)
    print(path_out_changes)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + path_out_reconstructed)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + path_out_reconstructed_full)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + path_out_normalized)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + path_out_changes)
    if os.path.isdir(path_out_reconstructed) is False:
        os.mkdir(path_out_reconstructed)
    if os.path.isdir(path_out_reconstructed_full) is False:
        os.mkdir(path_out_reconstructed_full)
    if save_normalized == 1:
        if os.path.isdir(path_out_normalized) is False:
            os.mkdir(path_out_normalized)
    if os.path.isdir(path_out_changes) is False:
        os.mkdir(path_out_changes)
    first = 1  # ENTER THE FIRST FILE OF THE SCAN HERE; IMPORTANT WITH MULTIPLE SCANS
    self.file_name_protocol = path_out + '/' + 'reconstruction_protocol.txt'

    # READ THETA AND X_OFFSET-LIST # ============================ # READ THETA AND X_OFFSET-LIST #
    file_name_theta = path_lists + '/theta_list.txt'
    theta_list = numpy.genfromtxt(file_name_theta)
    file_name_X_offset = path_lists + '/X_offset_list.txt'
    x_offset_list = numpy.genfromtxt(file_name_X_offset)
    file_name_theta_first_list = path_lists + '/theta_first_list.txt'
    theta_first_list = numpy.genfromtxt(file_name_theta_first_list)
    file_name_parameter = path_lists + '/parameter.csv'
    print(file_name_parameter)
    f = open(file_name_parameter, 'r')  # Reading scan-scheme parameters
    for line in f:
        line = line.strip()
        columns = line.split()
        print(columns[0])
        if str(columns[0]) == 'box_lateral_shift':
            box_lateral_shift = int(columns[1])
            print(columns[1])
        if str(columns[0]) == 'number_of_sequences':
            number_of_sequences = int(columns[1])
            print(columns[1])
        if str(columns[0]) == 'sequence_size':
            sequence_size = int(columns[1])
            print(columns[1])
        if str(columns[0]) == 'FF_sequence_size':
            FF_sequence_size = int(columns[1])
            print(columns[1])
        if str(columns[0]) == 'zero_deg_proj':
            zero_deg_proj = int(columns[1])
            print(columns[1])
    f.close()
    number_of_projections = sequence_size * number_of_sequences
    last = first + number_of_projections - 1

    # OPEN AND NORMALIZE 0, 90 AND 180° # ======================= # OPEN AND NORMALIZE 0, 90 AND 180° #
    print(self.algorithm, 'algorithm chosen')
    print(self.filter, 'filter chosen')
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + str(self.algorithm) + ' algorithm chosen')
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + str(self.filter) + ' filter chosen')
    filename1 = path_in + namepart + str(first).zfill(4) + filetype
    filename2 = path_in + namepart + str(first + 1).zfill(4) + filetype
    filename3 = path_in + namepart + str(first + 2).zfill(4) + filetype
    filename4 = path_in + namepart + str(first + 3).zfill(4) + filetype
    filename5 = path_in + namepart + str(first + 4).zfill(4) + filetype
    while os.path.exists(filename5) != True:
        time.sleep(3)
        print('waiting for next file:', filename4)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'waiting for next file: ' + filename4)
    print('Reading data of 0, 90 and 180°')
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Reading data of 0, 90 and 180°')
    im_000deg = Image.open(filename1)
    im = im_000deg
    im_090deg = Image.open(filename2)
    im_180deg = Image.open(filename3)
    FF = Image.open(filename4)
    DF = numpy.ones((im_000deg.size[1], im_000deg.size[0]), numpy.float32)
    DF = DF * self.dark_field_value
    DF = numpy.single(DF)
    im_000deg = numpy.single(numpy.array(im_000deg))
    im_090deg = numpy.single(numpy.array(im_090deg))
    im_180deg = numpy.single(numpy.array(im_180deg))
    proj_000_sub = numpy.subtract(im_000deg, DF)
    proj_090_sub = numpy.subtract(im_090deg, DF)
    proj_180_sub = numpy.subtract(im_180deg, DF)
    FF_sub = numpy.subtract(numpy.array(FF), numpy.array(DF))
    im_000_normalized = numpy.divide(proj_000_sub, FF_sub)
    im_090_normalized = numpy.divide(proj_090_sub, FF_sub)
    im_180_normalized = numpy.divide(proj_180_sub, FF_sub)
    filename_b_000 = path_out_changes + namepart + 'beginning_000_deg' + filetype
    print('Beginning Projection at 0°:', filename_b_000)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Beginning Projection at 0°:' + filename_b_000)
    img = Image.fromarray(im_000_normalized)
    img.save(filename_b_000)
    filename_b_090 = path_out_changes + namepart + 'beginning_090_deg' + filetype
    print('Beginning Projection at 90°:', filename_b_090)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Beginning Projection at 90°:' + filename_b_090)
    img = Image.fromarray(im_090_normalized)
    img.save(filename_b_090)
    filename_b_180 = path_out_changes + namepart + 'beginning_180_deg' + filetype
    print('Beginning Projection at 180°:', filename_b_180)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Beginning Projection at 180°:' + filename_b_180)
    img = Image.fromarray(im_180_normalized)
    img.save(filename_b_180)
    print('image size', im_000deg.shape[0], 'x', im_000deg.shape[1],
          ' Total number of projections', last - first + 1)
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + ' Image size '
                        + str(im_000deg.shape[0]) + ' x ' + str(im_000deg.shape[1])
                        + ' Total number of projections' + str(last - first + 1))
    self.Preview_slice.setRange(2, im.size[1] - 2)
    if slice > im.size[1] - 1:
        self.Preview_slice.setValue(round(im.size[1] / 2))
        print('Slice out of bound! Slice set to', round(im.size[1] / 2))
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + ' Slice out of bound! Slice set to ' + str(round(im.size[1] / 2)))
    time.sleep(0.5)
    # extend field of view (FOV): 0.0 no extension, 0.5 half extension to both
    # sides (for half sided 360 degree scan!!!)
    extend_FOV = (abs(self.COR - im.size[0] / 2)) / im.size[0]
    print('extend_FOV ', extend_FOV)

    # FOLLOW AND READ DATA AND RECONSTRUCT # ==================== # FOLLOW AND READ DATA AND RECONSTRUCT #
    print('used cor:', self.COR)
    FF0 = numpy.ones((FF_sequence_size, im.size[1], im.size[0]), numpy.float32)
    print('stack size: number of images', last - first + 1,
          '; Y =', im.size[1], '; X =', im.size[0])
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime())
                        + ' stack size: number of images ' + str(last - first + 1)
                        + '; Y =' + str(im.size[1]) + '; X =' + str(im.size[0]))
    i = first  # Projection number counter (EXcluding FFs, zero, Paulchen, etc)
    n = 4      # Image file number counter (INcluding FFs, zero, Paulchen, etc)
    while i < last + 1:
        self.lcdNumber_Total.display(i)
        self.lcdNumber_Image.display(i % sequence_size)
        self.lcdNumber_Sequence.display(math.ceil(i / sequence_size))
        self.progressBar_Sequence.setValue((i % sequence_size) * 100 / sequence_size)
        self.progressBar_Total.setValue(i * 100 / (sequence_size * number_of_sequences))
        self.algorithm = self.algorithm_list.currentText()
        self.filter = self.filter_list.currentText()
        QtCore.QCoreApplication.processEvents()
        time.sleep(0.2)
        if (i % sequence_size) == 1:
            j = 1  # FF counter
            while (j < FF_sequence_size + 1):
                filename_FF = path_in + namepart + str(n).zfill(4) + filetype
                filename_FF_ = path_in + namepart + str(n + 1).zfill(4) + filetype
                while os.path.exists(filename_FF_) != True:
                    time.sleep(2)
                    print('Waiting for next Flat Field:', filename_FF)
                    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + ' Waiting for next Flat Field: ' + filename_FF)
                    QtCore.QCoreApplication.processEvents()
                    time.sleep(0.02)
                # print('Loading FF ', filename_FF)
                self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Loading FF ' + filename_FF)
                im_FF = Image.open(filename_FF)
                FF0[j - 1, :, :] = numpy.array(im_FF)
                n = n + 1
                j = j + 1
                if (j == FF_sequence_size):
                    FF_avg = numpy.median(FF0, axis=0)
                    FF_avg = numpy.single(FF_avg)
            # ZERO DEG CHECK OR SKIP
            if zero_deg_proj == 2:
                filename_zero_load = path_in + namepart + str(n).zfill(4) + filetype
                filename_zero = path_out_changes + namepart + str(i - first).zfill(4) + filetype
                # print('Loading Zero Degree Projection ', filename_zero_load)
                self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Loading Zero Degree Projection ' + filename_zero_load)
                im_zero = Image.open(filename_zero_load)
                im_zero = numpy.single(numpy.array(im_zero))
                im_zero_sub = numpy.subtract(im_zero, DF)
                FF_sub = numpy.subtract(numpy.array(FF_avg), numpy.array(DF))
                im_zero_normalized = numpy.divide(im_zero_sub, FF_sub)
                im_zero_normalized = numpy.nan_to_num(im_zero_normalized, copy=True,
                                                      nan=1.0, posinf=1.0, neginf=1.0)
                im_zero_normalized = ndimage.shift(
                    numpy.single(numpy.array(im_zero_normalized)),
                    [0, (x_offset_list[i - first] / self.pixel_size)],
                    order=3, mode='nearest', prefilter=True)
                array_zero = im_zero_normalized
                # print('writing Zero Degree Projection ', filename_zero)
                im_zero_normalized = Image.fromarray(im_zero_normalized)
                im_zero_normalized.save(filename_zero)
                n = n + 1
                drift_correction = 0  # under development
                if drift_correction == 1:
                    drift_detection_range_x = 5
                    drift_detection_range_y = 3
                    drift_detection_results_array = numpy.zeros(
                        (1 + 2 * drift_detection_range_x,
                         1 + 2 * drift_detection_range_y))
                    # array_zero_blur = im_zero_normalized.filter(ImageFilter.BoxBlur(5))
                    array_zero_blur = gaussian_filter(array_zero, sigma=2)
                    print(i)  # , first)
                    if i == first:
                        zero_first = numpy.array(array_zero_blur)
                        zero_first = numpy.reshape(zero_first, -1)
                    else:
                        x = -drift_detection_range_x
                        # print(x)
                        while x < drift_detection_range_x + 1:
                            y = -drift_detection_range_y
                            # print(y)
                            while y < drift_detection_range_y + 1:
                                array_zero_shifted = ndimage.shift(
                                    numpy.single(numpy.array(array_zero_blur)),
                                    [y, x], order=3, mode='nearest', prefilter=True)
                                array_zero_shifted = numpy.asarray(array_zero_shifted)
                                array_zero_shifted = numpy.reshape(array_zero_shifted, -1)
                                result = pearsonr(array_zero_shifted, zero_first)
                                # print(x, y, result[0])
                                drift_detection_results_array[
                                    x + drift_detection_range_x,
                                    y + drift_detection_range_y] = result[0]
                                y = y + 1
                            x = x + 1
                        # print(drift_detection_results_array)
                        ind = numpy.argmax(drift_detection_results_array)
                        ind_2d = numpy.unravel_index(ind, drift_detection_results_array.shape)
                        shift_2d = (ind_2d[0] - drift_detection_range_x,
                                    ind_2d[1] - drift_detection_range_y)
                        print(shift_2d)
        f = i
        i = i + 10
        n = n + 10
        if self.Abort_and_reconstruct_now.isChecked() == True:
            break

    # RECONSTRUCT COMPLETE VOLUME # ============================= # RECONSTRUCT COMPLETE VOLUME #
    new_list = [i * factor for i in theta_list]
    cor = self.COR_change.value() + round(2 * extend_FOV * im.size[0])
    center_list = [cor] * (i)
    # arrar = arra[:, volume_begin:volume_end, :]
    if self.checkBox_reconstruct_at_end.isChecked() == True:
        arra = arra[:f - first + 1, :, :]
    print('checking conditions for adv. ringfilter')
    if self.advanced_ringfilter.isChecked() == True and self.Abort_and_reconstruct_now.isChecked() == False:
        print('Applying advanced ring filter')
        arratwo = numpy.copy(arra)
        arratwo.fill(0.0)
        print('arra-shape', arra.shape)
        print('arratwo-shape', arratwo.shape)
        m = 0
        while (m < sequence_size * number_of_sequences):
            print('index', m, ' result',
                  int(number_of_sequences * ((m - 1) % sequence_size)
                      + (theta_first_list[math.floor((m - 1) / sequence_size)]
                         - min(theta_first_list)) * number_of_sequences),
                  ' last', f - first)
            temp = arra[int(m), :, :]
            arratwo[int(number_of_sequences * ((m - 1) % sequence_size)
                        + (theta_first_list[math.floor((m - 1) / sequence_size)]
                           - min(theta_first_list)) * number_of_sequences), :, :] = temp
            m = m + 1
        filename_ring_before = path_in + namepart + '_original_sinogram' + filetype
        img = Image.fromarray(arra[:, 11, :])
        img.save(filename_ring_before)
        # arrathree = ndimage.median_filter(arratwo,
        #     footprint=[[[0,0,0],[0,1,0],[0,0,0]], [[0,0,0],[0,1,0],[0,0,0]],
        #                [[0,0,0],[0,1,0],[0,0,0]]], mode='nearest')
        print('Lets start filtering')
        arrathree = numpy.copy(arratwo)
        deviation = 5  # THRESHOLD MEDIAN FILTER
        n = 0
        while n < arratwo.shape[1]:
            print(n, arratwo.shape)
            imathree = ndimage.median_filter(arratwo[:, n, :],
                                             footprint=[[0, 1, 0], [0, 1, 0], [0, 1, 0]],
                                             mode='nearest')
            print('median successful')
            divided = numpy.divide(arratwo[:, n, :], imathree)
            print('divide successful')
            divided = numpy.nan_to_num(divided, copy=True, nan=1.0, posinf=1.0, neginf=1.0)
            a = divided < 100 / (100 + deviation)  # True/False array
            b = divided > (100 + deviation) / 100  # True/False array
            c = a.astype(int) + b.astype(int)      # convert to 1 and 0
            d = numpy.clip(c, 0, 1)
            e = -d + 1
            g = d * imathree + e * arratwo[:, n, :]
            print('filling into array')
            arrathree[:, n, :] = g
            n = n + 1
        print('filtering done. Shape arrathree: ', arrathree.shape)
        print('Shape arra: ', arra.shape)
        filename_ring_after = path_in + namepart + '_after_ringfilter' + filetype
        img = Image.fromarray(arrathree[:, 11, :])
        img.save(filename_ring_after)
        print('starting to reorder again')
        print('arra and arrathree shape', arra.shape, arrathree.shape)
        m = 0
        while (m < f - first + 1):
            print(m)
            arra[int(m), :, :] = arrathree[
                int(number_of_sequences * ((m - 1) % sequence_size)
                    + (theta_first_list[math.floor((m - 1) / sequence_size)]
                       - min(theta_first_list)) * number_of_sequences), :, :]
            m = m + 1
        print('Advanced ringfilter finished')
    print('Advanced ringfilter passed')
    if self.checkBox_phase.isChecked() == True:
        print('Performing phase retrieval')
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Performing phase retrieval' + str(arra.shape))
        arra = tomopy.prep.phase.retrieve_phase(
            arra, pixel_size=self.pixel_size / 10000,
            dist=self.doubleSpinBox_distance.value(),
            energy=self.doubleSpinBox_Energy.value(),
            alpha=self.doubleSpinBox_alpha.value(),
            pad=True, ncore=self.no_of_cores, nchunk=None)
    i = 0
    while (i < math.ceil(arra.shape[1] / self.block_size)):
        print('Reconstructing block', i + 1, 'of', math.ceil(arra.shape[1] / self.block_size))
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Reconstructing block ' + str(i + 1) + ' of ' + str(math.ceil(arra.shape[1] / self.block_size)))
        # RECONSTRUCTING # ====================================== # RECONSTRUCTING #
        slices = tomopy.recon(arra[:, i * self.block_size:(i + 1) * self.block_size, :],
                              new_list, center=center_list,
                              algorithm=self.algorithm, filter_name=self.filter,
                              ncore=self.no_of_cores)
        slices = slices[:,
                        round(1 * extend_FOV * im.size[0]):-round(1 * extend_FOV * im.size[0]),
                        round(1 * extend_FOV * im.size[0]):-round(1 * extend_FOV * im.size[0])]
        slices = tomopy.circ_mask(slices, axis=0, ratio=1.0)
        print('Reconstructed Volume is', slices.shape)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Reconstructed Volume is' + str(slices.shape))
        a = 1
        while (a < self.block_size + 1) and (a < slices.shape[0] + 1):
            filename2 = path_out_reconstructed_full + namepart + str(a + volume_begin + i * self.block_size).zfill(4) + filetype
            print('Writing Reconstructed Slices:', filename2)
            self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Writing Reconstructed Slices:' + filename2)
            img = Image.fromarray(slices[a - 1, :, :])
            img.save(filename2)
            self.progressBar_Reconstruction.setValue((a + (i * self.block_size)) * 100 / arra.shape[1])
            QtCore.QCoreApplication.processEvents()
            time.sleep(0.02)
            a = a + 1
        i = i + 1

    # DIFFERENCE IMAGE AT 0, 90 AND 180° # ====================== # DIFFERENCE IMAGE AT 0, 90 AND 180° #
    if self.Abort_and_reconstruct_now.isChecked() != True:
        filename1 = path_in + namepart + str(number_of_sequences * sequence_size + (number_of_sequences + 1) * FF_sequence_size + 4).zfill(4) + filetype
        filename2 = path_in + namepart + str(number_of_sequences * sequence_size + (number_of_sequences + 1) * FF_sequence_size + 5).zfill(4) + filetype
        filename3 = path_in + namepart + str(number_of_sequences * sequence_size + (number_of_sequences + 1) * FF_sequence_size + 6).zfill(4) + filetype
        filename4 = path_in + namepart + str(number_of_sequences * sequence_size + (number_of_sequences + 1) * FF_sequence_size + 3).zfill(4) + filetype
        while os.path.exists(filename3) != True:
            time.sleep(1)
            print('waiting for last file:', filename3)
            self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'waiting for last file:' + filename3)
        time.sleep(5)
        eim_000deg = Image.open(filename1)
        eim_090deg = Image.open(filename2)
        eim_180deg = Image.open(filename3)
        FF = Image.open(filename4)
        eim_000deg = numpy.single(numpy.array(eim_000deg))
        eim_090deg = numpy.single(numpy.array(eim_090deg))
        eim_180deg = numpy.single(numpy.array(eim_180deg))
        eproj_000_sub = numpy.subtract(eim_000deg, DF)
        eproj_090_sub = numpy.subtract(eim_090deg, DF)
        eproj_180_sub = numpy.subtract(eim_180deg, DF)
        FF_sub = numpy.subtract(numpy.array(FF), numpy.array(DF))
        eim_000_normalized = numpy.divide(eproj_000_sub, FF_sub)
        eim_090_normalized = numpy.divide(eproj_090_sub, FF_sub)
        eim_180_normalized = numpy.divide(eproj_180_sub, FF_sub)
        div_000_normalized = numpy.divide(eim_000_normalized, im_000_normalized)
        div_090_normalized = numpy.divide(eim_090_normalized, im_090_normalized)
        div_180_normalized = numpy.divide(eim_180_normalized, im_180_normalized)
        filename_e_000 = path_out_changes + namepart + 'end_000_deg' + filetype
        print('End Projection at 0°:', filename_e_000)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'End Projection at 0°:' + filename_e_000)
        img = Image.fromarray(eim_000_normalized)
        img.save(filename_e_000)
        filename_e_090 = path_out_changes + namepart + 'end_090_deg' + filetype
        print('End Projection at 90°:', filename_e_090)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'End Projection at 90°:' + filename_e_090)
        img = Image.fromarray(eim_090_normalized)
        img.save(filename_e_090)
        filename_e_180 = path_out_changes + namepart + 'end_180_deg' + filetype
        print('End Projection at 180°:', filename_e_180)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'End Projection at 180°:' + filename_e_180)
        img = Image.fromarray(eim_180_normalized)
        img.save(filename_e_180)
        filename_000 = path_out_changes + namepart + 'div_000_deg' + filetype
        print('Difference in Projection at 0°:', filename_000)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Difference in Projection at 0°:' + filename_000)
        img = Image.fromarray(div_000_normalized)
        img.save(filename_000)
        filename_090 = path_out_changes + namepart + 'div_090_deg' + filetype
        print('Difference in Projection at 90°:', filename_090)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Difference in Projection at 90°:' + filename_090)
        img = Image.fromarray(div_090_normalized)
        img.save(filename_090)
        filename_180 = path_out_changes + namepart + 'div_180_deg' + filetype
        print('Difference in Projection at 180°:', filename_180)
        self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Difference in Projection at 180°:' + filename_180)
        img = Image.fromarray(div_180_normalized)
        img.save(filename_180)
    print('Done!')
    self.logbook.append(strftime("%Y_%m_%d %H:%M:%S ", localtime()) + 'Done!')
    protocol = self.logbook.toPlainText()
    print(len(protocol), ' signs saved in protocol')
    text_file = open(self.file_name_protocol, "wt")
    z = text_file.write(protocol)
    text_file.close()
    sys.exit(app.exec_())
def frame_shift(array, shift_y, shift_x, imlib='ndimage-fourier', interpolation='bicubic'): """ Shifts a 2d array by shift_y, shift_x. Boundaries are filled with zeros. Parameters ---------- array : array_like Input 2d array. shift_y, shift_x: float Shifts in y and x directions. imlib : {'ndimage-fourier', 'opencv', 'ndimage-interp'}, string optional Library or method used for performing the image shift. interpolation : {'bicubic', 'bilinear', 'nearneig'}, optional Only used when imlib is set to 'opencv' or 'ndimage-interp', where the images are shifted via interpolation. 'nearneig' stands for nearest-neighbor, 'bilinear' stands for bilinear and 'bicubic' stands for bicubic interpolation over a 4x4 pixel neighborhood. 'bicubic' is the default. 'nearneig' is the fastest method and 'bicubic' the slowest of the three. 'nearneig' is the poorest option for interpolation of noisy astronomical images. Returns ------- array_shifted : array_like Shifted 2d array. Notes ----- Regarding the imlib parameter: 'ndimage-fourier' does a Fourier shift operation and better preserves the pixel values (and therefore the flux and photometry). 'ndimage-fourier' is used by default from VIP version 0.5.3. Interpolation-based shifting ('opencv' and 'ndimage-interp') is faster than the Fourier shift. 'opencv' can be used when speed is critical and flux preservation is not that important. """ if not array.ndim == 2: raise TypeError('Input array is not a frame or 2d array') image = array.copy() if imlib not in ['ndimage-fourier', 'ndimage-interp', 'opencv']: msg = 'Imlib value not recognized, try ndimage-fourier, ndimage-interp ' msg += 'or opencv' raise ValueError(msg) if imlib == 'ndimage-fourier': shift_val = (shift_y, shift_x) array_shifted = fourier_shift(np.fft.fftn(image), shift_val) array_shifted = np.fft.ifftn(array_shifted) array_shifted = array_shifted.real elif imlib == 'ndimage-interp': if interpolation == 'bilinear': intp = 1 elif interpolation == 'bicubic': intp = 3 elif interpolation == 'nearneig': intp = 0 else: raise TypeError('Interpolation method not recognized.') array_shifted = shift(image, (shift_y, shift_x), order=intp) elif imlib == 'opencv': if no_opencv: msg = 'Opencv python bindings cannot be imported. Install opencv or ' msg += 'set imlib to ndimage-fourier or ndimage-interp' raise RuntimeError(msg) if interpolation == 'bilinear': intp = cv2.INTER_LINEAR elif interpolation == 'bicubic': intp = cv2.INTER_CUBIC elif interpolation == 'nearneig': intp = cv2.INTER_NEAREST else: raise TypeError('Interpolation method not recognized.') image = np.float32(image) y, x = image.shape M = np.float32([[1, 0, shift_x], [0, 1, shift_y]]) array_shifted = cv2.warpAffine(image, M, (x, y), flags=intp) return array_shifted
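# A quick flux-preservation check for frame_shift (hedged example; assumes
# this module's imports of numpy, scipy.ndimage.shift and
# scipy.ndimage.fourier_shift are in place). The Fourier shift keeps the
# total flux essentially unchanged, which is why it is the default despite
# being slower than interpolation-based shifting.
import numpy as np

rng = np.random.default_rng(0)
frame = rng.random((64, 64))
f_four = frame_shift(frame, 0.3, -1.7, imlib='ndimage-fourier')
f_intp = frame_shift(frame, 0.3, -1.7, imlib='ndimage-interp', interpolation='bicubic')
print(frame.sum(), f_four.sum(), f_intp.sum())  # the Fourier sums should match closely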
def total_model(disk, imres=0.05, distance=122., chanmin=-2.24, nchans=15, chanstep=0.32, flipme=True, Jnum=2, freq0=345.79599, xnpix=512, vsys=5.79, PA=312.46, offs=[0.0, 0.0], modfile='testpy_alma', abund=1., obsv=None, wind=False, isgas=True, includeDust=False, extra=0, bin=1, hanning=False, x_offset=0.48, y_offset=0.62, stellar_flux=1.65e-5): '''Run all of the model calculations given a disk object. Outputs are a fits file with the model images, along with visibility files (one in miriad format and one in fits format) for this model :param disk: A Disk object. This contains the structure of the disk over which the radiative transfer calculation will be done. :param imres: Model image resolution in arcsec. Should be the pixel size in the data image. :param distance: Distance in parsec to the target :param chanmin: Minimum channel velocity in km/sec :param nchans: Number of channels to model :param chanstep: Resolution of each channel, in km/sec :param flipme: To save time, the code can calculate the radiative transfer for half of the line, and then mirror these results to fill in the rest of the line. Set flipme=1 to perform this mirroring, or use flipme=0 to compute the entire line profile :param Jnum: The lower J quantum of the transition of interest. Ex: For the CO J=3-2 transition, set Jnum=2 :param freq0: The rest frequency of the transition, in GHz. :param xnpix: Number of pixels in model image. xnpix*imres will equal the desired width of the image. :param vsys: Systemic velocity of the star, in km/sec :param PA: position angle of the disk (updated to 312.46 from Katherine's value) :param offs: Disk offset from image center, in arcseconds :param modfile: The base name for the model files. This code will create modfile+'.fits' (the model image) :param datfile: The base name for the data files. You need to have datfile+'.vis' (data visibilities in miriad uv format) and datfile+'.cm' (cleaned map of data) for the code to work. The visibility file is needed when running uvmodel and the cleaned map is needed for the header keywords. :param miriad: Set to True to call a set of miriad tasks that convert the model fits image to a visbility fits file. If this is False, then there is no need to set the datfile keyword (the miriad tasks are the only place where they are used). :param abund: This code assumes that you are working with the dominant isotope of CO. If this is not the case, then use the abund keyword to set the relative abundance of your molecule (e.g. 13CO or C18O) relative to CO. :param obsv: Velocities of the channels in the observed line. The model is interpolated onto these velocities, accounting for vsys, the systematic velocity :param wind: Include a very artificial wind. This 'wind' travels vertically away from the midplane at the sound speed from every place within the disk :param isgas: Do the modeling of a gas line instead of just continuum. Setting isgas to False will only calculate dust continuum emission at the specified frequency. :param includeDust: Set to True if you want to include dust continuum in the radiative transfer calculation. This does not calculate a separate continuum image (set isgas=False for that to happen) but instead include dust radiative transfer effects in the gas calculations (e.g. dust is optically thick and obscuring some of the gas photons from the midplane) :param extra: A parameter to control what extra plots/data are output. 
The options are 1 (figure showing the disk structure with the tau=1 surface marked with a dashed line), 2.1 (a list of the heights as a function of radius between which 50% of the flux arises), 2.2 (a list of temperatures as a function of radius between which 50% of the flux arises), 3.0 (channel maps showing height of tau=1 surface), 3.1 (channel maps showing the temperature at the tau=1 surface), 3.2 (channel maps showing the maximum optical depth) :param bin: (default=1) If you are comparing to data that has been binned from the native resolution, then you can include that binning in the models. e.g. If the data have been binned down by a factor of two, then set bin=2. This ensures that the model goes through similar processing as the data. Note that bin only accepts integer values. :param hanning: (default=False) Set to True to perform hanning smoothing on a spectrum. Hanning smoothing is designed to reduce Gibbs ringing, which is associated with the finite time sampling that is used in the generation of a spectrum within an interferometer. Hanning smoothing is included as a running average that replaces the flux in channel i with 25% of the flux in channel i-1, 50% of the flux in channel i, and 25% of the flux in channel i+1. :param x_offset: Offset of the stellar point source from the image center along x, in arcseconds. :param y_offset: Offset of the stellar point source from the image center along y, in arcseconds. :param stellar_flux: Flux added to the pixel at the stellar position in every channel. ''' params = disk.get_params() obs = [Jnum, freq0] obs2 = disk.get_obs() for x in obs2: obs.append(x) obs.append(0.) #If accounting for binning then decrease the channel width, and increase the number of channels if not isinstance(bin, int): print('bin must be an integer. Setting bin=1') bin = 1 nchans *= bin chanstep /= bin chanmin -= (bin - 1) * chanstep / 2. if nchans == 1: flipme = False xpixscale = imres dd = distance * pc # - distance in cm arcsec = rad / dd # - angular conversion factor (cm to arcsec) chans = chanmin + np.arange(nchans) * chanstep tchans = chans.astype('|S6') # - convert channel names to string # extract disk structure from Disk object cube = np.zeros((disk.nphi, disk.nr, nchans)) cube2 = np.zeros((disk.nphi, disk.nr, disk.nz, nchans)) #tau cube3 = np.zeros((disk.nphi, disk.nr, disk.nz, nchans)) #tau_dust X = disk.X Y = disk.Y if isgas: # approximation for partition function try: #The code recognizes 13CO(2-1), C18O(2-1), DCO+(3-2), HCO+(4-3), HCN(4-3), CO(3-2), CS(7-6), CO(1-0), CO(2-1), CO(6-5), DCO+(5-4), DCO+(4-3), C18O(3-2), C18O(1-0) if Jnum == 1 and np.abs(freq0 - 220.398677) < .1: moldat = mol_dat(file='13co.dat') elif Jnum == 1 and np.abs(freq0 - 219.56036) < .1: moldat = mol_dat(file='c18o.dat') elif Jnum == 2 and np.abs(freq0 - 216.11258) < 1: moldat = mol_dat(file='dcoplus.dat') elif Jnum == 3 and np.abs(freq0 - 356.734223) < 0.1: moldat = mol_dat(file='hcoplus.dat') elif Jnum == 3 and np.abs(freq0 - 354.50547590) < 0.1: moldat = mol_dat(file='hcn.dat') elif Jnum == 2 and np.abs(freq0 - 345.7959899) < 0.1: moldat = mol_dat(file='co.dat') elif Jnum == 6 and np.abs(freq0 - 342.88285030) < 0.1: moldat = mol_dat(file='cs.dat') elif Jnum == 0 and np.abs(freq0 - 115.2712) < 0.1: moldat = mol_dat(file='co.dat') elif Jnum == 1 and np.abs(freq0 - 230.538) < 0.1: moldat = mol_dat(file='co.dat') elif Jnum == 5 and np.abs(freq0 - 691.4730763) < 0.1: moldat = mol_dat(file='co.dat') elif Jnum == 2 and np.abs(freq0 - 329.3305525) < 0.1: moldat = mol_dat(file='c18o.dat') elif Jnum == 0 and np.abs(freq0 - 109.7821734) < 0.1: moldat = mol_dat(file='c18o.dat') elif Jnum == 4 and np.abs(freq0 - 360.16978) < 0.1: moldat = mol_dat(file='dcoplus.dat') elif Jnum == 3 and np.abs(freq0 - 288.143858) < 0.1:
moldat = mol_dat(file='dcoplus.dat') else: raise ValueError( 'Make sure that Jnum and freq0 match one of: 13CO(2-1), C18O(2-1), DCO+(3-2), HCO+(4-3), HCN(4-3), CO(3-2), CS(7-6), CO(1-0), CO(2-1), CO(6-5), DCO+(5-4), DCO+(4-3), C18O(3-2), C18O(1-0)' ) except: raise gl = 2. * obs[0] + 1 El = moldat['eterm'][obs[0]] * h * c # - energy of lower level Te = 2 * El / (obs[0] * (obs[0] + 1) * kB) parZ = np.sqrt(1. + (2. / Te)**2 * disk.T**2) # calculate level population tnl = gl * abund * disk.rhoG * np.exp(-(El / kB) / disk.T) / parZ w = tnl < 0 if w.sum() > 0: tnl[w] = 0 # Do the calculation if flipme & (nchans % 2 == 0): dchans = int(nchans / 2.) elif flipme & (nchans % 2 == 1): dchans = int(nchans / 2. + 0.5) else: dchans = nchans for i in range(int(dchans)): obs[6] = chans[i] # - vsys if isgas: Inu, Inuz, tau_dust = gasmodel(disk, params, obs, moldat, tnl, wind, includeDust=includeDust) #Inu_dust,tau_dust = dustmodel(disk,freq0) cube[:, :, i] = Inu #print('Finished channel %i / %i' % (i+1,nchans)) cube2[:, :, :, i] = Inuz cube3[:, :, :, i] = tau_dust else: Inu, tau_dust = dustmodel(disk, freq0) cube[:, :, i] = Inu cube2[:, :, :, i] = tau_dust if flipme: cube[:, :, dchans:] = cube[:, :, -(dchans + 1):-(nchans + 1):-1] cube2[:, :, :, dchans:] = cube2[:, :, :, -(dchans + 1):-(nchans + 1):-1] cube3[:, :, :, dchans:] = cube3[:, :, :, -(dchans + 1):-(nchans + 1):-1] if extra == 1: # plot tau=1 surface in central channel plot_tau1(disk, cube2[:, :, :, int(nchans / 2 - 1)], cube3[:, :, :, int(nchans / 2 - 1)]) if (extra == 2.1) or (extra == 2.2): for r in range(10, 500, 20): #20 if extra > 2.1: flux_range( disk, cube3, r, height=False ) #cube3 is cumulative flux along each sight line [nr,nphi,ns,nchan] else: flux_range(disk, cube3, r, height=True) if extra > 2.5: print('*** Creating tau=1 image ***') ztau1tot = np.zeros((disk.nphi, disk.nr, nchans)) for i in range(int(nchans)): ztau1tot[:, :, i] = findtau1(disk, cube2[:, :, :, i], cube[:, :, i], cube3[:, :, :, i], flag=extra - 3) #ztau1tot[:,:,i] = cube2[:,:,290,i]*Disk.AU #now create images of ztau1, similar to images of intensity imt = xy_interpol(ztau1tot, X * arcsec, Y * arcsec, xnpix=xnpix, imres=imres, flipme=flipme) imt[np.isnan(imt)] = -170 * disk.AU velo = chans + vsys if obsv is not None: imt2 = np.zeros((xnpix, xnpix, len(obsv))) for ix in range(xnpix): for iy in range(xnpix): if velo[1] - velo[0] < 0: imt2[ix, iy, :] = np.interp(obsv, velo[::-1], imt[ix, iy, ::-1]) #imt[ix,iy,:]=imt[ix,iy,::-1] else: imt2[ix, iy, :] = np.interp(obsv, velo, imt[ix, iy, :]) hdrt = write_h(nchans=len(obsv), dd=distance, xnpix=xnpix, xpixscale=xpixscale, lstep=chanstep, vsys=vsys) else: imt2 = imt hdrt = write_h(nchans=nchans, dd=distance, xnpix=xnpix, xpixscale=xpixscale, lstep=chanstep, vsys=vsys) #imt2[np.isnan(imt2)] = -170*disk.AU #imt2[np.isinf(imt2)] = -170*disk.AU imt_s = ndimage.rotate(imt2, 90. + PA, reshape=False) pixshift = np.array([-1., 1.]) * offs / ( 3600. 
* np.abs([hdrt['cdelt1'], hdrt['cdelt2']])) imt_s = ndimage.shift(imt_s, (pixshift[0], pixshift[1], 0), mode='nearest') hdut = fits.PrimaryHDU((imt_s / disk.AU).T, hdrt) #hdut=fits.PrimaryHDU((imt_s).T,hdrt) hdut.writeto(modfile + 'p_tau1.fits', overwrite=True, output_verify='fix') # - interpolate onto a square grid im = xy_interpol(cube, X * arcsec, Y * arcsec, xnpix=xnpix, imres=imres, flipme=flipme) if isgas: # - interpolate onto velocity grid of observed star velo = chans + vsys if obsv is not None: obsv2 = np.arange( len(obsv) * bin) * (obsv[1] - obsv[0]) / bin + obsv[0] im2 = np.zeros((xnpix, xnpix, len(obsv2))) for ix in range(xnpix): for iy in range(xnpix): if velo[1] - velo[0] < 0: im2[ix, iy, :] = np.interp(obsv2, velo[::-1], im[ix, iy, ::-1]) else: im2[ix, iy, :] = np.interp(obsv2, velo, im[ix, iy, :]) else: im2 = im if hanning: im2 = perform_hanning(im2) if bin > 1: new_im = np.zeros((im2.shape[0], im2.shape[1], im2.shape[2] // bin)) for k in range(new_im.shape[2]): new_im[:, :, k] = np.mean(im2[:, :, k * bin:k * bin + bin], axis=2) im2 = new_im nchans //= bin # keep nchans an integer for np.arange/np.repeat below chanstep *= bin chans = chanmin + np.arange(nchans) * chanstep # - make header if isgas: if obsv is not None: hdr = write_h(nchans=len(obsv), dd=distance, xnpix=xnpix, xpixscale=xpixscale, lstep=chanstep, vsys=vsys) else: hdr = write_h(nchans=nchans, dd=distance, xnpix=xnpix, xpixscale=xpixscale, lstep=chanstep, vsys=vsys) else: im2 = im hdr = write_h_cont(dd=distance, xnpix=xnpix, xpixscale=xpixscale) # - shift and rotate model im_s = ndimage.rotate(im2, 90. + PA, reshape=False) #***# pixshift = np.array( [-1., 1.]) * offs / (3600. * np.abs([hdr['cdelt1'], hdr['cdelt2']])) im_s = ndimage.shift(im_s, (pixshift[0], pixshift[1], 0), mode='nearest') * Jy * (xpixscale / rad)**2 # Here is a good place to add a star in the appropriate pixel - this is currently Meredith's pseudocode and won't run #xpix = np.rint(cen+x_offset/xpixscale) #ypix = np.rint(cen+y_offset/xpixscale) #im_s[xpix,ypix,:]+=star_flux #Ava's Code: #cen = np.array([xnpix/2.+.5, xnpix/2.+.5]) # - central pixel location #print xpixscale xpix = int(np.rint((xnpix / 2. + .5) - x_offset / xpixscale)) ypix = int(np.rint((xnpix / 2. + .5) + y_offset / xpixscale)) #print(xpix, ypix, stellar_flux) #print(np.shape(im_s[xpix,ypix,:])) im_s[xpix, ypix, :] += np.repeat(stellar_flux, nchans) # write processed model hdu = fits.PrimaryHDU(im_s.T, hdr) hdu.writeto(modfile + '.fits', overwrite=True, output_verify='fix')
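# perform_hanning is called above but not defined in this excerpt. A minimal
# sketch consistent with the docstring (a 25%/50%/25% running average over
# the spectral axis, which is the last axis of the image cube); edge
# channels are left untouched here, which may differ from the real helper.
import numpy as np

def perform_hanning_sketch(cube):
    out = cube.copy()
    out[..., 1:-1] = (0.25 * cube[..., :-2]
                      + 0.50 * cube[..., 1:-1]
                      + 0.25 * cube[..., 2:])
    return out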
def align_traces(traces, lgcjustshifts=False, n_cut=5000, cut_off_freq=5000.0, fs=625e3): """ Function to align dIdV traces if each trace does not trigger at the same point. Uses a convolution of the traces to find the time offset. Parameters ---------- traces : ndarray Array of shape (# traces, # bins per trace). lgcjustshifts : boolean, optional If False, the aligned traces and the phase shifts are returned. If True, just the phase shifts are returned. Default is False. n_cut : int, optional The number of bins to use to do the convolution. Just need enough information to see the periodic signal. Default is 5000. cut_off_freq : float or int, optional 3dB cut off frequency for filter. Default is 5000 Hz. fs : float or int, optional Sample rate of data in Hz. Default is 625e3 Hz. Returns ------- shifts : ndarray Array of phase shifts for each trace in units of bins. masked_aligned : masked ndarray, optional Array of time shift corrected traces, same shape as input traces. The masked array masks the np.NaN values in the time shifted traces so that normal numpy functions will ignore the nan's in computations. """ # Filter and truncate all traces to speed up. traces_filt = lowpassfilter( traces[:, :n_cut], cut_off_freq=cut_off_freq, fs=fs, ) traces_temp = traces_filt - np.mean(traces_filt, axis=-1, keepdims=True) traces_norm = traces_temp / (np.amax(traces_temp, axis=-1, keepdims=True)) # use the first trace to define the origin of alignment t1 = traces_norm[0] # define the origin orig = np.argmax(signal.fftconvolve(t1, t1[::-1], mode='full')) # initialize empty array to store the aligned traces traces_aligned = np.zeros_like(traces) shifts = np.zeros(traces.shape[0]) for ii in range(traces.shape[0]): t2 = traces_norm[ii] # Convolve each trace against the origin trace, find the index of the # max value, then subtract off the index of the origin trace t2_shift = np.argmax(signal.fftconvolve(t1, t2[::-1], mode='full')) - orig shifts[ii] = t2_shift if not lgcjustshifts: traces_aligned[ii] = ndimage.shift( traces[ii], t2_shift, cval=np.nan, ) if lgcjustshifts: return shifts else: flat_aligned = traces_aligned.flatten() masked_aligned = np.ma.array( flat_aligned, mask=np.isnan(flat_aligned), ).reshape(traces_aligned.shape) return shifts, masked_aligned
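# Example use of align_traces (hedged; relies on this module's lowpassfilter
# helper and its numpy/scipy imports). Three copies of a decaying sine wave
# with known integer offsets are aligned; the returned shifts are the
# corrective shifts, i.e. minus the imposed offsets.
import numpy as np

t = np.arange(20000)
base = np.sin(2 * np.pi * t / 500.0) * np.exp(-t / 4000.0)
traces = np.vstack([np.roll(base, k) for k in (0, 37, -12)])
shifts, masked_aligned = align_traces(traces)
print(shifts)  # expected to be close to [0, -37, 12]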
def as_generator( self, a, do_rotations=True, do_translations=True, x_shift_limits=(-5, 5), y_shift_limits=(-5, 5), angular_limits=(-5, 5), granularity=10, random_sampling=False, abort_early=True, ): """Adversarially chosen rotations and translations. Parameters ---------- input_or_adv : `numpy.ndarray` or :class:`Adversarial` The original, unperturbed input as a `numpy.ndarray` or an :class:`Adversarial` instance. label : int The reference label of the original input. Must be passed if `a` is a `numpy.ndarray`, must not be passed if `a` is an :class:`Adversarial` instance. unpack : bool If true, returns the adversarial input, otherwise returns the Adversarial object. do_rotations : bool If False no rotations will be applied to the image. do_translations : bool If False no translations will be applied to the image. x_shift_limits : int or (int, int) Limits for horizontal translations in pixels. If one integer is provided the limits will be (-x_shift_limits, x_shift_limits). y_shift_limits : int or (int, int) Limits for vertical translations in pixels. If one integer is provided the limits will be (-y_shift_limits, y_shift_limits). angular_limits : int or (int, int) Limits for rotations in degrees. If one integer is provided the limits will be [-angular_limits, angular_limits]. granularity : int Density of sampling within limits for each dimension. random_sampling : bool If True we sample translations/rotations randomly within limits, otherwise we use a regular grid. abort_early : bool If True, the attack stops as soon as it finds an adversarial. """ min_, max_ = a.bounds() channel_axis = a.channel_axis(batch=False) def get_samples(limits, num, do_flag): # get regularly spaced or random samples within limits lb, up = (-limits, limits) if isinstance(limits, int) else limits if not do_flag: return [0] elif random_sampling: return nprng.uniform(lb, up, num) else: return np.linspace(lb, up, num) def crop_center(img): # crop center of the image (of the size of the original image) start = tuple( map(lambda a, da: (a - da) // 2, img.shape, a.unperturbed.shape)) end = tuple(map(operator.add, start, a.unperturbed.shape)) slices = tuple(map(slice, start, end)) return img[slices] x_shifts = get_samples(x_shift_limits, granularity, do_translations) y_shifts = get_samples(y_shift_limits, granularity, do_translations) rotations = get_samples(angular_limits, granularity, do_rotations) transformations = product(x_shifts, y_shifts, rotations) for x_shift, y_shift, angle in transformations: if channel_axis == 0: xy_shift = (0, x_shift, y_shift) axes = (1, 2) elif channel_axis == 2: xy_shift = (x_shift, y_shift, 0) axes = (0, 1) else: # pragma: no cover raise ValueError("SpatialAttack only supports models " "and inputs with NCHW or NHWC format") # rotate image (increases size) x = a.unperturbed x = rotate(x, angle=angle, axes=axes, reshape=True, order=1) # translate image x = shift(x, shift=xy_shift, mode="constant") # crop center x = crop_center(x) # ensure values are in range x = np.clip(x, min_, max_) # test image _, is_adv = yield from a.forward_one(x) if abort_early and is_adv: break
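# Standalone illustration of the search space enumerated above (hedged
# sketch): with granularity=10 and the default limits, the attack sweeps a
# regular 10 x 10 x 10 grid of (x_shift, y_shift, angle) candidates, i.e.
# up to 1000 transformed versions of the input.
import numpy as np
from itertools import product

granularity = 10
x_shifts = np.linspace(-5, 5, granularity)
y_shifts = np.linspace(-5, 5, granularity)
rotations = np.linspace(-5, 5, granularity)
candidates = list(product(x_shifts, y_shifts, rotations))
print(len(candidates))  # 1000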
# Function to calculate SSD value for a given angle def ssd_calc_angle(theta): if img_ref.shape != img_mov.shape: print("Images don't have the same shape.") return None return np.sum((np.array(img_ref_1, dtype=np.float32) - np.array( ndimage.rotate(img_mov_1, theta, reshape=False, mode='nearest'), dtype=np.float32))**2) # Center-of-mass shifting and thresholding of the fixed image img_ref[img_ref < 350] = 0 img_ref_ind = np.nonzero(img_ref) center_img_ref = [round(img_ref_ind[0].mean()), round(img_ref_ind[1].mean())] img_ref_1 = ndimage.shift(img_ref, ((len(img_ref) // 2 - center_img_ref[0]), (len(img_ref[0]) // 2 - center_img_ref[1])), mode='nearest') plt.imshow(img_ref + img_ref_1) plt.show() print(center_img_ref) plt.imshow(img_ref + img_mov) plt.show() # Center-of-mass shifting and thresholding of the moving image img_mov[img_mov < 350] = 0 img_mov_ind = np.nonzero(img_mov) center_img_mov = [round(img_mov_ind[0].mean()), round(img_mov_ind[1].mean())] print(center_img_mov) img_mov_1 = ndimage.shift(img_mov, ((len(img_ref) // 2 - center_img_mov[0]), (len(img_ref[0]) // 2 - center_img_mov[1])), mode='nearest')
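# The loop that actually calls ssd_calc_angle is not shown in this excerpt;
# a minimal brute-force angle scan under that assumption:
import numpy as np

thetas = np.arange(-20.0, 20.0, 0.5)
ssd_values = [ssd_calc_angle(t) for t in thetas]
best_theta = thetas[int(np.argmin(ssd_values))]
print('best rotation (deg):', best_theta)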
def heikinashi(bars): """ Heikin Ashi calculation: https://school.stockcharts.com/doku.php?id=chart_analysis:heikin_ashi ha_open calculation based on: https://stackoverflow.com/a/55110393 ha_open = [ calculate first record ][ append remaining records with list comprehension method ] list comprehension method is significantly faster than a for loop result: ha_open[0] = (bars.open[0] + bars.close[0]) / 2 ha_open[1] = (ha_open[0] + ha_close[0]) / 2 ... ha_open[last] = (ha_open[len(bars)-2] + ha_close[len(bars)-2]) / 2 """ bars = bars.copy() bars.loc[:, 'ha_close'] = bars.loc[:, ['open', 'high', 'low', 'close']].mean( axis=1) ha_open = [(bars.open[0] + bars.close[0]) / 2] [ ha_open.append((ha_open[x] + bars.ha_close[x]) / 2) for x in range(0, len(bars) - 1) ] bars['ha_open'] = ha_open bars.loc[:, 'ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1) bars.loc[:, 'ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1) result = pd.DataFrame(index=bars.index, data={ 'open': bars['ha_open'], 'high': bars['ha_high'], 'low': bars['ha_low'], 'close': bars['ha_close'] }) # useful little helpers result['flat_bottom'] = np.vectorize(_flat_bottom)(result['close'], result['low'], result['open'], result['high']) result['flat_top'] = np.vectorize(_flat_top)(result['close'], result['low'], result['open'], result['high']) result['small_body'] = np.vectorize(_small_body)(result['close'], result['low'], result['open'], result['high']) result['candle'] = np.vectorize(_candle_type)(result['open'], result['close']) result['reversal'] = np.vectorize(_reversal)(result['candle'], shift(result['candle'], 1, cval=np.NAN)) result['lower_wick'] = np.vectorize(_wick_length)(result['close'], result['low'], result['open'], result['high'], False) result['upper_wick'] = np.vectorize(_wick_length)(result['close'], result['low'], result['open'], result['high'], True) return result
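# Tiny worked example of the ha_open recurrence documented above (hedged;
# only the OHLC math is exercised by hand, since _flat_bottom and the other
# vectorized helpers are module-level and not shown here).
import pandas as pd

bars = pd.DataFrame({'open': [10.0, 11.0, 12.0],
                     'high': [11.0, 12.0, 13.0],
                     'low': [9.0, 10.0, 11.0],
                     'close': [11.0, 12.0, 11.5]})
ha_close = bars[['open', 'high', 'low', 'close']].mean(axis=1)
ha_open = [(bars.open[0] + bars.close[0]) / 2]
for x in range(len(bars) - 1):
    ha_open.append((ha_open[x] + ha_close[x]) / 2)
print(ha_open)  # [10.5, 10.375, 10.8125]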
def mcg_02_01_051(): import numpy as np import astropy.io.fits as pyfits import research.pab.align import stwcs # research.pab.align.coarse_align('MCG-02-01-051-46-243-F673N_drc_sci.fits', ref='MGC-02-01-051-01-240-F132N_drz_sci.fits') # research.pab.align.coarse_align('ARP256NED01-02-060-F435W_drc_sci.fits', ref='MGC-02-01-051-01-240-F132N_drz_sci.fits') # research.pab.align.coarse_align('ARP256NED01-02-060-F814W_drc_sci.fits', ref='MGC-02-01-051-01-240-F132N_drz_sci.fits') pab = pyfits.open('MGC-02-01-051-01-240-F132N_drz_sci.fits') con = pyfits.open('MGC-02-01-051-01-240-F130N_drz_sci.fits') bb = pyfits.open('MGC-02-01-051-01-240-F110W_drz_sci.fits') pab_wht = pyfits.open('MGC-02-01-051-01-240-F132N_drz_wht.fits') con_wht = pyfits.open('MGC-02-01-051-01-240-F130N_drz_wht.fits') bb_wht = pyfits.open('MGC-02-01-051-01-240-F110W_drz_wht.fits') slx, sly = slice(896, 1413), slice(703, 1695) ######### Voronoi bins wcs = stwcs.wcsutil.HSTWCS(pab) slwcs = wcs.slice([sly, slx]) pab_sub = (pab[0].data - con[0].data)[sly, slx] rms_sub = np.sqrt(1 / pab_wht[0].data + 1 / con_wht[0].data)[sly, slx] rms_sub[~np.isfinite(rms_sub)] = 1e10 pab_sub = (pab[0].data)[sly, slx] rms_sub = np.sqrt(1 / pab_wht[0].data)[sly, slx] rms_scl = 1 # SN = 5 # omask = np.abs(pab_sub/rms_sub) > SN # pab_sm = pab_sub*1 # pab_sm[omask] = 0. # sigma, rnd_scl = 2, 1./np.sqrt(2) #sigma = 1 # rnd_noise = np.random.normal(size=(100,100)) # rnd_scl = np.std(nd.gaussian_filter(rnd_noise, sigma=sigma)) # pab_sm = nd.gaussian_filter(pab_sm, sigma=sigma) # pab_sm[omask] = SN*100 #rnd_scl = threedhst.utils.nmad((pab_sub/rms_sub)[~omask]) rnd_scl = 1. #pyfits.writeto('pab_sci.fits', data=pab_sm, header=slwcs.to_header(), clobber=True) pyfits.writeto('pab_sci.fits', data=pab_sub, header=slwcs.to_header(), clobber=True) pyfits.writeto('pab_rms.fits', data=rms_sub * rms_scl, header=slwcs.to_header(), clobber=True) import research.pab research.pab.voronoi.go(cfgfile=None, fluximagefile='pab_sci.fits', fluxerrorfile='pab_rms.fits', distmpc=0.1, snthresh=SN, cvt_iters=10, max_z_kpc=100000.0, max_bin_size_pix=1000, output_prefix='pab_voronoi') #research.pab.voronoi.go(cfgfile='voronoi.cfg', output_prefix='pab_voronoi_cfg') v = pyfits.open('pab_voronoi.fits') mask = (np.isfinite(pab_sub) & (rms_sub < 10)) omask = np.abs(pab_sub / rms_sub) > SN files = glob.glob('[AM]*sci.fits') for file in files: im = pyfits.open(file) im_wht = pyfits.open(file.replace('_sci', '_wht')) wcs_im = stwcs.wcsutil.HSTWCS(im, ext=0) xy = wcs_im.all_world2pix([pab[0].header['CRVAL1']], [pab[0].header['CRVAL2']], 1) dx = pab[0].header['CRPIX1'] - xy[0][0] dy = pab[0].header['CRPIX2'] - xy[1][0] im_sub = nd.shift(im[0].data, (dy, dx))[sly, slx] wht_sub = nd.shift(im_wht[0].data, (dy, dx))[sly, slx] im_rms = 1 / np.sqrt(wht_sub) im_rms[~np.isfinite(im_rms)] = 1e10 binned_sci, binned_var, binned_area = research.pab.voronoi.rebin_output( im_sub, (im_rms)**2, v[0].data + 1, mask=mask, ds9=None) binned_sci[omask] = im_sub[omask] binned_var[omask] = im_rms[omask]**2 h = slwcs.to_header() filter = file.split('_')[0][-5:] h['FILTER'] = filter h['PHOTFLAM'] = im[0].header['PHOTFLAM'] pyfits.writeto('sub_%s_sci.fits' % (filter), data=binned_sci, header=h, clobber=True) pyfits.writeto('sub_%s_var.fits' % (filter), data=binned_var, header=h, clobber=True) #ds9.view(binned_sci) files = glob.glob('sub*sci.fits') sub = {} for file in files: print file im = pyfits.open(file) sub[im[0]. 
header['FILTER']] = im[0].data * im[0].header['PHOTFLAM'] / 4.e-19 pab = sub['F132N'] - sub['F130N'] ha = sub['F673N'] - (673. - 435.) / (814. - 435) * (sub['F814W'] / sub['F435W']) ha = sub['F673N'] - sub['F814W'] decrement = ha / pab / 10. dec_mask = (pab > 0.05) & np.isfinite(decrement) decrement[~dec_mask] = -10
def check_GD153(): import mywfc3.flux os.chdir('/Users/brammer/WFC3/Calibration/Cycle20/13092_Flux/Reduce') asn = threedhst.utils.ASNFile('Vy22-blue-F098M_asn.fits') root='' images = {'F098M':'ic461aghq_flt.fits.gz', 'F105W':'ic461agiq_flt.fits.gz', 'G102':'ic461agjq_flt.fits.gz', 'F140W':'ic461ah4q_flt.fits.gz', 'F160W':'ic461ah5q_flt.fits.gz', 'G141':'ic461ah6q_flt.fits.gz'} root='-off+x' images = {'F098M':'ic461agpq_flt.fits.gz', 'F105W':'ic461agqq_flt.fits.gz', 'G102':'ic461agrq_flt.fits.gz', 'F140W':'ic461ahbq_flt.fits.gz', 'F160W':'ic461ahcq_flt.fits.gz', 'G141':'ic461ahdq_flt.fits.gz'} # # root='-off-x' # images = {'F098M':'ic5v02aiq_flt.fits.gz', 'F105W':'ic5v02ajq_flt.fits.gz', 'G102':'ic5v02akq_flt.fits.gz', 'F140W':'ic5v41b6q_flt.fits.gz', 'F160W':'ic5v41b7q_flt.fits.gz', 'G141':'ic5v41b8q_flt.fits.gz'} blue = ['F098M', 'F105W', 'G102'] flat_file = {'G102':os.getenv('iref')+'/uc72113oi_pfl.fits', #F105W 'G141':os.getenv('iref')+'/uc721143i_pfl.fits'} #F140W flat = {} for key in flat_file.keys(): im = pyfits.open(flat_file[key]) flat[key] = im[1].data[5:-5, 5:-5] for filter in images.keys(): test = filter in blue band = 'blue'*test + 'red'*(not test) asn.product = 'GD153%s-%s-%s' %(root, band, filter) asn.exposures = [images[filter].split('_flt')[0]] asn.write(asn.product+'_asn.fits') im = pyfits.open('../RAW/'+images[filter]) if filter in flat.keys(): im[1].data /= flat[key] sky = pyfits.open('/Users/brammer/3DHST/Spectra/Work/CONF/sky.G141.set002.fits ')[0].data #sky /= flat[key] a = np.median(im[1].data/sky) im[1].data -= a*sky else: im[1].data -= np.median(im[1].data) # #if not os.path.exists(images[filter][:-3]): im.writeto(images[filter].split('.gz')[0], clobber=True) files=glob.glob('GD*%s*asn.fits' %(root)) for file in files: unicorn.reduce.interlace_combine(file.split('_asn')[0], growx=1, growy=1, pad=60, NGROW=0, view=False) model = unicorn.reduce.GrismModel('GD153-blue', direct='F105W', grism='G102', growx=1, growy=1, grow_factor=1) model.twod_spectrum(20, miny=-50) model.show_2d(savePNG=True) model = unicorn.reduce.GrismModel('GD153-red', growx=1, growy=1, grow_factor=1) model.twod_spectrum(12, miny=-50) model.show_2d(savePNG=True) import scipy.ndimage as nd twod = unicorn.reduce.Interlace2D('GD153-red_00012.2D.fits') wave, flux = twod.optimal_extract(twod.im['SCI'].data-twod.im['CONTAM'].data) sens = twod.im['SENS'].data plt.plot(wave, flux/sens, color='black', alpha=0.5, linewidth=3) plt.plot(wave, flux/nd.shift(sens, -0.5), color='purple', alpha=0.5) #plt.plot(wave, flux/nd.shift(sens, -1), color='green', alpha=0.5) # plt.plot(wave, flux/nd.shift(sens, -1.), color='red', alpha=0.5) sp = pyfits.open('/grp/hst/cdbs/calspec/gd153_mod_008.fits')[1].data plt.plot(sp['WAVELENGTH'], sp['FLUX']/1.e-17*0.92) twod.compute_model(lam_spec=np.cast[float](sp['WAVELENGTH']), flux_spec=np.cast[float](sp['FLUX'])/1.e-17*0.92/twod.total_flux) twod.model[twod.im['SCI'].data == 0] = 0 w2, f2 = twod.optimal_extract(twod.model) plt.plot(w2, f2/sens, color='green') bl = unicorn.reduce.Interlace2D('GD153-blue_00020.2D.fits') blwave, blflux = bl.optimal_extract(bl.im['SCI'].data-bl.im['CONTAM'].data) blsens = bl.im['SENS'].data plt.plot(blwave, blflux/blsens, color='black', alpha=0.5, linewidth=3) plt.plot(blwave, blflux/nd.shift(blsens, -0.5), color='purple', alpha=0.5) bl.compute_model(lam_spec=np.cast[float](sp['WAVELENGTH']), flux_spec=np.cast[float](sp['FLUX'])/1.e-17*0.92/bl.total_flux) bl.model[bl.im['SCI'].data == 0] = 0 w2, f2 = bl.optimal_extract(bl.model) plt.plot(w2, f2/blsens, 
color='green') plt.xlim(0.7e4,1.7e4) plt.ylim(10,500) plt.semilogy() model = unicorn.reduce.GrismModel('GD153-off+x-blue', direct='F105W', grism='G102', growx=1, growy=1, grow_factor=1) model.twod_spectrum(17, miny=-50) model.show_2d(savePNG=True) twod = unicorn.reduce.Interlace2D('GD153-off+x-blue_00017.2D.fits') w, f = twod.optimal_extract(twod.im['SCI'].data-twod.im['CONTAM'].data) s = twod.im['SENS'].data plt.plot(w, f/s, color='red', alpha=0.5, linewidth=3) model = unicorn.reduce.GrismModel('GD153-off+x-red', direct='F140W', grism='G141', growx=1, growy=1, grow_factor=1) model.twod_spectrum(18, miny=-50) model.show_2d(savePNG=True) twod = unicorn.reduce.Interlace2D('GD153-off+x-red_00018.2D.fits') w, f = twod.optimal_extract(twod.im['SCI'].data-twod.im['CONTAM'].data) s = twod.im['SENS'].data plt.plot(w, f/s, color='red', alpha=0.5, linewidth=3)
def sdss_j110501(): import research.pab.align research.pab.align.coarse_align( image='SDSS-J110501.98+594103.5-60-345-F775W_drc_sci.fits', ref='SDSS-J110501.98+594103.5-1A-074-F110W_drz_sci.fits') research.pab.align.coarse_align( image='SDSS-J110504.41+593957.3-65-066-F775W_drc_sci.fits', ref='SDSS-J110501.98+594103.5-1A-074-F110W_drz_sci.fits') for root in [ 'SDSS-J110501.98+594103.5-60-345', 'SDSS-J110504.41+593957.3-65-066' ]: im_ref = pyfits.open('%s-F775W_drc_sci.fits' % (root)) asn_files = glob.glob('%s*asn.fits' % (root)) for asn_file in asn_files: asn = threedhst.utils.ASNFile(asn_file) for exp in asn.exposures: print '%s -> %s' % (asn_file, exp) flc = pyfits.open('%s_flc.fits' % (exp), mode='update') mask = flc['DQ'].data == 0 slx = slice(733, 948) sly = slice(775, 943) window = flc['SCI'].data[sly, slx] bg = np.median(window[mask[sly, slx]]) flc['SCI'].data -= bg flc[1].header['CRVAL1'] += im_ref[0].header['DELTARA'] flc[1].header['CRVAL2'] += im_ref[0].header['DELTADE'] flc[1].header['DELTARA'] = im_ref[0].header['DELTARA'] flc[1].header['DELTADE'] = im_ref[0].header['DELTADE'] flc.flush() ### Drizzle IR images drizzlepac.astrodrizzle.AstroDrizzle( 'SDSS-J110501.98+594103.5-1A-074-F110W_asn.fits', output='SDSS-J110501-F110W', static=False, skysub=False, driz_separate=False, driz_sep_wcs=False, median=False, blot=False, driz_cr=False, driz_combine=True, final_wht_type='IVM', clean=True, final_wcs=True, final_refimage=None, final_rot=0, final_scale=0.128, final_pixfrac=1, context=False, resetbits=0, final_bits=576, preserve=False) drizzlepac.astrodrizzle.AstroDrizzle( 'SDSS-J110501.98+594103.5-1A-074-F132N_asn.fits', output='SDSS-J110501-F132N', static=False, skysub=False, driz_separate=False, driz_sep_wcs=False, median=False, blot=False, driz_cr=False, driz_combine=True, final_wht_type='IVM', clean=True, final_wcs=True, final_refimage='SDSS-J110501-F110W_drz_sci.fits', final_pixfrac=1, context=False, resetbits=0, final_bits=576, preserve=False) for filter in ['F438W', 'F673N', 'F775W']: exposures = [] asn_files = glob.glob('SDSS*%s_asn.fits' % (filter)) for asn_file in asn_files: asn = threedhst.utils.ASNFile(asn_file) exposures.extend(asn.exposures) flc_files = ['%s_flc.fits' % (exp) for exp in exposures] drizzlepac.astrodrizzle.AstroDrizzle( flc_files, output='SDSS-J110501-%s' % (filter), static=False, skysub=False, driz_separate=False, driz_sep_wcs=False, median=False, blot=False, driz_cr=False, driz_combine=True, final_wht_type='IVM', clean=True, final_wcs=True, final_refimage='SDSS-J110501-F110W_drz_sci.fits', final_pixfrac=1, context=False, resetbits=0, final_bits=576, preserve=False) ######### Voronoi bins pab = pyfits.open('SDSS-J110501-F132N_drz_sci.fits') pab_wht = pyfits.open('SDSS-J110501-F132N_drz_wht.fits') con = pyfits.open('SDSS-J110501-F110W_drz_sci.fits') con_wht = pyfits.open('SDSS-J110501-F110W_drz_wht.fits') ### top slx, sly = slice(600, 953), slice(805, 1054) ### bottom slx, sly = slice(430, 814), slice(334, 508) slx, sly = slice(420, 953), slice(330, 1054) root = 'NGC6670-15-167' slx, sly = slice(605, 1369), slice(641, 1080) root = 'NGC7592-51-247' slx, sly = slice(702, 1222), slice(688, 1316) pab = pyfits.open('%s-F132N_drz_sci.fits' % (root)) pab_wht = pyfits.open('%s-F132N_drz_wht.fits' % (root)) con = pyfits.open('%s-F110W_drz_sci.fits' % (root)) con_wht = pyfits.open('%s-F110W_drz_wht.fits' % (root)) wcs = stwcs.wcsutil.HSTWCS(pab) slwcs = wcs.slice([sly, slx]) # pab_sub = (pab[0].data-con[0].data)[sly, slx] # rms_sub = np.sqrt(1/pab_wht[0].data + 
1/con_wht[0].data)[sly, slx] # rms_sub[~np.isfinite(rms_sub)] = 1e10 pab_sub = (pab[0].data)[sly, slx] rms_sub = np.sqrt(1 / pab_wht[0].data)[sly, slx] rms_scl = 1 # SN = 5 rnd_scl = 1. pyfits.writeto('pab_sci.fits', data=pab_sub, header=slwcs.to_header(), clobber=True) pyfits.writeto('pab_rms.fits', data=rms_sub * rms_scl, header=slwcs.to_header(), clobber=True) import research.pab research.pab.voronoi.go(cfgfile=None, fluximagefile='pab_sci.fits', fluxerrorfile='pab_rms.fits', distmpc=0.1, snthresh=SN, cvt_iters=10, max_z_kpc=100000.0, max_bin_size_pix=1000, output_prefix='pab_voronoi') #research.pab.voronoi.go(cfgfile='voronoi.cfg', output_prefix='pab_voronoi_cfg') v = pyfits.open('pab_voronoi.fits') mask = (np.isfinite(pab_sub) & (rms_sub < 10)) omask = np.abs(pab_sub / rms_sub) > SN files = glob.glob('SDSS-J110501-F*sci.fits') files = glob.glob('NGC6670*-F*sci.fits') files = glob.glob('NGC7592*-F*sci.fits') for file in files: im = pyfits.open(file) im_wht = pyfits.open(file.replace('_sci', '_wht')) wcs_im = stwcs.wcsutil.HSTWCS(im, ext=0) xy = wcs_im.all_world2pix([pab[0].header['CRVAL1']], [pab[0].header['CRVAL2']], 1) dx = pab[0].header['CRPIX1'] - xy[0][0] dy = pab[0].header['CRPIX2'] - xy[1][0] im_sub = nd.shift(im[0].data, (dy, dx))[sly, slx] wht_sub = nd.shift(im_wht[0].data, (dy, dx))[sly, slx] im_rms = 1 / np.sqrt(wht_sub) im_rms[~np.isfinite(im_rms)] = 1e10 binned_sci, binned_var, binned_area = research.pab.voronoi.rebin_output( im_sub, (im_rms)**2, v[0].data + 1, mask=mask, ds9=None) binned_sci[omask] = im_sub[omask] binned_var[omask] = im_rms[omask]**2 h = slwcs.to_header() filter = file.split('_')[0][-5:] h['FILTER'] = filter h['PHOTFLAM'] = im[0].header['PHOTFLAM'] pyfits.writeto('sub_%s_sci.fits' % (filter), data=binned_sci, header=h, clobber=True) pyfits.writeto('sub_%s_var.fits' % (filter), data=binned_var, header=h, clobber=True) print filter # files = glob.glob('sub*sci.fits') sub = {} for file in files: print file im = pyfits.open(file) sub[im[0]. header['FILTER']] = im[0].data * im[0].header['PHOTFLAM'] / 4.e-19 pab = sub['F132N'] - sub['F110W'] * 0.85 ha = sub['F673N'] - (775. - 438.) / (775. - 438) * (sub['F775W'] / sub['F438W']) ha = sub['F673N'] - sub['F775W'] decrement = ha / pab / 10. dec_mask = (pab > 0.05) & np.isfinite(decrement) decrement[~dec_mask] = -10
def make_det_mask(evthdr): import os from . import caldb from . import env # grade weighting from NGC253 002 obs. GRADE_WT = [ 1.00000, 0.124902, 0.117130, 0.114720, 0.118038, 0.0114296, 0.0101738, 0.0113617, 0.0122017, 0.0157910, 0.0144079, 0.0145691, 0.0149934, 0.00165462, 0.00194312, 0.00156128, 0.00143400, 0.00210433, 0.00180735, 0.00140006, 0.00169704, 0.00189220, 0.00160371, 0.00150188, 0.00168007, 0.000296983, 0.000364864 ] fpm = evthdr['INSTRUME'] obsutctime = evthdr['DATE-OBS'] cal = caldb.CalDB(os.environ[env._CALDB_ENV]) pixpospath = cal.getPIXPOS(fpm, 'DET0', obsutctime) pixposf = pf.open('%s/%s' % (os.environ[env._CALDB_ENV], pixpospath)) pixmap = np.full((360, 360), -1, dtype=np.int32) detnum = np.full((360, 360), -1, dtype=np.int32) allpdf = np.zeros((360, 360), dtype=np.float64) for ext in pixposf: if (('EXTNAME' not in ext.header) or ('PIXPOS' not in ext.header['EXTNAME']) or ('DETNAM' not in ext.header)): continue idet = int(ext.header['DETNAM'].replace('DET', '')) pixpos = ext.data # Store references to columns pp_det1x = pixpos['REF_DET1X'] pp_det1y = pixpos['REF_DET1Y'] pp_rawx = pixpos['RAWX'] pp_rawy = pixpos['RAWY'] pp_grade = pixpos['GRADE'] pp_pdf = pixpos['PDF'] for ix in np.arange(32): for iy in np.arange(32): # Get array indices where all of the following are True ii = np.where((pp_det1x != -1) * (pp_rawx == ix) * (pp_rawy == iy) * (pp_grade <= 26))[0] thispdf = np.zeros((360, 360), dtype=np.float64) for i in ii: if not np.isnan(pp_pdf[i]).any(): # No nan value in PDF ref_x = pp_det1x[i] ref_y = pp_det1y[i] thispdf[ref_y:ref_y + 7, ref_x:ref_x + 7] += (pp_pdf[i] * GRADE_WT[pp_grade[i]]) ii = np.where(thispdf > allpdf) if len(ii) > 0: allpdf[ii] = thispdf[ii] pixmap[ii] = ix + iy * 32 detnum[ii] = idet pixmap = shift(pixmap, [-1, -1], mode='wrap', prefilter=False, order=1) detnum = shift(detnum, [-1, -1], mode='wrap', prefilter=False, order=1) return pixmap, detnum
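# make_det_mask finishes by realigning the pixel and detector maps with
# ndimage.shift in 'wrap' mode. A small standalone look at that call (hedged
# sketch; exact edge values depend on scipy's 'wrap' boundary handling):
import numpy as np
from scipy.ndimage import shift

a = np.arange(9.0).reshape(3, 3)
print(shift(a, [-1, -1], mode='wrap', prefilter=False, order=1))
# interior entries move up and left by one pixel; the trailing row and
# column are filled from the wrapped-around side of the array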
def make_mask(self, filename, res, num_attr_masks=5, debug=False): data = np.load(filename, encoding='latin1', allow_pickle=True) region_db = None if self.opt.embed_attributes and self.coco_match_attributes is not None: image_id = int(os.path.basename(filename).split('.')[0]) if not self.coco_match_attributes: region_db = self.vg_attributes[ self.indices[image_id]]['region_db'] elif image_id in self.coco_to_vg: image_id = self.coco_to_vg[image_id] region_db = self.vg_attributes[ self.indices[image_id]]['region_db'] def parse(key): d = data[key] if len(d.shape) == 0: return d.item() else: return d boxes = parse('boxes') segms = parse('segments') classes = parse('classes') parsed_res = parse('resolution') out_mask = np.empty((res[1], res[0]), dtype=np.uint16) out_mask[:] = 0 out_mask_bg = np.empty((res[1], res[0]), dtype=np.uint16) out_mask_bg[:] = 0 out_mask_fg = np.empty((res[1], res[0]), dtype=np.uint16) out_mask_fg[:] = 0 attr_masks = [] for i in range(num_attr_masks): attr_mask = np.zeros((res[1], res[0]), dtype=np.uint16) attr_masks.append(attr_mask) attr_masks_bg = [] for i in range(num_attr_masks): attr_mask_bg = np.zeros((res[1], res[0]), dtype=np.uint16) attr_masks_bg.append(attr_mask_bg) attr_masks_fg = [] for i in range(num_attr_masks): attr_mask_fg = np.zeros((res[1], res[0]), dtype=np.uint16) attr_masks_fg.append(attr_mask_fg) def iou_mask(a, b): assert a.shape == b.shape intersection = np.sum(a & b) if intersection == 0: return 0 union = np.sum(a | b) return intersection / union def iou_bb(boxA, boxB): xA = max(boxA[0], boxB[0]) yA = max(boxA[1], boxB[1]) xB = min(boxA[2] + boxA[0], boxB[2] + boxB[0]) yB = min(boxA[3] + boxA[1], boxB[3] + boxB[1]) interArea = max(0, xB - xA) * max(0, yB - yA) boxAArea = boxA[2] * boxA[3] boxBArea = boxB[2] * boxB[3] iou = interArea / (boxAArea + boxBArea - interArea) return iou def bb_intersect(boxA, boxB): xA = max(boxA[0], boxB[0]) yA = max(boxA[1], boxB[1]) xB = min(boxA[2] + boxA[0], boxB[2] + boxB[0]) yB = min(boxA[3] + boxA[1], boxB[3] + boxB[1]) interArea = max(0, xB - xA) * max(0, yB - yA) return interArea > 1e-6 def diff_mask(parent, child): assert parent.shape == child.shape intersection = np.sum(parent & child) if intersection == 0: return 0 child_area = np.sum(child) return intersection / child_area thresh = self.threshold if boxes is not None: scores = boxes[:, -1] valid = scores > thresh original_classes = classes.copy() # Remap classes as needed for i in range(len(classes)): if classes[i] in self.remapped_classes: classes[i] = self.remapped_classes[classes[i]] for i, cl in enumerate(classes): if cl not in self.remapped_idx: valid[i] = False if segms is not None and len(segms) > 0: masks = mask_util.decode([{ 'counts': bytes(x), 'size': parsed_res } for x in segms]) # Perform NMS valid_indices, = np.where(valid) valid_indices = valid_indices[np.argsort( -scores[valid_indices])] for j1, i1 in enumerate(valid_indices): if not valid[i1]: continue bb1 = boxes[i1, :4].copy() bb1[2:] -= bb1[:2] for j2, i2 in enumerate(valid_indices): if j2 > j1 and valid[i2]: assert scores[i1] >= scores[i2], ( scores[i1], scores[i2]) # Check if sorted bb2 = boxes[i2, :4].copy() bb2[2:] -= bb2[:2] overlap_bb = iou_bb(bb1, bb2) if overlap_bb > 1e-3: m1 = masks[:, :, i1] m2 = masks[:, :, i2] overlap_mask = iou_mask(m1, m2) if overlap_mask > 0.7: if scores[i1] > scores[i2]: valid[i2] = False classes = np.array(classes)[valid] original_classes = np.array(original_classes)[valid] masks = masks[:, :, valid] scores = boxes[valid, -1] boxes = boxes[valid, :4] 
assert masks.shape[:2] == out_mask.shape, (filename, masks.shape, out_mask.shape) mask_ref = np.zeros(masks.shape[:2], dtype=int) if debug: mask_debug = np.empty(masks.shape[:2], dtype=object) mask_debug[:] = None areas = np.sum(masks, axis=(0, 1)) draw_indices = np.argsort(-areas) drawn_masks = [] drawn_masks_indices = [] drawn_masks_scores = [] scene_graph = [] render_queue = [] for idx in draw_indices: class_name = vg_labels[classes[idx]] original_class_name = vg_labels[original_classes[idx]] if self.class_hook is not None: bool_mask = masks[:, :, idx].astype('bool') new_class, delta_pos = self.class_hook( idx, class_name, bool_mask) if new_class is None: continue # Do not draw this object classes[idx] = vg_labels.index(new_class) class_name = new_class if classes[idx] in self.remapped_idx: bool_mask = masks[:, :, idx].astype('bool') if self.class_hook is not None: bool_mask = shift(bool_mask, delta_pos, order=0) if np.count_nonzero(bool_mask) == 0: continue # Add node to scene graph is_foreground = False candidate = scene_graph bb1 = boxes[idx, :4].copy() bb1[2:] -= bb1[:2] def add_to_scene_graph(): nonlocal candidate, is_foreground added = False for cur_node in candidate: existing_mask = cur_node['mask'] bb2 = cur_node['bbox'].copy() bb2[2:] -= bb2[:2] if not bb_intersect(bb1, bb2): continue # Early out overlap_diff = diff_mask( existing_mask, bool_mask) if overlap_diff > 0.7: #assert not added candidate = cur_node['children'] is_foreground = cur_node['is_foreground'] add_to_scene_graph() added = True add_to_scene_graph() if is_foreground and self.remapped_idx[ classes[idx]] not in self.thing_classes: if self.remapped_idx[ classes[idx]] in self.always_stuff: # Illegal class (e.g. "grass" inside "zebra") -> do not draw continue # Determine type if not is_foreground: is_foreground = self.remapped_idx[ classes[idx]] in self.thing_classes candidate.append({ 'mask': bool_mask, 'class': classes[idx], 'children': [], 'score': scores[idx], 'bbox': boxes[idx], 'idx': idx, 'original_class_name': original_class_name, 'is_foreground': is_foreground, }) render_queue.append(candidate[-1]) for element in render_queue: bool_mask = element['mask'] cl = element['class'] score = element['score'] idx = element['idx'] if element['is_foreground']: out_mask_fg[bool_mask] = self.remapped_idx[cl] else: assert self.remapped_idx[cl] <= len( self.stuff_classes), self.remapped_idx[cl] out_mask_bg[bool_mask] = self.remapped_idx[cl] out_mask[bool_mask] = self.remapped_idx[cl] drawn_masks.append(bool_mask) drawn_masks_indices.append(self.remapped_idx[cl]) drawn_masks_scores.append(score) mask_ref[bool_mask] = len(drawn_masks) # Prepare attributes mask_attributes = [] if region_db is not None: mask_attributes = set() original_class_name = element['original_class_name'] if original_class_name in region_db: for region in region_db[original_class_name]: if 'attribute_synsets' in region: bb = region['x'], region['y'], region[ 'w'], region['h'] bb_adapted = element['bbox'].copy() bb_adapted[2:] -= bb_adapted[:2] overlap = iou_bb(bb_adapted, bb) if overlap > 0.5: mask_attributes.update( region['attribute_synsets']) #print('overlap {} {}'.format(original_class_name, overlap)) mask_attributes = list(mask_attributes) mask_attributes.sort( key=lambda x: -self.attribute_counter[x]) if self.attribute_hook is not None: self.attribute_hook(idx, original_class_name, mask_attributes) for i in range(num_attr_masks): if len(mask_attributes) > i and mask_attributes[ i] in self.attr_map: attr_masks[i][bool_mask] = self.attr_map[ 
mask_attributes[i]] if element['is_foreground']: attr_masks_fg[i][ bool_mask] = self.attr_map[ mask_attributes[i]] else: attr_masks_bg[i][ bool_mask] = self.attr_map[ mask_attributes[i]] else: attr_masks[i][bool_mask] = 0 if element['is_foreground']: attr_masks_fg[i][bool_mask] = 0 else: attr_masks_bg[i][bool_mask] = 0 if debug: tmp = mask_debug[bool_mask] tmp.fill((idx, mask_attributes)) mask_debug[bool_mask] = tmp #print(original_class_name, mask_attributes) if debug: return out_mask, out_mask_bg, out_mask_fg, mask_debug, attr_masks, attr_masks_bg, attr_masks_fg else: return out_mask, out_mask_bg, out_mask_fg, attr_masks, attr_masks_bg, attr_masks_fg
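# Standalone check of the box-IoU logic used inside make_mask (hedged).
# iou_bb is a closure there, so the same (x, y, w, h) arithmetic is repeated
# here by hand: two unit squares overlapping by half share an IoU of 1/3.
boxA = (0.0, 0.0, 1.0, 1.0)
boxB = (0.5, 0.0, 1.0, 1.0)
inter = (max(0.0, min(boxA[2] + boxA[0], boxB[2] + boxB[0]) - max(boxA[0], boxB[0]))
         * max(0.0, min(boxA[3] + boxA[1], boxB[3] + boxB[1]) - max(boxA[1], boxB[1])))
iou = inter / (boxA[2] * boxA[3] + boxB[2] * boxB[3] - inter)
print(iou)  # 0.3333...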
def propagate_translation(nodes, parent_delta): for cur_node in nodes: delta = (cur_node['delta_pos'][0] + parent_delta[0], cur_node['delta_pos'][1] + parent_delta[1]) if delta != (0, 0): cur_node['mask'] = shift(cur_node['mask'], delta, order=0) propagate_translation(cur_node['children'], delta)
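# Minimal demo of propagate_translation on a two-level scene graph (hedged;
# assumes numpy and the module's scipy.ndimage.shift import). The child
# accumulates its parent's delta before its mask is shifted.
import numpy as np

child = {'mask': np.eye(4, dtype=bool), 'delta_pos': (1, 0), 'children': []}
root = {'mask': np.eye(4, dtype=bool), 'delta_pos': (0, 1), 'children': [child]}
propagate_translation([root], (0, 0))
# root's mask is shifted by (0, 1); the child's by (1, 1)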
def infer(args): system_info.print_system_info() # Prepare model in_size = feature.get_input_dim(args.frame_size, args.context_size, args.input_transform) if args.model_type == "BLSTM": model = BLSTMDiarization(in_size=in_size, n_speakers=args.num_speakers, hidden_size=args.hidden_size, n_layers=args.num_lstm_layers, embedding_layers=args.embedding_layers, embedding_size=args.embedding_size) elif args.model_type == 'Transformer': if args.use_attractor: model = TransformerEDADiarization( in_size, n_units=args.hidden_size, n_heads=args.transformer_encoder_n_heads, n_layers=args.transformer_encoder_n_layers, dropout=0, attractor_encoder_dropout=args.attractor_encoder_dropout, attractor_decoder_dropout=args.attractor_decoder_dropout, ) else: model = TransformerDiarization( args.num_speakers, in_size, n_units=args.hidden_size, n_heads=args.transformer_encoder_n_heads, n_layers=args.transformer_encoder_n_layers, dropout=0) else: raise ValueError('Unknown model type.') serializers.load_npz(args.model_file, model) if args.gpu >= 0: gpuid = use_single_gpu() model.to_gpu() kaldi_obj = kaldi_data.KaldiData(args.data_dir) for recid in kaldi_obj.wavs: data, rate = kaldi_obj.load_wav(recid) Y = feature.stft(data, args.frame_size, args.frame_shift) Y = feature.transform(Y, transform_type=args.input_transform) Y = feature.splice(Y, context_size=args.context_size) Y = Y[::args.subsampling] out_chunks = [] with chainer.no_backprop_mode(), chainer.using_config('train', False): hs = None for start, end in _gen_chunk_indices(len(Y), args.chunk_size): Y_chunked = Variable(Y[start:end]) if args.gpu >= 0: Y_chunked.to_gpu(gpuid) hs, ys = model.estimate_sequential( hs, [Y_chunked], n_speakers=args.num_speakers, th=args.attractor_threshold, shuffle=args.shuffle) if args.gpu >= 0: ys[0].to_cpu() out_chunks.append(ys[0].data) if args.save_attention_weight == 1: att_fname = f"{recid}_{start}_{end}.att.npy" att_path = os.path.join(args.out_dir, att_fname) model.save_attention_weight(att_path) outfname = recid + '.h5' outpath = os.path.join(args.out_dir, outfname) if hasattr(model, 'label_delay'): outdata = shift(np.vstack(out_chunks), (-model.label_delay, 0)) else: max_n_speakers = max([o.shape[1] for o in out_chunks]) out_chunks = [ np.insert(o, o.shape[1], np.zeros((max_n_speakers - o.shape[1], o.shape[0])), axis=1) for o in out_chunks ] outdata = np.vstack(out_chunks) with h5py.File(outpath, 'w') as wf: wf.create_dataset('T_hat', data=outdata)
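# When the model was trained with a label delay, the stacked frame-wise
# posteriors are shifted back in time before saving. A standalone sketch of
# that correction (hedged):
import numpy as np
from scipy.ndimage import shift

posteriors = np.arange(12.0).reshape(6, 2)  # (frames, speakers)
label_delay = 2
corrected = shift(posteriors, (-label_delay, 0))
# row i of `corrected` holds the prediction originally made at frame
# i + label_delay; the trailing rows are zero-filled by default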
def astra_reconstruction(sinogram, center, angles=None, ratio=1.0, method="FBP_CUDA", num_iter=1, filter_name="hann", pad=None, apply_log=True): """ Wrapper of reconstruction methods implemented in the astra toolbox package. https://www.astra-toolbox.com/docs/algs/index.html Users must install Astra Toolbox before using this function. Parameters ---------- sinogram : array_like 2D array. Sinogram image. center : float Center of rotation. angles : array_like 1D array. List of angles (radian) corresponding to the sinogram. ratio : float To apply a circle mask to the reconstructed image. method : str Reconstruction algorithms. for CPU: 'FBP', 'SIRT', 'SART', 'ART', 'CGLS'. for GPU: 'FBP_CUDA', 'SIRT_CUDA', 'SART_CUDA', 'CGLS_CUDA'. num_iter : int Number of iterations if using iteration methods. filter_name : str Apply filter if using FBP method. Options: 'hamming', 'hann', 'lanczos', 'kaiser', 'parzen',... pad : int Padding to reduce the side effect of FFT. apply_log : bool Apply the logarithm function to the sinogram before reconstruction. Returns ------- array_like Square array. """ try: import astra except ImportError: print("!!!!!! Error !!!!!!!") print("You must install Astra Toolbox before using this function!") raise if apply_log is True: sinogram = -np.log(sinogram) if pad is None: pad = int(0.1 * sinogram.shape[1]) sinogram = np.pad(sinogram, ((0, 0), (pad, pad)), mode='edge') (nrow, ncol) = sinogram.shape if angles is None: angles = np.linspace(0.0, 180.0, nrow) * np.pi / 180.0 proj_geom = astra.create_proj_geom('parallel', 1, ncol, angles) vol_geom = astra.create_vol_geom(ncol, ncol) cen_col = (ncol - 1.0) / 2.0 sinogram = shift(sinogram, (0, cen_col - (center + pad)), mode='nearest') sino_id = astra.data2d.create('-sino', proj_geom, sinogram) rec_id = astra.data2d.create('-vol', vol_geom) if "CUDA" not in method: proj_id = astra.create_projector('line', proj_geom, vol_geom) cfg = astra.astra_dict(method) cfg['ProjectionDataId'] = sino_id cfg['ReconstructionDataId'] = rec_id if "CUDA" not in method: cfg['ProjectorId'] = proj_id if (method == "FBP_CUDA") or (method == "FBP"): cfg["FilterType"] = filter_name alg_id = astra.algorithm.create(cfg) astra.algorithm.run(alg_id, num_iter) recon = astra.data2d.get(rec_id) astra.algorithm.delete(alg_id) astra.data2d.delete(sino_id) astra.data2d.delete(rec_id) recon = recon[pad:ncol - pad, pad:ncol - pad] if ratio is not None: ncol0 = ncol - 2 * pad if ratio == 0.0: ratio = min(center, ncol0 - center) / (0.5 * ncol0) mask = util.make_circle_mask(ncol0, ratio) recon = recon * mask return recon
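# Usage sketch (hedged): reconstruct a flat dummy sinogram with the CPU FBP
# algorithm. Requires the Astra Toolbox to be installed; angles are given
# in radians, matching the docstring.
import numpy as np

nangles, width = 180, 64
sino = np.full((nangles, width), 0.5, dtype=np.float32)
angles = np.linspace(0.0, np.pi, nangles)
rec = astra_reconstruction(sino, center=width / 2.0 - 0.5, angles=angles, method='FBP')
print(rec.shape)  # (64, 64): padding is stripped before returning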
def test_shift(x): s = 10*np.random.uniform(-1, 1, 3) out1 = shift(x, s, interpolation="nearest") out2 = ndimage.shift(x, s, order=0, prefilter=False) return out1, out2
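# A hedged harness for test_shift: the custom nearest-neighbour `shift`
# wrapped by this test is assumed to be importable in this module's scope.
import numpy as np

x = np.zeros((8, 8, 8))
x[4, 4, 4] = 1.0
out1, out2 = test_shift(x)
print(np.abs(out1 - out2).max())  # ~0 when the two implementations agree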
def join_image(mat1, mat2, joint_width, side, norm=True, total_width=None): """ Join projection images or sinogram images. This is useful for fixing the problem of non-overlap between images. Parameters ---------- mat1 : array_like 2D array. Projection image or sinogram image. mat2 : array_like 2D array. Projection image or sinogram image. joint_width : float Width of the joint area between two images. side : {0, 1} Only two options: 0 or 1. It is used to indicate the overlap side respects to image 1. "0" corresponds to the left side. "1" corresponds to the right side. norm : bool Enable/disable normalization before joining. total_width : int, optional Final width of the joined image. Returns ------- array_like Stitched image. """ (nrow1, ncol1) = mat1.shape (nrow2, ncol2) = mat2.shape joint_int = int(np.floor(joint_width)) sub_pixel = joint_width - joint_int side = int(side) if sub_pixel > 0.0: if side == 1: mat1 = shift(mat1, (0, sub_pixel), mode='nearest') mat2 = shift(mat2, (0, -sub_pixel), mode='nearest') else: mat1 = shift(mat1, (0, -sub_pixel), mode='nearest') mat2 = shift(mat2, (0, sub_pixel), mode='nearest') if nrow1 != nrow2: raise ValueError("Two images are not at the same height!!!") total_width0 = ncol1 + ncol2 + joint_int if (total_width is None) or (total_width < total_width0): total_width = total_width0 mat_comb = np.zeros((nrow1, total_width0), dtype=np.float32) if side == 1: if norm is True: factor1 = np.mean(mat1[:, -3:]) factor2 = np.mean(mat2[:, :3]) mat2 = mat2 * factor1 / factor2 mat_comb[:, 0:ncol1] = mat1 mat_comb[:, (ncol1 + joint_int):total_width0] += mat2 list_mask = np.zeros(total_width0, dtype=np.float32) list_mask[ncol1 - 2:ncol1 + joint_int + 3] = 1.0 listx = np.where(list_mask < 1.0)[0] listy = np.arange(nrow1) mat = mat_comb[:, listx] finter = interpolate.interp2d(listx, listy, mat, kind='linear') listx_miss = np.where(list_mask > 0.0)[0] if len(listx_miss) > 0: mat_comb[:, listx_miss] = finter(listx_miss, listy) else: if norm is True: factor2 = np.mean(mat2[:, -3:]) factor1 = np.mean(mat1[:, :3]) mat2 = mat2 * factor1 / factor2 mat_comb[:, 0:ncol2] = mat2 mat_comb[:, (ncol2 + joint_int):total_width0] += mat1 list_mask = np.zeros(total_width0, dtype=np.float32) list_mask[ncol2 - 2:ncol2 + joint_int + 3] = 1.0 listx = np.where(list_mask < 1.0)[0] listy = np.arange(nrow1) mat = mat_comb[:, listx] finter = interpolate.interp2d(listx, listy, mat, kind='linear') listx_miss = np.where(list_mask > 0.0)[0] if len(listx_miss) > 0: mat_comb[:, listx_miss] = finter(listx_miss, listy) if total_width > total_width0: mat_comb = np.pad(mat_comb, ((0, 0), (0, total_width - total_width0)), mode='edge') return mat_comb
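# Usage sketch for join_image (hedged): rejoin two halves of a frame that
# were acquired with a known 4-pixel gap between their fields of view; the
# seam columns are filled by 2D linear interpolation.
import numpy as np

left = np.random.rand(32, 50).astype(np.float32)
right = np.random.rand(32, 50).astype(np.float32)
joined = join_image(left, right, joint_width=4.0, side=1, norm=True)
print(joined.shape)  # (32, 104): 50 + 50 + 4 columns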
def stitch_image(mat1, mat2, overlap, side, wei_mat1=None, wei_mat2=None, norm=True, total_width=None): """ Stitch projection images or sinogram images using a linear ramp. Parameters ---------- mat1 : array_like 2D array. Projection image or sinogram image. mat2 : array_like 2D array. Projection image or sinogram image. overlap : float Width of the overlap area between two images. side : {0, 1} Only two options: 0 or 1. It is used to indicate the overlap side respects to image 1. "0" corresponds to the left side. "1" corresponds to the right side. wei_mat1 : array_like, optional Weighting matrix used for image 1. wei_mat2 : array_like, optional Weighting matrix used for image 2. norm : bool, optional Enable/disable normalization before stitching. total_width : int, optional Final width of the stitched image. Returns ------- array_like Stitched image. """ (nrow1, ncol1) = mat1.shape (nrow2, ncol2) = mat2.shape overlap_int = int(np.floor(overlap)) sub_pixel = overlap - overlap_int if sub_pixel > 0.0: if side == 1: mat1 = shift(mat1, (0, sub_pixel), mode='nearest') mat2 = shift(mat2, (0, -sub_pixel), mode='nearest') else: mat1 = shift(mat1, (0, -sub_pixel), mode='nearest') mat2 = shift(mat2, (0, sub_pixel), mode='nearest') if nrow1 != nrow2: raise ValueError("Two images are not at the same height!!!") if (wei_mat1 is None) or (wei_mat2 is None): (wei_mat1, wei_mat2) = make_weight_matrix(mat1, mat2, overlap_int, side) total_width0 = ncol1 + ncol2 - overlap_int if (total_width is None) or (total_width < total_width0): total_width = total_width0 mat_comb = np.zeros((nrow1, total_width0), dtype=np.float32) if side == 1: if norm is True: factor1 = np.mean(mat1[:, -overlap_int:]) factor2 = np.mean(mat2[:, :overlap_int]) mat2 = mat2 * factor1 / factor2 mat_comb[:, 0:ncol1] = mat1 * wei_mat1 mat_comb[:, (ncol1 - overlap_int):total_width0] += mat2 * wei_mat2 else: if norm is True: factor2 = np.mean(mat2[:, -overlap_int:]) factor1 = np.mean(mat1[:, :overlap_int]) mat2 = mat2 * factor1 / factor2 mat_comb[:, 0:ncol2] = mat2 * wei_mat2 mat_comb[:, (ncol2 - overlap_int):total_width0] += mat1 * wei_mat1 if total_width > total_width0: mat_comb = np.pad(mat_comb, ((0, 0), (0, total_width - total_width0)), mode='edge') return mat_comb
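# make_weight_matrix is used above but not shown in this excerpt. A minimal
# sketch of linear-ramp weights consistent with the docstring, under that
# assumption; stitch_image builds these automatically when wei_mat1 and
# wei_mat2 are left as None.
import numpy as np

def make_ramp_weights_sketch(mat1, mat2, overlap_int, side):
    wei_mat1 = np.ones_like(mat1, dtype=np.float32)
    wei_mat2 = np.ones_like(mat2, dtype=np.float32)
    ramp = np.linspace(1.0, 0.0, overlap_int, dtype=np.float32)
    if side == 1:  # overlap at the right edge of mat1, left edge of mat2
        wei_mat1[:, -overlap_int:] = ramp
        wei_mat2[:, :overlap_int] = 1.0 - ramp
    else:  # overlap at the left edge of mat1, right edge of mat2
        wei_mat1[:, :overlap_int] = ramp[::-1]
        wei_mat2[:, -overlap_int:] = 1.0 - ramp[::-1]
    return wei_mat1, wei_mat2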
def augment(path):
    v, t, o, p, l = load_gzip(path)
    v = v[:, :, :res_shape[2]]
    if res_shape[2] == 3:
        v[:, :, 2] *= 255
    v_new = empty(res_shape, dtype="uint8")
    # t = concatenate([t,o],axis=1)
    for i in xrange(v.shape[0]):  # batch
        # body
        # cut = 0
        cut = rng.randint(7)
        # rot = 0
        rot = randi(20) / 10.
        trans_t = 0
        # trans_t = randi(2)
        trans_b = (0, trans_t, randi(3), randi(3))
        trans_h = (0, trans_t, randi(3), randi(3))
        # trans_b = (0,trans_t,0,0)
        # trans_h = (0,trans_t,0,0)
        # scale = ratio
        scale = ratio + randi(10) / 1000.
        if p[i] < 10:
            p[i] = 100
        ofs = p[i] * scale
        mid = v.shape[-1] / 2.
        sli = None
        if ofs < mid:
            start = int(round(mid - ofs))
            end = int(round(mid + ofs))
            sli = slice(start, end)
        t_ofs_ = rng.randint(32 - res_shape[3] + 1)
        # t_ofs_ = 32-res_shape[3]
        for j in xrange(v.shape[2]):  # maps
            for k in xrange(res_shape[3]):  # frames
                # body
                img = v[i, 0, j, k + t_ofs_]
                img = cut_img(img, cut)
                img = misc.imresize(img, (h, h))
                # if j==0: img = misc.imfilter(img,"find_edges")
                # img = lms(img)
                # img = cut_img(img,1)
                # img = misc.imrotate(img,rot)
                v_new[i, 0, j, k] = img
                # hand
                img = v[i, 1, j, k + t_ofs_]
                img = misc.imrotate(img, rot)
                img = img[sli, sli]
                img = misc.imresize(img, (h, h))
                # if j==0: img = lms(img)
                v_new[i, 1, j, k] = img
        v_new[i, 0] = ndimage.shift(v_new[i, 0], trans_b, order=0,
                                    mode='nearest', prefilter=False)
        if rot != 0:
            v_new[i, 0] = ndimage.rotate(v_new[i, 0], rot, axes=(-2, -1),
                                         order=1, reshape=False,
                                         mode='nearest')
        v_new[i, 1] = ndimage.shift(v_new[i, 1], trans_h, order=0,
                                    mode='nearest', prefilter=False)
        # t[i] = ndimage.shift(t[i],(0,trans_t), order=0, mode='nearest',
        #                      prefilter=False)
    # v_new = add_noise(v_new)
    # t = add_noise_traj(t)
    return v_new, (t, o, p), l
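# randi and rng are used by augment but not defined in this excerpt. Given
# that rot = randi(20) / 10. must yield small signed rotations, a plausible
# reading is a symmetric random integer (helper name and range assumed):
import numpy as np

rng = np.random.RandomState(0)  # module-level RNG assumed by augment

def randi(n):
    # uniform integer in [-n, n]: randi(20)/10. would give rotations in
    # [-2.0, 2.0] degrees, randi(3) per-axis shifts in [-3, 3] pixels
    return rng.randint(-n, n + 1)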
def sim_sameField(location, mode='moffat', numIms=100, bkg_mag=22.5,
                  fwhm_min=3, fwhm_max=6, rot_min=-2.5, rot_max=2.5,
                  shift_min=-2, shift_max=2, scale_mult=(0, 1.5),
                  scale_add=(-20, 50), zero_point=25):
    '''Test **OASIS**'s ability to handle frame-by-frame variations in
    astronomical data and filter out false-positive sources. The procedure of
    the simulation is as follows:

    1. Copies a random science image from the specified dataset to the
       **simulations** directory.
    2. A source catalog of the chosen science image is made, containing
       information on each source's centroid location and total flux.
    3. Using this source catalog, simulations of the chosen science image
       are made, all with constant source flux and location, but with
       different backgrounds, seeing, and pointing.
    4. The set of simulated images is sent through the **OASIS Pipeline**.
    5. A low number of detected sources signifies a successful simulation.
       There are no variable objects in the simulated images, so ideally
       zero sources should be detected by **OASIS**.

    :param str location: Path of the data file tree (contains the
        **configs**, **data**, **psf**, **residuals**, **sources**,
        **templates** directories). Use a comma-separated list for mapping
        to multiple datasets.
    :param str mode: Simulation mode, i.e. the method by which simulated
        images are made. All images are given a uniform background, then
        smeared according to Poisson statistics.

        * *moffat* (default): Sources are convolved with a 2D Moffat kernel.
        * *gauss*: Sources are convolved with a symmetric 2D Gaussian kernel.
        * *real*: The actual PSF model of the chosen science image is used
          as the convolution kernel.
        * *sky*: The AstrOmatic program ``SkyMaker`` (Bertin) is used to
          make the simulated images.

    :param int numIms: Number of simulated images to make (default 100).
    :param float bkg_mag: Average background level in mags. Actual simulated
        background levels are chosen to be a random value within the
        interval :math:`[bkg\_mag-1.5, bkg\_mag+1.5]`.
    :param float fwhm_min: Minimum FWHM of simulated images in pixels.
    :param float fwhm_max: Maximum FWHM of simulated images in pixels.
    :param float rot_min: Lower bound on the angle of rotation in degrees.
    :param float rot_max: Upper bound on the angle of rotation in degrees.
    :param float shift_min: Lower bound on the (X,Y) shift in pixels.
    :param float shift_max: Upper bound on the (X,Y) shift in pixels.
    :param tuple scale_mult: Interval of acceptable multiplicative scale
        factors.
    :param tuple scale_add: Interval of acceptable additive scale factors.
    :param float zero_point: Zero point magnitude.

    :returns: Standard **OASIS Pipeline** output: residual frames located in
        **residuals** and source catalogs located in **sources**.
    '''
    ref_im = glob.glob(location + '/data/*_ref_A_.fits')
    if os.path.exists(location) == False:
        print("-> Error: Problem with path name(s)-- make sure paths exist "
              "and are entered correctly\n-> Exiting...")
        sys.exit()
    if len(ref_im) != 1:
        print("-> Error: Problem with number of reference images"
              "\n-> Exiting...\n")
        sys.exit()
    ref_im = ref_im[0]
    ref_fwhm = fwhm(ref_im)
    path_splits = ref_im.split('/')
    image_name = path_splits[-1]
    sim_loc = location.replace('targets', 'simulations')
    len_loc = len(location.split('/'))
    tar = path_splits[len_loc + 2]
    copy_to_sim(tar, image=ref_im, mode='samefield')
    ref_psf = glob.glob("%s/psf/*_ref_A_.psf" % (sim_loc))
    if len(ref_psf) != 1:
        print("-> Error: Problem with number of reference PSF files"
              "\n-> Exiting...\n")
        sys.exit()
    ref_psf = ref_psf[0]
    try:
        clear_contents(sim_loc)
    except:
        pass
    images = glob.glob("%s/data/*.fits" % (sim_loc))
    ref_im_sim = ref_im.replace("targets", "simulations")
    # delete all original images except the reference
    for i in images:
        name = i.split('/')[-1]
        if name != image_name:
            os.remove(i)
    # create configs directory if none exists
    create_configs(sim_loc)
    # make source catalog of the reference using SExtractor
    sim_config = "%s/configs/default_sim.sex" % (sim_loc)
    sim_params = "%s/configs/default_param_sim.sex" % (sim_loc)
    with open(sim_config, 'r') as conf:
        lines = conf.readlines()
    lines[6] = "CATALOG_NAME" + " " + "%s/data/reference.cat" % (sim_loc) + "\n"
    lines[9] = "PARAMETERS_NAME" + " " + sim_params + "\n"
    lines[22] = "FILTER_NAME" + " " + "%s/configs/default.conv" % (sim_loc) + "\n"
    lines[70] = "SEEING_FWHM" + " " + str(ref_fwhm) + "\n"
    lines[127] = "PSF_NAME" + " " + ref_psf + "\n"
    with open(sim_config, 'w') as conf_write:
        conf_write.writelines(lines)
    os.system("sextractor %s[0] -c %s" % (ref_im_sim, sim_config))
    # extract x_pos, y_pos, and fluxes from the SExtractor catalog
    ref_cat = "%s/data/reference.cat" % (sim_loc)
    with open(ref_cat, 'r') as cat:
        cat_lines = cat.readlines()
    # get the simulated image's metadata
    ref_hdu = fits.open(ref_im_sim)
    ref_data = ref_hdu[0].data
    ref_header = ref_hdu[0].header
    ref_mask = ref_hdu[1].data
    try:
        weight_check = fits.getval(ref_im_sim, 'WEIGHT')
    except:
        weight_check = 'N'
    if weight_check == 'Y':
        ref_mask = (ref_mask - 1) * -1
    ref_mask = ref_mask.astype(np.int64)
    ref_hdu.close()
    from astropy.stats import sigma_clipped_stats
    mean, median, std = sigma_clipped_stats(ref_data, sigma=3.0)
    # extract the simulated image's source information from the catalog
    x_pos = []
    y_pos = []
    flux = []
    sources = {}
    for c in cat_lines:
        splits = c.split()
        if splits[0] != '#':
            flux.append(float(splits[0]))
            x_pos.append(round(float(splits[3])))
            y_pos.append(round(float(splits[4])))
            sources.update({float(splits[0]):
                            (round(float(splits[3])),
                             round(float(splits[4])))})
    flux_ordered = sorted(sources)
    flux_iter = int(round(len(flux) * 0.99))
    flux_sim = flux_ordered[flux_iter]
    xy_sim = sources[flux_sim]
    # if mode is set to use SkyMaker for the simulations, configure SkyMaker
    if mode == 'sky':
        mags = []
        for f in flux:
            mags.append(28 - np.log(f))
        with open("%s/configs/sky_list.txt" % (sim_loc), "w+") as sky_list:
            for i in range(len(flux)):
                sky_list.write("100 %.3f %.3f %.3f\n"
                               % (x_pos[i], y_pos[i], mags[i]))
    # get pixel scale of the reference image
    pixscale = float(ref_header['PIXSCALE'])
    # define oversampling
    oversample = pixscale * 25
    # define sky.config location
    sky_config = "%s/configs/sky.config" % (sim_loc)
    # start making fake images
    print("\n-> Making simulated images...")
    for n in tqdm(range(numIms)):
        # define image name
        if n == 0:
            image_name = '%s/data/%d_ref_A_.fits' % (sim_loc, n)
        else:
            image_name = '%s/data/%d_N_.fits' % (sim_loc, n)
        # for each image: make sources with a random FWHM between
        # (fwhm_min, fwhm_max), rotate and shift, add a random background,
        # Poisson smear, and rescale linearly
        # define FWHM of the simulation
        image_fwhm = ((fwhm_max - fwhm_min) * np.random.random()) + fwhm_min
        # based on the chosen mode, create the corresponding convolution
        # kernel and make the simulated image
        if mode != 'sky':
            if mode == 'moffat':
                moffat_kernel_1 = Moffat2DKernel(
                    gamma=make_stars.get_moffat_gamma(image_fwhm), alpha=7)
                moffat_kernel_2 = Moffat2DKernel(
                    gamma=make_stars.get_moffat_gamma(image_fwhm), alpha=2)
                conv_kernel = (0.8 * moffat_kernel_1) + (0.2 * moffat_kernel_2)
            elif mode == 'gauss':
                gaussian_kernel_1 = Gaussian2DKernel(
                    x_stddev=(image_fwhm / 2.355),
                    y_stddev=(image_fwhm / 2.355))
                gaussian_kernel_2 = Gaussian2DKernel(
                    x_stddev=((image_fwhm * 2) / 2.355),
                    y_stddev=((image_fwhm * 2) / 2.355))
                conv_kernel = (0.9 * gaussian_kernel_1) + \
                              (0.1 * gaussian_kernel_2)
            elif mode == 'real':
                conv_kernel = get_first_model(ref_im)
                try:
                    conv_kernel /= np.sum(conv_kernel)
                except:
                    pass
            flux_variable = np.array(flux) * np.random.random() * 2
            image = make_stars.make_image(ref_data.shape[0],
                                          ref_data.shape[1],
                                          x_loc=y_pos, y_loc=x_pos,
                                          fluxes=flux_variable,
                                          psf=[conv_kernel])
        # if mode is set to 'sky', use SkyMaker to make the simulated image
        elif mode == 'sky':
            bkg_Mag = (1.5 * np.random.random()) + bkg_mag
            image_fwhm_arcsec = image_fwhm * pixscale
            with open(sky_config, 'r') as sky:
                sky_lines = sky.readlines()
            sky_lines[6] = "IMAGE_NAME" + " " + image_name + "\n"
            sky_lines[7] = "IMAGE_SIZE" + " " + str(
                "%d, %d" % (ref_data.shape[1], ref_data.shape[0])) + "\n"
            sky_lines[19] = "SATUR_LEVEL" + " " + str(ref_header['SATURATE']) + "\n"
            sky_lines[21] = "EXPOSURE_TIME" + " " + str(ref_header['EXPTIME']) + "\n"
            sky_lines[26] = "PIXEL_SIZE" + " " + str(pixscale) + "\n"
            sky_lines[34] = "SEEING_FWHM" + " " + str(image_fwhm_arcsec) + "\n"
            sky_lines[37] = "PSF_OVERSAMP" + " " + str(oversample) + "\n"
            sky_lines[65] = "BACK_MAG" + " " + str(bkg_Mag) + "\n"
            with open(sky_config, 'w') as sky:
                sky.writelines(sky_lines)
            os.system("sky %s/configs/sky_list.txt -c %s"
                      % (sim_loc, sky_config))
            try:
                os.remove("%s/data/%s.list" % (sim_loc, image_name[:-5]))
            except:
                pass
            image = fits.getdata(image_name)
        else:
            print("-> Error: Please enter a valid mode (gauss, moffat, sky, "
                  "real)\n-> Exiting...")
            sys.exit()
        # now warp each simulation: rotate by a random angle in
        # [rot_min, rot_max], then shift by a random offset in
        # [shift_min, shift_max] (the reference, n == 0, is left unwarped)
        if n != 0:
            # define initial mask for each simulation
            Mask = np.zeros(image.shape)
            rot_angle = ((rot_max - rot_min) * np.random.random()) + rot_min
            dx = (shift_max - shift_min) * np.random.random() + shift_min
            dy = (shift_max - shift_min) * np.random.random() + shift_min
            image = rotate(image, rot_angle, reshape=False)
            image = shift(image, [dx, dy])
            # warp the bad-pixel mask identically; cval=1 flags pixels
            # brought in from outside the frame as bad
            Mask = rotate(ref_mask, rot_angle, reshape=False, cval=1)
            Mask = shift(Mask, [dx, dy], cval=1)
        else:
            Mask = ref_mask
        # for non-SkyMaker simulations, add a random background,
        # Poisson smear the image, and rescale it
        if mode != 'sky':
            # add constant background
            bkg_loc = 2.512 ** (zero_point - bkg_mag)
            bkg_scl = ((std + 5) - (std - 5)) * np.random.random() + (std - 5)
            bkg = np.random.normal(loc=bkg_loc, scale=bkg_scl,
                                   size=image.shape)
            image = np.add(image, bkg)
            # Poisson smear; negative pixels are handled separately since
            # np.random.poisson requires non-negative rates
            negative_image = np.zeros(image.shape)
            negative_image[:] = image[:]
            image[image < 0] = 0
            negative_image[negative_image > 0] = 0
            image = np.random.poisson(image)
            image = image.astype(np.float64)
            negative_image *= -1
            negative_image = np.random.poisson(negative_image)
            negative_image = negative_image.astype(np.float64)
            negative_image *= -1
            image += negative_image
            # rescale image linearly
            a = ((scale_mult[1] - scale_mult[0]) * np.random.random()) \
                + scale_mult[0]
            b = ((scale_add[1] - scale_add[0]) * np.random.random()) \
                + scale_add[0]
            image *= a
            image += b
        # write the new image to the data folder in the target's
        # simulations folder
        newHDUData = fits.PrimaryHDU(image, header=ref_header)
        newHDUMask = fits.ImageHDU(Mask)
        newHDUList = fits.HDUList([newHDUData, newHDUMask])
        newHDUList.writeto(image_name, overwrite=True)
        newHDU = fits.open(image_name, mode='update')
        (newHDU[0].header).set('WEIGHT', 'N')
        (newHDU[0].header).set('SCALED', 'N')
        newHDU.close()
    os.system("mv %s %s" % (ref_im_sim, sim_loc))
    os.system("mv %s %s" % (ref_psf, sim_loc))
    os.system("mv %s.cat %s" % (ref_psf[:-4], sim_loc))
    if mode == 'sky':
        sim_lists = glob.glob("%s/data/*.list" % (sim_loc))
        for sl in sim_lists:
            os.remove(sl)
    pipeline.pipeline_run_sim(sim_loc, sim=False)
    print(flux_iter, flux_sim, xy_sim)
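# The per-image warp used in sim_sameField, exercised in isolation with
# synthetic inputs: rotate about the center, then shift, applying the same
# transform to the bad-pixel mask with cval=1 so border pixels that enter
# the frame are flagged as bad (all values below are made up):
import numpy as np
from scipy.ndimage import rotate, shift

rng = np.random.default_rng(42)
image = rng.poisson(100.0, size=(256, 256)).astype(np.float64)
mask = np.zeros(image.shape)  # 0 = good pixel, 1 = bad pixel

rot_angle = 1.3      # degrees, within the default [rot_min, rot_max]
dx, dy = 0.7, -1.2   # pixels, within the default [shift_min, shift_max]

warped = shift(rotate(image, rot_angle, reshape=False), [dx, dy])
warped_mask = shift(rotate(mask, rot_angle, reshape=False, cval=1),
                    [dx, dy], cval=1)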
def glcm(self):
    """
    Creation of the grey level co-occurrence matrix. The neighbourhood
    distance is set to 1 in this instance. All neighbourhood shifts are
    calculated for each modality.

    :return: multi_mod_glcm, a list of m (number of modalities) matrices
        of size bin x bin x neigh
    """
    # the null shift followed by the 26 unit shifts of the 3D neighbourhood
    shifts = [[0, 0, 0],
              [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0],
              [0, 0, 1], [0, 0, -1],
              [1, 1, 0], [-1, -1, 0], [-1, 1, 0], [1, -1, 0],
              [0, 1, 1], [0, -1, -1], [0, -1, 1], [0, 1, -1],
              [1, 0, 1], [-1, 0, -1], [-1, 0, 1], [1, 0, -1],
              [1, 1, 1], [-1, 1, -1], [-1, 1, 1], [1, 1, -1],
              [1, -1, 1], [-1, -1, -1], [-1, -1, 1], [1, -1, -1]]
    bins = np.arange(0, self.bin)
    multi_mod_glcm = []
    if self.seg is None:
        return None
    for m in range(0, self.img.shape[4]):
        shifted_image = []
        for n in range(0, self.neigh + 1):
            # mask the modality with the segmentation, then shift it
            new_img = self.seg * self.img[:, :, :, 0, m]
            new_img = ndimage.shift(new_img, shifts[n], order=0)
            if np.count_nonzero(new_img) > 0:
                flattened_new = new_img.flatten()
                flattened_seg = self.seg.flatten()
                # affine rescaling of intensities before quantization
                affine = np.round(flattened_new * self.mul + self.trans)
                select_new = np.digitize(affine[flattened_seg == 1], bins)
                select_new[select_new >= self.bin] = self.bin - 1
                shifted_image.append(select_new)
        glcm = np.zeros([self.bin, self.bin, self.neigh])
        for n in range(0, self.neigh):
            # count co-occurrences between the unshifted bins and the bins
            # of the n-th shifted copy, then normalize to probabilities
            for i in range(0, shifted_image[0].size):
                glcm[shifted_image[0][i], shifted_image[n + 1][i], n] += 1
            glcm[:, :, n] = glcm[:, :, n] / np.sum(glcm[:, :, n])
        multi_mod_glcm.append(glcm)
    return multi_mod_glcm
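# The counting scheme in glcm generalizes the classic 2D grey level
# co-occurrence matrix. A self-contained 2D illustration of the same idea
# (quantized image, one neighbourhood shift, co-occurrence counts):
import numpy as np
from scipy import ndimage

img = np.array([[0, 0, 1, 1],
                [0, 0, 1, 1],
                [0, 2, 2, 2],
                [2, 2, 3, 3]])
n_levels = 4
# order=0 so grey levels are moved, not interpolated; shifting by (0, -1)
# makes shifted[y, x] equal the right-hand neighbour img[y, x + 1]
shifted = ndimage.shift(img, (0, -1), order=0)

glcm = np.zeros((n_levels, n_levels))
for a, b in zip(img[:, :-1].ravel(), shifted[:, :-1].ravel()):
    glcm[a, b] += 1    # count the pair (pixel, right neighbour)
glcm /= glcm.sum()     # normalize to co-occurrence probabilities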
def ssd_calc_trans(tx, ty):
    # sum of squared differences between the reference image and the
    # candidate image translated so that (tx, ty) is taken relative to the
    # rotation center; img_ref, final_img, and center_img_rot come from
    # the enclosing scope
    shifted = ndimage.shift(final_img,
                            (tx - center_img_rot[0], ty - center_img_rot[1]),
                            mode='nearest')
    return np.sum((np.array(img_ref, dtype=np.float32) -
                   np.array(shifted, dtype=np.float32)) ** 2)
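# Usage sketch for ssd_calc_trans: recover a known translation by
# minimizing the SSD with scipy.optimize.fmin (all data synthetic;
# center_img_rot fixed at the origin for simplicity):
import numpy as np
from scipy import ndimage, optimize

rng = np.random.default_rng(0)
img_ref = rng.random((64, 64))
final_img = ndimage.shift(img_ref, (2.3, -1.7), mode='nearest')
center_img_rot = (0.0, 0.0)

def ssd_calc_trans(tx, ty):
    shifted = ndimage.shift(final_img,
                            (tx - center_img_rot[0], ty - center_img_rot[1]),
                            mode='nearest')
    return np.sum((img_ref.astype(np.float32) -
                   shifted.astype(np.float32)) ** 2)

# Nelder-Mead over (tx, ty); converges near (-2.3, 1.7), undoing the shift
best = optimize.fmin(lambda p: ssd_calc_trans(p[0], p[1]), [0.0, 0.0])
print(best)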