def shift_msim_data(msim_data, out=None):
    """
    In Richardson-Lucy deconvolution, you have to matrix-multiply by
    the transpose of the "expected data" operation at each iteration.
    In many cases, this is equivalent to blurring and summing. For
    MSIM, the transpose of the "expected data" operation also requires
    a shifting.

    Interestingly, this shifting is equivalent to "Enderlein's trick",
    the standard data processing method used for MSIM data (which we
    often refer to as "scaling" of the images of each pinhole).

    It's worth noting that in our new decon method, this shifting is
    applied to the correction factor, not directly to the MSIM data as
    in previous work.
    """
    if out is None:
        shifted_msim_data = np.zeros_like(msim_data)
    else:
        shifted_msim_data = out
    for m in range(msim_data.shape[2]):
        for n in range(msim_data.shape[3]):
            interpolation.shift(
                input=msim_data[:, :, m, n],
                shift=(0.5*(m - 0.5*msim_data.shape[2]),
                       0.5*(n - 0.5*msim_data.shape[3])),
                output=shifted_msim_data[:, :, m, n],
                order=3)
    shifted_msim_data[shifted_msim_data < 0] = 0 #Interpolation can produce negative values
    return shifted_msim_data
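
A minimal usage sketch (hypothetical stack shape; assumes numpy as np and scipy.ndimage's interpolation submodule are imported, as the function above expects):

import numpy as np
from scipy.ndimage import interpolation

msim_data = np.random.rand(64, 64, 4, 4)  # (y, x, pinhole row m, pinhole col n)
shifted = shift_msim_data(msim_data)      # allocates a fresh output array

buf = np.zeros_like(msim_data)            # or reuse a preallocated buffer
shift_msim_data(msim_data, out=buf)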
Example #2
    def test_saveAndLoad(self):

        # test basic saving and loading functionality
        # new registration methods should add tests
        # for loading and saving

        random.seed(42)
        ref = random.randn(25, 25)

        im = shift(ref, [2, 0], mode='constant', order=0)
        im2 = shift(ref, [0, 2], mode='constant', order=0)
        imIn = ImagesLoader(self.sc).fromArrays([im, im2])
        reg = Registration('crosscorr')
        reg.prepare(ref)
        model1 = reg.fit(imIn)

        t = tempfile.mkdtemp()
        model1.save(t + '/test.json')
        # with open(t + '/test.json', 'r') as fp:
        #    print fp.read()
        model2 = Registration.load(t + '/test.json')
        # print model2

        out1 = model1.transform(imIn).first()[1]
        out2 = model2.transform(imIn).first()[1]

        assert_true(allclose(out1, out2))
Example #3
def combine_dithers(image_list, pos=None):

    # open the set of hdus
    hdu_list = [fits.open(img) for img in image_list]
    n_images = len(hdu_list)

    # determine the position of each image
    if pos is None: pos = calc_offset(hdu_list)

    # shift each image
    # assumes there is a variance and bpm, but just does a simple shift
    for i in range(n_images):
        hdu_list[i][1].data = interpolation.shift(hdu_list[i][1].data, np.array([pos[0] - pos[i], 0]))
        try:
            hdu_list[i][2].data = interpolation.shift(hdu_list[i][2].data, np.array([pos[0] - pos[i], 0]))**2
            hdu_list[i][3].data = interpolation.shift(hdu_list[i][3].data, np.array([pos[0] - pos[i], 0]))
        except IndexError:
            # variance/bpm extensions are missing; shift only the science data
            pass
  
        # sum the results
        if i > 0:
            hdu_list[0][1].data += hdu_list[i][1].data
            hdu_list[0][2].data += hdu_list[i][2].data
            hdu_list[0][3].data *= hdu_list[i][3].data

    hdu_list[0][1].data /= n_images
    # convert the summed variance into an error on the mean
    hdu_list[0][2].data = hdu_list[0][2].data**0.5 / n_images
    obsdate = hdu_list[0][0].header['DATE-OBS'].replace('-','')
    output = '{}_P{}.fits'.format(hdu_list[0][0].header['OBJECT'].replace(' ', '_'), obsdate)
    hdu_list[0].writeto(output, overwrite=True)
    return output
Example #4

    def testRelativeShiftedCopy(self):
        sigma_matrix = SigmaWaist(sigma_x=3e-6,
                                  sigma_y=1e-6,
                                  sigma_x_prime=5e-6,
                                  sigma_y_prime=5e-6)

        x_coordinates = np.linspace(-1e-5, 1e-5, 51)
        y_coordinates = np.linspace(-1e-5, 1e-5, 51)
        wavenumber = 1e+11

        density = PhaseSpaceDensity(sigma_matrix, wavenumber)

        e_field = createGaussian2D(sigma_x=1.0e-6,
                                   sigma_y=1.0e-6,
                                   x_coordinates=x_coordinates,
                                   y_coordinates=y_coordinates)
        e_field = e_field + 0j

        strategy = BuilderStrategyPython(x_coordinates, y_coordinates, density, x_coordinates, y_coordinates, e_field[np.newaxis,:,:])

        shifted_field = np.zeros_like(e_field)

        for i_x_shift in range(-25, 25):
            for i_y_shift in range(-25, 25):
                comp = shift(e_field.real, (i_x_shift, i_y_shift), order=0) \
                       + 1j * shift(e_field.imag, (i_x_shift, i_y_shift), order=0)
                strategy.relativeShiftedCopy(i_x_shift, i_y_shift, e_field, shifted_field)

                #print(i_x_shift, i_y_shift, np.unravel_index(shifted_field.argmax(), shifted_field.shape), np.unravel_index(comp.argmax(), comp.shape), np.linalg.norm(shifted_field-comp))

                self.assertLess(np.linalg.norm(shifted_field-comp), 1e-12)
Example #5
def PVI(prices):
    """
    33. PVI正量指标(Positive Volume Index, PVI)
    说明:正量指标集中关注那些成交量比前期增加了的交易日,显示了并不那么聪明的资金正在干什么。

    计算方法:
    PVI = IF(VOL>VOL[1],PVI[1]+(CLOSE-CLOSE[1])/CLOSE[1]*PVI[1],PVI[1])
    PVI初始值设为1000

    :param prices:
    :return:
    """
    assert prices is not None
    _assert_greater_or_equal(len(prices), 1)

    # df_price = prices.copy()
    df_price = prices.sort_index(ascending=True)
    close, volume = df_price[['close', 'volume']].T.values

    close1 = inp.shift(close, 1, order=0, cval=np.nan)
    volume1 = inp.shift(volume, 1, order=0, cval=np.nan)
    N = len(prices)
    pvi = np.zeros(shape=(N,), dtype=np.float64)
    pvi[0] = 1000
    for ind in range(1, N):
        if volume[ind] > volume1[ind]:
            pvi[ind] = pvi[ind - 1] * (1 + (close[ind] - close1[ind]) / close1[ind])
        else:
            pvi[ind] = pvi[ind - 1]

    pvi = pd.Series(pvi, index=df_price.index)
    return pvi
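
A short usage sketch (hypothetical prices; PVI only needs a frame with 'close' and 'volume' columns on a sorted index):

import pandas as pd

idx = pd.date_range('2020-01-01', periods=5)
prices = pd.DataFrame({'close':  [10.0, 10.5, 10.2, 10.8, 11.0],
                       'volume': [1000, 1200,  900, 1500, 1400]}, index=idx)

pvi = PVI(prices)  # Series starting at 1000; changes only on higher-volume days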
Example #6
    def test_crosscorr_image(self):

        random.seed(42)
        ref = random.randn(25, 25)

        im = shift(ref, [2, 0], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[:-2, :], imout[:-2, :]))
        assert(allclose(paramout, [2, 0]))

        im = shift(ref, [0, 2], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[:, :-2], imout[:, :-2]))
        assert(allclose(paramout, [0, 2]))

        im = shift(ref, [2, -2], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[:-2, 2:], imout[:-2, 2:]))
        assert(allclose(paramout, [2, -2]))

        im = shift(ref, [-2, 2], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[2:, :-2], imout[2:, :-2]))
        assert(allclose(paramout, [-2, 2]))
Example #7
def main():
    fitslst = glob.glob('*skysub.fits')
    fitslst.sort()

    ref = pf.getdata(fitslst[0])
    xref,yref = np.loadtxt(fitslst[0]+'.coo')
    refmask = makeCircularMask(ref.shape,15,xref,yref) - makeCircularMask(ref.shape,8,xref,yref) 
    refmed = np.median(ref[refmask])
    pf.writeto('aligned_'+fitslst[0], ref)

    data = [ref]
    for fits in fitslst[1:]:
        targ = pf.getdata(fits)
        xtarg,ytarg = np.loadtxt(fits+'.coo')

        shim = shift(targ, (yref-ytarg, xref-xtarg), order=1)
        shval = alignIm(shim, ref, refmask)  # first pass: refine the coarse coordinate-based shift
        shim = shift(shim, shval, order=1)
        targmed = np.median(shim[refmask])
        pf.writeto('aligned_'+fits, refmed*shim/targmed)
        data.append(shim)
    pf.writeto('Combo_%s.fits'%(fitslst[0].split('_')[0]),np.median(np.array(data),axis=0))
    if not os.path.exists('../Aligned'): os.mkdir('../Aligned')
    os.system('mv aligned*fits ../Aligned/')
    os.system('mv Combo_*fits ../Aligned/')
Example #8
def plotTwoPtCodes(iPt, iCharge, iRefLayer):

    iChargeIndex = (iCharge+1)//2

    yValues = range(0,11)
    layerToYValue = [10, 0, 1, 11, 12, 2, 3, 13, 14, 4, 5]

    title = "PtCode = "+str(iPt)+" charge="+str(iCharge)+" ref layer: "+refLayersNames[iRefLayer]
    figure, axesArray = pylab.subplots(1,sharex=True,sharey=True,num=title,figsize=(8,12))

    for yValue in yValues:
        iLayer = layerToYValue[yValue]
        shiftedPdf = shift(pdfArray[iPt,iChargeIndex,iLayer,iRefLayer], meanDistPhiArray[iPt,iChargeIndex,iLayer,iRefLayer], cval=0)
        shiftedPdf+=yValue*63
        axesArray.plot(shiftedPdf,linewidth=5,color='red',label=layersNames[iLayer],drawstyle='steps-mid')
        iPtSecond = 5*2+1
        shiftedPdf = shift(pdfArray[iPtSecond,iChargeIndex,iLayer,iRefLayer], meanDistPhiArray[iPtSecond,iChargeIndex,iLayer,iRefLayer], cval=0)
        shiftedPdf+=yValue*63
        axesArray.plot(shiftedPdf,linewidth=5,color='blue',label=layersNames[iLayer],drawstyle='steps-mid')
        axesArray.text(141, yValue*63+30, layersNames[iLayer])

    pylab.setp([a.get_xticklabels() for a in figure.axes[:-1]], visible=False)
    pylab.xlabel('bin number')
    pylab.title('')

    pylab.show()
Example #9
    def test_crosscorrImage(self):
        random.seed(42)
        ref = random.randn(25, 25)

        reg = Registration('crosscorr')

        im = shift(ref, [2, 0], mode='constant', order=0)
        imIn = ImagesLoader(self.sc).fromArrays(im)
        paramOut = reg.prepare(ref).fit(imIn).transformations[0].delta
        imOut = reg.prepare(ref).run(imIn).first()[1]
        assert_true(allclose(ref[:-2, :], imOut[:-2, :]))
        assert_true(allclose(paramOut, [2, 0]))

        im = shift(ref, [0, 2], mode='constant', order=0)
        imIn = ImagesLoader(self.sc).fromArrays(im)
        paramOut = reg.prepare(ref).fit(imIn).transformations[0].delta
        imOut = reg.prepare(ref).run(imIn).first()[1]
        assert_true(allclose(ref[:, :-2], imOut[:, :-2]))
        assert_true(allclose(paramOut, [0, 2]))

        im = shift(ref, [2, -2], mode='constant', order=0)
        imIn = ImagesLoader(self.sc).fromArrays(im)
        paramOut = reg.prepare(ref).fit(imIn).transformations[0].delta
        imOut = reg.prepare(ref).run(imIn).first()[1]
        assert_true(allclose(ref[:-2, 2:], imOut[:-2, 2:]))
        assert_true(allclose(paramOut, [2, -2]))

        im = shift(ref, [-2, 2], mode='constant', order=0)
        imIn = ImagesLoader(self.sc).fromArrays(im)
        paramOut = reg.prepare(ref).fit(imIn).transformations[0].delta
        imOut = reg.prepare(ref).run(imIn).first()[1]
        assert_true(allclose(ref[2:, :-2], imOut[2:, :-2]))
        assert_true(allclose(paramOut, [-2, 2]))
Example #10
    def join_with(self, other, x, y, smooth_blend=True):
        """ Stitches a new slice to the current slice at the given coordinates.

        :param StitchedSlice other: The other slice.
        :param float x, y: Coordinates of the other slice.
        :param bool smooth_blend: Whether to taper edges for a smoother blending.
            It assumes other is beside self (not above or below).
        """
        # Compute size of the joint ROI
        x_min = min(self.x - self.width / 2, x - other.width / 2)
        x_max = max(self.x + self.width / 2, x + other.width / 2)
        y_min = min(self.y - self.height / 2, y - other.height / 2)
        y_max = max(self.y + self.height / 2, y + other.height / 2)
        x_max -= (x_max - x_min) % 1 # Make sure they add up to an integer value
        y_max -= (y_max - y_min) % 1 # Make sure they add up to an integer value
        output_height, output_width = int(round(y_max - y_min)), int(round(x_max - x_min))

        # Taper sides for smoother blending
        if smooth_blend:
            overlap = (self.width + other.width) - output_width
            taper = signal.hann(2 * overlap)[:overlap]

            if self.x + self.width / 2 > x + other.width / 2:  # other | self
                self.mask[..., :overlap] *= taper
                other.mask[..., -overlap:] *= (1 - taper)
            else:
                other.mask[..., :overlap] *= taper
                self.mask[..., -overlap:] *= (1 - taper)

        # Initialize empty (big) slices
        mask1 = np.zeros([output_height, output_width], dtype=np.float32)
        slice1 = np.zeros([output_height, output_width], dtype=self.dtype)
        mask1[:self.height, :self.width] = self.mask
        slice1[:self.height, :self.width] = self.slice

        mask2 = np.zeros([output_height, output_width], dtype=np.float32)
        slice2 = np.zeros([output_height, output_width], dtype=other.dtype)
        mask2[:other.height, :other.width] = other.mask
        slice2[:other.height, :other.width] = other.slice

        # Move rois to their final position
        delta1 = (self.y - self.height / 2) - y_min, (self.x - self.width / 2) - x_min
        mask1 = interpolation.shift(mask1, delta1, order=1)
        slice1 = interpolation.shift(slice1, delta1, order=1)

        delta2 = (y - other.height / 2) - y_min, (x - other.width / 2) - x_min
        mask2 = interpolation.shift(mask2, delta2, order=1)
        slice2 = interpolation.shift(slice2, delta2, order=1)

        # Blend (mask act as weights and normalization needed for them to sum to 1)
        self.mask = mask1 + mask2
        self.slice = slice1 * mask1 + slice2 * mask2
        self.slice[self.mask > 1e-7] /= self.mask[self.mask > 1e-7]

        # Bookkeeping: Update coordinates
        self.x = x_min + output_width / 2
        self.y = y_min + output_height / 2
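
The heart of the blend above: pad each tile into the joint canvas, move it into place with a subpixel interpolation.shift, and use the shifted masks as blending weights. A self-contained toy sketch of that idea (not the class above):

import numpy as np
from scipy.ndimage import interpolation

canvas = np.zeros((6, 8), dtype=np.float32)  # joint ROI
mask = np.zeros_like(canvas)
canvas[:4, :4] = 1.0                         # a 4x4 tile of ones
mask[:4, :4] = 1.0

# subpixel move to the tile's final position; order=1 keeps weights in [0, 1]
canvas = interpolation.shift(canvas, (0.5, 2.25), order=1)
mask = interpolation.shift(mask, (0.5, 2.25), order=1)

# normalize by the weights wherever anything contributed
blended = np.where(mask > 1e-7, canvas / np.maximum(mask, 1e-7), 0.0)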
Example #11

def test_fit_axis(eng):
    reference = arange(60).reshape(2, 5, 6)
    algorithm = CrossCorr(axis=0)
    a = shift(reference[0], [1, 2], mode='wrap', order=0)
    b = shift(reference[1], [-2, 1], mode='wrap', order=0)
    c = shift(reference[0], [2, 1], mode='wrap', order=0)
    d = shift(reference[1], [1, -2], mode='wrap', order=0)
    shifted = [asarray([a, b]), asarray([c, d]),]
    model = algorithm.fit(shifted, reference=reference)
    assert allclose(model.toarray(), [[[1, 2], [-2, 1]], [[2, 1], [1, -2]]])
Example #12
def tiltaxisalign(im_series,tilt_angles,shift_and_tilt=('hold','hold')):
    series_shape = np.shape(im_series)
    
    new_series = im_series.copy()
    final_series = im_series.copy()
    
    #deg0_int = input('Which image is the 0 degree image? ')
    midy = int(series_shape[1]/2)
    
    axis_shift = shift_and_tilt[0]
    axis_tilt = shift_and_tilt[1]
    
    if axis_shift == 'hold':
        shift_continue = 1
    
        while shift_continue == 1:
            plt.imshow(iradon(np.rot90(new_series[:, midy, :]),  # rot90 = anti-clockwise
                              theta=tilt_angles, output_size=series_shape[2]))
            plt.show()
            
            axis_shift = float(input('By how many pixels from the original mid-point should the tilt axis be shifted? '))
            
            for i in range(series_shape[0]):
                new_series[i,:,:] = interpolation.shift(im_series.copy()[i,:,:],(0,axis_shift)) # shift along np x-axis
                
            shift_continue = int(input('Would you like to apply further image shifts (1 for yes, 0 for no)? '))
                
    for i in range(series_shape[0]):
        final_series[i,:,:] = interpolation.shift(final_series[i,:,:],(0,axis_shift))
        
    topy = int(3*series_shape[1]/8)
    bottomy = int(5*series_shape[1]/8)

    if axis_tilt == 'hold':
        tilt_series = final_series.copy()
        tilt_continue = 1
        while tilt_continue == 1:
            plt.imshow(iradon(np.rot90(new_series[:,topy,:]), theta = tilt_angles,output_size = series_shape[2]))
            plt.show()
            plt.imshow(iradon(np.rot90(new_series[:,bottomy,:]), theta = tilt_angles,output_size = series_shape[2]))
            plt.show()
            
            axis_tilt = float(input('By what angle from the original y axis (in degrees) should the tilt axis be rotated? '))
            
            for i in range(series_shape[0]):
                new_series[i,:,:] = interpolation.rotate(tilt_series.copy()[i,:,:],axis_tilt,reshape=False)
                    
            tilt_continue = int(input('Would you like to try another tilt angle (1 for yes, 0 for no)? '))
                    
    for i in range(series_shape[0]):
        final_series[i,:,:] = interpolation.rotate(final_series[i,:,:],axis_tilt,reshape=False)
            
    shift_and_tilt = (axis_shift,axis_tilt)
            
    return(final_series, shift_and_tilt)
Example #13
    def onkey(self, event):
        if event.key in ['.','>']:
            if self.current >= len(self.filelist)-1:
                return
            self.datalist[self.current] = self.active_data
            self.current += 1
            self.orig_data = pyfits.getdata(self.filelist[self.current])
            self.active_data = self.datalist[self.current]
            
        elif event.key in [',','<']:
            if self.current == 0:
                return
            self.datalist[self.current] = self.active_data
            self.current -= 1
            
            self.orig_data = pyfits.getdata(self.filelist[self.current])
            self.active_data = self.datalist[self.current]

        elif event.key == '-':
            self.fig.canvas.mpl_disconnect(self.keycid)
            self.pausetext = '-'
            self.pid = self.displaytext(self.pausetext)
            self.keycid = self.fig.canvas.mpl_connect('key_press_event', self.pausekey)
            return

        elif event.key == 'left':
            if self.active_data is None:
                return
            self.active_data = shift(self.active_data,[0,-self.step])
            self.offsets[self.current][0] -= self.step

        elif event.key == 'right':
            if self.active_data is None:
                return
            self.active_data = shift(self.active_data,[0,self.step])
            self.offsets[self.current][0] += self.step

        elif event.key == 'down':
            if self.active_data is None:
                return
            self.active_data = shift(self.active_data,[-self.step,0])
            self.offsets[self.current][1] -= self.step

        elif event.key == 'up':
            if self.active_data is None:
                return
            self.active_data = shift(self.active_data,[self.step,0])
            self.offsets[self.current][1] += self.step

        self.display(self.active_data)
        self.displaytext('[%.2f, %.2f] s=%.2f'%
                         (self.offsets[self.current][0],
                          self.offsets[self.current][1],
                          self.step),
                         x=0.60)
Example #14

def process_eso(vol_out):
    print 'interpolating eso...'
    voleso=vol_out==1
    voleso=voleso.astype(np.uint8)
    idxesoini=np.where(voleso>0)
    
    id_eso=np.where(vol_out==1)
    seg_eso=np.zeros_like(vol_out)
    seg_eso[id_eso]=1
    listorgan=np.where(seg_eso>0)
    zmin=np.min(listorgan[0])
    zmax=np.max(listorgan[0])
    ini_found=False
    for idx in xrange(zmin,zmax):
        eso_slice=seg_eso[idx]
        centroid=center_of_mass(eso_slice)
        if not ini_found:  # we have not yet found the first empty slice
            if np.isnan(centroid).any():  # look for the first empty slice
                #print 'is NAN ',idx
                ini=idx-1
                pini=list(center_of_mass(seg_eso[idx-1]))
                pini.append(idx-1)
                ini_found=True
        else:  # we already found the first empty slice; look for where data resumes
            idvox = np.where(eso_slice == 1)
            nvoxels = len(idvox[0])
            if not np.isnan(centroid).any() and nvoxels > 5:  # a slice with enough voxels

                #print 'final nan ',idx
                fin=idx
                pfin=list(center_of_mass(seg_eso[fin]))
                pfin.append(idx)
                #print 'pini ',pini
                #print 'pfin ',pfin
                for z in xrange(ini, fin):  # fill the empty slices here
                    newcenter=interpolateline(pini,pfin,z)
                    #print 'new center ',newcenter
                    #print 'prev center ',center_of_mass(seg_eso[z-1])
                    translation=np.int16(np.array(newcenter)-np.array(center_of_mass(seg_eso[z-1])))
                    #print 'trans ',translation
                    #tx = tf.SimilarityTransform(translation=(0,0))#tuple(translation)
                    if z==ini:
                        slicetmp = shift(seg_eso[z-5],translation)#tf.warp(seg_eso[z-1], tx)
                    else:
                        slicetmp = shift(seg_eso[z-1],translation)#tf.warp(seg_eso[z-1], tx)
                    #print 'unique slice befor trans ',np.unique(seg_eso[z-1])
                    #print 'unique slice tmp ',np.unique(slicetmp)
                    seg_eso[z]=slicetmp
                ini_found=False
    idxeso=np.where(seg_eso>0)
    volfinal=np.copy(vol_out)
    volfinal[idxesoini]=0
    volfinal[idxeso]=1
    return volfinal
Example #15
def DDI(prices, timeperiod=20):
    """
    19. DDI方向标准离差指标(Directional Divergence Index,DDI)
    说明:

    计算方法:默认N=20
    DMZ = IF(HIGH+LOW<=HIGH[1]+LOW[1],0,MAX(ABS(HIGH-HIGH[1]),ABS(LOW-LOW[1])))
    DMF = IF(HIGH+LOW>HIGH[1]+LOW[1],0, MAX(ABS(HIGH-HIGH[1]),ABS(LOW-LOW[1])))
    DIZ = SUM(DMZ,N)/(SUM(DMZ,N)+SUM(DMF,N))
    DIF = SUM(DMF,N)/(SUM(DMZ,N)+SUM(DMF,N))
    DDI=DIZ-DIF

    :param prices:
    :param timeperiod:
    :return:
    """

    assert prices is not None
    _assert_greater_or_equal(len(prices), timeperiod)
    assert isinstance(timeperiod, int)

    # df_price = prices.copy()

    df_price = prices.sort_index(ascending=True)
    high, low = df_price[['high', 'low']].T.values
    high1 = inp.shift(high, 1, cval=np.nan)
    low1 = inp.shift(low, 1, cval=np.nan)

    abs_high_high1 = np.abs(high - high1)
    abs_low_low1 = np.abs(low - low1)
    val_max = np.max(np.column_stack((abs_high_high1, abs_low_low1)), axis=1)

    # DMZ
    cond1 = high + low <= high1 + low1
    dmz = val_max.copy()
    dmz[cond1] = 0

    # DMF (reuses val_max in place; val_max is not needed afterwards)
    dmf = val_max
    dmf[~cond1] = 0

    SUM = ta.SUM
    sum_dmz = SUM(dmz, timeperiod)
    sum_dmf = SUM(dmf, timeperiod)
    sum_dmz_dmf = sum_dmz + sum_dmf
    diz = sum_dmz / sum_dmz_dmf
    dif = sum_dmf / sum_dmz_dmf

    ddi = pd.Series((diz - dif) * 100, index=df_price.index)

    return ddi
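
The DMZ/DMF split can also be written with np.where, which maps one-to-one onto the IF(...) lines of the formula and avoids the in-place masking above (a sketch):

import numpy as np

def dmz_dmf(high, low, high1, low1):
    # MAX(ABS(HIGH-HIGH[1]), ABS(LOW-LOW[1]))
    val_max = np.maximum(np.abs(high - high1), np.abs(low - low1))
    dmz = np.where(high + low <= high1 + low1, 0.0, val_max)
    dmf = np.where(high + low > high1 + low1, 0.0, val_max)
    return dmz, dmf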
Example #16

File: MainWindow.py Project: RFlehr/AccQ
    def getPeakData(self, wl, dbmData):
        peak = self.centerOfGravity(wl, dbmData)
        timestamp = time.clock() - self.startTime
        numVal = np.count_nonzero(self.peaks)
        self.chan1IsLabel.setText(str("{0:.3f}".format(peak)))

        if numVal < self.__maxBuffer:
            self.peaks[numVal] = peak
            self.peaksTime[numVal] = timestamp
        else:
            self.peaks = shift(self.peaks, -1, cval=peak)
            self.peaksTime = shift(self.peaksTime, -1, cval=timestamp)

        return numVal
Example #17
def VRSI(prices, timeperiod=14):
    """
    44. VRSI量相对强弱指标(Volume Relative Strength Index,VRSI)
    说明:
    VRSI是从RSI(强弱指数)演变出的一种指标,即成交量的相对强弱指数。
    它计算一段时间内价格上升日与下跌日成交量的比值,来反应成交量与价格升跌的关系。
    其原理与RSI和VR相类似。此指标的计算方式同RSI,只是将收盘价改成成交手数,应用方式请参照RSI。

    计算方法:
    U=IF(CLOSE>CLOSE[1],VOL,IF(CLOSE=CLOSE[1],VOL/2,0))
    D=IF(CLOSE<CLOSE[1],VOL, IF(CLOSE=CLOSE[1],VOL/2,0))
    UU=((N-1)U[1]+U)/N
    DD=((N-1)D[1]+D)/N
    VRSI=100*UU/(UU+DD)

    :param prices:
    :param timeperiod:
    :return:
    """
    assert prices is not None
    _assert_greater_or_equal(len(prices), timeperiod)
    assert isinstance(timeperiod, int)
    # df_price = prices.copy()
    df_price = prices.sort_index(ascending=True)
    close, volume = df_price[['close', 'volume']].T.values
    close1 = inp.shift(close, 1, cval=np.nan)

    cond1 = close > close1
    cond_e = np.isclose(close, close1)
    cond2 = close < close1

    u = np.empty(shape=close1.shape)
    d = u.copy()

    u[cond1] = volume[cond1]
    u[cond_e] = 0.5 * volume[cond_e]
    u[~(cond1 | cond_e)] = 0.0

    d[cond2] = volume[cond2]
    d[cond_e] = 0.5 * volume[cond_e]
    d[~(cond2 | cond_e)] = 0.0

    uu = ((timeperiod - 1) * inp.shift(u, 1, order=0,
                                       cval=np.nan) + u) / timeperiod
    dd = ((timeperiod - 1) * inp.shift(d, 1, order=0,
                                       cval=np.nan) + d) / timeperiod

    vrsi = 100 * uu / (uu + dd)

    return pd.Series(vrsi, index=df_price.index)
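
The U/D split in the formula has a compact pandas spelling that may be easier to compare with the docstring (a sketch; close and volume are Series):

import pandas as pd

def vrsi_u_d(close, volume):
    prev = close.shift(1)
    # U = IF(CLOSE>CLOSE[1], VOL, IF(CLOSE=CLOSE[1], VOL/2, 0))
    u = volume.where(close > prev, 0.0).mask(close == prev, volume / 2)
    # D = IF(CLOSE<CLOSE[1], VOL, IF(CLOSE=CLOSE[1], VOL/2, 0))
    d = volume.where(close < prev, 0.0).mask(close == prev, volume / 2)
    return u, d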
Example #18
    def updateData(self):
        numPeaks = 0
        #data = np.zeros((1,1))
        data, timestamp, success = self.readDataFromQ()
        if not success: return None
        
        actualTime = timestamp-self.startMeasurementTime
        if self.tempConnected:
            temp = float(self.tempDisplay.text())
            temp1 = float(self.tempDisplay1.text())
            numTempVal = np.count_nonzero(self.__tempArray[0])
            if numTempVal < self.__maxTempBuffer:
                self.__tempArray[1][numTempVal] = temp
                self.__tempArray[2][numTempVal] = temp1
                self.__tempArray[0][numTempVal] = actualTime#-self.startMeasurementTime
            else:
                self.__tempArray[1] = shift(self.__tempArray[1], -1, cval = temp)
                self.__tempArray[2] = shift(self.__tempArray[2], -1, cval = temp1)
                self.__tempArray[0] = shift(self.__tempArray[0], -1, cval = actualTime)#-self.startMeasurementTime)
            
            self.calcTempGradient(numTempVal)

        if len(data[:,0]) == len(self.channelList):
            if self.setFreqAction.value() < 5:
                numPeaks = self.__fbg.searchPeaks(self.__scaledWavelength, data,self.channelList, 
                                       self.channelSelection, actualTime)
            else:
                numPeaks = self.__fbg.searchPeaks(self.__scaledWavelength, data,self.channelList, 
                                       self.channelSelection, actualTime, peakfit=1)
        if self.plotTab.currentIndex() == 0:
            self.plotSpec.plotS(self.__scaledWavelength, data)
        else:
            self.plotTrace.plotTraces(self.channelList, numPeaks, self.__fbg)
            if self.tempConnected:
                self.plotTrace.plotTemp(self.__tempArray[:,:numTempVal])
                
        if self.recordAction.isChecked() and self.__freq >= 10:
            self.saveLastData(numPeaks)
            

        dt = timestamp-self.lastTime
        if timestamp:
            if self.fps is None:
                self.fps = 1.0/dt
            else:
                s = np.clip(dt*3., 0, 1)
                self.fps = self.fps * (1-s) + (1.0/dt) * s
            self.statusBar().showMessage('%0.2f Hz' % (self.fps))
            self.lastTime = timestamp
Example #19

def create_model(max_shift, type):
    print 'Creating ' + type + ' models for up to ' + str(max_shift) + ' minutes'
    ttt = TimeToEventLoader(start_time, end_time, interval)
    ttt.set_event_name(type)
    arr, tri, wait, real = RealTimeWait.moving_average(ttt)
    tri = np.asarray(tri)
    tri = tri / 60000 - start_time_min

    arr, tri2, speed_arr, speed_tri = RealTimeWait.get_speeds(ttt)
    arr = np.asarray(arr)
    arr = arr / 60000 - start_time_min
    tri2 = np.asarray(tri2)
    tri2 = tri2 / 60000 - start_time_min

    untriage = UntriagedLoader(start_time, end_time, interval)
    untriage.set_event_name(type)
    y4 = untriage.load_vector()
    X4 = untriage.get_times()[:, np.newaxis]
    X4 = (X4 / 60000 - start_time_min)

    wait_loader = AverageTimeWaitedLoader(start_time, end_time, interval)
    wait_loader.set_event_name(type)
    y5 = wait_loader.load_vector() / 60000
    X5 = wait_loader.get_times()[:, np.newaxis]
    X5 = (X5 / 60000 - start_time_min)

    print 'All data picked up, transforming it to uniform axes'
    model = neighbors.KNeighborsRegressor(5, weights='distance')
    #model = LWRegressor(sigma=50)
    y1 = get_uniform_axes(tri, wait, model)
    y2 = get_uniform_axes(arr, speed_arr, model)
    y3 = get_uniform_axes(tri2, speed_tri, model)
    y4 = get_uniform_axes(X4, y4, model)
    y5 = get_uniform_axes(X5, y5, model)
    #y6 = np.roll(y1, 30)
    #y7 = np.roll(y1, 15)
    y6 = shift(y1.tolist(), 3, cval=0)
    y7 = shift(y1.tolist(), 2, cval=0)
    #y7 = untriage.get_times_of_day()
    X = np.column_stack([y1, y2, y3, y4, y5, y6, y7])
    ys = []
    mpl = MLPRegressor()
    for i in range(0, max_shift + 1, 10):
        print 'Fitting model shifted ' + str(i) + ' minutes'
        y = np.roll(y1, -i/10)
        save_data(X, y, i, type)
        ys.append(y)
        fit_and_save_model(mpl, X, y, i, 'mpl', type)
Example #20

    def modulatePF_unwrapped(self):
        #geometry = self._control.slm.getGeometry()
        geometry = self._getGeo()
        MOD = -1*self.unwrap()
        MOD = np.flipud(MOD)
        MOD = np.rot90(MOD)
        cx,cy,d = geometry.cx, geometry.cy, geometry.d
        # Diameter of phase retrieval output [pxl]:
        dPhRt = (self._pupil.k_max/self._pupil.kx.max())*self._pupil.nx
        # Zoom needed to fit onto SLM map:
        zoom = d/dPhRt
        MOD = interpolation.zoom(MOD,zoom,order=0,mode='nearest')
        # Flip up down:
        #MOD = np.flipud(MOD)
        # Flip left right:
        #MOD = np.fliplr(MOD)
        #MOD = np.rot90(MOD)
        MOD = np.rot90(-1.0*MOD) #Invert and rot90
        # Shift center:
        MOD = interpolation.shift(MOD,(cy-255.5,cx-255.5),order=0,
                                                       mode='nearest')
        # Cut out center 512x512:
        c = MOD.shape[0]//2
        MOD = MOD[c-256:c+256,c-256:c+256]

        
        # Add an 'Other' modulation using the SLM API. Store the index in _modulations:
        #index = self._control.slm.addOther(MOD)
        index = self._addMOD(MOD)
        self._modulations.append(index)
        return index
Example #21
def imshift(filename,shifts,center,refFile,name_ext='.al',clobber=False):
    f = pyfits.open(filename)

    header = f[0].header
    header['REF_FILE'] = (os.path.basename(refFile),'Reference file')
    header['PRE_FILE'] = (os.path.basename(filename),'Filename before shift')
    header['XSHIFT'] = (shifts[0],'X shift from ref_file')
    header['YSHIFT'] = (shifts[1],'Y shift from ref_file')
    header['XCEN'] = (center[0],'X center')
    header['YCEN'] = (center[1],'Y center')
    header['PALIGN'] = (True,'Aligned')

    newName = os.path.splitext(filename)
    newName = ''.join([newName[0],name_ext,newName[1]])

    if shifts[0] != 0 or shifts[1] != 0:
        newDat = shift(f[0].data,(shifts[0],shifts[1]))
    else:
        newDat = f[0].data
        
    print filename
    print '\tShifting (%.2f,%.2f) pixels' % (shifts[0],shifts[1])
    print '\tWriting to %s' % newName
    pyfits.writeto(newName,newDat,header=header,clobber=clobber)

    return newName
Example #22

def test_fit(eng):
    reference = arange(25).reshape(5, 5)
    algorithm = CrossCorr()
    deltas = [[1, 2], [-2, 1]]
    shifted = [shift(reference, delta, mode='wrap', order=0) for delta in deltas]
    model = algorithm.fit(shifted, reference=reference)
    assert allclose(model.toarray(), deltas)
Example #23
def _register_frame(frame, mean_img, upsample_factor=1,
                    max_displacement=None,
                    return_registered=False):
    """
    Called by _make_mean_img and _register_all_frames
    """
    # compute the offsets
    dy, dx = _register_translation(mean_img, frame,
                                   upsample_factor=upsample_factor)

    if max_displacement is not None:
        if dy > max_displacement[0]:
            dy = max_displacement[0]
            # dy = 0
        if dx > max_displacement[1]:
            dx = max_displacement[1]
            # dx = 0

    if return_registered:
        registered_frame = shift(frame,
                                 [dy, dx],
                                 order=3,
                                 mode='constant',
                                 cval=0,
                                 output=frame.dtype)

        return dy, dx, registered_frame
    else:
        return dy, dx
Example #24

File: vmi.py Project: stggh/PyAbel
def _align(offset, sliceA, sliceB):
    """Intensity difference between an axial slice and its shifted opposite."""
    diff = shift(sliceA, offset) - sliceB
    fvec = (diff**2).sum()
    print("---", offset, "---", fvec)
    return fvec
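
Since _align returns a scalar misfit, it is shaped to be driven by an optimizer; a hedged sketch (not necessarily how PyAbel calls it) using scipy.optimize.minimize to recover a known offset:

import numpy as np
from scipy.ndimage import shift
from scipy.optimize import minimize

x = np.arange(100, dtype=float)
sliceA = np.exp(-((x - 52.3) / 5.0) ** 2)   # peak at 52.3
sliceB = np.exp(-((x - 50.0) / 5.0) ** 2)   # peak at 50.0

res = minimize(lambda off: ((shift(sliceA, off) - sliceB) ** 2).sum(),
               x0=[0.0], method='Nelder-Mead')
print(res.x)  # about [-2.3]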
Example #25
def draw_ellipsoid(shape, radius, center, FWHM, noise=0):
    sigma = FWHM / 2.35482
    cutoff = 2 * FWHM

    # draw a sphere
    R = max(radius)
    zoom_factor = np.array(radius) / R
    size = int((R + cutoff)*2)
    c = size // 2
    z, y, x = np.meshgrid(*([np.arange(size)] * 3), indexing='ij')
    h = np.sqrt((z - c)**2+(y - c)**2+(x - c)**2) - R
    mask = np.abs(h) < cutoff
    im = np.zeros((size,)*3, dtype=float)
    im[mask] += np.exp((h[mask] / sigma)**2/-2)/(sigma*np.sqrt(2*np.pi))

    # zoom so that radii are ok
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        im = zoom(im, zoom_factor)

    # shift and make correct shape
    center_diff = center - np.array(center_of_mass(im))
    left_padding = np.round(center_diff).astype(int)
    subpx_shift = center_diff - left_padding

    im = shift(im, subpx_shift)
    im = crop_pad(im, -left_padding, shape)
    im[im < 0] = 0

    assert_almost_equal(center_of_mass(im), center, decimal=2)

    if noise > 0:
        im += np.random.random(shape) * noise * im.max()

    return (im / im.max() * 255).astype(np.uint8)
Example #26

File: lmi.py Project: sargas/dct
def _combine_images(image1, image2, offset=None, clip=True):
    if image1 is None: return image2
    if image2 is None: return image1

    image1, image2, padding_offset = _zero_pad_to_same_size(image1, image2)

    if offset is None:
        offset = find_offset(image1, image2)

    padding_offset = np.asarray(padding_offset)
    offset = np.asarray(offset)

    offset += padding_offset * (-1 if clip else 1)
    offsetx, offsety = [int(x) for x in offset]

    image2 = shift(image2, offset[::-1])
    image = image1 + image2

    keep_range = [ (offsety, None), (offsetx, None) ]
    if offsety < 0: keep_range[0] = (None, offsety)
    if offsetx < 0: keep_range[1] = (None, offsetx)

    if clip:
        image = image[keep_range[0][0]:keep_range[0][1], :]
        image = image[:, keep_range[1][0]:keep_range[1][1] ]
    else:
        image[keep_range[0][1]:keep_range[0][0], :] = 0
        image[:, keep_range[1][1]:keep_range[1][0] ] = 0

    return image
Example #27
def main():
    parser = argparse.ArgumentParser(description='Cross correlate images and return shift necessary to align image2 to image1')
    parser.add_argument('image1',type=str, help='FITS file of image1')
    parser.add_argument('image2',type=str, help='FITS file of image2')
    parser.add_argument('-s',metavar='size',type=int, default=None, help='Specify box size for correlation. Default is the full image, which can be very slow')
    parser.add_argument('-c',metavar=('x_cen', 'y_cen'),type=int,nargs=2, default=None,help="If '-s' specified, optionally include a center for the box region. Default is the center of image1.")
    parser.add_argument('-o',type=str,nargs='?',metavar='outfile',const='-1',default=None,help="If '-o' specified, shift image2 and write to [image2].shft.fits.  If '-o [filename]', shift image2 and write to [filename].")

    args = parser.parse_args()

    print 'Cross-correlating\n\t%s\n\t%s' % (args.image1,args.image2)
    xcorr_im = xcorr(args.image1,args.image2,size=args.s,center=args.c)

    print 'Calculating shift'
    shiftx, shifty = find_shift(xcorr_im)

    print '\t(%i, %i)' % (shiftx, shifty)

    # if outfile specified, perform shift of second image
    if args.o:
        outfile = args.o if args.o != '-1' else \
                  os.path.splitext(args.image2)[0] + '.shft.fits'

        image2, header = pyfits.getdata(args.image2, header=True)
        image2 = shift(image2, (shifty,shiftx), cval=np.nan)
        header['SHFT_REF'] = (args.image1, 'Reference image of shift')
        header['SHFT_X'] = (shiftx, 'X shift pix')
        header['SHFT_Y'] = (shifty, 'Y shift pix')

        print 'Performing shift on %s' % args.image2
        print '\tWriting to %s' % outfile
        pyfits.writeto(outfile,image2,header=header,clobber=True)
    
    return 0
Example #28
def extract(image, y0, x0, y1, x1, mode='nearest', cval=0):
    h, w = image.shape
    ch, cw = y1-y0, x1-x0
    y, x = clip(y0, 0, h-ch), clip(x0, 0, w-cw)
    sub = image[y:y+ch, x:x+cw]
    # print "extract", image.dtype, image.shape
    try:
        return interpolation.shift(sub, (y-y0, x-x0), mode=mode, cval=cval, order=0)
    except RuntimeError:
        # workaround for platform differences between 32bit and 64bit
        # scipy.ndimage
        dtype = sub.dtype
        sub = array(sub, dtype='float64')
        sub = interpolation.shift(sub, (y-y0, x-x0), mode=mode, cval=cval, order=0)
        sub = array(sub, dtype=dtype)
        return sub
Example #29

File: ABuNDAtr.py Project: 3774257/abu
def _calc_atr_from_pd(high, low, close, time_period=14):
    """
    通过atr公式手动计算atr
    :param high: 最高价格序列,pd.Series或者np.array
    :param low: 最低价格序列,pd.Series或者np.array
    :param close: 收盘价格序列,pd.Series或者np.array
    :param time_period: atr的N值默认值14,int
    :return: atr值序列,np.array对象
    """
    if isinstance(close, pd.Series):
        # shift(1) builds yesterday's closing price series
        pre_close = close.shift(1).values
    else:
        from scipy.ndimage.interpolation import shift
        # alternatively, convert to a pd.Series temporarily and shift that
        pre_close = shift(close, 1)
    pre_close[0] = pre_close[1]

    if isinstance(high, pd.Series):
        high = high.values
    if isinstance(low, pd.Series):
        low = low.values

    # |high - low|
    tr_hl = np.abs(high - low)
    # |high - previous close|
    tr_hc = np.abs(high - pre_close)
    # |previous close - low|
    tr_cl = np.abs(pre_close - low)
    # TR = max of |high - low|, |high - previous close|, |previous close - low|
    tr = np.maximum(np.maximum(tr_hl, tr_hc), tr_cl)
    # ATR = MA(TR, N), the N-day simple moving average of TR; here we deviate
    # from the textbook formula and use pd_ewm_mean, an exponentially weighted mean
    atr = pd_ewm_mean(pd.Series(tr), span=time_period, min_periods=1)
    # return the ATR series as an np.array
    return atr.values
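
A quick usage sketch with plain numpy arrays (the function falls back to scipy's shift for the previous-close series; pd_ewm_mean is the module's own moving-average helper):

import numpy as np

high  = np.array([11.0, 12.0, 11.5, 13.0, 12.8])
low   = np.array([10.0, 10.8, 10.9, 11.7, 11.9])
close = np.array([10.5, 11.5, 11.2, 12.5, 12.0])

atr = _calc_atr_from_pd(high, low, close, time_period=3)  # np.array of ATR values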
Example #30
    def shift_to_coords(self, pix, fill_value=np.nan):
        """Create a new map that is shifted to the pixel coordinates
        ``pix``."""

        pix_offset = self.get_offsets(pix)
        dpix = np.zeros(len(self.shape) - 1)
        for i in range(len(self.shape) - 1):
            x = self.rebin * (pix[i] - pix_offset[i + 1]
                              ) + (self.rebin - 1.0) / 2.
            dpix[i] = x - self._pix_ref[i]

        pos = [pix_offset[i] + self.shape[i] // 2
               for i in range(self.data.ndim)]
        s0, s1 = utils.overlap_slices(self.shape_out, self.shape, pos)

        k = np.zeros(self.data.shape)
        for i in range(k.shape[0]):
            k[i] = shift(self._data_spline[i], dpix, cval=np.nan,
                         order=2, prefilter=False)

        for i in range(1, len(self.shape)):
            k = utils.sum_bins(k, i, self.rebin)

        k0 = np.ones(self.shape_out) * fill_value

        if k[s1].size == 0 or k0[s0].size == 0:
            return k0
        k0[s0] = k[s1]
        return k0
Example #31
def transform_img(img,
                  scale=1.0,
                  angle=0.0,
                  tvec=(0, 0),
                  mode="constant",
                  bgval=None,
                  order=1):
    """
    Return translation vector to register images.

    Args:
        img (2D or 3D numpy array): What will be transformed.
            If a 3D array is passed, it is treated in a manner in which RGB
            images are supposed to be handled - i.e. assume that coordinates
            are (Y, X, channels).
            Complex images are handled in a way that treats separately
            the real and imaginary parts.
        scale (float): The scale factor (scale > 1.0 means zooming in)
        angle (float): Degrees of rotation (clock-wise)
        tvec (2-tuple): Pixel translation vector, Y and X component.
        mode (string): The transformation mode (refer to e.g.
            :func:`scipy.ndimage.shift` and its kwarg ``mode``).
        bgval (float): Shade of the background (filling during transformations)
            If None is passed, :func:`imreg_dft.utils.get_borderval` with
            radius of 5 is used to get it.
        order (int): Order of approximation (when doing transformations). 1 =
            linear, 3 = cubic etc. Linear works surprisingly well.

    Returns:
        np.ndarray: The transformed img, may have another
        i.e. (bigger) shape than the source.
    """
    if img.ndim == 3:
        # A bloody painful special case of RGB images
        ret = np.empty_like(img)
        for idx in range(img.shape[2]):
            sli = (slice(None), slice(None), idx)
            ret[sli] = transform_img(img[sli], scale, angle, tvec, mode, bgval,
                                     order)
        return ret
    elif np.iscomplexobj(img):
        decomposed = np.empty(img.shape + (2, ), float)
        decomposed[:, :, 0] = img.real
        decomposed[:, :, 1] = img.imag
        # The bgval makes little sense now, as we decompose the image
        res = transform_img(decomposed, scale, angle, tvec, mode, None, order)
        ret = res[:, :, 0] + 1j * res[:, :, 1]
        return ret

    if bgval is None:
        bgval = utils.get_borderval(img)

    bigshape = np.round(np.array(img.shape) * 1.2).astype(int)
    bg = np.zeros(bigshape, img.dtype) + bgval

    dest0 = utils.embed_to(bg, img.copy())
    # TODO: We have problems with complex numbers
    # that are not supported by zoom(), rotate() or shift()
    if scale != 1.0:
        dest0 = ndii.zoom(dest0, scale, order=order, mode=mode, cval=bgval)
    if angle != 0.0:
        dest0 = ndii.rotate(dest0, angle, order=order, mode=mode, cval=bgval)

    if tvec[0] != 0 or tvec[1] != 0:
        dest0 = ndii.shift(dest0, tvec, order=order, mode=mode, cval=bgval)

    bg = np.zeros_like(img) + bgval
    dest = utils.embed_to(bg, dest0)
    return dest
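
A quick usage sketch (toy image; the utils helpers are the module's own, as referenced above):

import numpy as np

img = np.zeros((64, 64))
img[24:40, 24:40] = 1.0  # a bright square

# zoom in 10%, rotate 15 degrees clockwise, move 3 px down and 2 px left
warped = transform_img(img, scale=1.1, angle=15.0, tvec=(3, -2))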
Example #32

def shift_image(image, dx, dy):
    image = image.reshape((28, 28))
    shifted_image = shift(image, [dy, dx], cval=0, mode="constant")
    return shifted_image.reshape([-1])
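
This is the classic MNIST-style augmentation helper; a usage sketch on a hypothetical flattened digit:

import numpy as np

digit = np.random.rand(784)  # a flattened 28x28 image
augmented = [shift_image(digit, dx, dy)
             for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))]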
Example #33
    def extract_data(self, data = None, length_in_seconds = None, acc_rise_times = None):
        if data is None:
            data = self.raw_acc_data
        else:
            self.raw_acc_data = data
        assert (type(data) == list), "The acceleration measurements need to be a list! It is {}".format(type(data))
        assert (len(data) > 0), "The acceleration data contain 0 data points. No way to extract meaningful data from that"
        if length_in_seconds is None or np.isnan(length_in_seconds):
            length_in_seconds = self.length_in_seconds
        else:
            self.length_in_seconds = length_in_seconds

        if (length_in_seconds is None or np.isnan(length_in_seconds)):
            print("{}: We need to know the time base for the measurements! Nothing extracted.".format(self.name))
            return

        n_data = len(data)
        dt = length_in_seconds / n_data
        t = np.arange(0, n_data*dt, 1*dt, dtype=float)

        # print("{}: t={}s ({} points, {} rise times)".format(self.name, length_in_seconds, n_data, len(acc_rise_times)))

        real_acc = [-(datapoint - self.calibration['level0g']) / self.calibration['scaleg'] for datapoint in data]

        smooth_acc = savgol_filter(real_acc, 101, 3)
        gradient_acc = np.gradient(smooth_acc)
        mean_gradient_acc = np.mean(gradient_acc)
        min_gradient_acc = np.min(gradient_acc)
        max_gradient_acc = np.max(gradient_acc)
        amp_gradient_acc = (max_gradient_acc - min_gradient_acc) / 2

        mean_smooth_acc = np.mean(smooth_acc)
        min_smooth_acc = np.min(smooth_acc)
        max_smooth_acc = np.max(smooth_acc)
        amp_smooth_acc = (max_smooth_acc - min_smooth_acc) / 2

        shift_smooth_acc = shift(smooth_acc, -25, cval=np.NaN)

        t_gradient_acc_positive = savgol_filter(gradient_acc, 7, 3) > (0.5 * amp_gradient_acc + mean_gradient_acc)
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value')
            t_high_acc = shift_smooth_acc > (mean_smooth_acc + amp_smooth_acc * 0.5)

        if acc_rise_times is None:
            peak_times = []
            peaks = t_gradient_acc_positive * t_high_acc
            for time,g in enumerate(peaks):
                if g and not peaks[time - 1]:
                    peak_times.append(time)
            # print("Found rising flanks for positive g at: ", *peak_times)
        else:
            # peak_times = [time * dt for time in acc_rise_times]
            peak_times = acc_rise_times
            # print("Using supplied peak times: ", *peak_times)

        if len(peak_times) == 2 and all(np.isfinite(peak_times)):
            delta_t = t[peak_times[1]] - t[peak_times[0]]
            f = 1 / delta_t
            # print("Period = {:f}s and frequency = {:f}Hz.".format(delta_t, f))
        else:
            # print("More than 2 times found... that's bad!")
            delta_t = None
            f = None

        self.frequency = f
        self.shaking_duration = delta_t

        self.acc_analysis = {}
        self.acc_analysis['frequency'] = f
        self.acc_analysis['shaking_duration'] = delta_t
        self.acc_analysis['peak_times'] = peak_times
        self.acc_analysis['time'] = t

        self.acc_data = {}
        self.acc_data['smooth'] = {
                'acc': smooth_acc,
                'mean': mean_smooth_acc,
                'min': min_smooth_acc,
                'max': max_smooth_acc,
                'amplitude': amp_smooth_acc
                }
        self.acc_data['gradient'] = {
                'acc': gradient_acc,
                'mean': mean_gradient_acc,
                'min': min_gradient_acc,
                'max': max_gradient_acc,
                'amplitude': amp_gradient_acc
                }
        self.acc_data['shift_smooth'] = {
                'acc': shift_smooth_acc,
                'shift': -25
                }
        self.acc_data['real'] = {
                'acc': real_acc
                }
Example #34

def lnlike(fitparams, fma, cov_func, readnoise=False):
    """
    Likelihood function
    Args:
        fitparams: array of params (size N). First three are [dRA,dDec,f]. Additional parameters are GP hyperparams
                    dRA,dDec: RA,Dec offsets from star. Also coordinates in self.data_{RA,Dec}_offset
                    f: flux scale factor to normalize the flux of the data_stamp to the model
        fma (FMAstrometry): a FMAstrometry object that has been fully set up to run
        cov_func (function): function that given an input [x,y] coordinate array returns the covariance matrix
                  e.g. cov = cov_function(x_indices, y_indices, sigmas, cov_params)
        readnoise: boolean. If True, the last fitparam fits for diagonal noise

    Returns:
        likeli: log of likelihood function (minus a constant factor)
    """
    dRA_trial = fitparams[0]
    dDec_trial = fitparams[1]
    f_trial = fitparams[2]
    hyperparms_trial = fitparams[3:]

    if readnoise:
        # last hyperparameter is a diagonal noise term. Separate it out
        readnoise_amp = np.exp(hyperparms_trial[-1])
        hyperparms_trial = hyperparms_trial[:-1]

    # get trial parameters out of log space
    f_trial = math.exp(f_trial)
    hyperparms_trial = np.exp(hyperparms_trial)

    dx = -(dRA_trial - fma.data_stamp_RA_offset_center)
    dy = dDec_trial - fma.data_stamp_Dec_offset_center

    fm_shifted = sinterp.shift(fma.fm_stamp, [dy, dx])

    if fma.padding > 0:
        fm_shifted = fm_shifted[fma.padding:-fma.padding,
                                fma.padding:-fma.padding]

    diff_ravel = fma.data_stamp.ravel() - f_trial * fm_shifted.ravel()

    cov = cov_func(fma.data_stamp_RA_offset.ravel(),
                   fma.data_stamp_Dec_offset.ravel(), fma.noise_map.ravel(),
                   hyperparms_trial)

    if readnoise:
        # add a diagonal term
        cov = (1 - readnoise_amp) * cov + readnoise_amp * np.diagflat(
            fma.noise_map.ravel()**2)

    # solve Cov * x = diff for x = Cov^-1 diff. Numerically more stable than inverse.
    # To make it faster, we compute the Cholesky factor and use it to also compute the determinant
    try:
        (L_cov, lower_cov) = linalg.cho_factor(cov)
        cov_inv_dot_diff = linalg.cho_solve(
            (L_cov, lower_cov), diff_ravel)  # solve Cov x = diff for x
        # compute log(det(Cov)) from the Cholesky factor
        logdet = 2 * np.sum(np.log(np.diag(L_cov)))
    except linalg.LinAlgError:
        # fall back to an explicit inverse; get log(det(Cov)) without the factor
        cov_inv = np.linalg.inv(cov)
        cov_inv_dot_diff = np.dot(cov_inv, diff_ravel)
        logdet = np.linalg.slogdet(cov)[1]
    residuals = diff_ravel.dot(cov_inv_dot_diff)

    constant = logdet

    return -0.5 * (residuals + constant)
Example #35
def shift_digit(digit_array, dx, dy, new=0):
    return shift(digit_array.reshape(28, 28), [dy, dx], cval=new).reshape(784)
Example #36

    def rot_trans(self, regex, split_on, in_folder, out_folder):
        counter = 0
        for directory in os.walk(in_folder):
            # Walk inside the directory
            for file in directory[2]:
                # Match all files ending with 'regex'
                input_file = os.path.join(directory[0], file)
                if re.search(regex, input_file):
                    for direction in ['x', 'y', 'z']:
                        x_axis = y_axis = z_axis = 0
                        if direction == 'x':
                            x_axis = 1
                        if direction == 'y':
                            y_axis = 1
                        if direction == 'z':
                            z_axis = 1
                        output_file = out_folder + str(
                            input_file.rsplit('/', 1)[1])
                        output_path = output_file.split(split_on)[0]
                        output_file = output_path + \
                                        '_sub_rot3_{0}.nii.gz'.format(
                                            direction)

                        rot_matrix = 'rot3_{0}.mat'.format(direction)
                        angle_rot = 3 if random.uniform(0, 1) > 0.5 \
                            else -3
                        print("Rotating image: " + input_file)
                        subprocess.call('makerot -c {0},{1},{2} -a {3},{4},'
                                        '{5} -t {6} -o {7}'.format(
                                            self.params['dim']['x'] / 2,
                                            self.params['dim']['y'] / 2,
                                            self.params['dim']['z'] / 2,
                                            x_axis, y_axis, z_axis, angle_rot,
                                            rot_matrix),
                                        shell=True)
                        subprocess.call('flirt -in {0} -ref {1} -out '
                                        '{2} -applyxfm -init {3}'.format(
                                            input_file, input_file,
                                            output_file, rot_matrix),
                                        shell=True)
                        print("Generated image: " + output_file)

                        # Translating input image
                        mri_image = nb.load(input_file)
                        aff = mri_image.get_affine()
                        mri_image = mri_image.get_data()
                        translated_image = 0

                        # Translating rotated image
                        mri_image_rot = nb.load(output_file)
                        aff_rot = mri_image_rot.get_affine()
                        mri_image_rot = mri_image_rot.get_data()
                        translated_image_rot = 0

                        if x_axis == 1:
                            print("Translating in x-axis")
                            translated_image = shift(mri_image, [3, 0, 0],
                                                     mode='nearest')
                            translated_image_rot = shift(mri_image_rot,
                                                         [3, 0, 0],
                                                         mode='nearest')
                        if y_axis == 1:
                            print("Translating in y-axis")
                            translated_image = shift(mri_image, [0, 3, 0],
                                                     mode='nearest')
                            translated_image_rot = shift(mri_image_rot,
                                                         [0, 3, 0],
                                                         mode='nearest')
                        if z_axis == 1:
                            print("Translating in z-axis")
                            translated_image = shift(mri_image, [0, 0, 3],
                                                     mode='nearest')
                            translated_image_rot = shift(mri_image_rot,
                                                         [0, 0, 3],
                                                         mode='nearest')
                        im = nb.Nifti1Image(translated_image, affine=aff)
                        output_file = output_path + \
                                      '_sub_trans3_{0}.nii.gz'.format(
                                          direction)
                        nb.save(im, output_file)
                        print("Saving to " + output_file)

                        im_rot = nb.Nifti1Image(translated_image_rot,
                                                affine=aff_rot)
                        output_file = output_path + \
                                      '_sub_rot3_trans3_{0}.nii.gz'.format(
                                          direction)
                        nb.save(im_rot, output_file)
                        print("Saving to " + output_file)

                        counter += 1
                        print('File No.: ' + str(counter) +
                              ' Generated output: ' + output_file)
Example #37

def find_first_rows_groups(df_series_col):
    col_array = np.array(df_series_col)
    col_array_shifted = shift(col_array, 1, cval=np.NaN)
    first_row_mask = col_array != col_array_shifted

    return first_row_mask
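
A tiny worked example: each value is compared with its NaN-padded shift, so every run boundary (and row 0, via the leading NaN) is flagged:

import pandas as pd

col = pd.Series([1.0, 1.0, 2.0, 2.0, 2.0, 3.0])
mask = find_first_rows_groups(col)  # expect True at each run's first row: 0, 2, 5
first_rows = col[mask]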
Example #38
File: sas.py Project: ehsteve/heroespy
def test_ras_find_relative_angle(array):
    # apply a shift of 100 pixels in the x direction
    shifted_array = shift(r.data, (0, 100))
    # resize the array to get the solution faster
    resize_fraction
Example #39
def main():
    gdal.AllRegister()
    path = auxil.select_directory('Working directory')
    if path:
        os.chdir(path)
    file0 = auxil.select_infile(title='Base image')
    if file0:
        inDataset0 = gdal.Open(file0, GA_ReadOnly)
        cols0 = inDataset0.RasterXSize
        rows0 = inDataset0.RasterYSize
        print 'Base image: %s' % file0
    else:
        return
    rasterBand = inDataset0.GetRasterBand(1)
    span0 = rasterBand.ReadAsArray(0, 0, cols0, rows0)
    rasterBand = inDataset0.GetRasterBand(4)
    span0 += 2 * rasterBand.ReadAsArray(0, 0, cols0, rows0)
    rasterBand = inDataset0.GetRasterBand(6)
    span0 += rasterBand.ReadAsArray(0, 0, cols0, rows0)
    span0 = log(real(span0))
    inDataset0 = None
    file1 = auxil.select_infile(title='Warp image')
    if file1:
        inDataset1 = gdal.Open(file1, GA_ReadOnly)
        cols1 = inDataset1.RasterXSize
        rows1 = inDataset1.RasterYSize
        print 'Warp image: %s' % file1
    else:
        return
    outfile, fmt = auxil.select_outfilefmt()
    if not outfile:
        return
    image1 = zeros((6, rows1, cols1), dtype=cfloat)
    for k in range(6):
        band = inDataset1.GetRasterBand(k + 1)
        image1[k, :, :] = band.ReadAsArray(0, 0, cols1, rows1).astype(cfloat)
    inDataset1 = None
    span1 = sum(image1[[0, 3, 5], :, :], axis=0) + image1[3, :, :]
    span1 = log(real(span1))
    scale, angle, shift = auxil.similarity(span0, span1)
    tmp_real = zeros((6, rows0, cols0))
    tmp_imag = zeros((6, rows0, cols0))
    for k in range(6):
        bn1 = real(image1[k, :, :])
        bn2 = ndii.zoom(bn1, 1.0 / scale)
        bn2 = ndii.rotate(bn2, angle)
        bn2 = ndii.shift(bn2, shift)
        tmp_real[k, :, :] = bn2[0:rows0, 0:cols0]
        bn1 = imag(image1[k, :, :])
        bn2 = ndii.zoom(bn1, 1.0 / scale)
        bn2 = ndii.rotate(bn2, angle)
        bn2 = ndii.shift(bn2, shift)
        tmp_imag[k, :, :] = bn2[0:rows0, 0:cols0]
    image2 = tmp_real + 1j * tmp_imag
    driver = gdal.GetDriverByName(fmt)
    outDataset = driver.Create(outfile, cols0, rows0, 6, GDT_CFloat32)
    for k in range(6):
        outBand = outDataset.GetRasterBand(k + 1)
        outBand.WriteArray(image2[k, :, :], 0, 0)
        outBand.FlushCache()
    outDataset = None
    print 'Warped image written to: %s' % outfile
Example #40
def register(fn1, fn2, warpband, dims1=None, outfile=None):
    gdal.AllRegister()
    print '--------------------------------'
    print '        Register'
    print '--------------------------------'
    print time.asctime()
    print 'reference image: ' + fn1
    print 'warp image: ' + fn2
    print 'warp band: %i' % warpband

    start = time.time()
    try:
        if outfile is None:
            path2 = os.path.dirname(fn2)
            basename2 = os.path.basename(fn2)
            root2, ext2 = os.path.splitext(basename2)
            outfile = path2 + '/' + root2 + '_warp' + ext2
        inDataset1 = gdal.Open(fn1, GA_ReadOnly)
        inDataset2 = gdal.Open(fn2, GA_ReadOnly)
        try:
            cols1 = inDataset1.RasterXSize
            rows1 = inDataset1.RasterYSize
            cols2 = inDataset2.RasterXSize
            rows2 = inDataset2.RasterYSize
            bands2 = inDataset2.RasterCount
        except Exception as e:
            print 'Error %s -- image could not be read in' % e
            sys.exit(1)
        if dims1 is None:
            x0 = 0
            y0 = 0
        else:
            x0, y0, cols1, rows1 = dims1

        band = inDataset1.GetRasterBand(warpband)
        refband = band.ReadAsArray(x0, y0, cols1, rows1).astype(np.float32)
        band = inDataset2.GetRasterBand(warpband)
        warpband = band.ReadAsArray(x0, y0, cols1, rows1).astype(np.float32)

        #  similarity transform parameters for reference band number
        scale, angle, shift = similarity(refband, warpband)

        driver = inDataset2.GetDriver()
        outDataset = driver.Create(outfile, cols1, rows1, bands2, GDT_Float32)
        projection = inDataset1.GetProjection()
        geotransform = inDataset1.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform)
            gt[0] = gt[0] + x0 * gt[1]
            gt[3] = gt[3] + y0 * gt[5]
            outDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            outDataset.SetProjection(projection)

    #  warp
        for k in range(bands2):
            inband = inDataset2.GetRasterBand(k + 1)
            outBand = outDataset.GetRasterBand(k + 1)
            bn1 = inband.ReadAsArray(0, 0, cols2, rows2).astype(np.float32)
            bn2 = ndii.zoom(bn1, 1.0 / scale)
            bn2 = ndii.rotate(bn2, angle)
            bn2 = ndii.shift(bn2, shift)
            outBand.WriteArray(bn2[y0:y0 + rows1, x0:x0 + cols1])
            outBand.FlushCache()
        inDataset1 = None
        inDataset2 = None
        outDataset = None
        print 'Warped image written to: %s' % outfile
        print 'elapsed time: %s' % str(time.time() - start)
        return outfile
    except Exception as e:
        print 'registersms failed: %s' % e
        return None
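
Example #39's main() and Example #40's register() apply the recovered similarity parameters in the same order: zoom by 1/scale, rotate, then shift, and finally clip to the reference frame. The chain in isolation (the parameter values below are invented; in the snippets they come from auxil.similarity):

import numpy as np
import scipy.ndimage as ndi

band = np.random.rand(200, 200).astype(np.float32)   # stand-in for a warp band
scale, angle, offset = 1.02, 0.7, (3.5, -2.0)        # zoom factor, degrees, (row, col) pixels

warped = ndi.zoom(band, 1.0 / scale)   # undo the scale difference
warped = ndi.rotate(warped, angle)     # rotate; reshape=True grows the array
warped = ndi.shift(warped, offset)     # translate into alignment
warped = warped[:200, :200]            # clip back to the reference dimensions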
Example #41
def fitFocusMultiPRF(flux, ydim, xdim, column, row, prfn, crval1p, crval2p,
                     cdelt1p, cdelt2p, interpolation, tol, ftol, fluxes,
                     columns, rows, bkg, wfac, verbose, logfile):
    """Fit multi- PRF model + constant background with focus variations to
    Kepler pixel mask data
    """

    # construct input summed image
    imgflux = np.asarray(flux).reshape(ydim, xdim)
    # interpolate the calibrated PRF shape to the target position

    prf = np.zeros(np.shape(prfn[0]), dtype='float32')
    prfWeight = np.zeros((5), dtype='float32')
    for i in range(5):
        prfWeight[i] = math.sqrt((column - crval1p[i])**2 +
                                 (row - crval2p[i])**2)
        if prfWeight[i] == 0.0:
            prfWeight[i] = 1.0e6
        prf = prf + prfn[i] / prfWeight[i]
    prf = prf / nansum(prf)

    # dimensions of data image
    datDimY = np.shape(imgflux)[0]
    datDimX = np.shape(imgflux)[1]

    # center of the data image (in CCD pixel units)
    datCenY = row + float(datDimY) / 2 - 0.5
    datCenX = column + float(datDimX) / 2 - 0.5

    f, y, x = fluxes, rows, columns
    b, w = bkg, wfac

    # initial guess for fit parameters
    guess = []
    if len(x) != len(y) or len(x) != len(f):
        errmsg = ('ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and '
                  'fluxes must have the same number of sources')
        kepmsg.err(logfile, errmsg, verbose)
    else:
        for i in range(len(fluxes)):
            guess.append(float(fluxes[i]))
            guess.append((float(rows[i]) - datCenY) / cdelt2p[0])
            guess.append((float(columns[i]) - datCenX) / cdelt1p[0])
    guess.append(b)
    guess.append(w)

    # fit input image with model
    f, x, y = [], [], []
    nsrc = (len(guess) - 2) // 3
    args = (imgflux, prf, cdelt1p[0], cdelt2p[0], datDimY, datDimX,
            interpolation, verbose)
    ans = fmin_powell(kepfunc.kepler_focus_multi_prf_2d,
                      guess,
                      args=args,
                      xtol=tol,
                      ftol=ftol,
                      disp=False)
    for i in range(nsrc):
        f.append(ans[i])
        y.append(ans[nsrc + i])
        x.append(ans[nsrc * 2 + i])
    b = ans[nsrc * 3]
    w = ans[nsrc * 3 + 1]
    print(ans)
    print(f, y, x, b, w)

    # calculate best-fit model
    prfDimY = datDimY / cdelt1p[0] / w
    prfDimX = datDimX / cdelt2p[0] / w
    prfY0 = (np.shape(prf)[0] - prfDimY) / 2
    prfX0 = (np.shape(prf)[1] - prfDimX) / 2
    DY, DX = 0.0, 0.0
    if int(prfDimY) % 2 == 0:
        DY = 1.0
    if int(prfDimX) % 2 == 0:
        DX = 1.0
    print(w, prfDimY, prfDimX)
    prfMod = np.zeros((prfDimY + DY, prfDimX + DX))
    for i in range(nsrc):
        prfTmp = shift(prf, [y[i] / w, x[i] / w], order=1, mode='constant')
        prfMod = (prfMod +
                  prfTmp[prfY0:prfY0 + prfDimY, prfX0:prfX0 + prfDimX] * f[i])
    prfFit = keparray.rebin2D(
        prfMod,
        [np.shape(imgflux)[0], np.shape(imgflux)[1]], interpolation, True,
        False)
    prfFit = prfFit / cdelt1p[0] / cdelt2p[0] / w / w
    prfFit = prfFit + b

    # calculate residual between data and model
    prfRes = imgflux - prfFit

    # convert PRF pixels sizes to CCD pixel sizes
    for i in range(nsrc):
        y[i] = y[i] * cdelt1p[0] * w + datCenY
        x[i] = x[i] * cdelt2p[0] * w + datCenX

    return f, y, x, b, w, prfMod, prfFit, prfRes
Example #42
def fitMultiPRF(flux, ydim, xdim, column, row, prfn, crval1p, crval2p, cdelt1p,
                cdelt2p, interpolation, tol, ftol, fluxes, columns, rows, mode,
                verbose, logfile):
    """Fit multi-PRF model to Kepler pixel mask data"""

    # construct input summed image
    imgflux = np.asarray(flux).reshape(ydim, xdim)

    # interpolate the calibrated PRF shape to the target position
    prf = np.zeros(np.shape(prfn[0]), dtype='float32')
    prfWeight = np.zeros((5), dtype='float32')
    for i in range(5):
        prfWeight[i] = math.sqrt((column - crval1p[i])**2 +
                                 (row - crval2p[i])**2)
        if prfWeight[i] == 0.0:
            prfWeight[i] = 1.0e6
        prf = prf + prfn[i] / prfWeight[i]
    prf = prf / nansum(prf)

    # dimensions of data image
    datDimY = np.shape(imgflux)[0]
    datDimX = np.shape(imgflux)[1]

    # dimensions of data image if it had PRF-sized pixels
    prfDimY = datDimY / cdelt1p[0]
    prfDimX = datDimX / cdelt2p[0]

    # center of the data image (in CCD pixel units)
    datCenY = row + float(datDimY) / 2 - 0.5
    datCenX = column + float(datDimX) / 2 - 0.5

    # location of the data image centered on the PRF image (in PRF pixel units)
    prfY0 = (np.shape(prf)[0] - prfDimY) / 2
    prfX0 = (np.shape(prf)[1] - prfDimX) / 2

    # initial guess for fit parameters
    guess = []
    if len(columns) != len(rows) or len(columns) != len(fluxes):
        errmsg = ('ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and '
                  'fluxes must have the same number of sources')
        kepmsg.err(logfile, errmsg, verbose)
    else:
        for i in range(len(fluxes)):
            guess.append(float(fluxes[i]))
            guess.append((float(rows[i]) - datCenY) / cdelt2p[0])
            guess.append((float(columns[i]) - datCenX) / cdelt1p[0])

    # fit input image with model
    f, x, y = [], [], []
    nsrc = len(guess) // 3
    args = (imgflux, prf, cdelt1p[0], cdelt2p[0], prfDimY, prfDimX, prfY0,
            prfX0, interpolation, verbose)
    if mode == '2D' and nsrc == 1:
        ans = fmin_powell(kepfunc.kepler_prf_2d,
                          guess,
                          args=args,
                          xtol=tol,
                          ftol=ftol,
                          disp=False)
        f.append(ans[0])
        y.append(ans[1])
        x.append(ans[2])
    elif mode == '1D' and nsrc == 1:
        guess.insert(0, guess[0])
        ans = fmin_powell(kepfunc.kepler_prf_1d,
                          guess,
                          args=args,
                          xtol=tol,
                          ftol=ftol,
                          disp=False)
        f.append((ans[0] + ans[1]) / 2)
        y.append(ans[2])
        x.append(ans[3])
    else:
        ans = fmin_powell(kepfunc.kepler_multi_prf_2d,
                          guess,
                          args=args,
                          xtol=tol,
                          ftol=ftol,
                          disp=False)
        for i in range(nsrc):
            f.append(ans[i])
            y.append(ans[nsrc + i])
            x.append(ans[nsrc * 2 + i])

    # calculate best-fit model
    prfMod = np.zeros((prfDimY + 1, prfDimX + 1))
    for i in range(nsrc):
        prfTmp = shift(prf, [y[i], x[i]], order=1, mode='constant')
        prfTmp = prfTmp[prfY0:prfY0 + prfDimY, prfX0:prfX0 + prfDimX]
        prfMod = prfMod + prfTmp * f[i]
    prfFit = keparray.rebin2D(prfMod, [
        np.shape(imgflux)[0], np.shape(imgflux)[1]
    ], interpolation, True, False) / cdelt1p[0] / cdelt2p[0]

    prfRes = imgflux - prfFit

    # convert PRF pixels sizes to CCD pixel sizes
    for i in range(nsrc):
        y[i] = y[i] * cdelt1p[0] + datCenY
        x[i] = x[i] * cdelt2p[0] + datCenX

    return f, y, x, prfMod, prfFit, prfRes
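
Examples #41 and #42 share the PRF interpolation step: each of the five calibrated PRFs is weighted by the inverse of its distance to the target position and the blend is normalised. The step in isolation (all shapes and positions below are invented):

import numpy as np

prfn = [np.random.rand(11, 11) for _ in range(5)]   # five calibrated PRFs
crval1p = np.array([100., 900., 500., 100., 900.])  # column of each PRF's origin
crval2p = np.array([100., 100., 500., 900., 900.])  # row of each PRF's origin
column, row = 400.0, 650.0                          # target position

prf = np.zeros_like(prfn[0])
for i in range(5):
    d = np.hypot(column - crval1p[i], row - crval2p[i])
    if d == 0.0:
        d = 1e-6  # note: the snippets set the distance to 1.0e6 here, which nearly ignores an exact match
    prf += prfn[i] / d
prf /= np.nansum(prf)  # normalise once, after blending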
Example #43
def diff(x, n, remove_nan=True):
    r = x - shift(x, n, cval=np.NaN)
    if remove_nan:
        r = r[~np.isnan(r)]
    return r
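
For instance (made-up series), the NaN produced by the pad is either stripped or kept:

import numpy as np

x = np.array([1.0, 4.0, 9.0, 16.0])
diff(x, 1)                     # expected: array([3., 5., 7.])
diff(x, 1, remove_nan=False)   # expected: array([nan, 3., 5., 7.])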
Example #44
def shift_up(X, y):
    shifted_X = shift(X, [-1, 0], cval=0)
    return shifted_X, y
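
This is a typical label-preserving augmentation: the whole image moves up one row and the vacated bottom row is zero-filled. On a made-up 3x3 sample:

import numpy as np

X = np.arange(9.0).reshape(3, 3)
X_up, y = shift_up(X, y='label')   # y passes through unchanged
# expected X_up:
# [[3., 4., 5.],
#  [6., 7., 8.],
#  [0., 0., 0.]]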
Example #45
def register(file0, file1, dims=None, outfile=None):
    import auxil.auxil1 as auxil
    import os, time
    import numpy as np
    from osgeo import gdal
    import scipy.ndimage.interpolation as ndii
    from osgeo.gdalconst import GA_ReadOnly, GDT_Float32

    print('=========================')
    print('       Register SAR')
    print('=========================')
    print(time.asctime())
    try:
        if outfile is None:
            dirn = os.path.dirname(os.path.abspath(file1))
            basename = os.path.basename(file1)
            root, ext = os.path.splitext(basename)
            outfile = dirn + '/' + root + '_warp' + ext
        start = time.time()
        gdal.AllRegister()
        #  reference
        inDataset0 = gdal.Open(file0, GA_ReadOnly)
        cols = inDataset0.RasterXSize
        rows = inDataset0.RasterYSize
        bands = inDataset0.RasterCount
        print('Reference SAR image:\n %s' % file0)
        if dims is None:
            dims = [0, 0, cols, rows]
        x0, y0, cols, rows = dims
        #  target
        inDataset1 = gdal.Open(file1, GA_ReadOnly)
        cols1 = inDataset1.RasterXSize
        rows1 = inDataset1.RasterYSize
        bands1 = inDataset1.RasterCount
        print('Target SAR image:\n %s' % file1)
        if bands != bands1:
            print('Number of bands must be equal')
            return 0
    #  create the output file
        driver = inDataset1.GetDriver()
        outDataset = driver.Create(outfile, cols, rows, bands, GDT_Float32)
        projection0 = inDataset0.GetProjection()
        geotransform0 = inDataset0.GetGeoTransform()
        geotransform1 = inDataset1.GetGeoTransform()
        gt0 = list(geotransform0)
        gt1 = list(geotransform1)
        if projection0 is not None:
            outDataset.SetProjection(projection0)
    #  find the upper left corner (x0,y0) of reference subset in target (x1,y1)
        ulx0 = gt0[0] + x0 * gt0[1] + y0 * gt0[2]
        uly0 = gt0[3] + x0 * gt0[4] + y0 * gt0[5]
        GT1 = np.mat([[gt1[1], gt1[2]], [gt1[4], gt1[5]]])
        ul1 = np.mat([[ulx0 - gt1[0]], [uly0 - gt1[3]]])
        tmp = GT1.I * ul1
        x1 = int(round(tmp[0, 0]))
        y1 = int(round(tmp[1, 0]))
        #  create output geotransform
        gt1 = gt0
        gt1[0] = ulx0
        gt1[3] = uly0
        outDataset.SetGeoTransform(tuple(gt1))
        #  get matching subsets from geotransform
        rasterBand = inDataset0.GetRasterBand(1)
        span0 = rasterBand.ReadAsArray(x0, y0, cols, rows)
        rasterBand = inDataset1.GetRasterBand(1)
        span1 = rasterBand.ReadAsArray(x1, y1, cols, rows)
        if bands == 9:
            #      get warp parameters using span images
            print('warping 9 bands (quad pol)...')
            rasterBand = inDataset0.GetRasterBand(6)
            span0 += rasterBand.ReadAsArray(x0, y0, cols, rows)
            rasterBand = inDataset0.GetRasterBand(9)
            span0 += rasterBand.ReadAsArray(x0, y0, cols, rows)
            span0 = np.log(np.nan_to_num(span0) + 0.001)
            rasterBand = inDataset1.GetRasterBand(6)
            span1 += rasterBand.ReadAsArray(x1, y1, cols, rows)
            rasterBand = inDataset1.GetRasterBand(9)
            span1 += rasterBand.ReadAsArray(x1, y1, cols, rows)
            span1 = np.log(np.nan_to_num(span1) + 0.001)
            scale, angle, shift = auxil.similarity(span0, span1)
            #      warp the target to the reference and clip
            for k in range(9):
                rasterBand = inDataset1.GetRasterBand(k + 1)
                band = rasterBand.ReadAsArray(0, 0, cols1,
                                              rows1).astype(np.float32)
                bn1 = np.nan_to_num(band)
                bn2 = ndii.zoom(bn1, 1.0 / scale)
                bn2 = ndii.rotate(bn2, angle)
                bn2 = ndii.shift(bn2, shift)
                bn = bn2[y1:y1 + rows, x1:x1 + cols]
                outBand = outDataset.GetRasterBand(k + 1)
                outBand.WriteArray(bn)
                outBand.FlushCache()
        elif bands == 4:
            #      get warp parameters using span images
            print('warping 4 bands (dual pol)...')
            rasterBand = inDataset0.GetRasterBand(4)
            span0 += rasterBand.ReadAsArray(x0, y0, cols, rows)
            span0 = np.log(np.nan_to_num(span0) + 0.001)
            rasterBand = inDataset1.GetRasterBand(4)
            span1 += rasterBand.ReadAsArray(x1, y1, cols, rows)
            span1 = np.log(np.nan_to_num(span1) + 0.001)
            scale, angle, shift = auxil.similarity(span0, span1)
            #      warp the target to the reference and clip
            for k in range(4):
                rasterBand = inDataset1.GetRasterBand(k + 1)
                band = rasterBand.ReadAsArray(0, 0, cols1,
                                              rows1).astype(np.float32)
                bn1 = np.nan_to_num(band)
                bn2 = ndii.zoom(bn1, 1.0 / scale)
                bn2 = ndii.rotate(bn2, angle)
                bn2 = ndii.shift(bn2, shift)
                bn = bn2[y1:y1 + rows, x1:x1 + cols]
                outBand = outDataset.GetRasterBand(k + 1)
                outBand.WriteArray(bn)
                outBand.FlushCache()
        elif bands == 3:
            #      get warp parameters using span images
            print('warping 3 bands (quad pol diagonal)...')
            rasterBand = inDataset0.GetRasterBand(2)
            span0 += rasterBand.ReadAsArray(x0, y0, cols, rows)
            rasterBand = inDataset0.GetRasterBand(3)
            span0 += rasterBand.ReadAsArray(x0, y0, cols, rows)
            span0 = np.log(np.nan_to_num(span0) + 0.001)
            rasterBand = inDataset1.GetRasterBand(2)
            span1 += rasterBand.ReadAsArray(x1, y1, cols, rows)
            rasterBand = inDataset1.GetRasterBand(3)
            span1 += rasterBand.ReadAsArray(x1, y1, cols, rows)
            span1 = np.log(np.nan_to_num(span1) + 0.001)
            scale, angle, shift = auxil.similarity(span0, span1)
            #      warp the target to the reference and clip
            for k in range(3):
                rasterBand = inDataset1.GetRasterBand(k + 1)
                band = rasterBand.ReadAsArray(0, 0, cols1,
                                              rows1).astype(np.float32)
                bn1 = np.nan_to_num(band)
                bn2 = ndii.zoom(bn1, 1.0 / scale)
                bn2 = ndii.rotate(bn2, angle)
                bn2 = ndii.shift(bn2, shift)
                bn = bn2[y1:y1 + rows, x1:x1 + cols]
                outBand = outDataset.GetRasterBand(k + 1)
                outBand.WriteArray(bn)
                outBand.FlushCache()
        elif bands == 2:
            #      get warp parameters using span images
            print('warping 2 bands (dual pol diagonal)...')
            rasterBand = inDataset0.GetRasterBand(2)
            span0 += rasterBand.ReadAsArray(x0, y0, cols, rows)
            span0 = np.log(np.nan_to_num(span0) + 0.001)
            rasterBand = inDataset1.GetRasterBand(2)
            span1 += rasterBand.ReadAsArray(x1, y1, cols, rows)
            span1 = np.log(np.nan_to_num(span1) + 0.001)
            scale, angle, shift = auxil.similarity(span0, span1)
            #      warp the target to the reference and clip
            for k in range(2):
                rasterBand = inDataset1.GetRasterBand(k + 1)
                band = rasterBand.ReadAsArray(0, 0, cols1,
                                              rows1).astype(np.float32)
                bn1 = np.nan_to_num(band)
                bn2 = ndii.zoom(bn1, 1.0 / scale)
                bn2 = ndii.rotate(bn2, angle)
                bn2 = ndii.shift(bn2, shift)
                bn = bn2[y1:y1 + rows, x1:x1 + cols]
                outBand = outDataset.GetRasterBand(k + 1)
                outBand.WriteArray(bn)
                outBand.FlushCache()
        elif bands == 1:
            #      get warp parameters using span images
            print('warping 1 band (single pol)...')
            span0 = np.log(np.nan_to_num(span0) + 0.001)
            span1 = np.log(np.nan_to_num(span1) + 0.001)
            scale, angle, shift = auxil.similarity(span0, span1)
            #      warp the target to the reference and clip
            for k in range(1):
                rasterBand = inDataset1.GetRasterBand(k + 1)
                band = rasterBand.ReadAsArray(0, 0, cols1,
                                              rows1).astype(np.float32)
                bn1 = np.nan_to_num(band)
                bn2 = ndii.zoom(bn1, 1.0 / scale)
                bn2 = ndii.rotate(bn2, angle)
                bn2 = ndii.shift(bn2, shift)
                bn = bn2[y1:y1 + rows, x1:x1 + cols]
                outBand = outDataset.GetRasterBand(k + 1)
                outBand.WriteArray(bn)
                outBand.FlushCache()
        inDataset0 = None
        inDataset1 = None
        outDataset = None
        print('Warped image written to: %s' % outfile)
        print('elapsed time: ' + str(time.time() - start))
        return outfile
    except Exception as e:
        print('registersar failed: %s' % e)
        return None

def find_diff_prev_row(df_series_col):
    col_array = np.array(df_series_col)
    col_array_shifted = shift(col_array, 1, cval=np.NaN)
    col_diff = abs(col_array - col_array_shifted)

    return col_diff
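
Companion to find_first_rows_groups above: the absolute change from the previous row, with NaN in the first slot (sample values invented):

import numpy as np

vals = np.array([10.0, 12.0, 12.0, 7.0])
find_diff_prev_row(vals)   # expected: array([nan, 2., 0., 5.])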
Example #47
File: vmi.py Project: stggh/PyAbel
def find_image_center_by_slice(IM,
                               slice_width=10,
                               radial_range=(0, -1),
                               axis=(0, 1)):
    """ Center image by comparing opposite side, vertical (axis=1) and/or 
        horizontal slice (axis=0) profiles, both axis=(0,1).. 

    Parameters
    ----------
    IM : 2D np.array
         The image data.
 
    slice_width : integer
      Add together this number of rows (cols) to improve signal, default 10.
      
    radial_range: tuple
      (rmin,rmax): radial range [rmin:rmax] for slice profile comparison

    axis : integer or tuple 
      Center with respect to axis = 0 (horizontal), or 1 (vertical), or (0,1).

    Returns
    -------
    IMcenter : 2D np.array
      Centered image

    tuple of floats
      (vertical axis=1 shift, horizontal axis=0 shift)
   
    """
    def _align(offset, sliceA, sliceB):
        """intensity difference between an axial slice and its shifted opposite.
        """
        diff = shift(sliceA, offset) - sliceB
        fvec = (diff**2).sum()
        return fvec

    rows, cols = IM.shape

    if cols % 2 == 0:
        # drop left most column, and bottom row to make odd size
        IM = IM[:-1, 1:]
        rows, cols = IM.shape

    odd_rows = rows % 2  # used to correct image split
    odd_cols = cols % 2

    r2 = rows // 2 + odd_rows
    c2 = cols // 2 + odd_cols

    sw2 = slice_width // 2  # slice thickness +- sw2

    # compare and determine shift to best overlap slice profiles
    rmin, rmax = radial_range

    if isinstance(axis, int):
        axis = (axis,)

    xyoffset = np.zeros(2)
    for ax in axis:
        if ax == 0:
            # horizontal data, sum columns for slice profile
            sliceA = IM[:r2 + odd_rows, c2 - sw2:c2 + sw2].sum(axis=1)
            sliceB = IM[r2:, c2 - sw2:c2 + sw2].sum(axis=1)
        else:
            # vertical data, sum rows for slice profile
            sliceA = IM[r2 - sw2:r2 + sw2, :c2 + odd_cols].sum(axis=0)
            sliceB = IM[r2 - sw2:r2 + sw2, c2:].sum(axis=0)

        # flip sliceA so it runs in the same direction as sliceB
        sliceA = sliceA[::-1]

        # selected region [rmin:rmax] to compare
        sliceA = sliceA[rmin:rmax]
        sliceB = sliceB[rmin:rmax]

        # determine the shift that aligns the two slices,
        # limiting the search to +/- 50 pixels
        initial_shift = [0.1]

        fit = minimize(_align,
                       initial_shift,
                       args=(sliceA, sliceB),
                       bounds=((-50, 50), ),
                       tol=1)

        if fit["success"]:
            xyoffset[ax] = -float(fit['x']) / 2  # x1/2 for image center shift
        else:
            print("fit failure: axis = {:d}, zero shift set".format(ax))
            print(fit)

    xyoffset = tuple(xyoffset)

    IM_centered = shift(IM, xyoffset)  # center image

    return IM_centered, xyoffset
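
The nested _align helper is the heart of the search: it scores an offset by the squared difference between a shifted slice and its opposite. A self-contained sketch of that minimisation on synthetic profiles (everything below is invented; minimize is scipy.optimize.minimize, as the snippet assumes):

import numpy as np
from scipy.ndimage import shift
from scipy.optimize import minimize

r = np.arange(50.0)
sliceA = np.exp(-((r - 20.0) / 6.0) ** 2)   # one half-profile, already flipped
sliceB = np.exp(-((r - 26.0) / 6.0) ** 2)   # the opposite half-profile

def align(offset, sliceA, sliceB):
    return ((shift(sliceA, offset) - sliceB) ** 2).sum()

fit = minimize(align, [0.1], args=(sliceA, sliceB), bounds=((-50, 50),))
# fit.x should approach [6.], so the image-centre correction is -fit.x / 2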
Example #49
def get_redis_data(db):
    print("DB_NO:", db_no)
    r = redis.Redis(host=host, port=6379, db=db_no, decode_responses=True)
    result = r.zrangebyscore(db, start_stp, end_stp, withscores=True)
    close_tmp, high_tmp, low_tmp = [], [], []
    time_tmp = []
    score_tmp = []
    spread_tmp = []
    payout_tmp = {}
    spread_dict = {}

    print(result[0:5])
    print(result[-5:])
    #result.reverse()
    #print(index)
    indicies = np.ones(len(index), dtype=np.int32)
    # exclude data within +/- 1 hour of economic indicator announcements from the prediction targets
    for i, ind in enumerate(index):
        tmp_datetime = datetime.strptime(ind, '%Y-%m-%d %H:%M:%S')
        indicies[i] = int(time.mktime(tmp_datetime.timetuple()))

    for line in result:
        body = line[0]
        score = line[1]
        tmps = json.loads(body)
        close_t = tmps.get("close")
        close_tmp.append(tmps.get("close"))
        time_tmp.append(tmps.get("time"))
        score_tmp.append(score)
        spr = tmps.get("spreadAus") / 100
        spread_tmp.append(spr)

        if spr in spread_dict.keys():
            spread_dict[spr] = spread_dict[spr] + 1
        else:
            spread_dict[spr] = 1

        pay = tmps.get("payout")
        if pay in payout_tmp.keys():
            payout_tmp[pay] = payout_tmp[pay] + 1
        else:
            payout_tmp[pay] = 1

        if int(close_t) == 0:
            print("close:0 " + str(score))
        #high_tmp.append(tmps.get("high"))
        #low_tmp.append(tmps.get("low"))
        closes_tmp[score] = (close_t, tmps.get("spreadAus") / 100)

    for i in spread_dict.keys():
        print("SPREAD:" + str(i), spread_dict[i])

    for i in payout_tmp.keys():
        print("PAYOUT:" + str(i), payout_tmp[i])

    close = 10000 * np.log(close_tmp / shift(close_tmp, 1, cval=np.NaN))[1:]
    #high = 10000 * np.log(high_tmp / shift(high_tmp, 1, cval=np.NaN) )[1:]
    #low = 10000 * np.log(low_tmp / shift(low_tmp, 1, cval=np.NaN)  )[1:]

    close_data, high_data, low_data, time_data, price_data , predict_time_data, predict_score_data , end_price_data \
        = [], [], [], [], [], [], [], []
    label_data = []
    spread_data = []
    spread0, spread1, spread2, spread3, spread4, spread5, spread6over = 0, 0, 0, 0, 0, 0, 0

    data_length = len(close) - maxlen - pred_term - 1
    print("data_length:" + str(data_length))

    up = 0
    same = 0

    for i in range(data_length):
        continue_flg = False

        if except_index:
            tmp_datetime = datetime.strptime(time_tmp[1 + i + maxlen - 1],
                                             '%Y-%m-%d %H:%M:%S')
            score = int(time.mktime(tmp_datetime.timetuple()))
            for ind in indicies:
                ind_datetime = datetime.fromtimestamp(ind)

                bef_datetime = ind_datetime - timedelta(hours=1)
                aft_datetime = ind_datetime + timedelta(hours=1)
                bef_time = int(time.mktime(bef_datetime.timetuple()))
                aft_time = int(time.mktime(aft_datetime.timetuple()))

                if bef_time <= score <= aft_time:
                    continue_flg = True
                    break

            if continue_flg:
                continue
        # exclude hours outside HighLow Australia trading times from the training data
        if except_highlow:
            if datetime.strptime(time_tmp[1 + i + maxlen - 1],
                                 '%Y-%m-%d %H:%M:%S').hour in except_list:
                continue
        # exclude windows not contiguous back to maxlen steps earlier (e.g. spanning a date boundary)
        tmp_time_bef = datetime.strptime(time_tmp[1 + i], '%Y-%m-%d %H:%M:%S')
        tmp_time_aft = datetime.strptime(time_tmp[1 + i + maxlen - 1],
                                         '%Y-%m-%d %H:%M:%S')
        delta = tmp_time_aft - tmp_time_bef

        if delta.total_seconds() > ((maxlen - 1) * int(s)):
            #print(tmp_time_aft)
            continue
        close_data.append(close[i:(i + maxlen)])
        time_data.append(time_tmp[1 + i + maxlen - 1])
        price_data.append(close_tmp[1 + i + maxlen - 1])
        spr = spread_tmp[1 + i + maxlen - 1]
        spread_data.append(spr)
        if spr == 0.001:
            spread1 = spread1 + 1
        elif spr == 0.002:
            spread2 = spread2 + 1
        elif spr == 0.003:
            spread3 = spread3 + 1
        elif spr == 0.004:
            spread4 = spread4 + 1
        elif spr == 0.005:
            spread5 = spread5 + 1
        elif spr >= 0.006:
            spread6over = spread6over + 1
        elif spr < 0.001:
            spread0 = spread0 + 1

        predict_time_data.append(time_tmp[1 + i + maxlen])
        predict_score_data.append(score_tmp[1 + i + maxlen])
        end_price_data.append(close_tmp[1 + i + maxlen + pred_term - 1])

        #high_data.append(high[i:(i + maxlen)])
        #low_data.append(low[i:(i + maxlen)])

        bef = close_tmp[1 + i + maxlen - 1]
        aft = close_tmp[1 + i + maxlen + pred_term - 1]
        # store the ground-truth label
        lbl, up_cnt, same_cnt = get_label_data(bef, aft, spr, up, same)
        up = up_cnt
        same = same_cnt
        label_data.append(lbl)

    close_np = np.array(close_data)
    time_np = np.array(time_data)
    price_np = np.array(price_data)

    predict_time_np = np.array(predict_time_data)
    predict_score_np = np.array(predict_score_data)
    end_price_np = np.array(end_price_data)

    close_tmp_np = np.array(close_tmp)
    time_tmp_np = np.array(time_tmp)
    spread_np = np.array(spread_data)
    #high_np = np.array(high_data)
    #low_np = np.array(low_data)

    retX = np.zeros((len(close_np), maxlen, in_num))
    retX[:, :, 0] = close_np[:]
    #retX[:, :, 1] = high_np[:]
    #retX[:, :, 2] = low_np[:]

    retY = np.array(label_data)
    #retZ = np.array(label_dataZ)

    print("X SHAPE:", retX.shape)
    print("Y SHAPE:", retY.shape)
    print("UP: ", up / len(retY))
    print("SAME: ", same / len(retY))
    print("DOWN: ", (len(retY) - up - same) / len(retY))
    spread_total = spread1 + spread2 + spread3 + spread4 + spread5 + spread6over + spread0

    print("spread total: ", spread_total)
    print("spread0: ", spread0 / spread_total)
    print("spread1: ", spread1 / spread_total)
    print("spread2: ", spread2 / spread_total)
    print("spread3: ", spread3 / spread_total)
    print("spread4: ", spread4 / spread_total)
    print("spread5: ", spread5 / spread_total)
    print("spread6over: ", spread6over / spread_total)

    return retX, retY, price_np, time_np, close_tmp_np, predict_time_np, predict_score_np, end_price_np, spread_np
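
The close series becomes scaled log returns via the same shift-by-one trick used in the helpers above. In isolation (prices invented):

import numpy as np
from scipy.ndimage import shift

close_tmp = np.array([100.0, 100.5, 100.3, 100.8])
ret = 10000 * np.log(close_tmp / shift(close_tmp, 1, cval=np.nan))[1:]
# the [1:] drops the NaN from the pad; ret ~ [49.9, -19.9, 49.7],
# i.e. log returns in basis points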
Example #50
def whirc_shiftcalc(obj_id):
    
    import pyfits
    import numpy as np
    import pdb
    import pickle
    import astropy.coordinates as coord
    import astropy.units as u
    import scipy.ndimage as snd 
    from scipy.ndimage import interpolation as interp 
    import matplotlib.pyplot as plt
    
    #READ IN DATA AND OBSERVING LOG
    #Read in data from "whirc_info.dat" (the observation log)
    im1 = open('whirc_info.dat','r')
    data1 = im1.readlines()
    im1.close()
    
    filename = []
    dateobs = []
    objname = []
    imgtype = []
    ra = []
    dec = []
    exptime = []
    usable = []
    rotangle = []
    raoffset = []
    decoffset = []
    
    for line in data1:
        p = line.split()
        filename.append(p[0])
        dateobs.append(p[1])
        objname.append(p[2])
        imgtype.append(p[3])
        ra.append(p[4])
        dec.append(p[5])
        exptime.append(p[6])
        usable.append(p[7])
        rotangle.append(p[8])
        raoffset.append(p[9])
        decoffset.append(p[10])
    
    #Rewrite in a more convenient array with format array[line][element]    
    alldata = []
    
    for line in range(len(usable)):
        alldata.append([filename[line],dateobs[line],objname[line],imgtype[line],ra[line],dec[line],exptime[line],usable[line],rotangle[line],raoffset[line],decoffset[line]])
        
    #Find junk files and good files:
    junk = []
    good = []
    for line in range(len(alldata)):
       if "no" in alldata[line][7]:
           junk.append(alldata[line])
       if "yes" in alldata[line][7]:
           good.append(alldata[line])
    
    #Find files related to obj_id in good files
    aobj_id = []
    for line in range(len(good)):
        if str(obj_id) in good[line][2]:
            aobj_id.append(good[line])
    print str(len(aobj_id)),"files for "+str(obj_id)
    
    # Change RA and Dec stuff to degrees
    for line in range(len(aobj_id)):
        raoff = aobj_id[line][9]
        decoff = aobj_id[line][10]
        roff = coord.Angle(raoff,unit=u.hour)
        doff = coord.Angle(decoff, unit=u.degree)
        raoff = (roff.degree)
        decoff = doff.degree
        if np.abs(raoff) > 1:
            raoff = -(360 - raoff)
        aobj_id[line][9] = raoff
        aobj_id[line][10] = decoff        
        #print aobj_id[line][9], aobj_id[line][10]
    
    #Find files through J_filter
    aobj_idJ = []
    for line in range(len(aobj_id)):
        if "J" in aobj_id[line][2]:
            aobj_idJ.append(aobj_id[line])
    print str(len(aobj_idJ)), "J files for "+str(obj_id)
    
    #Find files through K_filter
    aobj_idK = []
    for line in range(len(aobj_id)):
        if "K" in aobj_id[line][2]:
            aobj_idK.append(aobj_id[line])
    print str(len(aobj_idK)), "K files for "+str(obj_id)
    
    
    #Find J files from Night 1
    aobj_idJN1 = []
    for line in range(len(aobj_idJ)):
        if "N1" in aobj_idJ[line][0]:
            aobj_idJN1.append(aobj_idJ[line])
    print str(len(aobj_idJN1)), "J files in N1"
    
    #Find K files from Night 1
    aobj_idKN1 = []
    for line in range(len(aobj_idK)):
        if "N1" in aobj_idK[line][0]:
            aobj_idKN1.append(aobj_idK[line])
    print str(len(aobj_idKN1)), "K files in N1"
    
    #Find J files from Night 2
    aobj_idJN2 = []
    for line in range(len(aobj_idJ)):
        if "N2" in aobj_idJ[line][0]:
            aobj_idJN2.append(aobj_idJ[line])
    print str(len(aobj_idJN2)), "J files in N2"
    
    #Find K files from Night 2
    aobj_idKN2 = []
    for line in range(len(aobj_idK)):
        if "N2" in aobj_idK[line][0]:
            aobj_idKN2.append(aobj_idK[line])
    print str(len(aobj_idKN2)), "K files in N2"
    
    #Find J files from Night 3
    aobj_idJN3 = []
    for line in range(len(aobj_idJ)):
        if "N3" in aobj_idJ[line][0]:
            aobj_idJN3.append(aobj_idJ[line])
    print str(len(aobj_idJN3)), "J files in N3"
    
    #Find K files from Night 3
    aobj_idKN3 = []
    for line in range(len(aobj_idK)):
        if "N3" in aobj_idK[line][0]:
            aobj_idKN3.append(aobj_idK[line])
    print str(len(aobj_idKN3)), "K files in N3"
    
    #WRITE PATH TO J_FILES_N1
    Jobj_idN1_locs = []
    for line in range(len(aobj_idJN1)):
        Jobj_idN1_locs.append('Calibs/reduced/'+str(obj_id)+'_'+'JN1_'+str(line + 1)+'.fits')
        
    #WRITE PATH TO K_FILES_N1
    Kobj_idN1_locs = []
    for line in range(len(aobj_idKN1)):
        Kobj_idN1_locs.append('Calibs/reduced/'+str(obj_id)+'_'+'KN1_'+str(line + 1)+'.fits')
        
    #WRITE PATH OF J_FILES_N2
    Jobj_idN2_locs = []
    for line in range(len(aobj_idJN2)):
        Jobj_idN2_locs.append('Calibs/reduced/'+str(obj_id)+'_'+'JN2_'+str(line + 1)+'.fits')
    
    #WRITE PATH OF K_FILES_N2
    Kobj_idN2_locs = []
    for line in range(len(aobj_idKN2)):
        Kobj_idN2_locs.append('Calibs/reduced/'+str(obj_id)+'_'+'KN2_'+str(line + 1)+'.fits')
        
    #WRITE PATH OF J_FILES_N3
    Jobj_idN3_locs = []
    for line in range(len(aobj_idJN3)):
        Jobj_idN3_locs.append('Calibs/reduced/'+str(obj_id)+'_'+'JN3_'+str(line + 1)+'.fits')
    
    #WRITE PATH OF K_FILES_N3
    Kobj_idN3_locs = []
    for line in range(len(aobj_idKN3)):
        Kobj_idN3_locs.append('Calibs/reduced/'+str(obj_id)+'_'+'KN3_'+str(line + 1)+'.fits')
        
    
    #READ IN STARFIELDS FROM PICKLE FILE
    print "reading in JN1"
    
    if len(aobj_idJN1) > 0:
        
        
        xy_shifts_JN1 = []
                
        # FIND REFERENCE IMAGE
        centrals = []
        for line in range(len(aobj_idJN1)):
            if aobj_idJN1[line][9] == 0 and aobj_idJN1[line][10] == 0:
                centrals.append(line)
        
        if len(centrals) == 0: 
            centrals.append(4) #in case no object has zero offset, align all objects to fifth object
        center = centrals[0]
        
        # READ IN AND INITIALIZE DISTANCE MINIMIZATION
        star_coords = pickle.load(open( "Calibs/starfields/star_coords"+str(obj_id)+'JN1.p', 'rb'))
        
        ref_im = star_coords[center]
        
        def distance(x0,y0,x1,y1):
            return np.sqrt((x0-x1)**2+(y0-y1)**2)
            
        #LOOP THROUGH EACH EXPOSURE IN JN1
        for shf_num in range(len(aobj_idJN1)):
            
            shift_im = np.array(star_coords[shf_num]) 
            
            #----------testing possible xrange to improve time------------#
            xranges = []
            yranges = []
            for sline in range(len(shift_im)):
                for rline in range(len(ref_im)):
                    xranges.append(float(shift_im[sline][0]-ref_im[rline][0]))
                    yranges.append(float(shift_im[sline][1]-ref_im[rline][1]))
                                
            #pdb.set_trace()
            #x_shift = np.array(xranges)
            #y_shift = np.array(yranges)
                    
                
            
            #---------------end testing ---------------------------------------#
            
            #FIRST ITERATION, TO GET ROUGH APPROXIMATION
            x_shift = np.arange(-1000, 1000, 20)
            y_shift = np.arange(-1000, 1000, 20)
            datai = np.zeros((len(x_shift),len(y_shift)))
            
            #Loop through different x and y shifts
            for x in range(len(x_shift)):
               for y in range(len(y_shift)):
                   tot_points = []
                   
                   #Apply shift
                   shift_im = np.array(star_coords[shf_num]) + [x_shift[x],y_shift[y]]
                   dist = dict() #creates a separate variable for each star in shift_im
            
                   #loop through stars in image to shift
                   for shift_line in range(len(shift_im)):
                       dist[shift_line] = [] 
                       
                       #loop through stars in reference image
                       for ref_line in range(len(ref_im)):
                           dist[shift_line].append(distance(ref_im[ref_line][0], ref_im[ref_line][1], shift_im[shift_line][0], shift_im[shift_line][1]))
                           
                       #find distance to closest star in ref_im for each star in shift_im
                       dist[shift_line] = np.min(dist[shift_line])
                       
                       if dist[shift_line] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift[mins[0]]),np.median(y_shift[mins[1]])]
            #print mina
       
            #pdb.set_trace()
            
            #-----------------------------Now Repeat--------------------------------#
            
            #SECOND ITERATION, TO GET BETTER APPROXIMATION       
            x_shift2 = np.arange(float(mina[0])-150, float(mina[0])+150, 10)
            y_shift2 = np.arange(float(mina[1])-150, float(mina[1])+150, 10)
            
            datai = np.zeros((len(x_shift2),len(y_shift2)))

            for x in range(len(x_shift2)):
               for y in range(len(y_shift2)):
                   
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift2[x], y_shift2[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       
                       if dist[shift_star] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift2[mins[0]]),np.median(y_shift2[mins[1]])]
            #print [float(mina[0]),float(mina[1])]

            #THIRD ITERATION, TO GET BETTER APPROXIMATION
            x_shift3 = np.arange(float(mina[0])-20, float(mina[0])+20, 1)
            y_shift3 = np.arange(float(mina[1])-20, float(mina[1])+20, 1)
            datai = np.zeros((len(x_shift3),len(y_shift3)))

            for x in range(len(x_shift3)):
               for y in range(len(y_shift3)):
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift3[x],y_shift3[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       if dist[shift_star] < 4:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift3[mins[0]]),np.median(y_shift3[mins[1]])]
            print mina
            
            #Append optimal x,y shift for each exposure 
            xy_shifts_JN1.append([float(mina[0]),float(mina[1])])
            
        print xy_shifts_JN1
        
        #APPLY SHIFTS TO DATA
        shifteds = []
        for line in range(len(aobj_idJN1)):
            sci = pyfits.getdata(Jobj_idN1_locs[line])
            shifted = interp.shift(sci, xy_shifts_JN1[line] ,order = 0)
            #shifted = shifted - np.median(shifted)
            shifteds.append(shifted)
        combined = np.mean(shifteds, axis = 0) #trying instead of sum
        #for i in shifteds:
            
        
        ##### MAKING PLOTS #########
        '''
        boxs = [shifteds[a][500:1500] for a in range(len(Jobj_idN1_locs))]
        meds = [np.median(a) for a in boxs]
        means = [np.mean(a) for a in boxs]
        fig = plt.subplot(1,1,1)
        xvar = range(len(Jobj_idN1_locs))
        plt.plot(xvar, meds, 'o', color = 'blue', label = 'median')
        plt.plot(xvar, means, 'o', color = 'red', label = 'mean')
        fig.legend(loc = 2)
        fig.set_xlabel('exposure number')
        fig.set_ylabel('counts')
        
        
        pdb.set_trace()
        '''
        
        median = np.median(shifteds, axis = 0)


        file = pyfits.PrimaryHDU(combined)
        mfile = pyfits.PrimaryHDU(median)
        file.writeto('Calibs/shifted/'+str(obj_id)+'_JN1_combined.fits',clobber=True)
        mfile.writeto('Calibs/shifted/'+str(obj_id)+'_JN1_median.fits',clobber=True)
        
        #--------------------------------------------------------------------------#
        
    print "reading in KN1"
    
    if len(aobj_idKN1) > 0:
        
        
        xy_shifts_KN1 = []
                
        # FIND REFERENCE IMAGE
        centrals = []
        for line in range(len(aobj_idKN1)):
            if aobj_idKN1[line][9] == 0 and aobj_idKN1[line][10] == 0:
                centrals.append(line)
        
        if len(centrals) == 0: 
            centrals.append(4) #in case no object has zero offset, align all objects to fifth object
        center = centrals[0]
        
        # READ IN AND INITIALIZE DISTANCE MINIMIZATION
        star_coords = pickle.load(open("Calibs/starfields/star_coords"+str(obj_id)+'KN1.p', 'rb'))
        
        ref_im = star_coords[center]
        
        def distance(x0,y0,x1,y1):
            return np.sqrt((x0-x1)**2+(y0-y1)**2)
            
        #LOOP THROUGH EACH EXPOSURE IN KN1
        for shf_num in range(len(aobj_idKN1)):
            
            shift_im = np.array(star_coords[shf_num]) 
            
            #----------testing possible xrange to improve time------------#
            xranges = []
            yranges = []
            for sline in range(len(shift_im)):
                for rline in range(len(ref_im)):
                    xranges.append(float(shift_im[sline][0]-ref_im[rline][0]))
                    yranges.append(float(shift_im[sline][1]-ref_im[rline][1]))
                                
            #pdb.set_trace()
            #x_shift = np.array(xranges)
            #y_shift = np.array(yranges)
                    
                
            
            #---------------end testing ---------------------------------------#
            
            #FIRST ITERATION, TO GET ROUGH APPROXIMATION
            x_shift = np.arange(-1000, 1000, 20)
            y_shift = np.arange(-1000, 1000, 20)
            datai = np.zeros((len(x_shift),len(y_shift)))
            
            #Loop through different x and y shifts
            for x in range(len(x_shift)):
               for y in range(len(y_shift)):
                   tot_points = []
                   
                   #Apply shift
                   shift_im = np.array(star_coords[shf_num]) + [x_shift[x],y_shift[y]]
                   dist = dict() #creates a separate variable for each star in shift_im
            
                   #loop through stars in image to shift
                   for shift_line in range(len(shift_im)):
                       dist[shift_line] = [] 
                       
                       #loop through stars in reference image
                       for ref_line in range(len(ref_im)):
                           dist[shift_line].append(distance(ref_im[ref_line][0], ref_im[ref_line][1], shift_im[shift_line][0], shift_im[shift_line][1]))
                           
                       #find distance to closest star in ref_im for each star in shift_im
                       dist[shift_line] = np.min(dist[shift_line])
                       
                       if dist[shift_line] < 10:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift[mins[0]]),np.median(y_shift[mins[1]])]
            #print mina
       
            #pdb.set_trace()
            
            #-----------------------------Now Repeat--------------------------------#
            
            #SECOND ITERATION, TO GET BETTER APPROXIMATION       
            x_shift2 = np.arange(float(mina[0])-150, float(mina[0])+150, 10)
            y_shift2 = np.arange(float(mina[1])-150, float(mina[1])+150, 10)
            
            datai = np.zeros((len(x_shift2),len(y_shift2)))

            for x in range(len(x_shift2)):
               for y in range(len(y_shift2)):
                   
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift2[x], y_shift2[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       
                       if dist[shift_star] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift2[mins[0]]),np.median(y_shift2[mins[1]])]
            #print [float(mina[0]),float(mina[1])]

            #THIRD ITERATION, TO GET BETTER APPROXIMATION
            x_shift3 = np.arange(float(mina[0])-20, float(mina[0])+20, 1)
            y_shift3 = np.arange(float(mina[1])-20, float(mina[1])+20, 1)
            datai = np.zeros((len(x_shift3),len(y_shift3)))

            for x in range(len(x_shift3)):
               for y in range(len(y_shift3)):
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift3[x],y_shift3[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       if dist[shift_star] < 4:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift3[mins[0]]),np.median(y_shift3[mins[1]])]
            print mina
            
            #Append optimal x,y shift for each exposure 
            xy_shifts_KN1.append([float(mina[0]),float(mina[1])])
            
        print xy_shifts_KN1
        
        #APPLY SHIFTS TO DATA
        shifteds = []
        for line in range(len(aobj_idKN1)):
            sci = pyfits.getdata(Kobj_idN1_locs[line])
            shifted = interp.shift(sci, xy_shifts_KN1[line] ,order = 0)
            #shifted = shifted - np.median(shifted)
            shifteds.append(shifted)
        combined = sum(shifteds)
        median = np.median(shifteds, axis = 0)


        file = pyfits.PrimaryHDU(combined)
        mfile = pyfits.PrimaryHDU(median)
        file.writeto('Calibs/shifted/'+str(obj_id)+'_KN1_combined.fits',clobber=True)
        mfile.writeto('Calibs/shifted/'+str(obj_id)+'_KN1_median.fits',clobber=True)

    
    #---------------------------------------------------------------------------------#
            
    print "reading in JN2"
    
    if len(aobj_idJN2) > 0:
        
        xy_shifts_JN2 = []
                
        # FIND REFERENCE IMAGE
        centrals = []
        for line in range(len(aobj_idJN2)):
            if aobj_idJN2[line][9] == 0 and aobj_idJN2[line][10] == 0:
                centrals.append(line)
        
        if len(centrals) == 0: 
            centrals.append(4) #in case no object has zero offset, align all objects to fifth object
        center = centrals[0]
        
        # READ IN AND INITIALIZE DISTANCE MINIMIZATION
        star_coords = pickle.load(open( "Calibs/starfields/star_coords"+str(obj_id)+'JN2.p', 'rb'))
        
        ref_im = star_coords[center]
        
        def distance(x0,y0,x1,y1):
            return np.sqrt((x0-x1)**2+(y0-y1)**2)
            
        #LOOP THROUGH EACH EXPOSURE IN JN2
        for shf_num in range(len(aobj_idJN2)):
            
            shift_im = np.array(star_coords[shf_num]) 
            
            #----------testing possible xrange to improve time------------#
            xranges = []
            yranges = []
            for sline in range(len(shift_im)):
                for rline in range(len(ref_im)):
                    xranges.append(float(shift_im[sline][0]-ref_im[rline][0]))
                    yranges.append(float(shift_im[sline][1]-ref_im[rline][1]))
                                
            #pdb.set_trace()
            #x_shift = np.array(xranges)
            #y_shift = np.array(yranges)
                    
                
            
            #---------------end testing ---------------------------------------#
            
            #FIRST ITERATION, TO GET ROUGH APPROXIMATION
            x_shift = np.arange(-1000, 1000, 20)
            y_shift = np.arange(-1000, 1000, 20)
            datai = np.zeros((len(x_shift),len(y_shift)))
            
            #Loop through different x and y shifts
            for x in range(len(x_shift)):
               for y in range(len(y_shift)):
                   tot_points = []
                   
                   #Apply shift
                   shift_im = np.array(star_coords[shf_num]) + [x_shift[x],y_shift[y]]
                   dist = dict() #creates a separate variable for each star in shift_im
            
                   #loop through stars in image to shift
                   for shift_line in range(len(shift_im)):
                       dist[shift_line] = [] 
                       
                       #loop through stars in reference image
                       for ref_line in range(len(ref_im)):
                           dist[shift_line].append(distance(ref_im[ref_line][0], ref_im[ref_line][1], shift_im[shift_line][0], shift_im[shift_line][1]))
                           
                       #find distance to closest star in ref_im for each star in shift_im
                       dist[shift_line] = np.min(dist[shift_line])
                       
                       if dist[shift_line] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift[mins[0]]),np.median(y_shift[mins[1]])]
            #print mina
       
            #pdb.set_trace()
            
            #-----------------------------Now Repeat--------------------------------#
            
            #SECOND ITERATION, TO GET BETTER APPROXIMATION       
            x_shift2 = np.arange(float(mina[0])-150, float(mina[0])+150, 10)
            y_shift2 = np.arange(float(mina[1])-150, float(mina[1])+150, 10)
            
            datai = np.zeros((len(x_shift2),len(y_shift2)))

            for x in range(len(x_shift2)):
               for y in range(len(y_shift2)):
                   
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift2[x], y_shift2[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       
                       if dist[shift_star] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift2[mins[0]]),np.median(y_shift2[mins[1]])]
            #print [float(mina[0]),float(mina[1])]

            #THIRD ITERATION, TO GET BETTER APPROXIMATION
            x_shift3 = np.arange(float(mina[0])-20, float(mina[0])+20, 1)
            y_shift3 = np.arange(float(mina[1])-20, float(mina[1])+20, 1)
            datai = np.zeros((len(x_shift3),len(y_shift3)))

            for x in range(len(x_shift3)):
               for y in range(len(y_shift3)):
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift3[x],y_shift3[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       if dist[shift_star] < 4:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift3[mins[0]]),np.median(y_shift3[mins[1]])]
             print(mina)
            
            #Append optimal x,y shift for each exposure 
            xy_shifts_JN2.append([float(mina[0]),float(mina[1])])
            
        print(xy_shifts_JN2)
        
        #APPLY SHIFTS TO DATA
        shifteds = []
        for line in range(len(aobj_idJN2)):
            sci = pyfits.getdata(Jobj_idN2_locs[line])
            shifted = interp.shift(sci, xy_shifts_JN2[line], order=0)
            #shifted = shifted - np.median(shifted)
            shifteds.append(shifted)
        combined = np.mean(shifteds, axis=0)  #trying mean instead of sum
        median = np.median(shifteds, axis=0)


        file = pyfits.PrimaryHDU(combined)
        mfile = pyfits.PrimaryHDU(median)
        file.writeto('Calibs/shifted/'+str(obj_id)+'_JN2_combined.fits',clobber=True)
        mfile.writeto('Calibs/shifted/'+str(obj_id)+'_JN2_median.fits',clobber=True)
        
        #--------------------------------------------------------------------------#
        
    print("reading in KN2")
    
    if len(aobj_idKN2) > 0:
        
        xy_shifts_KN2 = []
                
        # FIND REFERENCE IMAGE
        centrals = []
        for line in range(len(aobj_idKN2)):
            if aobj_idKN2[line][9] == 0 and aobj_idKN2[line][10] == 0:
                centrals.append(line)
        
        if len(centrals) == 0: 
            centrals.append(4) #in case no object has zero offset, align all objects to fifth object
        center = centrals[0]
        
        # READ IN AND INITIALIZE DISTANCE MINIMIZATION
        star_coords = pickle.load(open( "Calibs/starfields/star_coords"+str(obj_id)+'KN2.p', 'rb'))
        
        ref_im = star_coords[center]
        
        def distance(x0,y0,x1,y1):
            return np.sqrt((x0-x1)**2+(y0-y1)**2)
            
        #LOOP THROUGH EACH EXPOSURE IN KN2
        for shf_num in range(len(aobj_idKN2)):
            
            shift_im = np.array(star_coords[shf_num]) 
            
            #----------testing possible xrange to improve time------------#
            xranges = []
            yranges = []
            for sline in range(len(shift_im)):
                for rline in range(len(ref_im)):
                    xranges.append(float(shift_im[sline][0]-ref_im[rline][0]))
                    yranges.append(float(shift_im[sline][1]-ref_im[rline][1]))
                                
            #pdb.set_trace()
            #x_shift = np.array(xranges)
            #y_shift = np.array(yranges)
                    
                
            
            #---------------end testing ---------------------------------------#
            
            #FIRST ITERATION, TO GET ROUGH APPROXIMATION
            x_shift = np.arange(-1000, 1000, 20)
            y_shift = np.arange(-1000, 1000, 20)
            datai = np.zeros((len(x_shift),len(y_shift)))
            
            #Loop through different x and y shifts
            for x in range(len(x_shift)):
               for y in range(len(y_shift)):
                   tot_points = []
                   
                   #Apply shift
                   shift_im = np.array(star_coords[shf_num]) + [x_shift[x],y_shift[y]]
                   dist = dict() #creates a separate variable for each star in shift_im
            
                   #loop through stars in image to shift
                   for shift_line in range(len(shift_im)):
                       dist[shift_line] = [] 
                       
                       #loop through stars in reference image
                       for ref_line in range(len(ref_im)):
                           dist[shift_line].append(distance(ref_im[ref_line][0], ref_im[ref_line][1], shift_im[shift_line][0], shift_im[shift_line][1]))
                           
                       #find distance to closest star in ref_im for each star in shift_im
                       dist[shift_line] = np.min(dist[shift_line])
                       
                       if dist[shift_line] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift[mins[0]]),np.median(y_shift[mins[1]])]
            #print mina
       
            #pdb.set_trace()
            
            #-----------------------------Now Repeat--------------------------------#
            
            #SECOND ITERATION, TO GET BETTER APPROXIMATION       
            x_shift2 = np.arange(float(mina[0])-150, float(mina[0])+150, 10)
            y_shift2 = np.arange(float(mina[1])-150, float(mina[1])+150, 10)
            
            datai = np.zeros((len(x_shift2),len(y_shift2)))

            for x in range(len(x_shift2)):
               for y in range(len(y_shift2)):
                   
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift2[x], y_shift2[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       
                       if dist[shift_star] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift2[mins[0]]),np.median(y_shift2[mins[1]])]
            #print [float(mina[0]),float(mina[1])]

            #THIRD ITERATION, TO GET BETTER APPROXIMATION
            x_shift3 = np.arange(float(mina[0])-20, float(mina[0])+20, 1)
            y_shift3 = np.arange(float(mina[1])-20, float(mina[1])+20, 1)
            datai = np.zeros((len(x_shift3),len(y_shift3)))

            for x in range(len(x_shift3)):
               for y in range(len(y_shift3)):
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift3[x],y_shift3[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       if dist[shift_star] < 4:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift3[mins[0]]),np.median(y_shift3[mins[1]])]
             print(mina)
            
            #Append optimal x,y shift for each exposure 
            xy_shifts_KN2.append([float(mina[0]),float(mina[1])])
            
        print(xy_shifts_KN2)
        
        #APPLY SHIFTS TO DATA
        shifteds = []
        for line in range(len(aobj_idKN2)):
            sci = pyfits.getdata(Kobj_idN2_locs[line])
            shifted = interp.shift(sci, xy_shifts_KN2[line], order=0)
            #shifted = shifted - np.median(shifted)
            shifteds.append(shifted)
        combined = sum(shifteds)
        median = np.median(shifteds, axis=0)


        file = pyfits.PrimaryHDU(combined)
        mfile = pyfits.PrimaryHDU(median)
        file.writeto('Calibs/shifted/'+str(obj_id)+'_KN2_combined.fits',clobber=True)
        mfile.writeto('Calibs/shifted/'+str(obj_id)+'_KN2_median.fits',clobber=True)
        
    #--------------------------------------------------------------------------------#
            
    print("reading in JN3")
    
    if len(aobj_idJN3) > 0:
        
        xy_shifts_JN3 = []
                
        # FIND REFERENCE IMAGE
        centrals = []
        for line in range(len(aobj_idJN3)):
            if aobj_idJN3[line][9] == 0 and aobj_idJN3[line][10] == 0:
                centrals.append(line)
        
        if len(centrals) == 0: 
            centrals.append(4) #in case no object has zero offset, align all objects to fifth object
        center = centrals[0]
        
        # READ IN AND INITIALIZE DISTANCE MINIMIZATION
        star_coords = pickle.load(open( "Calibs/starfields/star_coords"+str(obj_id)+'JN3.p', 'rb'))
        
        ref_im = star_coords[center]
        
        def distance(x0,y0,x1,y1):
            return np.sqrt((x0-x1)**2+(y0-y1)**2)
            
        #LOOP THROUGH EACH EXPOSURE IN JN3
        for shf_num in range(len(aobj_idJN3)):
            
            shift_im = np.array(star_coords[shf_num]) 
            
            #----------testing possible xrange to improve time------------#
            xranges = []
            yranges = []
            for sline in range(len(shift_im)):
                for rline in range(len(ref_im)):
                    xranges.append(float(shift_im[sline][0]-ref_im[rline][0]))
                    yranges.append(float(shift_im[sline][1]-ref_im[rline][1]))
                                
            #pdb.set_trace()
            #x_shift = np.array(xranges)
            #y_shift = np.array(yranges)
                    
                
            
            #---------------end testing ---------------------------------------#
            
            #FIRST ITERATION, TO GET ROUGH APPROXIMATION
            x_shift = np.arange(-1000, 1000, 20)
            y_shift = np.arange(-1000, 1000, 20)
            datai = np.zeros((len(x_shift),len(y_shift)))
            
            #Loop through different x and y shifts
            for x in range(len(x_shift)):
               for y in range(len(y_shift)):
                   tot_points = []
                   
                   #Apply shift
                   shift_im = np.array(star_coords[shf_num]) + [x_shift[x],y_shift[y]]
                   dist = dict() #creates a separate variable for each star in shift_im
            
                   #loop through stars in image to shift
                   for shift_line in range(len(shift_im)):
                       dist[shift_line] = [] 
                       
                       #loop through stars in reference image
                       for ref_line in range(len(ref_im)):
                           dist[shift_line].append(distance(ref_im[ref_line][0], ref_im[ref_line][1], shift_im[shift_line][0], shift_im[shift_line][1]))
                           
                       #find distance to closest star in ref_im for each star in shift_im
                       dist[shift_line] = np.min(dist[shift_line])
                       
                       if dist[shift_line] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift[mins[0]]),np.median(y_shift[mins[1]])]
            #print mina
       
            #pdb.set_trace()
            
            #-----------------------------Now Repeat--------------------------------#
            
            #SECOND ITERATION, TO GET BETTER APPROXIMATION       
            x_shift2 = np.arange(float(mina[0])-150, float(mina[0])+150, 10)
            y_shift2 = np.arange(float(mina[1])-150, float(mina[1])+150, 10)
            
            datai = np.zeros((len(x_shift2),len(y_shift2)))

            for x in range(len(x_shift2)):
               for y in range(len(y_shift2)):
                   
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift2[x], y_shift2[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       
                       if dist[shift_star] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift2[mins[0]]),np.median(y_shift2[mins[1]])]
            #print [float(mina[0]),float(mina[1])]

            #THIRD ITERATION, TO GET BETTER APPROXIMATION
            x_shift3 = np.arange(float(mina[0])-20, float(mina[0])+20, 1)
            y_shift3 = np.arange(float(mina[1])-20, float(mina[1])+20, 1)
            datai = np.zeros((len(x_shift3),len(y_shift3)))

            for x in range(len(x_shift3)):
               for y in range(len(y_shift3)):
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift3[x],y_shift3[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       if dist[shift_star] < 4:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift3[mins[0]]),np.median(y_shift3[mins[1]])]
             print(mina)
            
            #Append optimal x,y shift for each exposure 
            xy_shifts_JN3.append([float(mina[0]),float(mina[1])])
            
        print(xy_shifts_JN3)
        
        #APPLY SHIFTS TO DATA
        shifteds = []
        for line in range(len(aobj_idJN3)):
            sci = pyfits.getdata(Jobj_idN3_locs[line])
            shifted = interp.shift(sci, xy_shifts_JN3[line], order=0)
            #shifted = shifted - np.median(shifted)
            shifteds.append(shifted)
        combined = np.mean(shifteds, axis=0)  #trying mean instead of sum
        median = np.median(shifteds, axis=0)


        file = pyfits.PrimaryHDU(combined)
        mfile = pyfits.PrimaryHDU(median)
        file.writeto('Calibs/shifted/'+str(obj_id)+'_JN3_combined.fits',clobber=True)
        mfile.writeto('Calibs/shifted/'+str(obj_id)+'_JN3_median.fits',clobber=True)
        
        #--------------------------------------------------------------------------#
        
    print("reading in KN3")
    
    if len(aobj_idKN3) > 0:
        
        xy_shifts_KN3 = []
                
        # FIND REFERENCE IMAGE
        centrals = []
        for line in range(len(aobj_idKN3)):
            if aobj_idKN3[line][9] == 0 and aobj_idKN3[line][10] == 0:
                centrals.append(line)
        
        if len(centrals) == 0: 
            centrals.append(4) #in case no object has zero offset, align all objects to fifth object
        center = centrals[0]
        
        # READ IN AND INITIALIZE DISTANCE MINIMIZATION
        star_coords = pickle.load(open( "Calibs/starfields/star_coords"+str(obj_id)+'KN3.p', 'rb'))
        
        ref_im = star_coords[center]
        
        def distance(x0,y0,x1,y1):
            return np.sqrt((x0-x1)**2+(y0-y1)**2)
            
        #LOOP THROUGH EACH EXPOSURE IN KN3
        for shf_num in range(len(aobj_idKN3)):
            
            shift_im = np.array(star_coords[shf_num]) 
            
            #----------testing possible xrange to improve time------------#
            xranges = []
            yranges = []
            for sline in range(len(shift_im)):
                for rline in range(len(ref_im)):
                    xranges.append(float(shift_im[sline][0]-ref_im[rline][0]))
                    yranges.append(float(shift_im[sline][1]-ref_im[rline][1]))
                                
            #pdb.set_trace()
            #x_shift = np.array(xranges)
            #y_shift = np.array(yranges)
                    
                
            
            #---------------end testing ---------------------------------------#
            
            #FIRST ITERATION, TO GET ROUGH APPROXIMATION
            x_shift = np.arange(-1000, 1000, 20)
            y_shift = np.arange(-1000, 1000, 20)
            datai = np.zeros((len(x_shift),len(y_shift)))
            
            #Loop through different x and y shifts
            for x in range(len(x_shift)):
               for y in range(len(y_shift)):
                   tot_points = []
                   
                   #Apply shift
                   shift_im = np.array(star_coords[shf_num]) + [x_shift[x],y_shift[y]]
                   dist = dict() #creates a separate variable for each star in shift_im
            
                   #loop through stars in image to shift
                   for shift_line in range(len(shift_im)):
                       dist[shift_line] = [] 
                       
                       #loop through stars in reference image
                       for ref_line in range(len(ref_im)):
                           dist[shift_line].append(distance(ref_im[ref_line][0], ref_im[ref_line][1], shift_im[shift_line][0], shift_im[shift_line][1]))
                           
                       #find distance to closest star in ref_im for each star in shift_im
                       dist[shift_line] = np.min(dist[shift_line])
                       
                       if dist[shift_line] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift[mins[0]]),np.median(y_shift[mins[1]])]
            #print mina
       
            #pdb.set_trace()
            
            #-----------------------------Now Repeat--------------------------------#
            
            #SECOND ITERATION, TO GET BETTER APPROXIMATION       
            x_shift2 = np.arange(float(mina[0])-150, float(mina[0])+150, 10)
            y_shift2 = np.arange(float(mina[1])-150, float(mina[1])+150, 10)
            
            datai = np.zeros((len(x_shift2),len(y_shift2)))

            for x in range(len(x_shift2)):
               for y in range(len(y_shift2)):
                   
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift2[x], y_shift2[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       
                       if dist[shift_star] < 20:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift2[mins[0]]),np.median(y_shift2[mins[1]])]
            #print [float(mina[0]),float(mina[1])]

            #THIRD ITERATION, TO GET BETTER APPROXIMATION
            x_shift3 = np.arange(float(mina[0])-20, float(mina[0])+20, 1)
            y_shift3 = np.arange(float(mina[1])-20, float(mina[1])+20, 1)
            datai = np.zeros((len(x_shift3),len(y_shift3)))

            for x in range(len(x_shift3)):
               for y in range(len(y_shift3)):
                   tot_points = []
                   shift_im = np.array(star_coords[shf_num]) + [x_shift3[x],y_shift3[y]]
                   dist = dict()
                   for shift_star in range(len(shift_im)):
                       dist[shift_star] = [] 
                       for ref_star in range(len(ref_im)):
                           dist[shift_star].append(distance(ref_im[ref_star][0], ref_im[ref_star][1], shift_im[shift_star][0],shift_im[shift_star][1]))
                       dist[shift_star] = np.min(dist[shift_star])
                       if dist[shift_star] < 4:
                           tot_points.append(1.0)
        
                   totpoints = sum(tot_points)      
                   #tot_dist =  sum(dist.values())
                   datai[x, y] = totpoints
                   #shift_im = array(star_coords[0])
            mins = np.where(datai==datai.max())
            
            mina = [np.median(x_shift3[mins[0]]),np.median(y_shift3[mins[1]])]
             print(mina)
            
            #Append optimal x,y shift for each exposure 
            xy_shifts_KN3.append([float(mina[0]),float(mina[1])])
            
        print(xy_shifts_KN3)
        
        #APPLY SHIFTS TO DATA
        shifteds = []
        for line in range(len(aobj_idKN3)):
            sci = pyfits.getdata(Kobj_idN3_locs[line])
            shifted = interp.shift(sci, xy_shifts_KN3[line], order=0)
            #shifted = shifted - np.median(shifted)
            shifteds.append(shifted)
        combined = sum(shifteds)
        median = np.median(shifteds, axis=0)


        file = pyfits.PrimaryHDU(combined)
        mfile = pyfits.PrimaryHDU(median)
        file.writeto('Calibs/shifted/'+str(obj_id)+'_KN3_combined.fits',clobber=True)
        mfile.writeto('Calibs/shifted/'+str(obj_id)+'_KN3_median.fits',clobber=True)

        #--------------------------------------------------------------------------#
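
# The four band blocks above (JN2, KN2, JN3, KN3) repeat the same coarse-to-fine
# grid search.  A minimal sketch of that pattern as one reusable, vectorized
# helper -- the function and argument names below are ours, not the original
# author's:

import numpy as np

def find_shift(ref_stars, stars,
               steps=(20, 10, 1), windows=(1000, 150, 20),
               tolerances=(20, 20, 4)):
    """Coarse-to-fine search for the (x, y) shift that maximizes the number of
    shifted stars landing within `tolerance` pixels of some reference star."""
    ref = np.asarray(ref_stars, dtype=float)
    pts = np.asarray(stars, dtype=float)
    best = np.zeros(2)
    for step, window, tol in zip(steps, windows, tolerances):
        xs = np.arange(best[0] - window, best[0] + window, step)
        ys = np.arange(best[1] - window, best[1] + window, step)
        score = np.zeros((len(xs), len(ys)))
        for i, dx in enumerate(xs):
            for j, dy in enumerate(ys):
                shifted = pts + [dx, dy]
                # distance from every shifted star to every reference star
                d = np.hypot(shifted[:, None, 0] - ref[None, :, 0],
                             shifted[:, None, 1] - ref[None, :, 1])
                score[i, j] = np.sum(d.min(axis=1) < tol)
        hits = np.where(score == score.max())
        best = np.array([np.median(xs[hits[0]]), np.median(ys[hits[1]])])
    return best

# e.g., replacing one band block's inner loops:
#     dx, dy = find_shift(ref_im, star_coords[shf_num])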
示例#51
0
    def load_modality(self,
                      modality,
                      normalize_volumes=True,
                      downsample=2,
                      rotate_mult=0.0,
                      shift_mult=0.0):

        if self.dataset == 'ISLES':
            file_name = self.data_folder + '/ISLES/' + modality + '.npz'
            data = np.load(file_name)['arr_0']
        elif self.dataset == 'BRATS':
            file_name = self.data_folder + '/' + modality + '.npz'
            data = np.load(file_name)
        elif self.dataset == 'IXI':
            data = self.load_ixi(modality)
        else:
            raise Exception('Unknown dataset ', self.dataset)

        # array of 3D volumes
        X = [data[i].astype('float32') for i in range(self.num_vols)]

        # trim the matrices and downsample: downsample x downsample -> 1x1
        for i, x in enumerate(X):

            if rotate_mult != 0:
                print('Rotating ' + modality + '. Multiplying by ' +
                      str(rotate_mult))
                rotations = [[-5.57, 2.79, -11.99], [-5.42, -18.34, -14.22],
                             [4.64, 5.80, -5.96], [-17.02, -8.70, 15.43],
                             [18.79, 17.44, 17.06], [-14.55, -4.90, 9.19],
                             [14.37, -0.58, -16.85], [-9.49, -12.53, -2.89],
                             [-16.75, -4.07, 3.23], [14.39, -16.58, 3.35],
                             [-14.05, -2.25, -10.58], [8.47, -8.95, -12.73],
                             [13.00, -10.90, -2.85], [2.61, -7.51, -6.26],
                             [-13.99, -0.38, 6.29], [10.16, -9.88, -11.89],
                             [6.76, 0.83, -19.85], [18.74, -6.70, 15.46],
                             [-3.01, -2.85, 18.45], [-17.37, -1.32, -3.48],
                             [14.67, -17.93, 18.74], [6.55, 18.19, -8.24],
                             [13.52, -4.09, 19.32], [5.27, 11.27, 4.93],
                             [2.29, 17.83, 10.07], [-11.98, 10.49, 0.02],
                             [14.49, -12.00, -17.21], [17.86, -17.38, 19.04]]
                theta = rotations[i]

                x = rotate(x,
                           rotate_mult * theta[0],
                           axes=(1, 0),
                           reshape=False,
                           order=3,
                           mode='constant',
                           cval=0.0,
                           prefilter=True)
                x = rotate(x,
                           rotate_mult * theta[1],
                           axes=(1, 2),
                           reshape=False,
                           order=3,
                           mode='constant',
                           cval=0.0,
                           prefilter=True)
                x = rotate(x,
                           rotate_mult * theta[2],
                           axes=(0, 2),
                           reshape=False,
                           order=3,
                           mode='constant',
                           cval=0.0,
                           prefilter=True)

            if shift_mult != 0:
                print('Shifting ' + modality + '. Multiplying by ' +
                      str(shift_mult))
                shfts = [[0.931, 0.719, -0.078], [0.182, -0.220, 0.814],
                         [0.709, 0.085, -0.262], [-0.898, 0.367, 0.395],
                         [-0.936, 0.591, -0.101], [0.750, 0.522, 0.132],
                         [-0.093, 0.188, 0.898], [-0.517, 0.905, -0.389],
                         [0.616, 0.599, 0.098], [-0.209, -0.215, 0.285],
                         [0.653, -0.398, -0.153], [0.428, -0.682, -0.501],
                         [-0.421, -0.929, -0.925], [-0.753, -0.492, 0.744],
                         [0.532, -0.302, 0.353], [0.139, 0.991, -0.086],
                         [-0.453, 0.657, 0.072], [0.576, 0.918, 0.242],
                         [0.889, -0.543, 0.738], [-0.307, -0.945, 0.093],
                         [0.698, -0.443, 0.037], [-0.209, 0.882, 0.014],
                         [0.487, -0.588, 0.312], [0.007, -0.789, -0.107],
                         [0.215, 0.104, 0.482], [-0.374, 0.560, -0.187],
                         [-0.227, 0.030, -0.921], [0.106, 0.975, 0.997]]
                shft = shfts[i]
                x = shift(x, [
                    shft[0] * shift_mult, shft[1] * shift_mult,
                    shft[2] * shift_mult
                ])

            if self.dataset == 'ISLES':
                x = x[:, 0:-6, 34:-36]

            if self.trim_and_downsample:
                X[i] = block_reduce(x,
                                    block_size=(1, downsample, downsample),
                                    func=np.mean)

                if self.dataset == 'BRATS':
                    # power of 2 padding
                    (_, w, h) = X[i].shape

                    w_pad_size = int(
                        math.ceil(
                            (math.pow(2, math.ceil(math.log(w, 2))) - w) / 2))
                    h_pad_size = int(
                        math.ceil(
                            (math.pow(2, math.ceil(math.log(h, 2))) - h) / 2))

                    X[i] = np.lib.pad(X[i], ((0, 0), (w_pad_size, w_pad_size),
                                             (h_pad_size, h_pad_size)),
                                      'constant',
                                      constant_values=0)

                    (_, w, h) = X[i].shape

                    # check if dimensions are even

                    if w & 1:
                        X[i] = X[i][:, 1:, :]

                    if h & 1:
                        X[i] = X[i][:, :, 1:]
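                    # Worked example: w = 117 -> next power of two is 128, so
                    # pad ceil((128 - 117) / 2) = 6 pixels per side -> 129, which
                    # is odd, so the parity check above trims one back off -> 128.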

            else:
                X[i] = x

        if normalize_volumes:
            for i, x in enumerate(X):
                X[i] = X[i] / np.mean(x)

        if rotate_mult > 0:
            for i, x in enumerate(X):
                X[i][X[i] < 0.25] = 0

        return X
y = h(x)

plt.figure(1)
plt.plot(x, P(x), label='P(x)')
plt.plot(x, h(x), label='h(x)')
plt.title('Functions P(x) and h(x)')
plt.xlabel('x')
plt.legend()
plt.show()

n = len(x)

z = np.zeros(2 * n - 1)
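# Direct-summation convolution: z[k] = sum_j P(x_j) * h(x_{k-j}); h(-x) flips
# the kernel and nt.shift slides the flipped copy one sample per iteration.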

for i in range(1, 2 * n):
    z[i - 1] = np.multiply(P(x), nt.shift(h(-x), -n + i)).sum()

plt.figure(2)
plt.plot(z, label='z(x)')
plt.title('Convolution of P(x) and h(x)')
plt.xlabel('x')
plt.ylabel('z(x)')
plt.legend()
plt.show()

# Note: the time scale of the result is off (to be corrected).

#%% Exercise 2: Filtering (edge detection) of the image Lenna.png
#               using the Sobel kernel and two-dimensional convolution.

import numpy as np
from scipy.ndimage.interpolation import shift


def shift_image(image, dx, dy, width=28, height=28):
    # shift a flattened image by (dx, dy) pixels, filling exposed edges with 0
    image = image.reshape((width, height))
    shifted_image = shift(image, [dy, dx], cval=0, mode="constant")
    return shifted_image
示例#54
0
if seq_model:  # leading condition lost from the excerpt; inferred from the use of seq_model below
    N_snt = len(data_name)
    N_batches = int(N_snt / batch_size)
else:
    N_ex_tr = data_set.shape[0]
    N_batches = int(N_ex_tr / batch_size)

beg_batch = 0
end_batch = batch_size

snt_index = 0
beg_snt = 0

start_time = time.time()

# array of sentence lengths
arr_snt_len = shift(shift(data_end_index, -1) - data_end_index, 1)
arr_snt_len[0] = data_end_index[0]
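# Worked example: data_end_index = [3, 7, 12]  ->  arr_snt_len = [3, 4, 5]
# (each sentence's length is the difference of consecutive end indices).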

loss_sum = 0
err_sum = 0

inp_dim = data_set.shape[1]

for i in range(N_batches):

    max_len = 0

    if seq_model:

        max_len = int(max(arr_snt_len[snt_index:snt_index + batch_size]))
        inp = torch.zeros(max_len, batch_size, inp_dim).contiguous()
示例#55
0
    run_lst = np.loadtxt(subject + '/' + 'short_run_list.txt', dtype=str)

    bold_fname = []

    for i in run_lst:

        all_runs = (subject + '/' + subject + '.nii/' + i + '.nii')

        bold_fname.append(all_runs)

    vt = fmri_dataset(subject + '/vt.nii')  #load mask
    #vt.shape

    conditions = loadmat(subject + '/conds_short_tlrc.mat')
    conditions = conditions['conds_short_tlrc']
    conditions_sh2 = shift(conditions, [0, 2], cval=0)  #shift by 2 TRs
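    # (the labels are shifted forward two volumes, presumably to allow for hemodynamic lag)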

    def convert_binary_to_multiclass(binary_conditions):
        """Convert a binary indicator representation into a multiclass representation:
        For example: convert [[1 1 1 1 0 0 0 0]
                              [0 0 0 0 1 1 1 1]]
        to [1 1 1 1 2 2 2 2]"""
        x, y = np.where(binary_conditions)
        conditions = np.zeros(binary_conditions.shape[1])
        conditions[y] = x + 1
        return conditions

    conditions_multi = convert_binary_to_multiclass(conditions_sh2)
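    # 512 TRs split into 16 contiguous runs of 32 TRs -> one run label (0..15) per TR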

    runs = np.arange(0, 512) // 32
示例#56
0
    def stacking(self,
                 star_list,
                 mean_list,
                 mean,
                 psf_type,
                 restrict_psf=None,
                 symmetry=1,
                 inverse_shift=True,
                 vmax=None,
                 vmin=None,
                 verbose=True):
        """
        Stack background-subtracted, recentered star cutouts into a
        normalized PSF kernel.

        :param star_list: list of 2d star cutouts
        :param mean_list: fitted parameters per star; the last two entries
            are the star's center_x, center_y
        :param psf_type: 'gaussian', 'pixel' or 'moffat'
        :return: normalized kernel, mean parameters, restrict_psf, shifted cutouts
        """
        num_stars = len(star_list)
        if restrict_psf is None:
            restrict_psf = [True] * num_stars
        shifteds = []
        mean_list_select = []
        for i in range(num_stars):
            if restrict_psf[i] is True:
                data = star_list[i] - mean
                if psf_type == 'gaussian' or psf_type == 'pixel':
                    amp, sigma, center_x, center_y = mean_list[i]
                elif psf_type == 'moffat':
                    amp, alpha, beta, center_x, center_y = mean_list[i]
                else:
                    raise ValueError('psf type %s not valid' % psf_type)
                data[data < 0] = 0
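                # recenter the star on the kernel grid; the extra half pixel
                # presumably converts between corner- and center-of-pixel conventions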
                if inverse_shift is True:
                    shifted = util.de_shift_kernel(data,
                                                   shift_x=-center_x - 0.5,
                                                   shift_y=-center_y - 0.,
                                                   iterations=10)
                else:
                    shifted = interp.shift(data,
                                           [-center_y - 0.5, -center_x - 0.5],
                                           order=1)
                sym_shifted = util.symmetry_average(shifted, symmetry)
                shifteds.append(sym_shifted)
                mean_list_select.append(mean_list[i])
                if verbose is True:
                    print('=== object ===', i, center_x, center_y)
                    import matplotlib.pylab as plt
                    fig, ax1 = plt.subplots()
                    im = ax1.matshow(np.log10(sym_shifted),
                                     origin='lower',
                                     vmax=vmax,
                                     vmin=vmin)

                    #v_max = np.max(np.nan_to_num(np.log10(sym_shifted)))
                    #v_min = np.min(np.nan_to_num(np.log10(sym_shifted)))

                    #v_min = max(v_max-5, v_min)
                    #im = ax1.matshow(np.log10(sym_shifted), origin='lower',vmin=v_min, vmax=v_max)
                    plt.axes(ax1)
                    fig.colorbar(im)
                    plt.show()

        combined = sum(shifteds)
        mean_list_select = np.mean(mean_list_select[:])
        """
        new = np.empty_like(combined)
        max_pix = np.max(combined)
        p = combined[combined >= max_pix/10**6]  #in the SIS regime
        new[combined < max_pix/10**6] = 0
        new[combined >= max_pix/10**6] = p
        """
        kernel = util.kernel_norm(combined)
        return kernel, mean_list_select, restrict_psf, shifteds
示例#57
0
def classify_image(image, classifier):

    #image = mahotas.imread( path )
    #imageSize = 1024
    #image = image[0:imageSize,0:imageSize]
    #image = Utility.normalizeImage( image ) - 0.5

    imageSize = image.shape[0]

    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8

    #GPU
    image_shared = theano.shared(np.float32(image), borrow=True)
    image_shared = image_shared.reshape((1, 1, imageSize, imageSize))

    fragments = [image_shared]

    print("Convolutions")

    print('#convlayers:', len(classifier.convLayers))
    for clayer in classifier.convLayers:
        newFragments = []
        print('#fragments:', len(fragments))
        for img_sh in fragments:
            convolved_image = get_convolution_output(image_shared=img_sh,
                                                     clayer=clayer)
            output = get_max_pool_fragments(convolved_image, clayer=clayer)
            newFragments.extend(output)

        fragments = newFragments

    #### now the hidden layer

    print("hidden layer")

    hidden_fragments = []

    for fragment in fragments:
        #hidden_out = get_hidden_output(image_shared=fragment, hiddenLayer=classifier.mlp.hiddenLayers[0], nHidden=200, nfilt=classifier.nkerns[-1])
        hidden_out = get_hidden_output(
            image_shared=fragment,
            hiddenLayer=classifier.mlp.hiddenLayers[0],
            nHidden=classifier.hiddenSizes[0],
            nfilt=classifier.nkerns[-1])
        hidden_fragments.append(hidden_out)

    ### VERIFIED CORRECT UNTIL HERE

    #### and the missing log reg layer

    print("logistic regression layer")

    final_fragments = []
    for fragment in hidden_fragments:
        logreg_out = get_logistic_regression_output(
            image_shared=fragment,
            logregLayer=classifier.mlp.logRegressionLayer,
            n_classes=classifier.n_classes)

        logreg_out = logreg_out.eval()
        final_fragments.append(logreg_out)

    print("assembling final image")

    prob_imgs = np.zeros(
        (classifier.n_classes, image.shape[0], image.shape[1]))
    prob_img = np.zeros(image.shape)

    offsets_tmp = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

    if len(classifier.convLayers) >= 1:
        offsets = offsets_tmp

    if len(classifier.convLayers) >= 2:
        offset_init_1 = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        offset_init_2 = offset_init_1 * 2

        offsets = np.zeros((4, 4, 2))
        for o_1 in range(4):
            for o_2 in range(4):
                offsets[o_1, o_2] = offset_init_1[o_1] + offset_init_2[o_2]

        offsets = offsets.reshape((16, 2))

    if len(classifier.convLayers) >= 3:
        offset_init_1 = offsets.copy()
        offset_init_2 = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) * 4

        offsets = np.zeros((16, 4, 2))
        for o_1 in range(16):
            for o_2 in range(4):
                offsets[o_1, o_2] = offset_init_1[o_1] + offset_init_2[o_2]

        offsets = offsets.reshape((64, 2))
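    # Each 2x2 pooling layer doubles the sampling stride, so L pooling layers
    # yield 4**L interleaved fragments; the reassembly stride below is 2**L,
    # recovered as offset_jumps = sqrt(len(offsets)).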

    # offsets = [(0,0),(0,2),(2,0),(2,2),
    #            (0,1),(0,3),(2,1),(2,3),
    #            (1,0),(1,2),(3,0),(3,2),
    #            (1,1),(1,3),(3,1),(3,3)]

    # offsets_1 = [(0,0),(0,4),(4,0),(4,4),
    #              (0,2),(0,6),(4,2),(4,6)]

    offset_jumps = np.int16(np.sqrt(len(offsets)))
    for f_fragments, o in zip(final_fragments, offsets):
        for c in range(classifier.n_classes):
            f = f_fragments[c]
            prob_img = prob_imgs[c]

            prob_size = prob_img[o[0]::offset_jumps, o[1]::offset_jumps].shape
            f_s = np.zeros(prob_size)
            f_s[:f.shape[0], :f.shape[1]] = f.copy()
            prob_img[o[0]::offset_jumps, o[1]::offset_jumps] = f_s

    #floor of patchsize/2
    probs = []
    shift_amount = np.floor(classifier.patchSize / 2)
    for c in range(prob_imgs.shape[0]):
        prob_img = shift(prob_imgs[c], (shift_amount, shift_amount))
        prob_img = prob_img.flatten()

        if len(probs) == 0:
            probs = prob_img
        else:
            probs = np.column_stack((probs, prob_img))

    predicted_classes = np.argmax(probs, axis=1)
    predicted_probs = np.max(probs, axis=1)

    total_time = time.perf_counter() - start_time
    print("This took %f seconds." % total_time)

    return predicted_probs, predicted_classes
示例#58
0
from scipy.ndimage.interpolation import shift


def shift_image(image, dx, dy):
    # axis 0 is rows (y) and axis 1 is columns (x), so the shift vector is [dy, dx]
    image = image.reshape((28, 28))
    shifted_image = shift(image, [dy, dx], cval=0, mode='constant')
    return shifted_image
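# Example use on hypothetical data: augment an MNIST digit by shifting it one
# pixel right and two pixels down:
#     augmented = shift_image(digit, dx=1, dy=2)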
示例#59
0
def fitPRF(flux, ydim, xdim, column, row, prfn, crval1p, crval2p, cdelt1p,
           cdelt2p, interpolation, tol, guess, mode, verbose):
    """Fit single PRF model to Kepler pixel mask data"""

    # construct input summed image
    imgflux = np.empty((ydim, xdim))
    n = 0
    for i in range(ydim):
        for j in range(xdim):
            imgflux[i, j] = flux[n]
            n += 1

    # interpolate the calibrated PRF shape to the target position
    prf = np.zeros(np.shape(prfn[0]), dtype='float32')
    prfWeight = np.zeros((5), dtype='float32')
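    # weight each of the five calibration PRFs by the inverse of its distance
    # to the target position, then normalize the summed model once at the end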
    for i in range(5):
        prfWeight[i] = math.sqrt((column - crval1p[i])**2 +
                                 (row - crval2p[i])**2)
        if prfWeight[i] == 0.0:
            prfWeight[i] = 1.0e6
        prf = prf + prfn[i] / prfWeight[i]
    prf = prf / np.nansum(prf)

    # dimensions of data image
    datDimY = np.shape(imgflux)[0]
    datDimX = np.shape(imgflux)[1]

    # dimensions of data image if it had PRF-sized pixels
    prfDimY = int(datDimY / cdelt1p[0])
    prfDimX = int(datDimX / cdelt2p[0])

    # location of the data image centered on the PRF image (in PRF pixel units)
    prfY0 = int((np.shape(prf)[0] - prfDimY) / 2)
    prfX0 = int((np.shape(prf)[1] - prfDimX) / 2)

    # fit input image with model
    args = (imgflux, prf, cdelt1p[0], cdelt2p[0], prfDimY, prfDimX, prfY0,
            prfX0, interpolation, verbose)
    if mode == '2D':
        [f, y, x] = fmin_powell(kepfunc.kepler_prf_2d,
                                guess,
                                args=args,
                                xtol=tol,
                                ftol=1.0,
                                disp=False)
    elif mode == '1D':
        guess.insert(0, guess[0])
        [fy, fx, y, x] = fmin_powell(kepfunc.kepler_prf_1d,
                                     guess,
                                     args=args,
                                     xtol=tol,
                                     ftol=1.0,
                                     disp=False)
        f = (fx + fy) / 2.0

    # calculate best-fit model
    prfMod = shift(prf, [y, x], order=1, mode='constant')
    prfMod = prfMod[prfY0:prfY0 + prfDimY, prfX0:prfX0 + prfDimX]
    prfFit = keparray.rebin2D(
        prfMod,
        [np.shape(imgflux)[0], np.shape(imgflux)[1]], interpolation, True,
        False)
    prfFit = prfFit * f / cdelt1p[0] / cdelt2p[0]

    # calculate residual between data and model
    prfRes = imgflux - prfFit

    return f, y * cdelt1p[0], x * cdelt2p[0], prfMod, prfFit, prfRes
    def best_fit_and_residuals(self, fig=None):
        """
        Generate a plot of the best fit FM compared with the data_stamp and also the residuals
        Args:
            fig (matplotlib.Figure): if not None, a matplotlib Figure object

        Returns:
            fig (matplotlib.Figure): the Figure object. If input fig is None, function will make a new one

        """
        import matplotlib
        import matplotlib.pylab as plt

        if fig is None:
            fig = plt.figure(figsize=(12, 4))

        # create best fit FM
        dx = -(self.raw_RA_offset.bestfit - self.data_stamp_RA_offset_center)
        dy = self.raw_Dec_offset.bestfit - self.data_stamp_Dec_offset_center
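        # (the sign flip on dx presumably reflects RA increasing to the left,
        # i.e. east-left orientation, in sky images)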

        fm_bestfit = self.raw_flux.bestfit * sinterp.shift(
            self.fm_stamp, [dy, dx])
        if self.padding > 0:
            fm_bestfit = fm_bestfit[self.padding:-self.padding,
                                    self.padding:-self.padding]

        # make residual map
        residual_map = self.data_stamp - fm_bestfit

        # normalize all images to same scale
        colornorm = matplotlib.colors.Normalize(
            vmin=np.percentile(self.data_stamp, 0.03),
            vmax=np.percentile(self.data_stamp, 99.7))

        # plot the data_stamp
        ax1 = fig.add_subplot(131)
        im1 = ax1.imshow(self.data_stamp,
                         interpolation='nearest',
                         cmap='cubehelix',
                         norm=colornorm)
        ax1.invert_yaxis()
        ax1.set_title("Data")
        ax1.set_xlabel("X (pixels)")
        ax1.set_ylabel("Y (pixels)")

        ax2 = fig.add_subplot(132)
        im2 = ax2.imshow(fm_bestfit,
                         interpolation='nearest',
                         cmap='cubehelix',
                         norm=colornorm)
        ax2.invert_yaxis()
        ax2.set_title("Best-fit Model")
        ax2.set_xlabel("X (pixels)")

        ax3 = fig.add_subplot(133)
        im3 = ax3.imshow(residual_map,
                         interpolation='nearest',
                         cmap='cubehelix',
                         norm=colornorm)
        ax3.invert_yaxis()
        ax3.set_title("Residuals")
        ax3.set_xlabel("X (pixels)")

        fig.subplots_adjust(right=0.82)
        fig.subplots_adjust(hspace=0.4)
        ax_pos = ax3.get_position()

        cbar_ax = fig.add_axes([0.84, ax_pos.y0, 0.02, ax_pos.height])
        cb = fig.colorbar(im1, cax=cbar_ax)
        cb.set_label("Counts (DN)")

        return fig