Code example #1
File: vis.py  Project: robi56/pedal
def gallery_rgb_patches(W,show=False, rescale=False):
    """Create a gallery of image patches from <W>, with
    rgb patches aligned along columns"""
    
    n_vis, n_feats = W.shape

    # Cast the geometry to ints so the shapes and slice indices below are valid
    n_pix = int(np.sqrt(n_vis / 3))
    n_rows = int(np.floor(np.sqrt(n_feats)))
    n_cols = int(np.ceil(n_feats / float(n_rows)))
    border_pix = 1

    # INITIALIZE GALLERY CONTAINER    
    im_gallery = np.nan*np.ones((border_pix+n_rows*(border_pix+n_pix),
                   border_pix+n_cols*(border_pix+n_pix),3))

    for iw in range(n_feats):
        # RESCALE EACH IMAGE
        W_tmp = W[:,iw].copy()        
        W_tmp = W_tmp.reshape(3, n_pix, n_pix).transpose([1,2,0])

        if rescale:
            for c in range(3):
                cols = W_tmp[:,:,c]
                W_tmp[:,:,c] = (cols - cols.mean())/np.max(np.abs(cols))


        # INDEX THIS PATCH'S BLOCK IN THE GALLERY (integer row/col arithmetic)
        row, col = iw // n_cols, np.mod(iw, n_cols)
        row0 = border_pix + row * (border_pix + n_pix)
        col0 = border_pix + col * (border_pix + n_pix)
        im_gallery[row0:row0 + n_pix, col0:col0 + n_pix, :] = W_tmp
    if show:
        plt.imshow(im_gallery,interpolation='none')
        plt.axis("image")
        plt.axis("off")

    return im_gallery
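A minimal usage sketch (hypothetical data, not from the project; assumes `import numpy as np` and `import matplotlib.pyplot as plt` as in vis.py). Each column of W is one flattened RGB patch with the channels stacked first:

W = np.random.randn(3 * 8 * 8, 16)                # 16 random 8x8 RGB patches
gallery_rgb_patches(W, show=True, rescale=True)   # renders a 4x4 patch grid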
Code example #2
File: gcmt_utils.py  Project: GEMScienceTools/hmtk
def matrix_to_euler(rotmat):
    '''Inverse of euler_to_matrix().'''
    if not isinstance(rotmat, np.matrixlib.defmatrix.matrix):
        # As this calculation relies on np.matrix algebra - convert array to
        # matrix
        rotmat = np.matrix(rotmat)

    def cvec(x, y, z):
        return np.matrix([[x, y, z]]).T
    ex = cvec(1., 0., 0.)
    ez = cvec(0., 0., 1.)
    exs = rotmat.T * ex
    ezs = rotmat.T * ez
    enodes = np.cross(ez.T, ezs.T).T
    if np.linalg.norm(enodes) < 1e-10:
        enodes = exs
    enodess = rotmat * enodes
    cos_alpha = float((ez.T*ezs))
    if cos_alpha > 1.:
        cos_alpha = 1.
    if cos_alpha < -1.:
        cos_alpha = -1.
    alpha = acos(cos_alpha)
    beta = np.mod(atan2(enodes[1, 0], enodes[0, 0]), pi * 2.)
    gamma = np.mod(-atan2(enodess[1, 0], enodess[0, 0]), pi*2.)
    return unique_euler(alpha, beta, gamma)
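The `np.mod(..., pi * 2.)` calls wrap the `atan2` output, which lies in (-pi, pi], into [0, 2*pi); in isolation:

from math import atan2, pi
import numpy as np

raw = atan2(-1.0, 0.0)         # -pi/2
print(np.mod(raw, pi * 2.))    # -> 4.712..., the same angle in [0, 2*pi)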
Code example #3
File: vtest.py  Project: gitj/vegas_devel
def sync(rns=range(1, 9)):
    print "Preparing"
    for rn in rns:
        roachd[rn].write_int("sg_sync", 0x12)  # disable pps and sync
        roachd[rn].write_int("arm", 0)
    print "Waiting for even second...",
    while np.mod(time.time(), 1) > 0.1:
        pass
    while np.mod(time.time(), 1) < 0.1:
        pass
    tic = time.time()
    print tic
    print "Arming Roaches"
    for rn in rns:
        roachd[rn].write_int("sg_sync", 0x14)
        roachd[rn].write_int("arm", 1)
        roachd[rn].write_int("arm", 0)

    print "Done in", (time.time() - tic)
    print "Setting STTMJD"
    mjd = astro_utils.current_MJD()
    for rn in rns:
        vsd[rn].setParams(STTMJD=mjd)
    print "Done in", (time.time() - tic)
    while (time.time() - tic) < 1.2:
        pass
    print "Should have PPS now, disarming roaches"
    for rn in rns:
        roachd[rn].write_int("sg_sync", 0x10)
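The two busy-wait loops above implement a wait-until-second-boundary idiom with `np.mod(time.time(), 1)`; a standalone sketch of just that part (the function name is ours):

import time
import numpy as np

def wait_for_second_boundary(window=0.1):
    # Spin until the fractional part of the clock drops below `window`,
    # i.e. a new whole second has just begun...
    while np.mod(time.time(), 1) > window:
        pass
    # ...then spin through the window so we return just past the boundary.
    while np.mod(time.time(), 1) < window:
        pass
    return time.time()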
Code example #4
File: vis.py  Project: robi56/pedal
def gallery_gray_patches(W,show=False, rescale=False):
    """Create a gallery of image patches from <W>, with
    grayscale patches aligned along columns"""

    n_vis, n_feats = W.shape

    # Cast the geometry to ints so the shapes and slice indices below are valid
    n_pix = int(np.sqrt(n_vis))
    n_rows = int(np.floor(np.sqrt(n_feats)))
    n_cols = int(np.ceil(n_feats / float(n_rows)))
    border_pix = 1

    # INITIALIZE GALLERY CONTAINER    
    im_gallery = np.nan*np.ones((border_pix+n_rows*(border_pix+n_pix),
                   border_pix+n_cols*(border_pix+n_pix)))

    for iw in range(n_feats):
        # RESCALE EACH IMAGE
        W_tmp = W[:,iw].copy()
        if rescale:
            W_tmp = (W_tmp - W_tmp.mean())/np.max(np.abs(W_tmp))

        W_tmp = W_tmp.reshape(n_pix, n_pix)

        # INDEX THIS PATCH'S BLOCK IN THE GALLERY (integer row/col arithmetic)
        row, col = iw // n_cols, np.mod(iw, n_cols)
        row0 = border_pix + row * (border_pix + n_pix)
        col0 = border_pix + col * (border_pix + n_pix)
        im_gallery[row0:row0 + n_pix, col0:col0 + n_pix] = W_tmp
    if show:
        plt.imshow(im_gallery,interpolation='none')
        plt.axis("image")
        plt.axis("off")

    return im_gallery
Code example #5
File: quiver.py  Project: AmitAronovitch/matplotlib
    def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
        '''
        Find how many of each of the tail pieces is necessary.  *flag*
        specifies the increment for a flag, *full* for a full barb, and
        *half* for half a barb.  Mag should be the magnitude of a vector
        (i.e. >= 0).

        This returns a tuple of:

            (*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)

        *half_flag* is a boolean whether half of a barb is needed,
        since there should only ever be one half on a given
        barb. *empty_flag* is an array of flags to easily tell if
        a barb is empty (too low to plot any barbs/flags).
        '''

        #If rounding, round to the nearest multiple of half, the smallest
        #increment
        if rounding:
            mag = half * (mag / half + 0.5).astype(int)

        num_flags = np.floor(mag / flag).astype(int)
        mag = np.mod(mag, flag)

        num_barb = np.floor(mag / full).astype(int)
        mag = np.mod(mag, full)

        half_flag = mag >= half
        empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))

        return num_flags, num_barb, half_flag, empty_flag
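A worked example with the default increments (half=5, full=10, flag=50): a magnitude of 65 decomposes into one flag (15 left), one full barb (5 left), and a half barb:

import numpy as np
mag = np.array([65.0])
print(np.floor(mag / 50), np.floor(np.mod(mag, 50) / 10), np.mod(mag, 10) >= 5)
# -> [1.] [1.] [ True]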
Code example #6
File: wcs_util.py  Project: hamogu/aplpy
def b1950toj2000(ra, dec):
    """
    Convert B1950 to J2000 coordinates.

    This routine is based on the technique described at
    http://www.stargazing.net/kepler/b1950.html
    """

    # Convert to radians
    ra = np.radians(ra)
    dec = np.radians(dec)

    # Convert RA, Dec to rectangular coordinates
    x = np.cos(ra) * np.cos(dec)
    y = np.sin(ra) * np.cos(dec)
    z = np.sin(dec)

    # Apply the precession matrix
    x2 = P1[0, 0] * x + P1[1, 0] * y + P1[2, 0] * z
    y2 = P1[0, 1] * x + P1[1, 1] * y + P1[2, 1] * z
    z2 = P1[0, 2] * x + P1[1, 2] * y + P1[2, 2] * z

    # Convert the new rectangular coordinates back to RA, Dec
    ra = np.arctan2(y2, x2)
    dec = np.arcsin(z2)

    # Convert to degrees
    ra = np.degrees(ra)
    dec = np.degrees(dec)

    # Make sure ra is between 0. and 360.
    ra = np.mod(ra, 360.0)
    dec = np.mod(dec + 90.0, 180.0) - 90.0

    return ra, dec
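The two closing lines are a generic wrap-into-range idiom; numerically:

import numpy as np
ra, dec = -30.0, -100.0                  # hypothetical out-of-range values
print(np.mod(ra, 360.0))                 # -> 330.0, in [0, 360)
print(np.mod(dec + 90.0, 180.0) - 90.0)  # -> 80.0, folded into [-90, 90)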
Code example #7
File: display.py  Project: Cortexelus/librosa
    def __call__(self, x, pos=None):
        '''Return the time format as pos'''

        _, dmax = self.axis.get_data_interval()
        vmin, vmax = self.axis.get_view_interval()

        # In lag-time axes, anything greater than dmax / 2 is negative time
        if self.lag and x >= dmax * 0.5:
            # In lag mode, don't tick past the limits of the data
            if x > dmax:
                return ''
            value = np.abs(x - dmax)
            # Do we need to tweak vmin/vmax here?
            sign = '-'
        else:
            value = x
            sign = ''

        if vmax - vmin > 3600:
            s = '{:d}:{:02d}:{:02d}'.format(int(value / 3600.0),
                                            int(np.mod(value / 60.0, 60)),
                                            int(np.mod(value, 60)))
        elif vmax - vmin > 60:
            s = '{:d}:{:02d}'.format(int(value / 60.0),
                                     int(np.mod(value, 60)))
        else:
            s = '{:.2g}'.format(value)

        return '{:s}{:s}'.format(sign, s)
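For a value spanning hours, the first branch produces, e.g.:

import numpy as np
value = 3725.0   # seconds
print('{:d}:{:02d}:{:02d}'.format(int(value / 3600.0),
                                  int(np.mod(value / 60.0, 60)),
                                  int(np.mod(value, 60))))   # -> 1:02:05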
Code example #8
File: __init__.py  Project: abelcarreras/DynaPhoPy
def dynaphopy_order(i, size):
    # Decompose flat supercell index i into (x, y, z, k) grid coordinates.
    # Floor division (//) keeps the intended Python 2 integer-division behaviour.
    x = np.mod(i, size[0])
    y = np.mod(i, size[0]*size[1]) // size[0]
    z = np.mod(i, size[0]*size[1]*size[2]) // (size[1]*size[0])
    k = i // (size[1]*size[0]*size[2])

    return np.array([x, y, z, k])
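A quick check with a hypothetical 2 x 3 x 4 supercell, where index 13 = 1 + 0*2 + 2*6 + 0*24:

print(dynaphopy_order(13, (2, 3, 4)))   # -> [1 0 2 0]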
Code example #9
File: wcs_util.py  Project: hamogu/aplpy
def j2000tob1950(ra, dec):
    """
    Convert J2000 to B1950 coordinates.

    This routine was derived by taking the inverse of the b1950toj2000 routine
    """

    # Convert to radians
    ra = np.radians(ra)
    dec = np.radians(dec)

    # Convert RA, Dec to rectangular coordinates
    x = np.cos(ra) * np.cos(dec)
    y = np.sin(ra) * np.cos(dec)
    z = np.sin(dec)

    # Apply the precession matrix
    x2 = P2[0, 0] * x + P2[1, 0] * y + P2[2, 0] * z
    y2 = P2[0, 1] * x + P2[1, 1] * y + P2[2, 1] * z
    z2 = P2[0, 2] * x + P2[1, 2] * y + P2[2, 2] * z

    # Convert the new rectangular coordinates back to RA, Dec
    ra = np.arctan2(y2, x2)
    dec = np.arcsin(z2)

    # Convert to degrees
    ra = np.degrees(ra)
    dec = np.degrees(dec)

    # Make sure ra is between 0. and 360.
    ra = np.mod(ra, 360.0)
    dec = np.mod(dec + 90.0, 180.0) - 90.0

    return ra, dec
Code example #10
 def createPattern(self,details=2):
     if details in [0,1]:
         return None
     elif details == 2:
         cell=self.coords
         name=str(cell[0])+"-"+str(cell[1])
         bitDist=self.descriptor.bitDistance
         zero=[-self.descriptor.markerArea[i]/2. for i in [0,1]]
         BITS=Structure("CellBits_"+name)
         CIRCLEMARKER=self.createCircleMarker()
         bit=[int(i) for i in bin(int(cell[0]))[::-1][:-2]]
         ##TODO use descriptor.getBits(cell)
         for i,b in enumerate(bit):
             if b:
                 x=zero[0]+mod(i+1,self.descriptor.noBitsX)*bitDist
                 y=zero[1]+((i+1)//self.descriptor.noBitsY)*bitDist
                 BITS.insertElement(CIRCLEMARKER,xy=(x,y))
         bit=[int(i) for i in bin(int(cell[1]))[::-1][:-2]]
         for i,b in enumerate(bit):
             if b:
                 x=zero[0]+(self.descriptor.noBitsX-mod(i+1,self.descriptor.noBitsX)-1)*bitDist
                 y=zero[1]+(self.descriptor.noBitsY-(i+1)//self.descriptor.noBitsY-1)*bitDist
                 BITS.insertElement(CIRCLEMARKER,xy=(x,y))
         return BITS
     else:
         raise ValueError("details can be 0,1,2")
Code example #11
File: gaussian.py  Project: aFarchi/Optimal-Transport
def boundaryGaussian2(N,P,
                      A00,A01,alphaX00,alphaX01,x00,x01,
                      A10,A11,alphaX10,alphaX11,x10,x11):
    #
    # f0(x) = A00exp(-alphaX00(x-x00)^2) + A01exp(-alphaX01(x-x01)^2)
    # f1(x) = A10exp(-alphaX10(x-x10)^2) + A11exp(-alphaX11(x-x11)^2)
    #

    x00 = np.mod(x00,1.)
    x01 = np.mod(x01,1.)
    x10 = np.mod(x10,1.)
    x11 = np.mod(x11,1.)

    # Defines f0 and f1
    X  = np.linspace( 0.0 , 1.0 , N + 1 )

    f0 = ( A00 * np.exp( -alphaX00 * np.power( X - x00 , 2 ) ) +
           A01 * np.exp( -alphaX01 * np.power( X - x01 , 2 ) ) )

    f1 = ( A10 * np.exp( -alphaX10 * np.power( X - x10 , 2 ) ) +
           A11 * np.exp( -alphaX11 * np.power( X - x11 , 2 ) ) )

    temporalBoundaries = grid.TemporalBoundaries( N , P , f0 , f1 )
    spatialBoundaries  = grid.SpatialBoundaries( N , P )

    return grid.Boundaries( N , P ,
                            temporalBoundaries, spatialBoundaries )
Code example #12
  def movementCompute(self, displacement, noiseFactor = 0):
    """
    Shift the current active cells by a vector.

    @param displacement (pair of floats)
    A translation vector [di, dj].
    """

    if noiseFactor != 0:
      displacement = copy.deepcopy(displacement)
      xnoise = np.random.normal(0, noiseFactor)
      ynoise = np.random.normal(0, noiseFactor)
      displacement[0] += xnoise
      displacement[1] += ynoise


    # Calculate delta in the module's coordinates.
    phaseDisplacement = (np.matmul(self.rotationMatrix, displacement) *
                         self.phasesPerUnitDistance)

    # Shift the active coordinates.
    np.add(self.activePhases, phaseDisplacement, out=self.activePhases)

    # In Python, (x % 1.0) can return 1.0 because of floating point goofiness.
    # Generally this doesn't cause problems, it's just confusing when you're
    # debugging.
    np.round(self.activePhases, decimals=9, out=self.activePhases)
    np.mod(self.activePhases, 1.0, out=self.activePhases)

    self._computeActiveCells()
    self.phaseDisplacement = phaseDisplacement
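The floating-point caveat in the comment above is easy to reproduce in isolation:

import numpy as np
print(np.mod(-1e-18, 1.0))                        # -> 1.0 (wraps to the top)
print(np.mod(np.round(-1e-18, decimals=9), 1.0))  # -> 0.0 after rounding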
Code example #13
 def getBits(self,cell):
         zero=[-self.markerArea[i]/2. for i in [0,1]]
         bitx=[int(i) for i in bin(int(cell[0]))[::-1][:-2]]
         bity=[int(i) for i in bin(int(cell[1]))[::-1][:-2]]
         s0=int(np.log2(self.cellsPerBlock[0]*self.noBlocks[0]))
         s1=int(np.log2(self.cellsPerBlock[1]*self.noBlocks[1]))
         for i in range(s0-len(bitx)):
             bitx.append(0)
         for i in range(s1-len(bity)):
             bity.append(0)
         tx=np.zeros(s0,dtype=bool)
         ty=np.zeros(s1,dtype=bool)
         px=np.empty((s0,2))
         py=np.empty((s1,2))
         for i,b in enumerate(bitx):
             x=zero[0]+mod(i+1,self.noBitsX)*self.bitDistance
             y=zero[1]+((i+1)//self.noBitsY)*self.bitDistance
             px[i]=(x,y)
             tx[i]=b
         for i,b in enumerate(bity):
             x=zero[0]+(self.noBitsX-mod(i+1,self.noBitsX)-1)*self.bitDistance
             y=zero[1]+(self.noBitsY-(i+1)//self.noBitsY-1)*self.bitDistance
             py[i]=(x,y)
             ty[i]=b
         return px,py,tx,ty
Code example #14
def toSynthetic(theta,which = 'Pueyo', mass = 1, referenceTime = None):
    """
    Convert Keplerian orbital elements `theta` into the synthetic
    parameterization selected by `which` ('Pueyo', 'alternative' or 'Chauvin').
    """
    if which == 'Pueyo':
        res = np.zeros(6)
        temp = theta
        res[0] = math.log(period(temp[0],starMass = mass))
        res[2] = math.cos(math.radians(temp[2]))
        res[3] = np.mod(temp[4]+temp[3],360)
        res[4] = np.mod(temp[4]-temp[3],360)        
        res[1] = temp[1]
        res[5] = temp[5]
    elif which == 'alternative':
        res = np.zeros(6)
        temp = theta
        res[0] = math.log(period(temp[0],starMass = mass))
        res[2] = math.cos(math.radians(temp[2]))
        res[3] = temp[3]
        res[4] = temp[4]        
        res[1] = temp[1]
        res[5] = temp[5]        
    elif which == 'Chauvin':
        stat = StatisticsMCMC()
        res = stat.uFROMx(theta,referenceTime,mass)

    return res
Code example #15
def toKepler(u, which = 'Pueyo', mass = 1, referenceTime = None):
    """
    Inverse of toSynthetic: convert synthetic parameters `u` back into
    Keplerian orbital elements.
    """
    if which == 'Pueyo':
        res = np.zeros(6)
        res[1] = u[1]
        res[5] = u[5]
        
        res[0] = semimajoraxis(math.exp(u[0]), starMass = mass)
        res[2] = math.degrees(math.acos(u[2]))
        res[3] = np.mod((u[3]-u[4])*0.5,360)
        res[4] = np.mod((u[3]+u[4])*0.5,360)
        return res
    elif which == 'alternative':
        res = np.zeros(6)
        res[1] = u[1]
        res[5] = u[5]
        
        res[0] = semimajoraxis(math.exp(u[0]), starMass = mass)
        res[2] = math.degrees(math.acos(u[2]))
        res[3] = u[3]
        res[4] = u[4]
        return res        
    elif which == 'Chauvin':
        stat = StatisticsMCMC()
        res = stat.xFROMu(u,referenceTime,mass)    
        return res
    
    return None
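In the 'Pueyo' branch, toSynthetic stores the sum and difference of the two angles modulo 360 and toKepler halves them on the way back. A quick round trip with hypothetical values (the mod-360 wrap can offset both recovered angles by 180 degrees, a pair these parameterizations appear to treat as equivalent):

import numpy as np
a3, a4 = 10.0, 40.0                                   # theta[3], theta[4]
u3, u4 = np.mod(a4 + a3, 360), np.mod(a4 - a3, 360)   # forward (toSynthetic)
print(np.mod((u3 - u4) * 0.5, 360),                   # -> 10.0 (theta[3])
      np.mod((u3 + u4) * 0.5, 360))                   # -> 40.0 (theta[4])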
Code example #16
def colon(r1, inc, r2):
    """
    Matlab's colon operator; unlike Matlab's, `inc` is always required.
    """

    s = np.sign(inc)

    if s == 0:
        return_value = np.zeros(1)
    elif s == 1:
        n = int(((r2 - r1) + 2 * np.spacing(r2 - r1)) // inc)
        return_value = np.linspace(r1, r1 + inc * n, n + 1)
    else:  # s == -1:
        # NOTE: this may be slightly off, since we anchor on the wrong end;
        # r1 should be exact, not r2
        n = int(((r1 - r2) + 2 * np.spacing(r1 - r2)) // np.abs(inc))
        temp = np.linspace(r2, r2 + np.abs(inc) * n, n + 1)
        return_value = temp[::-1]

    # If the start, step, and end are whole numbers, we should cast as int
    if(np.equal(np.mod(r1, 1), 0) and
       np.equal(np.mod(inc, 1), 0) and
       np.equal(np.mod(r2, 1), 0)):
        return return_value.astype(int)
    else:
        return return_value
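Example calls (with the whole-number test above now checking `inc`, a fractional step keeps the float dtype):

print(colon(1, 1, 5))     # -> [1 2 3 4 5]
print(colon(5, -1, 1))    # -> [5 4 3 2 1]
print(colon(0, 0.25, 1))  # -> [0.   0.25 0.5  0.75 1.  ]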
Code example #17
File: network.py  Project: iulia-ia13/pylayers
    def compute_LDPs(self,ln,RAT):
        """compute edge LDPs (location-dependent parameters)

        Parameters
        ----------

        ln : list
            list of node IDs
        RAT : string
            A specific RAT which exists in the network (if not, raises an error)

        Each computed LDP is a [value, standard deviation] pair; the
        electromagnetic solver method can be 'direct', 'Multiwall' or 'PyRay'.
        """
        p=nx.get_node_attributes(self.SubNet[RAT],'p')
        epwr=nx.get_node_attributes(self.SubNet[RAT],'epwr')
        sens=nx.get_node_attributes(self.SubNet[RAT],'sens')
        e=self.link[RAT]#self.SubNet[RAT].edges()
        re=self.relink[RAT] # reverse link aka other direction of link
        lp,lt, d, v= self.EMS.solve(p,e,'all',RAT,epwr,sens)
        lD=[{'Pr':lp[i],'TOA':lt[np.mod(i,len(e))] ,'d':d[np.mod(i,len(e))],'vis':v[i]} for i in range(len(d))]
        self.update_LDPs(iter(e+re),RAT,lD)
Code example #18
File: misc.py  Project: achuwilson/openrave
 def sampleR3(averagedist,boxdims):
     """low-discrepancy sampling using primes.
     The samples are evenly distributed with an average distance of averagedist inside the box with dimensions boxdims.
     Algorithm from "Geometric Discrepancy: An Illustrated Guide" by Jiri Matousek"""
     minaxis = numpy.argmin(boxdims)
     maxaxis = numpy.argmax(boxdims)
     meddimdist = numpy.sort(boxdims)[1]
     # convert average distance to number of samples.... do simple 3rd degree polynomial fitting...
     x = meddimdist/averagedist
     if x < 25.6:
         N = int(numpy.polyval([ -3.50181522e-01,   2.70202333e+01,  -3.10449514e+02, 1.07887093e+03],x))
     elif x < 36.8:
         N = int(numpy.polyval([  4.39770585e-03,   1.10961031e+01,  -1.40066591e+02, 1.24563464e+03],x))
     else:
         N = int(numpy.polyval([5.60147111e-01,  -8.77459988e+01,   7.34286834e+03, -1.67779452e+05],x))
     pts = numpy.zeros((N,3))
     pts[:,0] = numpy.linspace(0.0,meddimdist,N)
     pts[:,1] = meddimdist*numpy.mod(0.5+0.5*numpy.sqrt(numpy.arange(0,5.0*N,5.0)),1.0)
     pts[:,2] = meddimdist*numpy.mod(0.5+3*numpy.sqrt(numpy.arange(0,13.0*N,13.0)),1.0)
     if boxdims[minaxis] < meddimdist:
         pts = pts[pts[:,minaxis]<=boxdims[minaxis],:]
     if boxdims[maxaxis] > meddimdist:
         # have to copy across the max dimension
         numfullcopies = numpy.floor(boxdims[maxaxis]/meddimdist)
         oldpts = pts
         pts = numpy.array(oldpts)
         for i in range(int(numfullcopies)-1):
             oldpts[:,maxaxis] += meddimdist
             pts = numpy.r_[pts,oldpts]
         if boxdims[maxaxis]/meddimdist > numfullcopies:
             oldpts[:,maxaxis] += meddimdist
             pts = numpy.r_[pts,oldpts[oldpts[:,maxaxis]<=boxdims[maxaxis],:]]
     return pts
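A usage sketch (hypothetical arguments; assumes `import numpy` as in misc.py):

pts = sampleR3(0.05, numpy.array([1.0, 1.5, 2.0]))
print(pts.shape)   # (N, 3); N comes from the fitted sample-count polynomial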
Code example #19
    def plotSubstratePhaseHistograms(self, measurementList, nRows, nColumns, figsize, top=0.9, bottom=0.16, left=0.15, right=0.9, hspace=0.2, wspace=0.3,  hist_maxProbability=0.5):
        # Plot histograms of substrate phase at time of contraction start:

        
        nMeasurements = len(measurementList)
        fig, axs = plt.subplots(nRows, nColumns, figsize=figsize, facecolor='w', edgecolor='k')
        fig.subplots_adjust(top=top, bottom=bottom, left=left, right=right, hspace=hspace, wspace=wspace) 
        axs = axs.ravel()
        
        tt = np.linspace(0,2*np.pi,1000)
        yy = self.maxStrain * (0.5 - 0.5 * np.cos(tt))
        nBins = 8
        bins = np.linspace(0,2*np.pi,nBins+1)
        
        for i in range(nMeasurements):
        
            hist, rbins = np.histogram(2*np.pi*measurementList[i].subTheta[measurementList[i].cellIx], bins=bins)
            widths = np.diff(bins)
            scaling = 1/measurementList[i].cellIx.size
            hist = hist*scaling
            axs[i].bar(rbins[:-1], hist, widths)

            if self.cellNaturalFreq == 0:
                print("Don't forget to update cellNaturalFreq")
            axs[i].set_xlim([0,2*np.pi])
            axs[i].set_xticks(np.linspace(0,2*np.pi,5))
            axs[i].set_xticklabels(['0','',r"$\pi$",'',r"2$\pi$"], fontsize=20)
            axs[i].set_ylim([0,hist_maxProbability])
            axs[i].set_yticks(np.linspace(0,hist_maxProbability,3))
            axs[i].yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
            #axs[i].set_title(r"$\Delta$=" + str(measurementList[i].Delta), fontsize=16)
            axs[i].set_title(r"$\Delta$=" + '%.2f' % measurementList[i].Delta, fontsize=16)
            
            for tl in axs[i].get_yticklabels():
                tl.set_color('b')
                tl.set_fontsize(14)
        
            ax2 = axs[i].twinx()
            ax2.plot(tt,yy,'r--')
            ax2.set_yticks(np.linspace(0,self.maxStrain,4))
            ax2.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
            for tl in ax2.get_yticklabels():
                tl.set_color('r')
                tl.set_fontsize(14)
        
            if np.mod(i,nColumns)==0:
                ax2.set_yticklabels([])
                axs[i].set_ylabel(r"$p_{c}$", color='blue', fontsize=20)
            elif np.mod(i,nColumns)==nColumns-1:
                axs[i].set_yticklabels([])
                ax2.set_ylabel(r"$\varepsilon$", color='red', fontsize=20)
            else:
                axs[i].set_yticklabels([])
                ax2.set_yticklabels([])
        
            if np.floor(i/nColumns)+1==nRows:
                axs[i].set_xlabel(r"$\phi_{substrate}$", fontsize=24)

        plt.savefig(self.experimentTitle + '_substratePhaseHistograms.eps', dpi=160, facecolor='w')
        plt.show()
Code example #20
File: magic.py  Project: mikemt/pywafo
def magic(n):
    ix = np.arange(n)+1
    J, I = np.meshgrid(ix,ix)
    A = np.mod(I+J-(n+3)/2,n)
    B = np.mod(I+2*J-2,n)
    M = n*A + B + 1
    return M
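The formula above is the classic construction for odd n (even orders need different constructions); for example:

print(magic(3).astype(int))
# [[8 1 6]
#  [3 5 7]
#  [4 9 2]]   every row, column and diagonal sums to 15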
Code example #21
    def firing_rate_modifier(self, x, rotation_symmetry_period, mirror_symmetric):
        """

        :param  x                       : Input angles in radians
        :param  rotation_symmetry_period: Rotation symmetry period: how many times in a 360 degree
                                          rotation the object looks like itself. Valid range
                                          {1, 360}. 1 = no rotation symmetry, 360 = complete
                                          symmetry.
        :param  mirror_symmetric        : Whether the object is mirror symmetric. Valid values
                                          = {1, 0} for each input angle.

        Note: dimensions of x, rotation_symmetry_period and mirror_symmetric must be equal
        """
        valid_range = 2 * np.pi / rotation_symmetry_period

        # Adjust the mean to lie within the valid range
        mu_p = np.mod(self.preferred_angle, valid_range)

        # Map input angles to allowed range
        x_p = np.mod(x, valid_range)

        # Find the mirror symmetric mean, flip across the y-axis and map to the valid range
        mu_s = np.mod(-mu_p, valid_range)

        # Adjust input angles x_p such that they are defined around
        # (-valid_range/2, valid_range/2) of the target mean. This takes care of edge effects.
        x_adj = self.adjust_angles(x_p, mu_p, valid_range)
        fire_rate_p = np.exp(-(x_adj - mu_p)**2 / (2 * self.spread ** 2))

        x_adj = self.adjust_angles(x_p, mu_s, valid_range)
        fire_rate_s = mirror_symmetric * np.exp(-(x_adj - mu_s) ** 2 / (2 * self.spread ** 2))

        # Return the maximum firing rate either from the normal or mirror symmetric gaussian
        return np.maximum(fire_rate_p, fire_rate_s)
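The folding step is the core trick: with a rotation symmetry period of 4 (the object repeats every 90 degrees), symmetric angles collapse onto one representative:

import numpy as np
valid_range = 2 * np.pi / 4
x = np.radians([10.0, 100.0, 190.0, 280.0])
print(np.mod(x, valid_range))   # all four fold onto ~0.1745 rad (10 degrees)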
Code example #22
    def champ(self):
        if self.structure: N_lame = self.N_lame-self.struct_N
        else: N_lame = self.N_lame

        force = np.zeros_like(self.lames[2, :N_lame])
        damp_min = 0.8
        damp_tau = 15.
        damp = lambda t: damp_min #+ (1.-damp_min)*np.exp(-np.abs(np.mod(t+self.period/2, self.period)-self.period/2)/damp_tau)

        smooth = lambda t: 1.-np.exp(-np.abs(np.mod(t+self.period/2, self.period)-self.period/2)**2/damp_tau**2)
        on_off = lambda t, freq: (np.sin(2*np.pi*t/self.period*freq) > 0.)

        noise = lambda t: .4 * smooth(t)
        np.random.seed(12345)
        random_timing = np.random.rand(N_lame)
        #print(np.mod(random_timing + self.t/self.period, 1))
        struct_angles = np.random.permutation( np.hstack((self.struct_angles, -np.array(self.struct_angles)))) * np.pi / 180
        #print(struct_angles, (np.mod(random_timing + self.t/self.period, 1)*len(struct_angles)).astype(np.int)) 
        angle_desired = np.zeros(N_lame)
        for i, idx in enumerate((np.mod(random_timing + self.t/self.period, 1)*len(struct_angles)).astype(int)):
            angle_desired[i] = struct_angles[idx]
        
        force -= 20 * (np.mod(self.lames[2, :N_lame] - angle_desired +np.pi/2, np.pi) - np.pi/2 ) * smooth(self.t)
        force -= 80 * (np.mod(self.lames[2, :N_lame] + np.pi/2, np.pi) - np.pi/2) * (1- smooth(self.t) )
        force += noise(self.t)*np.pi*np.random.randn(N_lame)
        force -= damp(self.t) * self.lames[3, :N_lame]/self.dt
        force = .02 * 100 * np.tanh(force/100)
        return force    
Code example #23
File: libJHTDB.py  Project: lowks/pyJHTDB
 def getBoxFilter(self,
         time, point_coords,
         data_set = 'isotropic1024coarse',
         make_modulo = False,
         field = 'velocity',
         filter_width = 7*2*np.pi / 1024):
     if not self.connection_on:
         print('you didn\'t connect to the database')
         sys.exit()
     if not (point_coords.shape[-1] == 3):
         print ('wrong number of values for coordinates in getBoxFilter')
         sys.exit()
         return None
      if not (point_coords.dtype == np.float32):
          print('point coordinates in getBoxFilter must be floats. stopping.')
          sys.exit()
          return None
     npoints = point_coords.shape[0]
     for i in range(1, len(point_coords.shape)-1):
         npoints *= point_coords.shape[i]
     if make_modulo:
         pcoords = np.zeros(point_coords.shape, np.float64)
         pcoords[:] = point_coords
         np.mod(pcoords, 2*np.pi, point_coords)
     result_array = point_coords.copy()
     self.lib.getBoxFilter(self.authToken,
              ctypes.c_char_p(data_set),
              ctypes.c_char_p(field),
              ctypes.c_float(time),
              ctypes.c_float(filter_width),
              ctypes.c_int(npoints),
              point_coords.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_float))),
              result_array.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_float))))
     return result_array
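The `make_modulo` branch uses the three-argument (output) form of `np.mod` to write the wrapped coordinates back into the input array; in isolation:

import numpy as np
pts = np.array([7.0, -1.0, 3.5], dtype=np.float32)
np.mod(pts, 2*np.pi, pts)   # third argument is the output array (in-place)
print(pts)                  # -> approximately [0.7168 5.2832 3.5]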
Code example #24
File: UllmannMap.py  Project: meyerson/BOUT_sims
def go_back(x,y,a=40,b=50,R_0 = 90,l=10,m=3,aa=0.0,q0=3.0,eps=.07):

    hit_divert = (x>b)
    inCORE = x<b
    stopevolve = hit_divert
    
    #eps = .2
    C = ((2*m*l*a**2)/(R_0*q0*b**2))*eps
    
    def func(y_out):
        return (-y + y_out - C*(x/b)**(m-2) *np.cos(m*y_out))**2
    
    def func2(y_out):
        return (-y_old + y_out + (2.0*np.pi/q) + aa*np.cos(y_out))**2
    
    y_old = copy(y)
    y_old = (newton_krylov(func,y))
    y_old = np.mod(y_old,2.0*np.pi)

    x_old = x + (m*b*C)/(m-1)*(x/b)**(m-1) *np.sin(m*y_old)

    q = q0*(x_old/a)**2

    y_old2 = copy(y_old)
    y_old2 = (newton_krylov(func2,y_old))

    #y_old2 = y_old - 2*np.pi/q #- aa*np.cos(
    y_old2 = np.mod(y_old2,2.0*np.pi)
    x_old2 = x_old*(1.0 -aa*np.sin(y_old2))



    return x_old2,y_old2
Code example #25
def L_sys_curve(function, level):
    axiom, rules, angleL, angleR, angle0 = function()
    angleL, angleR, angle0 = np.array([angleL, angleR, angle0]) * np.pi / 180.
    gen = generation(axiom, rules, level)
    print(gen)
    deux_pi, radius = 2 * np.pi, 1.0
    x0, y0, angle, stack = 0., 0., angle0, []
    x, y = [x0], [y0]
    plt.clf()
    for c in gen:
        if c == '[':
            stack.append((x0, y0, angle))
        elif c == ']':
            plt.plot(x, y, 'g')
            x0, y0, angle = stack.pop()
            x, y = [x0], [y0]
        elif c == '+':
            angle = np.mod(angle + angleR, deux_pi)
        elif c == '-':
            angle = np.mod(angle + angleL, deux_pi)
        else:
            if c == 'f':  # jump
                plt.plot(x, y, 'b')
                x, y = [], []
            x0 = x0 + radius * np.cos(angle)
            y0 = y0 + radius * np.sin(angle)
            x.append(x0)
            y.append(y0)
    plt.axis('off')  # ,axisbg=(1, 1, 1))
    plt.plot(x, y, 'g')
    plt.show()
    return
Code example #26
File: myfit.py  Project: amerand/UTILS
def PsplineXY(x,a):
    """
    periodic spline (P=1)

    a = [x0,y0, x1,y1, x2,y2...]
    x in [0,1]
    """
    coef = numpy.array(a)
    xp = numpy.zeros(3*len(coef)//2)
    yp = numpy.zeros(3*len(coef)//2)
    x0 = numpy.mod(coef[::2], 1.0)
    s = numpy.array(x0).argsort()
    xp[0:len(coef)//2]      = numpy.mod(x0[s], 1.0)-1
    xp[len(coef)//2:len(coef)] = xp[0:len(coef)//2]+1
    xp[-len(coef)//2:]      = xp[0:len(coef)//2]+2
    yp[0:len(coef)//2]      = coef[1::2][s]
    yp[len(coef)//2:len(coef)] = yp[0:len(coef)//2]
    yp[-len(coef)//2:]      = yp[0:len(coef)//2]
        
    xx = numpy.array(x).flatten()
    res = interp1d(xp, yp, kind='quadratic', \
                   bounds_error=False, fill_value=0.0)\
                   (numpy.mod(xx, 1))
    res = res.reshape(numpy.array(x).shape)
    return res
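A minimal check (hypothetical coefficients; assumes `numpy` and scipy's `interp1d` are imported as in myfit.py). The `numpy.mod(xx, 1)` wrap is what makes the spline 1-periodic:

a = [0.0, 1.0, 0.25, 0.0, 0.5, -1.0, 0.75, 0.0]   # knots as x0,y0, x1,y1, ...
print(PsplineXY([0.0, 0.5, 1.0], a))              # -> [ 1. -1.  1.], so f(0) == f(1)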
Code example #27
File: UllmannMap.py  Project: meyerson/BOUT_sims
def go_forward(x,y,a=40,b=50,R_0 = 90,l=10,m=3,aa=0.0,q0=3.0):

    hit_divert = (x>b)
    inCORE = x<b
    stopevolve = hit_divert
    
    eps = .2
    C = ((2*m*l*a**2)/(R_0*q0*b**2))*eps

    x_new = x/(1-aa*np.sin(y))
    q = q0*(x_new/a)**2
    y_new =  (y+ 2*np.pi/q + aa*np.cos(y))
    y_new = np.mod(y_new,2*np.pi)

    def func(x_out):
        return (-x_new + x_out +(m*b*C)/(m-1)*(x_out/b)**(m-1) *np.sin(m*y_new))**2
    
    x_new2 = (newton_krylov(func,x_new,method='gmres',maxiter=50))
    y_new2 = (y_new - C*(x_new2/b)**(m-2) * np.cos(m*y_new))
                                
    #print 'xchange:', x_new2/x

    x_new = x_new2
    y_new = np.mod(y_new2,2*np.pi)

    
    return x_new,y_new
Code example #28
File: spiral.py  Project: tobywaite/firemix
    def draw(self, dt):
        if self._mixer.is_onset():
            self.onset_speed_boost = self.parameter('onset-speed-boost').get()

        self.center_offset_angle += dt * self.parameter('center-speed').get() * self.onset_speed_boost
        self.hue_inner += dt * self.parameter('hue-speed').get() * self.onset_speed_boost
        self.wave_offset += dt * self.parameter('wave-speed').get() * self.onset_speed_boost
        self.color_offset += dt * self.parameter('speed').get() * self.onset_speed_boost

        self.onset_speed_boost = max(1, self.onset_speed_boost - self.parameter('onset-speed-decay').get())

        wave_hue_period = 2 * math.pi * self.parameter('wave-hue-period').get()
        wave_hue_width = self.parameter('wave-hue-width').get()
        radius_hue_width = self.parameter('radius-hue-width').get()
        angle_hue_width = self.parameter('angle-hue-width').get()

        cx, cy = self.scene().center_point()
        self.locations = np.asarray(self.scene().get_all_pixel_locations())
        x,y = self.locations.T
        x -= cx + math.cos(self.center_offset_angle) * self.parameter('center-distance').get()
        y -= cy + math.sin(self.center_offset_angle) * self.parameter('center-distance').get()
        self.pixel_distances = np.sqrt(np.square(x) + np.square(y))
        self.pixel_angles = np.arctan2(y, x) / (2.0 * math.pi)
        self.pixel_distances /= max(self.pixel_distances)

        angles = np.mod(1.0 + self.pixel_angles + np.sin(self.wave_offset + self.pixel_distances * wave_hue_period) * wave_hue_width, 1.0)
        hues = self.color_offset + (radius_hue_width * self.pixel_distances) + (2 * np.abs(angles - 0.5) * angle_hue_width)
        hues = np.int_(np.mod(hues, 1.0) * self._fader_steps)
        colors = self._fader.color_cache[hues]
        colors = colors.T
        colors[0] = np.mod(colors[0] + self.hue_inner, 1.0)
        colors = colors.T

        self._pixel_buffer = colors
Code example #29
File: readrotate.py  Project: pusher96/ms_attenuation
def calc_geoinc(trace,metric=True):
    """Compute azimuth, inclination and back-azimuth for a trace from the
    station/event coordinates in its SAC header."""
    stla=trace.stats.sac.stla
    stlo=trace.stats.sac.stlo
    stdp=trace.stats.sac.stdp
    evla=trace.stats.sac.evla
    evlo=trace.stats.sac.evlo
    evdp=trace.stats.sac.evdp
    
    
    if metric:
        baz=np.rad2deg(np.arctan2((evlo-stlo),(evla-stla)))
        EpiDist = np.sqrt((evlo-stlo)**2. +  (evla-stla)**2.)
        inc = np.rad2deg(np.arctan(EpiDist/ (evdp-stdp)))
        
        HypoDist = np.sqrt((evdp-stdp)**2. + EpiDist**2)
         
    if baz<0.:
        baz=baz+360.
        
    
    azi=np.mod(baz+180.0,360.0)
    inc=np.mod(inc+180.0,180.)

    
    return azi,inc,baz,HypoDist,EpiDist,stdp
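The final wrap, azi = np.mod(baz + 180.0, 360.0), is the usual reciprocal-bearing flip:

import numpy as np
print(np.mod(350.0 + 180.0, 360.0))   # -> 170.0, the direction opposite 350.0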
Code example #30
def hist_lastAxSub(data, row, col, plot_title = '', chBool = None, tag = 'CH'):
    """imshow basically. takes 3D data, plots first 2 dim, separates by 3rd dim"""
    numCh = data.shape[-1]
    print "numCh:", numCh
    if numCh > row*col:
        numCh = row*col
    f, axarr = plt.subplots(row, col, sharex='col', sharey='row')
    if row == 1 or col == 1:
        for v in range(numCh):
            axarr[v].hist(data[:,v])
            axarr[v].set_title(tag + str(v))
            try:
                if chBool[v] == False:
                    axarr[v].set_title('(' + tag.lower() + str(v) + ')')
            except:
                pass
        plt.suptitle(plot_title)
    else:
        for v in range(numCh):
            axarr[v//col, np.mod(v,col)].hist(data[:,v])
            axarr[v//col, np.mod(v,col)].set_title(tag + str(v))
            axarr[v//col, np.mod(v,col)].locator_params(nbins=2)
            try:
                if chBool[v] == False:
                    axarr[v//col, np.mod(v,col)].set_title('(' + tag.lower() + str(v) + ')')
            except:
                pass                         
        plt.suptitle(plot_title)
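The pair `v // col` and `np.mod(v, col)` used above maps a flat channel index onto the 2-D subplot grid:

import numpy as np
ncols = 4                            # grid with 4 columns
v = 5                                # flat channel index
print(v // ncols, np.mod(v, ncols))  # -> 1 1, i.e. row 1, column 1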
Code example #31
File: UGATIT.py  Project: SpikeKing/UGATIT
    def train(self):
        # initialize all variables
        tf.global_variables_initializer().run()

        # saver to save model
        self.saver = tf.train.Saver()

        # summary writer
        self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph)


        # restore check-point if it exists
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            start_epoch = (int)(checkpoint_counter / self.iteration)
            start_batch_id = checkpoint_counter - start_epoch * self.iteration
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            start_epoch = 0
            start_batch_id = 0
            counter = 1
            print(" [!] Load failed...")

        # loop for epoch
        start_time = time.time()
        past_g_loss = -1.
        lr = self.init_lr
        for epoch in range(start_epoch, self.epoch):
            # lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)
            if self.decay_flag :
                #lr = self.init_lr * pow(0.5, epoch // self.decay_epoch)
                lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)
            for idx in range(start_batch_id, self.iteration):
                train_feed_dict = {
                    self.lr : lr
                }

                # Update D
                _, d_loss, summary_str = self.sess.run([self.D_optim,
                                                        self.Discriminator_loss, self.D_loss], feed_dict = train_feed_dict)
                self.writer.add_summary(summary_str, counter)

                # Update G
                g_loss = None
                if (counter - 1) % self.n_critic == 0 :
                    batch_A_images, batch_B_images, fake_A, fake_B, _, g_loss, summary_str = self.sess.run([self.real_A, self.real_B,
                                                                                                            self.fake_A, self.fake_B,
                                                                                                            self.G_optim,
                                                                                                            self.Generator_loss, self.G_loss], feed_dict = train_feed_dict)
                    self.writer.add_summary(summary_str, counter)
                    past_g_loss = g_loss

                # display training status
                counter += 1
                if g_loss is None :
                    g_loss = past_g_loss
                print("Epoch: [%2d] [%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f" % (epoch, idx, self.iteration, time.time() - start_time, d_loss, g_loss))

                if np.mod(idx+1, self.print_freq) == 0 :
                    save_images(batch_A_images, [self.batch_size, 1],
                                './{}/real_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))
                    # save_images(batch_B_images, [self.batch_size, 1],
                    #             './{}/real_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))

                    # save_images(fake_A, [self.batch_size, 1],
                    #             './{}/fake_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))
                    save_images(fake_B, [self.batch_size, 1],
                                './{}/fake_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))

                if np.mod(idx + 1, self.save_freq) == 0:
                    self.save(self.checkpoint_dir, counter)



            # After an epoch, start_batch_id is set to zero
            # non-zero value is only for the first epoch after loading pre-trained model
            start_batch_id = 0

            # save model for final step
            self.save(self.checkpoint_dir, counter)
Code example #32
    output_val[ind1[0], ind1[1], ind1[2], ind1[3], ind1[4], i6,
               4] = np.std(xv)  # td_std
    output_val[ind1[0], ind1[1], ind1[2], ind1[3], ind1[4], i6,
               5] = np.mean(xv)  # td_av
    xv = [
        obj.vb for obj in c
        if obj.tb > 400 * par_vals['dt'] * np.log(2) / par_vals['lambda']
    ]
    output_val[ind1[0], ind1[1], ind1[2], ind1[3], ind1[4], i6,
               6] = np.std(xv)  # vb_std
    output_val[ind1[0], ind1[1], ind1[2], ind1[3], ind1[4], i6,
               7] = np.mean(xv)  # vb_av
    yv = [
        obj.vd for obj in c
        if obj.tb > 400 * par_vals['dt'] * np.log(2) / par_vals['lambda']
    ]
    temp = scipy.stats.linregress(xv, yv)
    output_val[ind1[0], ind1[1], ind1[2], ind1[3], ind1[4], i6,
               8] = temp[0]  # vb vd linear slope

    # saving the results so that we can restart from where we left off
    np.save(out_path, output_val)
    # showing that we have made this progress
    progress = np.array([ind])
    np.save(prog_path, progress)
    if np.mod(ind - temp3[0], 10) == 0:
        print 'I have completed: {0} repeats. Time taken ='.format(
            ind - temp3[0] + 1), time.time() - tic
    del c, obs, init_pop, xv, yv
print 'I have completed: {0} (all) repeats. Time taken ='.format(
    len(temp3)), time.time() - tic
Code example #33
    def train(self, config):

        if (config.Optimizer == "Adam"):
            cnn_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=config.beta1) \
                .minimize(self.cnn_loss)
        elif (config.Optimizer == "RMS"):
            cnn_optim = tf.train.RMSPropOptimizer(self.learning_rate).minimize(
                self.cnn_loss)
        else:
            cnn_optim = tf.train.AdagradOptimizer(self.learning_rate).minimize(
                self.cnn_loss)

        checkpoint_path = '/flush4/ver100/TKDE/DeepCU-master/Trained_chk/'

        tf.global_variables_initializer().run()

        Au_trdat, Tx_trdat, Vi_trdat, Au_trlab = self.load_train()
        Au_tsdat, Tx_tsdat, Vi_tsdat, Au_tslab = self.load_test()
        Au_vdat, Tx_vdat, Vi_vdat, Au_vlab = self.load_val()

        train_batches = Au_trdat.shape[0] // self.batch_size
        test_batches = Au_tsdat.shape[0] // self.batch_size
        val_batches = Au_vdat.shape[0] // self.batch_size

        left_index_test = Au_tsdat.shape[0] - (test_batches *
                                               config.batch_size)
        left_index_train = Au_trdat.shape[0] - (train_batches *
                                                config.batch_size)
        left_index_val = Au_vdat.shape[0] - (val_batches * config.batch_size)

        dropout_list = np.arange(0.2, 0.95, 0.05)

        for drop1 in dropout_list:

            tf.global_variables_initializer().run()
            seed = 20

            print("dropout ratio --->", drop1)

            #### Start training the model
            lr = config.learning_rate

            for epoch in range(config.epoch):
                seed += 1

                if np.mod(epoch + 1, 40) == 0:
                    lr = lr - lr * 0.1

                random_index = np.random.RandomState(seed=seed).permutation(
                    Au_trdat.shape[0])
                train_data_au = Au_trdat[random_index]
                train_data_vi = Vi_trdat[random_index]
                train_data_tx = Tx_trdat[random_index]
                train_lab_au = Au_trlab[random_index]

                for idx in range(train_batches):
                    batch_au = train_data_au[idx *
                                             config.batch_size:(idx + 1) *
                                             config.batch_size]
                    batch_vi = train_data_vi[idx *
                                             config.batch_size:(idx + 1) *
                                             config.batch_size]
                    batch_tx = train_data_tx[idx *
                                             config.batch_size:(idx + 1) *
                                             config.batch_size]
                    batch_labels = train_lab_au[idx *
                                                config.batch_size:(idx + 1) *
                                                config.batch_size]

                    _ = self.sess.run(
                        [cnn_optim],
                        feed_dict={
                            self.audio_inputs: batch_au,
                            self.video_inputs: batch_vi,
                            self.text_inputs: batch_tx,
                            self.y: batch_labels,
                            self.drop_ratio: [drop1],
                            self.learning_rate: lr
                        })

                ##### Print the loss on each epoch to monitor convergence
                ##### Apply an early stopping procedure to report results

                print("epoch", epoch)

                Val_Loss = 0.0

                random_index = np.random.permutation(Au_vdat.shape[0])
                VAL_data_au = Au_vdat[random_index]
                VAL_data_vi = Vi_vdat[random_index]
                VAL_data_tx = Tx_vdat[random_index]
                VAL_lab_au = Au_vlab[random_index]

                for idx in range(val_batches):
                    batch_au = VAL_data_au[idx * config.batch_size:(idx + 1) *
                                           config.batch_size]
                    batch_vi = VAL_data_vi[idx * config.batch_size:(idx + 1) *
                                           config.batch_size]
                    batch_tx = VAL_data_tx[idx * config.batch_size:(idx + 1) *
                                           config.batch_size]
                    batch_labels = VAL_lab_au[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]

                    Val_Loss += self.Accuracy.eval({
                        self.audio_inputs: batch_au,
                        self.video_inputs: batch_vi,
                        self.text_inputs: batch_tx,
                        self.y: batch_labels
                    })

                # The leftover validation batch must come from the validation
                # arrays, not the training arrays
                batch_au = VAL_data_au[-left_index_val:]
                batch_vi = VAL_data_vi[-left_index_val:]
                batch_tx = VAL_data_tx[-left_index_val:]
                batch_labels = VAL_lab_au[-left_index_val:]

                Val_Loss += self.Accuracy.eval({
                    self.audio_inputs: batch_au,
                    self.video_inputs: batch_vi,
                    self.text_inputs: batch_tx,
                    self.y: batch_labels
                })

                Val_MAE = Val_Loss / (Au_vdat.shape[0])

                ### Check the training loss
                Tr_Loss = 0.0

                random_index = np.random.permutation(Au_trdat.shape[0])
                train_data_au = Au_trdat[random_index]
                train_data_vi = Vi_trdat[random_index]
                train_data_tx = Tx_trdat[random_index]
                train_lab_au = Au_trlab[random_index]

                for idx in range(train_batches):
                    batch_au = train_data_au[idx *
                                             config.batch_size:(idx + 1) *
                                             config.batch_size]
                    batch_vi = train_data_vi[idx *
                                             config.batch_size:(idx + 1) *
                                             config.batch_size]
                    batch_tx = train_data_tx[idx *
                                             config.batch_size:(idx + 1) *
                                             config.batch_size]
                    batch_labels = train_lab_au[idx *
                                                config.batch_size:(idx + 1) *
                                                config.batch_size]

                    Tr_Loss += self.Accuracy.eval({
                        self.audio_inputs: batch_au,
                        self.video_inputs: batch_vi,
                        self.text_inputs: batch_tx,
                        self.y: batch_labels
                    })

                batch_au = train_data_au[-left_index_train:]
                batch_vi = train_data_vi[-left_index_train:]
                batch_tx = train_data_tx[-left_index_train:]
                batch_labels = train_lab_au[-left_index_train:]

                Tr_Loss += self.Accuracy.eval({
                    self.audio_inputs: batch_au,
                    self.video_inputs: batch_vi,
                    self.text_inputs: batch_tx,
                    self.y: batch_labels
                })

                Train_MAE = Tr_Loss / (Au_trdat.shape[0])

                Test_loss = 0.0

                for idx in range(test_batches):
                    batch_au = Au_tsdat[idx * config.batch_size:(idx + 1) *
                                        config.batch_size]
                    batch_vi = Vi_tsdat[idx * config.batch_size:(idx + 1) *
                                        config.batch_size]
                    batch_tx = Tx_tsdat[idx * config.batch_size:(idx + 1) *
                                        config.batch_size]
                    batch_labels = Au_tslab[idx * config.batch_size:(idx + 1) *
                                            config.batch_size]

                    Test_loss += self.Accuracy.eval({
                        self.audio_inputs: batch_au,
                        self.video_inputs: batch_vi,
                        self.text_inputs: batch_tx,
                        self.y: batch_labels
                    })

                ### Handle the leftover examples that do not fill a whole batch
                batch_au = Au_tsdat[-left_index_test:]
                batch_vi = Vi_tsdat[-left_index_test:]
                batch_tx = Tx_tsdat[-left_index_test:]
                batch_labels = Au_tslab[-left_index_test:]

                Test_loss += self.Accuracy.eval({
                    self.audio_inputs: batch_au,
                    self.video_inputs: batch_vi,
                    self.text_inputs: batch_tx,
                    self.y: batch_labels
                })

                Test_MAE = Test_loss / Au_tsdat.shape[0]

                print(" ******* MOSI Results ************ ")
                print("Train MAE ---->", Train_MAE)
                print("VAl MAE ---->", Val_MAE)
                print("Test MAE ---->", Test_MAE)

        print('********** Iterations Terminated **********')
Code example #34
def time_ticks(locs, *args, **kwargs):  # pylint: disable=star-args
    '''Plot time-formatted axis ticks.
    Parameters
    ----------
    locations : list or np.ndarray
        Time-stamps for tick marks
    n_ticks : int > 0 or None
        Show this number of ticks (evenly spaced).
        If none, all ticks are displayed.
        Default: 5
    axis : 'x' or 'y'
        Which axis should the ticks be plotted on?
        Default: 'x'
    time_fmt : None or {'ms', 's', 'm', 'h'}
        - 'ms': milliseconds   (eg, 241ms)
        - 's': seconds         (eg, 1.43s)
        - 'm': minutes         (eg, 1:02)
        - 'h': hours           (eg, 1:02:03)
        If none, the format is automatically selected by the
        range of the times data.
        Default: None
    fmt : str
        .. warning:: This parameter name was deprecated in librosa 0.4.2.
            Use the `time_fmt` parameter instead.
            The `fmt` parameter will be removed in librosa 0.5.0.
    kwargs : additional keyword arguments.
        See `matplotlib.pyplot.xticks` or `yticks` for details.
    Returns
    -------
    locs
    labels
        Locations and labels of tick marks
    See Also
    --------
    matplotlib.pyplot.xticks
    matplotlib.pyplot.yticks
    Examples
    --------
    >>> # Tick at pre-computed beat times
    >>> librosa.display.specshow(S)
    >>> librosa.display.time_ticks(beat_times)
    >>> # Set the locations of the time stamps
    >>> librosa.display.time_ticks(locations, timestamps)
    >>> # Format in seconds
    >>> librosa.display.time_ticks(beat_times, time_fmt='s')
    >>> # Tick along the y axis
    >>> librosa.display.time_ticks(beat_times, axis='y')
    '''
    from matplotlib import pyplot as plt

    n_ticks = kwargs.pop('n_ticks', 5)
    axis = kwargs.pop('axis', 'x')
    time_fmt = kwargs.pop('time_fmt', None)

    if axis == 'x':
        ticker = plt.xticks
    elif axis == 'y':
        ticker = plt.yticks
    else:
        raise ValueError("axis must be either 'x' or 'y'.")

    if len(args) > 0:
        times = args[0]
    else:
        times = locs
        locs = np.arange(len(times))

    if n_ticks is not None:
        # Slice the locations and labels evenly between 0 and the last point
        positions = np.linspace(0, len(locs) - 1, n_ticks,
                                endpoint=True).astype(int)
        locs = locs[positions]
        times = times[positions]

    # Format the labels by time
    formats = {
        'ms':
        lambda t: '{:d}ms'.format(int(1e3 * t)),
        's':
        '{:0.2f}s'.format,
        'm':
        lambda t: '{:d}:{:02d}'.format(int(t / 6e1), int(np.mod(t, 6e1))),
        'h':
        lambda t: '{:d}:{:02d}:{:02d}'.format(int(
            t / 3.6e3), int(np.mod(t / 6e1, 6e1)), int(np.mod(t, 6e1)))
    }

    if time_fmt is None:
        if max(times) > 3.6e3:
            time_fmt = 'h'
        elif max(times) > 6e1:
            time_fmt = 'm'
        elif max(times) > 1.0:
            time_fmt = 's'
        else:
            time_fmt = 'ms'

    elif time_fmt not in formats:
        raise ValueError('Invalid format: {:s}'.format(time_fmt))

    times = [formats[time_fmt](t) for t in times]

    return ticker(locs, times, **kwargs)
Code example #35
def state2coor(s):
	"""Map a flat state index onto (y, x) grid coordinates; Ly (the grid
	height) is assumed to be defined at module level."""
	return (np.mod(s,Ly), int(np.floor(1.0*s/Ly)) )
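For example, with a hypothetical grid height Ly = 4, state 6 = 1*4 + 2 decomposes as:

Ly = 4                # hypothetical grid height
print(state2coor(6))  # -> (2, 1): np.mod(6, 4) = 2, floor(6 / 4) = 1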
Code example #36
def run_cases(state_data, modifiable_states_ind, acis_state_limits, cases,
              times, schedule):
    """ Run all valid chip turn-on permutations

    Args:
    state_data (ndarray): States array, in the form returned by cmd_states.get_cmd_states.fetch_states
        modifiable_states_ind (ndarray): Numeric index of states that represent dwells with modifiable chip counts
        acis_state_limits (dict): Dictionary of ndarrays, each returning ACIS limits that match the states array for
            the specified msid/key
        cases (list): List of all permutations, where each row represents a single case (combination), and each
            column represents the number of chips to be added for each state indexed by `modifiable_states_ind`. The
            first case is assumed to be the baseline case.
        times (numpy.ndarray): Array of time values, in seconds from '1997:365:23:58:56.816' (Chandra.Time.DateTime
            epoch)
        schedule (dict): Dictionary of pitch, roll, etc. values that match the time values specified above in `times`

    Returns:
        (dict): Dictionary of case results, where each item in the dictionary is an ndarray of length `len(cases)`

    """
    all_dpa_case_results = {}
    all_dea_case_results = {}
    all_psmc_case_results = {}
    all_fp_case_results = {}

    dpa_diagnostic_results = {}
    dea_diagnostic_results = {}
    psmc_diagnostic_results = {}
    fp_diagnostic_results = {}

    all_dpa_ok = {}
    all_dea_ok = {}
    all_psmc_ok = {}
    all_fp_ok = {}

    n = -1
    loop_cases = deepcopy(cases)
    zero_case = cases[0]

    max_cases = len(cases)

    while len(loop_cases) > 0:
        n = n + 1
        case = loop_cases.pop(0)
        if np.mod(n, 10) == 0:
            print('Running case {} out of {}'.format(n + 1, max_cases))

        # Generate new schedule data for CCD and FEP count
        mod_states_ccd_count = deepcopy(state_data['ccd_count'])
        mod_states_ccd_count[modifiable_states_ind] = mod_states_ccd_count[
            modifiable_states_ind] + np.array(case)
        ccd_count = np.array(
            list((zip(mod_states_ccd_count, mod_states_ccd_count)))).reshape(
                (-1))

        mod_states_fep_count = deepcopy(state_data['fep_count'])
        mod_states_fep_count[modifiable_states_ind] = mod_states_fep_count[
            modifiable_states_ind] + np.array(case)
        fep_count = np.array(
            list((zip(mod_states_fep_count, mod_states_fep_count)))).reshape(
                (-1))

        schedule['fep_count'] = fep_count
        schedule['ccd_count'] = ccd_count

        # Run the new profile
        dpa_case_results = run_profile(times, schedule, '1dpamzt',
                                       model_specs['1dpamzt'],
                                       model_init['1dpamzt'])
        dea_case_results = run_profile(times, schedule, '1deamzt',
                                       model_specs['1deamzt'],
                                       model_init['1deamzt'])
        psmc_case_results = run_profile(times, schedule, '1pdeaat',
                                        model_specs['1pdeaat'],
                                        model_init['1pdeaat'])
        fp_case_results = run_profile(times, schedule, 'fptemp',
                                      model_specs['fptemp'],
                                      model_init['fptemp'])

        # Determine the maximum temperatures for this case
        max_dpa = get_max_dwell_mvals(dpa_case_results['1dpamzt'], state_data)
        max_dea = get_max_dwell_mvals(dea_case_results['1deamzt'], state_data)
        max_psmc = get_max_dwell_mvals(psmc_case_results['1pdeaat'],
                                       state_data)
        max_fp = get_max_dwell_mvals(fp_case_results['fptemp'], state_data)

        # Store these cases (will delete later if bad)
        all_dpa_case_results[case] = max_dpa
        all_dea_case_results[case] = max_dea
        all_psmc_case_results[case] = max_psmc
        all_fp_case_results[case] = max_fp

        # Evaluate the current case against all models
        dpa_ok = evaluate_one_case_for_one_msid(
            acis_state_limits['1dpamzt'], all_dpa_case_results[zero_case],
            max_dpa)
        dea_ok = evaluate_one_case_for_one_msid(
            acis_state_limits['1deamzt'], all_dea_case_results[zero_case],
            max_dea)
        psmc_ok = evaluate_one_case_for_one_msid(
            acis_state_limits['1pdeaat'], all_psmc_case_results[zero_case],
            max_psmc)
        fp_ok = evaluate_one_case_for_one_msid(acis_state_limits['fptemp'],
                                               all_fp_case_results[zero_case],
                                               max_fp)
        all_ok = dpa_ok & dea_ok & psmc_ok & fp_ok

        if not np.all(all_ok):
            print('Case {} is bad'.format(case))
            all_dpa_case_results.pop(case)
            all_dea_case_results.pop(case)
            all_psmc_case_results.pop(case)
            all_fp_case_results.pop(case)

            first_change = case.index(1)

            if not all_ok[modifiable_states_ind[first_change]]:
                # Eliminate all other cases that use the failing case
                original_len = len(loop_cases)
                loop_cases = [c for c in loop_cases if c[first_change] != 1]
                new_len = len(loop_cases)
                max_cases = max_cases - (original_len - new_len)

        else:
            all_dpa_ok[case] = dpa_ok
            all_dea_ok[case] = dea_ok
            all_psmc_ok[case] = psmc_ok
            all_fp_ok[case] = fp_ok

        # Store results for later inspection
        dpa_diagnostic_results[case] = {
            'times': dpa_case_results['1dpamzt'].times,
            'mvals': dpa_case_results['1dpamzt'].mvals
        }
        dea_diagnostic_results[case] = {
            'times': dea_case_results['1deamzt'].times,
            'mvals': dea_case_results['1deamzt'].mvals
        }
        psmc_diagnostic_results[case] = {
            'times': psmc_case_results['1pdeaat'].times,
            'mvals': psmc_case_results['1pdeaat'].mvals
        }
        fp_diagnostic_results[case] = {
            'times': fp_case_results['fptemp'].times,
            'mvals': fp_case_results['fptemp'].mvals
        }

    diagnostic_results = {
        '1dpamzt': dpa_diagnostic_results,
        '1deamzt': dea_diagnostic_results,
        '1pdeaat': psmc_diagnostic_results,
        'fptemp': fp_diagnostic_results
    }

    case_results = {
        '1dpamzt': all_dpa_ok,
        '1deamzt': all_dea_ok,
        '1pdeaat': all_psmc_ok,
        'fptemp': all_fp_ok,
        'ok_cases': list(all_dpa_ok.keys())
    }  # Only OK cases are kept for all models

    return case_results, diagnostic_results
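
A minimal standalone sketch of the pruning step above, assuming each case is a binary tuple marking which modifiable states get an extra CCD/FEP; a failure at the first changed state eliminates every remaining case that flips the same state (`prune_failing_cases` is a hypothetical name):

def prune_failing_cases(loop_cases, failing_case):
    # Index of the first state this case modified
    first_change = failing_case.index(1)
    # Drop every remaining case that also flips the failing state
    return [c for c in loop_cases if c[first_change] != 1]

remaining = prune_failing_cases(
    [(0, 1, 0), (1, 1, 0), (1, 0, 1)], failing_case=(1, 0, 0))
print(remaining)  # [(0, 1, 0)]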
コード例 #37
0
ファイル: base_1124_11.py プロジェクト: mf1611/NFL
def correction_df(df):
    def correction_wind(df):
        # Some rows have WindSpeed and WindDirection values swapped; these maps swap them back
        ws_to_wd = {"SSW": 13, "E": 8, "SE": 1}
        wd_to_ws = {13: "SSW", 8: "E", 1: "SE"}

        def clean_ws(x):
            x = str(x).upper().replace(" ", "").replace("MPH", "")
            if "GUSTSUPTO" in x:
                return np.mean(np.array(x.split("GUSTSUPTO"), dtype=int))
            elif "-" in x:
                return np.mean(np.array(x.split("-"), dtype=int))
            elif x == "CALM" or x == "99":
                return 0.0
            else:
                try:
                    return float(x)
                except ValueError:
                    return x

        def clean_wd(x):
            try:
                return float(x)
            except ValueError:
                return x

        df["WindSpeed"] = df["WindSpeed"].fillna(99).apply(
            lambda x: clean_ws(x))
        df["WindDirection"] = df["WindDirection"].apply(lambda x: clean_wd(x))
        df["WindSpeed"] = [
            ws_to_wd[ws] if ws in ws_to_wd.keys() else ws
            for ws in df["WindSpeed"]
        ]
        df["WindDirection"] = [
            wd_to_ws[wd] if wd in wd_to_ws.keys() else wd
            for wd in df["WindDirection"]
        ]

        return df

    map_abbr = {'ARI': 'ARZ', 'BAL': 'BLT', 'CLE': 'CLV', 'HOU': 'HST'}
    for abb in df['PossessionTeam'].unique():
        map_abbr[abb] = abb

    def clean_StadiumType(txt):
        if pd.isna(txt):
            return np.nan
        txt = txt.lower()
        txt = ''.join([c for c in txt if c not in punctuation])
        txt = re.sub(' +', ' ', txt)
        txt = txt.strip()
        txt = txt.replace('outside', 'outdoor')
        txt = txt.replace('outdor', 'outdoor')
        txt = txt.replace('outddors', 'outdoor')
        txt = txt.replace('outdoors', 'outdoor')
        txt = txt.replace('oudoor', 'outdoor')
        txt = txt.replace('indoors', 'indoor')
        txt = txt.replace('ourdoor', 'outdoor')
        txt = txt.replace('retractable', 'rtr.')
        return txt

    def transform_StadiumType(txt):
        if pd.isna(txt):
            return np.nan
        if 'outdoor' in txt or 'open' in txt:
            return str(1)
        if 'indoor' in txt or 'closed' in txt:
            return str(0)

        return np.nan

    #from https://www.kaggle.com/c/nfl-big-data-bowl-2020/discussion/112681#latest-649087
    Turf = {
        'Field Turf': 'Artificial',
        'A-Turf Titan': 'Artificial',
        'Grass': 'Natural',
        'UBU Sports Speed S5-M': 'Artificial',
        'Artificial': 'Artificial',
        'DD GrassMaster': 'Artificial',
        'Natural Grass': 'Natural',
        'UBU Speed Series-S5-M': 'Artificial',
        'FieldTurf': 'Artificial',
        'FieldTurf 360': 'Artificial',
        'Natural grass': 'Natural',
        'grass': 'Natural',
        'Natural': 'Natural',
        'Artifical': 'Artificial',
        'FieldTurf360': 'Artificial',
        'Naturall Grass': 'Natural',
        'Field turf': 'Artificial',
        'SISGrass': 'Artificial',
        'Twenty-Four/Seven Turf': 'Artificial',
        'natural grass': 'Natural'
    }

    def height2cm(h):
        # height is given as "feet-inches" (e.g. "6-2"); 1 ft = 30.48 cm, 1 in = 2.54 cm
        ft, inch = h.split('-')
        return int(ft) * 30.48 + int(inch) * 2.54

    def str_to_float(txt):
        try:
            return float(txt)
        except (ValueError, TypeError):
            return -1

    def clean_WindDirection(txt):
        if pd.isna(txt):
            return np.nan
        txt = txt.lower()
        txt = ''.join([c for c in txt if c not in punctuation])
        txt = txt.replace('from', '')
        txt = txt.replace(' ', '')
        txt = txt.replace('north', 'n')
        txt = txt.replace('south', 's')
        txt = txt.replace('west', 'w')
        txt = txt.replace('east', 'e')
        return txt

    def transform_WindDirection(txt):
        if pd.isna(txt):
            return np.nan

        if txt == 'n':
            return 0
        if txt == 'nne' or txt == 'nen':
            return 1 / 8
        if txt == 'ne':
            return 2 / 8
        if txt == 'ene' or txt == 'nee':
            return 3 / 8
        if txt == 'e':
            return 4 / 8
        if txt == 'ese' or txt == 'see':
            return 5 / 8
        if txt == 'se':
            return 6 / 8
        if txt == 'ses' or txt == 'sse':
            return 7 / 8
        if txt == 's':
            return 8 / 8
        if txt == 'ssw' or txt == 'sws':
            return 9 / 8
        if txt == 'sw':
            return 10 / 8
        if txt == 'sww' or txt == 'wsw':
            return 11 / 8
        if txt == 'w':
            return 12 / 8
        if txt == 'wnw' or txt == 'nww':
            return 13 / 8
        if txt == 'nw':
            return 14 / 8
        if txt == 'nwn' or txt == 'nnw':
            return 15 / 8
        return np.nan

    def map_weather(txt):
        """
        climate controlled or indoor => 3, sunny or sun => 2, clear => 1, cloudy => -1, rain => -2, snow => -3, others => 0
        partly => multiply by 0.5
        """
        ans = 1
        if pd.isna(txt):
            return 0
        if 'partly' in txt:
            ans *= 0.5
        if 'climate controlled' in txt or 'indoor' in txt:
            return ans * 3
        if 'sunny' in txt or 'sun' in txt:
            return ans * 2
        if 'clear' in txt:
            return ans
        if 'cloudy' in txt:
            return -ans
        if 'rain' in txt or 'rainy' in txt:
            return -2 * ans
        if 'snow' in txt:
            return -3 * ans
        return 0

    #########################################
    # formation features
    #########################################
    def split_personnel(s):
        splits = s.split(',')
        for i in range(len(splits)):
            splits[i] = splits[i].strip()

        return splits

    def defense_formation(l):
        dl = 0
        lb = 0
        db = 0
        other = 0

        for position in l:
            sub_string = position.split(' ')
            if sub_string[1] == 'DL':
                dl += int(sub_string[0])
            elif sub_string[1] in ['LB', 'OL']:
                lb += int(sub_string[0])
            else:
                db += int(sub_string[0])

        counts = (dl, lb, db, other)

        return counts

    def offense_formation(l):
        qb = 0
        rb = 0
        wr = 0
        te = 0
        ol = 0

        sub_total = 0
        qb_listed = False
        for position in l:
            sub_string = position.split(' ')
            pos = sub_string[1]
            cnt = int(sub_string[0])

            if pos == 'QB':
                qb += cnt
                sub_total += cnt
                qb_listed = True
            # Assuming LB is a line backer lined up as full back
            elif pos in ['RB', 'LB']:
                rb += cnt
                sub_total += cnt
            # Assuming DB is a defensive back and lined up as WR
            elif pos in ['WR', 'DB']:
                wr += cnt
                sub_total += cnt
            elif pos == 'TE':
                te += cnt
                sub_total += cnt
            # Assuming DL is a defensive lineman lined up as an additional line man
            else:
                ol += cnt
                sub_total += cnt

        # If not all 11 players were noted at given positions we need to make some assumptions
        # I will assume if a QB is not listed then there was 1 QB on the play
        # If a QB is listed then I'm going to assume the rest of the positions are at OL
        # This might be flawed but it looks like RB, TE and WR are always listed in the personnel
        if sub_total < 11:
            diff = 11 - sub_total
            if not qb_listed:
                qb += 1
                diff -= 1
            ol += diff

        counts = (qb, rb, wr, te, ol)

        return counts

    def personnel_features(df):
        personnel = df[[
            'GameId', 'PlayId', 'OffensePersonnel', 'DefensePersonnel'
        ]].drop_duplicates()
        personnel['DefensePersonnel'] = personnel['DefensePersonnel'].apply(
            lambda x: split_personnel(x))
        personnel['DefensePersonnel'] = personnel['DefensePersonnel'].apply(
            lambda x: defense_formation(x))
        personnel['num_DL'] = personnel['DefensePersonnel'].apply(
            lambda x: x[0])
        personnel['num_LB'] = personnel['DefensePersonnel'].apply(
            lambda x: x[1])
        personnel['num_DB'] = personnel['DefensePersonnel'].apply(
            lambda x: x[2])

        personnel['OffensePersonnel'] = personnel['OffensePersonnel'].apply(
            lambda x: split_personnel(x))
        personnel['OffensePersonnel'] = personnel['OffensePersonnel'].apply(
            lambda x: offense_formation(x))
        personnel['num_QB'] = personnel['OffensePersonnel'].apply(
            lambda x: x[0])
        personnel['num_RB'] = personnel['OffensePersonnel'].apply(
            lambda x: x[1])
        personnel['num_WR'] = personnel['OffensePersonnel'].apply(
            lambda x: x[2])
        personnel['num_TE'] = personnel['OffensePersonnel'].apply(
            lambda x: x[3])
        personnel['num_OL'] = personnel['OffensePersonnel'].apply(
            lambda x: x[4])

        # Let's create some features to specify if the OL is covered
        personnel['OL_diff'] = personnel['num_OL'] - personnel['num_DL']
        personnel['OL_TE_diff'] = (personnel['num_OL'] +
                                   personnel['num_TE']) - personnel['num_DL']
        # Let's create a feature to specify if the defense is preventing the run
        # Let's just assume 7 or more DL and LB is run prevention
        personnel['run_def'] = (personnel['num_DL'] + personnel['num_LB'] >
                                6).astype(int)

        personnel.drop(['OffensePersonnel', 'DefensePersonnel'],
                       axis=1,
                       inplace=True)

        df = pd.merge(df, personnel, on=['GameId', 'PlayId'])

        return df

    df = correction_wind(df)

    df['StadiumType'] = df['StadiumType'].apply(clean_StadiumType)
    df['StadiumType'] = df['StadiumType'].apply(transform_StadiumType)

    df['Turf'] = df['Turf'].map(Turf)
    df['Turf'] = (df['Turf'] == 'Natural') * 1

    df['PossessionTeam'] = df['PossessionTeam'].map(map_abbr)
    df['HomeTeamAbbr'] = df['HomeTeamAbbr'].map(map_abbr)
    df['VisitorTeamAbbr'] = df['VisitorTeamAbbr'].map(map_abbr)

    df['HomePossesion'] = (df['PossessionTeam'] == df['HomeTeamAbbr']) * 1
    df['Field_eq_Possession'] = (df['FieldPosition']
                                 == df['PossessionTeam']) * 1
    df['HomeField'] = (df['FieldPosition'] == df['HomeTeamAbbr']) * 1

    t_gameclock = pd.to_datetime(df['GameClock'])
    df['GameClock'] = t_gameclock.dt.minute * 60 + t_gameclock.dt.second

    df['PlayerHeight'] = df['PlayerHeight'].map(height2cm)

    df['PlayerBirthDate'] = pd.to_datetime(df['PlayerBirthDate'])
    df['Age'] = 2019 - df['PlayerBirthDate'].dt.year

    t_handoff = pd.to_datetime(df['TimeHandoff'])
    t_snap = pd.to_datetime(df['TimeSnap'])
    # total_seconds avoids wrap-around when snap and handoff straddle a minute boundary
    df['TimeDelta'] = (t_handoff - t_snap).dt.total_seconds()

    # remove "mph", replace "x-y" entries with (x+y)/2,
    # and "x gusts up to y" entries likewise
    df['WindSpeed'] = df['WindSpeed'].apply(lambda x: str(x).lower().replace(
        'mph', '').strip() if not pd.isna(x) else x)
    df['WindSpeed'] = df['WindSpeed'].apply(
        lambda x: (int(x.split('-')[0]) + int(x.split('-')[1])) / 2
        if not pd.isna(x) and isinstance(x, str) and '-' in x else x)
    df['WindSpeed'] = df['WindSpeed'].apply(
        lambda x: (int(x.split()[0]) + int(x.split()[-1])) / 2
        if not pd.isna(x) and isinstance(x, str) and 'gusts up to' in x else x)
    df['WindSpeed'] = df['WindSpeed'].apply(str_to_float)

    df['WindDirection'] = df['WindDirection'].apply(clean_WindDirection)
    df['WindDirection'] = df['WindDirection'].apply(transform_WindDirection)

    #########################################
    # winddirection
    #########################################
    map_wind = {
        1 / 8: 15 / 8,
        2 / 8: 14 / 8,
        3 / 8: 13 / 8,
        4 / 8: 12 / 8,
        5 / 8: 11 / 8,
        6 / 8: 10 / 8,
        7 / 8: 9 / 8
    }
    df.loc[df.PlayDirection == 'left',
           'WindDirection'] = df.loc[df.PlayDirection == 'left',
                                     'WindDirection'].map(map_wind)

    # binary variables to 0/1
    #df['Team'] = (df['Team'].apply(lambda x: x.strip()=='home')) * 1

    df['GameWeather'] = df['GameWeather'].str.lower()
    indoor = "indoor"
    df['GameWeather'] = df['GameWeather'].apply(
        lambda x: indoor if not pd.isna(x) and indoor in x else x)
    df['GameWeather'] = df['GameWeather'].apply(
        lambda x: x.replace('coudy', 'cloudy').replace('clouidy', 'cloudy').
        replace('party', 'partly') if not pd.isna(x) else x)
    df['GameWeather'] = df['GameWeather'].apply(lambda x: x.replace(
        'clear and sunny', 'sunny and clear') if not pd.isna(x) else x)
    df['GameWeather'] = df['GameWeather'].apply(lambda x: x.replace(
        'skies', '').replace("mostly", "").strip() if not pd.isna(x) else x)
    df['GameWeather'] = df['GameWeather'].apply(map_weather)

    df['IsRusher'] = (df['NflId'] == df['NflIdRusher']) * 1

    df['YardsLeft'] = df.apply(lambda row: 100 - row['YardLine']
                               if row['HomeField'] else row['YardLine'],
                               axis=1)
    # PlayDirection holds 'left'/'right' strings here, so compare explicitly
    df['YardsLeft'] = df.apply(
        lambda row: row['YardsLeft']
        if row['PlayDirection'] == 'right' else 100 - row['YardsLeft'],
        axis=1)

    df = personnel_features(df)

    ##########################################################
    ##########################################################
    # Convert Dir from compass degrees to standard math radians
    df['Dir'] = np.mod(90 - df.Dir, 360) * math.pi / 180.0
    df['X'] += df['S'] * np.cos(df['Dir'])
    df['Y'] += df['S'] * np.sin(df['Dir'])

    return df
コード例 #38
0
ファイル: markov.py プロジェクト: KyleMortek/pythonAI
    def state2coord(self, s):
        # transfer a state index to grid world coordinates (row, col)
        row = int(s / self.worldShape[1])
        col = np.mod(s, self.worldShape[1])
        return row, col
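
A quick illustration of the row/column arithmetic above, assuming a hypothetical 3x4 world shape:

import numpy as np

world_shape = (3, 4)  # assumed (rows, cols)
for s in (0, 5, 11):
    row = int(s / world_shape[1])
    col = np.mod(s, world_shape[1])
    print(s, (row, col))  # 0 -> (0, 0), 5 -> (1, 1), 11 -> (2, 3)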
コード例 #39
0
    def detect_face_limited(self, img, det_type=2):
        height, width, _ = img.shape
        if det_type >= 2:
            total_boxes = np.array(
                [[0.0, 0.0, img.shape[1], img.shape[0], 0.9]],
                dtype=np.float32)
            num_box = total_boxes.shape[0]

            # pad the bbox
            [dy, edy, dx, edx, y, ey, x, ex, tmpw,
             tmph] = self.pad(total_boxes, width, height)
            # (3, 24, 24) is the input shape for RNet
            input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)

            for i in range(num_box):
                tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
                tmp[dy[i]:edy[i] + 1,
                    dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,
                                               x[i]:ex[i] + 1, :]
                input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))

            output = self.RNet.predict(input_buf)

            # filter the total_boxes with threshold
            passed = np.where(output[1][:, 1] > self.threshold[1])
            total_boxes = total_boxes[passed]

            if total_boxes.size == 0:
                return None

            total_boxes[:, 4] = output[1][passed, 1].reshape((-1, ))
            reg = output[0][passed]

            # nms
            pick = nms(total_boxes, 0.7, 'Union')
            total_boxes = total_boxes[pick]
            total_boxes = self.calibrate_box(total_boxes, reg[pick])
            total_boxes = self.convert_to_square(total_boxes)
            total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])
        else:
            total_boxes = np.array(
                [[0.0, 0.0, img.shape[1], img.shape[0], 0.9]],
                dtype=np.float32)
        num_box = total_boxes.shape[0]
        [dy, edy, dx, edx, y, ey, x, ex, tmpw,
         tmph] = self.pad(total_boxes, width, height)
        # (3, 48, 48) is the input shape for ONet
        input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)

        for i in range(num_box):
            tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)
            tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,
                                                             x[i]:ex[i] + 1, :]
            input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))

        output = self.ONet.predict(input_buf)
        #print(output[2])

        # filter the total_boxes with threshold
        passed = np.where(output[2][:, 1] > self.threshold[2])
        total_boxes = total_boxes[passed]

        if total_boxes.size == 0:
            return None

        total_boxes[:, 4] = output[2][passed, 1].reshape((-1, ))
        reg = output[1][passed]
        points = output[0][passed]

        # compute landmark points
        bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
        bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1
        points[:, 0:5] = np.expand_dims(
            total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]
        points[:, 5:10] = np.expand_dims(
            total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]

        # nms
        total_boxes = self.calibrate_box(total_boxes, reg)
        pick = nms(total_boxes, 0.7, 'Min')
        total_boxes = total_boxes[pick]
        points = points[pick]

        if not self.accurate_landmark:
            return total_boxes, points

        #############################################
        # extended stage
        #############################################
        num_box = total_boxes.shape[0]
        patchw = np.maximum(total_boxes[:, 2] - total_boxes[:, 0] + 1,
                            total_boxes[:, 3] - total_boxes[:, 1] + 1)
        patchw = np.round(patchw * 0.25)

        # make it even
        patchw[np.where(np.mod(patchw, 2) == 1)] += 1

        input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)
        for i in range(5):
            x, y = points[:, i], points[:, i + 5]
            x, y = np.round(x - 0.5 * patchw), np.round(y - 0.5 * patchw)
            [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(
                np.vstack([x, y, x + patchw - 1, y + patchw - 1]).T, width,
                height)
            for j in range(num_box):
                tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)
                tmpim[dy[j]:edy[j] + 1,
                      dx[j]:edx[j] + 1, :] = img[y[j]:ey[j] + 1,
                                                 x[j]:ex[j] + 1, :]
                input_buf[j, i * 3:i * 3 + 3, :, :] = adjust_input(
                    cv2.resize(tmpim, (24, 24)))

        output = self.LNet.predict(input_buf)

        pointx = np.zeros((num_box, 5))
        pointy = np.zeros((num_box, 5))

        for k in range(5):
            # do not make a large movement
            tmp_index = np.where(np.abs(output[k] - 0.5) > 0.35)
            output[k][tmp_index[0]] = 0.5

            pointx[:, k] = np.round(points[:, k] -
                                    0.5 * patchw) + output[k][:, 0] * patchw
            pointy[:, k] = np.round(points[:, k + 5] -
                                    0.5 * patchw) + output[k][:, 1] * patchw

        points = np.hstack([pointx, pointy])
        points = points.astype(np.int32)

        return total_boxes, points
コード例 #40
0
    def train(self):

        # initialize all variables
        tf.global_variables_initializer().run()

        # graph inputs for visualize training results
        self.sample_z = np.random.uniform(-1,
                                          1,
                                          size=(self.batch_size, self.z_dim))
        self.test_codes = self.data_y[0:self.batch_size]

        # saver to save model
        self.saver = tf.train.Saver()

        # summary writer
        self.writer = tf.summary.FileWriter(
            self.log_dir + '/' + self.model_name, self.sess.graph)

        # restore check-point if it exits
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            start_epoch = checkpoint_counter // self.num_batches
            start_batch_id = checkpoint_counter - start_epoch * self.num_batches
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            start_epoch = 0
            start_batch_id = 0
            counter = 1
            print(" [!] Load failed...")

        # loop for epoch
        start_time = time.time()
        for epoch in range(start_epoch, self.epoch):

            # get batch data
            for idx in range(start_batch_id, self.num_batches):
                batch_images = self.data_X[idx * self.batch_size:(idx + 1) *
                                           self.batch_size]
                batch_codes = self.data_y[idx * self.batch_size:(idx + 1) *
                                          self.batch_size]

                batch_z = np.random.uniform(
                    -1, 1, [self.batch_size, self.z_dim]).astype(self.nptype)

                # update D network
                _, summary_str, d_loss = self.sess.run(
                    [self.training_step_op_D, self.d_sum, self.d_loss],
                    feed_dict={
                        self.inputs: batch_images,
                        self.y: batch_codes,
                        self.z: batch_z
                    })
                self.writer.add_summary(summary_str, counter)

                # update G & Q network
                _, summary_str_g, g_loss, _, summary_str_q, q_loss = self.sess.run(
                    [
                        self.training_step_op_G, self.g_sum, self.g_loss,
                        self.training_step_op_Q, self.q_sum, self.q_loss
                    ],
                    feed_dict={
                        self.z: batch_z,
                        self.y: batch_codes,
                        self.inputs: batch_images
                    })
                self.writer.add_summary(summary_str_g, counter)
                self.writer.add_summary(summary_str_q, counter)

                # display training status
                counter += 1
                print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
                      % (epoch, idx, self.num_batches, time.time() - start_time, d_loss, g_loss))

                # save training results for every 300 steps
                if np.mod(counter, 300) == 0:
                    samples = self.sess.run(self.fake_images,
                                            feed_dict={
                                                self.z: self.sample_z,
                                                self.y: self.test_codes
                                            })
                    tot_num_samples = min(self.sample_num, self.batch_size)
                    manifold_h = int(np.floor(np.sqrt(tot_num_samples)))
                    manifold_w = int(np.floor(np.sqrt(tot_num_samples)))
                    save_images(
                        samples[:manifold_h * manifold_w, :, :, :],
                        [manifold_h, manifold_w], './' +
                        check_folder(self.result_dir + '/' + self.model_dir) +
                        '/' + self.model_name +
                        '_train_{:02d}_{:04d}.png'.format(epoch, idx))

            # After an epoch, start_batch_id is set to zero
            # non-zero value is only for the first epoch after loading pre-trained model
            start_batch_id = 0

            # save model
            self.save(self.checkpoint_dir, counter)

            # show temporal results
            self.visualize_results(epoch)

        # save model for final step
        self.save(self.checkpoint_dir, counter)
コード例 #41
0
ファイル: data.py プロジェクト: warone/liquidSVM
def _isIntegerArray(x):
    if x.dtype.kind == 'i':
        return True
    return bool(np.equal(np.mod(x, 1), 0).all())
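
A short usage sketch of the helper above; float arrays count as integer arrays only when every element is whole-valued:

import numpy as np

print(_isIntegerArray(np.array([1, 2, 3])))    # True: integer dtype
print(_isIntegerArray(np.array([1.0, 2.0])))   # True: whole-valued floats
print(_isIntegerArray(np.array([1.5, 2.0])))   # False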
コード例 #42
0
def M2(x, y):
    # piecewise-constant coefficient: u2 on the first fraction m of each eps-period, u1 on the rest
    return (u2 * (np.mod(x, eps) < m * eps) + u1 *
            (np.mod(x, eps) >= m * eps))
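
A minimal evaluation sketch of M2; u1, u2, eps and m are free names in the snippet above, so the values below are assumptions:

import numpy as np

u1, u2, eps, m = 1.0, 10.0, 0.25, 0.5  # assumed coefficients and period

def M2(x, y):
    return (u2 * (np.mod(x, eps) < m * eps) + u1 *
            (np.mod(x, eps) >= m * eps))

x = np.linspace(0.0, 1.0, 9)
print(M2(x, None))  # alternates between u2 and u1 every eps/2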
コード例 #43
0
ファイル: new_60.py プロジェクト: rosswhitfield/corelli
right3 = cleanDoubles(right3, d3ave)

#left3=np.roll(left3,-6)
#right3=np.roll(right3,-6)

NeutronMass = 1.674927211e-27
meV = 1.602176487e-22

d = 6.502
distanceMtoM3 = 24.554
distanceMtoM2 = 18.052

e = []
t0 = []
for i in range(37):
    r3 = x[right3[np.mod(i + 2, 39)]]
    if r3 < 1000:
        r3 += 16666.6
    v = d / (r3 - x[right2[i]])
    e.append(v**2 * 0.5e+12 * NeutronMass / meV)
    t0.append(x[right2[i]] - distanceMtoM2 / v)
    print(i, r3, v, e[-1], t0[-1])

plt.plot(e, t0, 'o')
plt.show()

e = []
t0 = []
for i in range(38):
    l3 = x[left3[np.mod(i + 1, 38)]]
    if l3 < 1000:
コード例 #44
0
    def detect_face(self, img):
        """
            detect face over img
        Parameters:
        ----------
            img: numpy array, bgr order of shape (1, 3, n, m)
                input image
        Returns:
        -------
            bboxes: numpy array, n x 5 (x1, y1, x2, y2, score)
                bboxes
            points: numpy array, n x 10 (x1, x2 ... x5, y1, y2 ... y5)
                landmarks
        """

        # check input
        MIN_DET_SIZE = 12

        if img is None:
            return None

        # only works for color image
        if len(img.shape) != 3:
            return None

        # detected boxes
        total_boxes = []

        height, width, _ = img.shape
        minl = min(height, width)

        # get all the valid scales
        scales = []
        m = MIN_DET_SIZE / self.minsize
        minl *= m
        factor_count = 0
        while minl > MIN_DET_SIZE:
            scales.append(m * self.factor**factor_count)
            minl *= self.factor
            factor_count += 1

        #############################################
        # first stage
        #############################################
        #for scale in scales:
        #    return_boxes = self.detect_first_stage(img, scale, 0)
        #    if return_boxes is not None:
        #        total_boxes.append(return_boxes)

        sliced_index = self.slice_index(len(scales))
        total_boxes = []
        # for batch in sliced_index:
        #     local_boxes = self.Pool.map( detect_first_stage_warpper, \
        #             izip(repeat(img), self.PNets[:len(batch)], [scales[i] for i in batch], repeat(self.threshold[0])) )
        #     total_boxes.extend(local_boxes)

        for batch in sliced_index:
            local_boxes = detect_first_stage(img, self.PNet, scales[batch[0]],
                                             self.threshold[0])
            # print("local boxes: ", local_boxes)
            if local_boxes is not None:
                total_boxes.extend(local_boxes)

        # remove the Nones
        total_boxes = [i for i in total_boxes if i is not None]

        if len(total_boxes) == 0:
            return None

        total_boxes = np.vstack(total_boxes)

        if total_boxes.size == 0:
            return None

        # merge the detection from first stage
        pick = nms(total_boxes[:, 0:5], 0.7, 'Union')
        total_boxes = total_boxes[pick]

        bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
        bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1

        # refine the bboxes
        total_boxes = np.vstack([
            total_boxes[:, 0] + total_boxes[:, 5] * bbw,
            total_boxes[:, 1] + total_boxes[:, 6] * bbh,
            total_boxes[:, 2] + total_boxes[:, 7] * bbw,
            total_boxes[:, 3] + total_boxes[:, 8] * bbh, total_boxes[:, 4]
        ])

        total_boxes = total_boxes.T
        total_boxes = self.convert_to_square(total_boxes)
        total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])

        #############################################
        # second stage
        #############################################
        num_box = total_boxes.shape[0]

        # pad the bbox
        [dy, edy, dx, edx, y, ey, x, ex, tmpw,
         tmph] = self.pad(total_boxes, width, height)
        # (3, 24, 24) is the input shape for RNet
        input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)

        for i in range(num_box):
            tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
            tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,
                                                             x[i]:ex[i] + 1, :]
            input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))

        output = self.RNet.predict(input_buf)

        # filter the total_boxes with threshold
        passed = np.where(output[1][:, 1] > self.threshold[1])
        total_boxes = total_boxes[passed]

        if total_boxes.size == 0:
            return None

        total_boxes[:, 4] = output[1][passed, 1].reshape((-1, ))
        reg = output[0][passed]

        # nms
        pick = nms(total_boxes, 0.7, 'Union')
        total_boxes = total_boxes[pick]
        total_boxes = self.calibrate_box(total_boxes, reg[pick])
        total_boxes = self.convert_to_square(total_boxes)
        total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])

        #############################################
        # third stage
        #############################################
        num_box = total_boxes.shape[0]

        # pad the bbox
        [dy, edy, dx, edx, y, ey, x, ex, tmpw,
         tmph] = self.pad(total_boxes, width, height)
        # (3, 48, 48) is the input shape for ONet
        input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)

        for i in range(num_box):
            tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)
            tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,
                                                             x[i]:ex[i] + 1, :]
            input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))

        output = self.ONet.predict(input_buf)

        # filter the total_boxes with threshold
        passed = np.where(output[2][:, 1] > self.threshold[2])
        total_boxes = total_boxes[passed]

        if total_boxes.size == 0:
            return None

        total_boxes[:, 4] = output[2][passed, 1].reshape((-1, ))
        reg = output[1][passed]
        points = output[0][passed]

        # compute landmark points
        bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
        bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1
        points[:, 0:5] = np.expand_dims(
            total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]
        points[:, 5:10] = np.expand_dims(
            total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]

        # nms
        total_boxes = self.calibrate_box(total_boxes, reg)
        pick = nms(total_boxes, 0.7, 'Min')
        total_boxes = total_boxes[pick]
        points = points[pick]

        if not self.accurate_landmark:
            return total_boxes, points

        #############################################
        # extended stage
        #############################################
        num_box = total_boxes.shape[0]
        patchw = np.maximum(total_boxes[:, 2] - total_boxes[:, 0] + 1,
                            total_boxes[:, 3] - total_boxes[:, 1] + 1)
        patchw = np.round(patchw * 0.25)

        # make it even
        patchw[np.where(np.mod(patchw, 2) == 1)] += 1

        input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)
        for i in range(5):
            x, y = points[:, i], points[:, i + 5]
            x, y = np.round(x - 0.5 * patchw), np.round(y - 0.5 * patchw)
            [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(
                np.vstack([x, y, x + patchw - 1, y + patchw - 1]).T, width,
                height)
            for j in range(num_box):
                tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)
                tmpim[dy[j]:edy[j] + 1,
                      dx[j]:edx[j] + 1, :] = img[y[j]:ey[j] + 1,
                                                 x[j]:ex[j] + 1, :]
                input_buf[j, i * 3:i * 3 + 3, :, :] = adjust_input(
                    cv2.resize(tmpim, (24, 24)))

        output = self.LNet.predict(input_buf)

        pointx = np.zeros((num_box, 5))
        pointy = np.zeros((num_box, 5))

        for k in range(5):
            # do not make a large movement
            tmp_index = np.where(np.abs(output[k] - 0.5) > 0.35)
            output[k][tmp_index[0]] = 0.5

            pointx[:, k] = np.round(points[:, k] -
                                    0.5 * patchw) + output[k][:, 0] * patchw
            pointy[:, k] = np.round(points[:, k + 5] -
                                    0.5 * patchw) + output[k][:, 1] * patchw

        points = np.hstack([pointx, pointy])
        points = points.astype(np.int32)

        return total_boxes, points
コード例 #45
0
def AnDA_generate_data(GD, seed=True):
    """ Generate the true state, noisy observations and catalog of numerical simulations. """

    # initialization
    class xt:
        values = []
        time = []

    class yo:
        values = []
        time = []

    class catalog:
        analogs = []
        successors = []
        source = []

    # test on parameters
    if GD.dt_states > GD.dt_obs:
        print('Error: GD.dt_obs must be bigger than GD.dt_states')
    if (np.mod(GD.dt_obs, GD.dt_states) != 0):
        print('Error: GD.dt_obs must be a multiple of GD.dt_states')

    # use this to generate the same data for different simulations
    if seed: np.random.seed(1)

    if (GD.model == 'Lorenz_63'):

        # 5 time steps (to be in the attractor space)
        x0 = np.array([8.0, 0.0, 30.0])
        S = odeint(AnDA_Lorenz_63,
                   x0,
                   np.arange(0, 5 + 0.000001, GD.dt_integration),
                   args=(GD.parameters.sigma, GD.parameters.rho,
                         GD.parameters.beta))
        x0 = S[S.shape[0] - 1, :]

        # generate true state (xt)
        S = odeint(AnDA_Lorenz_63,
                   x0,
                   np.arange(0.01, GD.nb_loop_test + 0.000001,
                             GD.dt_integration),
                   args=(GD.parameters.sigma, GD.parameters.rho,
                         GD.parameters.beta))
        T_test = S.shape[0]
        t_xt = np.arange(0, T_test, GD.dt_states)
        xt.time = t_xt * GD.dt_integration
        xt.values = S[t_xt, :]

        # generate  partial/noisy observations (yo)
        eps = np.random.multivariate_normal(np.zeros(3),
                                            GD.sigma2_obs * np.eye(3, 3),
                                            T_test)
        yo_tmp = S[t_xt, :] + eps[t_xt, :]
        t_yo = np.arange(0, T_test, GD.dt_obs)
        i_t_obs = np.where(np.in1d(t_xt, t_yo))[0]
        yo.values = xt.values * np.nan
        yo.values[np.ix_(i_t_obs,
                         GD.var_obs)] = yo_tmp[np.ix_(i_t_obs, GD.var_obs)]
        yo.time = xt.time

        #generate catalog
        S = odeint(AnDA_Lorenz_63,
                   S[S.shape[0] - 1, :],
                   np.arange(0.01, GD.nb_loop_train + 0.000001,
                             GD.dt_integration),
                   args=(GD.parameters.sigma, GD.parameters.rho,
                         GD.parameters.beta))
        T_train = S.shape[0]
        eta = np.random.multivariate_normal(np.zeros(3),
                                            GD.sigma2_catalog * np.eye(3, 3),
                                            T_train)
        catalog_tmp = S + eta
        catalog.analogs = catalog_tmp[0:-GD.dt_states:GD.dt_states, :]
        catalog.successors = catalog_tmp[GD.dt_states::GD.dt_states, :]
        catalog.source = GD.parameters

    elif (GD.model == 'Lorenz_96'):

        # 5 time steps (to be in the attractor space)
        x0 = GD.parameters.F * np.ones(GD.parameters.J)
        j_mid = int(np.around(GD.parameters.J / 2))
        x0[j_mid] = x0[j_mid] + 0.01
        S = odeint(AnDA_Lorenz_96,
                   x0,
                   np.arange(0, 5 + 0.000001, GD.dt_integration),
                   args=(GD.parameters.F, GD.parameters.J))
        x0 = S[S.shape[0] - 1, :]

        # generate true state (xt)
        S = odeint(AnDA_Lorenz_96,
                   x0,
                   np.arange(0.01, GD.nb_loop_test + 0.000001,
                             GD.dt_integration),
                   args=(GD.parameters.F, GD.parameters.J))
        T_test = S.shape[0]
        t_xt = np.arange(0, T_test, GD.dt_states)
        xt.time = t_xt * GD.dt_integration
        xt.values = S[t_xt, :]

        # generate partial/noisy observations (yo)
        eps = np.random.multivariate_normal(
            np.zeros(GD.parameters.J), GD.sigma2_obs * np.eye(GD.parameters.J),
            T_test)
        yo_tmp = S[t_xt, :] + eps[t_xt, :]
        t_yo = np.arange(0, T_test, GD.dt_obs)
        i_t_obs = np.where(np.in1d(t_xt, t_yo))[0]
        yo.values = xt.values * np.nan
        yo.values[np.ix_(i_t_obs,
                         GD.var_obs)] = yo_tmp[np.ix_(i_t_obs, GD.var_obs)]
        yo.time = xt.time

        # generate catalog
        S = odeint(AnDA_Lorenz_96,
                   S[S.shape[0] - 1, :],
                   np.arange(0.01, GD.nb_loop_train + 0.000001,
                             GD.dt_integration),
                   args=(GD.parameters.F, GD.parameters.J))
        T_train = S.shape[0]
        eta = np.random.multivariate_normal(
            np.zeros(GD.parameters.J),
            GD.sigma2_catalog * np.eye(GD.parameters.J, GD.parameters.J),
            T_train)
        catalog_tmp = S + eta
        catalog.analogs = catalog_tmp[0:-GD.dt_states:GD.dt_states, :]
        catalog.successors = catalog_tmp[GD.dt_states::GD.dt_states, :]
        catalog.source = GD.parameters

    # reinitialize random generator number
    np.random.seed()

    return catalog, xt, yo
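
A tiny sketch of the analog/successor slicing used to build the catalog above, with an assumed dt_states of 2 and a stand-in trajectory:

import numpy as np

dt_states = 2  # assumed
traj = np.arange(10)
analogs = traj[0:-dt_states:dt_states]   # states at time t
successors = traj[dt_states::dt_states]  # states at time t + dt_states
print(analogs)     # [0 2 4 6]
print(successors)  # [2 4 6 8]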
コード例 #46
0
ファイル: rdt.py プロジェクト: EirikJaccheri/omc3
def add_freq_to_header(header, plane, rdt):
    mod_header = header.copy()
    line = _determine_line(rdt, plane)
    freq = np.mod(line @ np.array([header['Q1'], header['Q2'], 0]), 1)
    mod_header["FREQ"] = freq if freq <= 0.5 else 1 - freq
    return mod_header
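
A small sketch of the frequency folding performed above, with assumed line coefficients and tunes (the real line comes from _determine_line):

import numpy as np

line = np.array([2, -1, 0])          # assumed RDT line
tunes = np.array([0.28, 0.31, 0.0])  # assumed Q1, Q2 and a zero placeholder
freq = np.mod(line @ tunes, 1)
print(freq if freq <= 0.5 else 1 - freq)  # 0.25, mirrored into [0, 0.5]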
コード例 #47
0
  if args.steps is None or 'delta' in args.steps:
    print ('Creating delta azimuth for top-down view.')
    azimuth_top_down_path = op.join(pose_dir, 'azimuth-top-down.png')
    azimuth_top_down, mask_top_down = read_azimuth_image(azimuth_top_down_path)
   
    # The camera sees each point at a certain angle.
    origin = pose.map['map_origin']
    X, Y = azimuth_top_down.shape[1], azimuth_top_down.shape[0]
    delta_x_1D = np.arange(X) - origin['x']
    delta_y_1D = np.arange(Y) - origin['y']
    delta_x = np.dot( np.ones((Y,1)), delta_x_1D[np.newaxis,:] ).astype(float)
    delta_y = np.dot( delta_y_1D[:,np.newaxis], np.ones((1,X)) ).astype(float)
    delta_azimuth = - np.arctan2 (delta_x, -delta_y)  # 0 is north, 90 is west.
    # From [-pi, pi] to [0 360]
    delta_azimuth = np.mod( (delta_azimuth * 180. / np.pi), 360. )
    # Write for debugging.
    delta_azimuth_path = op.join(for_azimuth_dir, 'azimuth-top-down-delta.png')
    write_azimuth_image(delta_azimuth_path, delta_azimuth)
    # Top-down azimuth in camera point of view, 0 is north, 90 is west.
    azimuth_top_down = np.mod(azimuth_top_down - delta_azimuth, 360.)
    azimuth_top_down_path = op.join(for_azimuth_dir, 'azimuth-top-down-from-camera.png')
    write_azimuth_image(azimuth_top_down_path, azimuth_top_down, mask_top_down)

  if args.steps is None or '2frame' in args.steps:
    print ('Warping from top-down view to frame view.')
    azimuth_top_down_path = op.join(for_azimuth_dir, 'azimuth-top-down-from-camera.png')
    azimuth_top_down, mask_top_down = read_azimuth_image(azimuth_top_down_path)
    azimuth_top_down[mask_top_down == 0] = 0
    azimuth_frame = warpPoseToMap(azimuth_top_down, 
        args.camera_id, args.pose_id,
コード例 #48
0
ファイル: _export.py プロジェクト: gavin-s-smith/mcrforest
    def node_to_str(self, tree, node_id, criterion):
        # Generate the node content string
        if tree.n_outputs == 1:
            value = tree.value[node_id][0, :]
        else:
            value = tree.value[node_id]

        # Should labels be shown?
        labels = (self.label == 'root' and node_id == 0) or self.label == 'all'

        characters = self.characters
        node_string = characters[-1]

        # Write node ID
        if self.node_ids:
            if labels:
                node_string += 'node '
            node_string += characters[0] + str(node_id) + characters[4]

        # Write decision criteria
        if tree.children_left[node_id] != _tree.TREE_LEAF:
            # Always write node decision criteria, except for leaves
            if self.feature_names is not None:
                feature = self.feature_names[tree.feature[node_id]]
            else:
                feature = "X%s%s%s" % (characters[1],
                                       tree.feature[node_id],
                                       characters[2])
            node_string += '%s %s %s%s' % (feature,
                                           characters[3],
                                           round(tree.threshold[node_id],
                                                 self.precision),
                                           characters[4])

        # Write impurity
        if self.impurity:
            if isinstance(criterion, _criterion.FriedmanMSE):
                criterion = "friedman_mse"
            elif not isinstance(criterion, str):
                criterion = "impurity"
            if labels:
                node_string += '%s = ' % criterion
            node_string += (str(round(tree.impurity[node_id], self.precision))
                            + characters[4])

        # Write node sample count
        if labels:
            node_string += 'samples = '
        if self.proportion:
            percent = (100. * tree.n_node_samples[node_id] /
                       float(tree.n_node_samples[0]))
            node_string += (str(round(percent, 1)) + '%' +
                            characters[4])
        else:
            node_string += (str(tree.n_node_samples[node_id]) +
                            characters[4])

        # Write node class distribution / regression value
        if self.proportion and tree.n_classes[0] != 1:
            # For classification this will show the proportion of samples
            value = value / tree.weighted_n_node_samples[node_id]
        if labels:
            node_string += 'value = '
        if tree.n_classes[0] == 1:
            # Regression
            value_text = np.around(value, self.precision)
        elif self.proportion:
            # Classification
            value_text = np.around(value, self.precision)
        elif np.all(np.equal(np.mod(value, 1), 0)):
            # Classification without floating-point weights
            value_text = value.astype(int)
        else:
            # Classification with floating-point weights
            value_text = np.around(value, self.precision)
        # Strip whitespace
        value_text = str(value_text.astype('S32')).replace("b'", "'")
        value_text = value_text.replace("' '", ", ").replace("'", "")
        if tree.n_classes[0] == 1 and tree.n_outputs == 1:
            value_text = value_text.replace("[", "").replace("]", "")
        value_text = value_text.replace("\n ", characters[4])
        node_string += value_text + characters[4]

        # Write node majority class
        if (self.class_names is not None and
                tree.n_classes[0] != 1 and
                tree.n_outputs == 1):
            # Only done for single-output classification trees
            if labels:
                node_string += 'class = '
            if self.class_names is not True:
                class_name = self.class_names[np.argmax(value)]
            else:
                class_name = "y%s%s%s" % (characters[1],
                                          np.argmax(value),
                                          characters[2])
            node_string += class_name

        # Clean up any trailing newlines
        if node_string.endswith(characters[4]):
            node_string = node_string[:-len(characters[4])]

        return node_string + characters[5]
コード例 #49
0
                if part_idx >= swarm_sizes[swarm_idx]:
                    part_idx = 0
                    swarm_idx += 1

                center_x, center_y = line.split()
                center_x, center_y = float(center_x) - xlo, float(
                    center_y) - ylo
                center_y = boxsize_y - center_y

                center_x = int(1024 * (float(center_x) / boxsize_x))
                center_y = int(1024 * (float(center_y) / boxsize_y))

                sizes = swarm_sizes.copy()
                sizes[0] = 1

                drawer.ellipse(
                    circle_to_box(center_x, center_y,
                                  4 * int(np.sqrt(sizes[swarm_idx]))),
                    colors[np.mod(swarm_idx, ncolors)])
                drawer.ellipse(circle_to_box(center_x, center_y, 1), 'black')

                part_idx += 1

    frames[0].save(output_file,
                   format='GIF',
                   append_images=frames[1:],
                   save_all=True,
                   duration=100,
                   loop=0)
コード例 #50
0
coords = np.array([[102.965, 83.1086],
                   [887.738, 84.9951],
                   [107.681, 571.706]])

# 8 x 12 grid of circles: step vectors between neighbouring centres
circles_num = np.zeros((96, 3))
d_vert = (coords[2] - coords[0]) / 7
d_horz = (coords[1] - coords[0]) / 11
init_pos = coords[0]
pos = init_pos.copy()
r = np.linalg.norm(d_horz) * 0.35
circles_num[:, 2] = r

for i in range(96):
    circles_num[i, [0, 1]] = pos
    pos = pos + d_horz
    if np.mod(i, 12) == 11:  # end of a row: jump to the start of the next one
        pos = init_pos + d_vert * np.ceil(i / 12)
circles_num = circles_num.astype(int)

output = image_rot.copy()
for (x, y, r) in circles_num:
    cv2.circle(output, (x, y), r, (0, 255, 0), 10)
    cv2.rectangle(output, (x - 7, y - 7), (x + 7, y + 7), (0, 128, 255), -1)
plt.figure()
plt.imshow(output)

image_HSV = cv2.cvtColor(image_rot, cv2.COLOR_RGB2HSV)

# save these for later
result_phaselist = []
result_absorptionlist = []
globalphaselist = []
globalabslist = []

#%%
''' Optimize the model '''
print('Start optimizing')
np_meas = matlab_val  # use the previously simulated data
for iterx in range(iter_last, Niter):
    if iterx == 250:
        print('No change in learningrate!')
        #my_learningrate = my_learningrate*.1
    # try to optimize

    if iterx == 0 or not np.mod(iterx, Ndisplay):
        my_res, my_res_absorption, my_loss, my_fidelity, my_negloss, my_tvloss, myglobalphase, myglobalabs, myfwd = \
            sess.run([muscat.TF_obj, muscat.TF_obj_absorption, tf_loss, tf_fidelity, tf_negsqrloss, tf_tvloss, tf_globalphase, tf_globalabs, tf_fwd_corrected],
                     feed_dict={muscat.tf_meas: np_meas, muscat.tf_learningrate: my_learningrate, muscat.tf_lambda_tv: mylambdatv, muscat.tf_eps: myepstvval})

        print('Loss@' + str(iterx) + ': ' + str(my_loss) + ' - Fid: ' + str(my_fidelity) + ', Neg: ' + str(my_negloss) + ', TV: ' + str(my_tvloss) + ' G-Phase:' + str(myglobalphase) + ' G-ABS: ' + str(myglobalabs))
        mylosslist.append(my_loss)
        myfidelitylist.append(my_fidelity)
        myneglosslist.append(my_negloss)
        mytvlosslist.append(my_tvloss)
        result_phaselist.append(my_res)
        result_absorptionlist.append(my_res_absorption)
        globalphaselist.append(myglobalphase)
        globalabslist.append(myglobalabs)

        ''' Save Figures and Parameters '''
コード例 #52
0
    def loadMacroBatch(self, macro_idx, mini_idx):
        """
        Make sure that macro batch is loaded in the shared variable
        :param macro_idx: macro batch index
        :param mini_idx: mini batch index
        :return: None
        """
        def do_para_swap():
            if self.cfgParams.para_load is True:
                if self.isLastMacroBatch(macro_idx):
                    # copy data to RAM when ready
                    (ci, msg) = self.load_send_queue.get()
                    assert msg == self.SYNC_LOAD_FINISHED

                    for var in self.trainingVar:
                        if not hasattr(self, var):
                            raise ValueError("Variable " + var +
                                             " not defined!")
                        if self.getNumMacroBatches() > 1:
                            getattr(self, var +
                                    'DB')[:] = self.load_data_queue[var][0:(
                                        self.getNumMacroBatches() -
                                        1) * self.getNumSamplesPerMacroBatch()]
                            getattr(self, var + 'DBlast')[:] = self.alignData(
                                self.load_data_queue[var]
                                [(self.getNumMacroBatches() - 1) *
                                 self.getNumSamplesPerMacroBatch():],
                                fillData=self.load_data_queue[var])
                        else:
                            getattr(self,
                                    var + 'DB')[:] = self.load_data_queue[var]
                    self.currentChunk = ci
                    next_chunk = numpy.mod(ci + 1, self.numChunks)
                    print("Received chunk {}, requesting {}".format(
                        ci, next_chunk))
                    print("Loading chunk {}, last {}".format(
                        next_chunk, False))
                    self.load_recv_queue.put(
                        (next_chunk, self.cfgParams.load_fun_params, False))

        force_reload = (((mini_idx % self.getNumMiniBatchesPerChunk())
                         == self.getNumMiniBatchesPerMacroBatch() - 1)
                        and self.cfgParams.force_macrobatch_reload is True
                        and (self.getNumMacroBatches() == 1))
        if macro_idx != self.currentMacroBatch or force_reload is True:
            if self.cfgParams.para_augment is True:
                # copy data to GPU when ready
                old_mbi = -1
                mbi = -1
                for s in self.augment_send_queue:
                    (mbi, msg) = s.get()
                    assert msg == self.SYNC_BATCH_FINISHED
                    if old_mbi == -1:
                        old_mbi = mbi
                    else:
                        assert old_mbi == mbi

                new_data = self.augment_data_queue
                for var in self.trainingVar:
                    if not hasattr(self, var):
                        raise ValueError("Variable " + var + " not defined!")
                    # No borrow, since we modify the underlying memory
                    getattr(self, var).set_value(new_data[var], borrow=False)
                self.currentMacroBatch = mbi

                # swap data before we start augmenting new one
                do_para_swap()

                next_mbi = numpy.mod(mbi + 1, self.getNumMacroBatches())
                print("Received macro batch {}, requesting {}".format(
                    mbi, next_mbi))
                last, tidx, idxs = self.chunksForMP(next_mbi)
                print(
                    "Loading macro batch {}, last {}, start idx {}, end idx {}"
                    .format(next_mbi, last, numpy.min(idxs), numpy.max(idxs)))
                for i, r in enumerate(self.augment_recv_queue):
                    r.put((next_mbi, self.cfgParams.augment_fun_params, last,
                           tidx[i], idxs[i]))
            elif self.cfgParams.augment_fun_params['fun'] is not None:
                # single-thread augmentation
                new_data = self.augment_data_queue

                # invoke function to generate new data
                last, tidx, idxs = self.chunksForMP(macro_idx)
                print(
                    "Loading macro batch {}, last {}, start idx {}, end idx {}"
                    .format(macro_idx, last, numpy.min(idxs), numpy.max(idxs)))
                getattr(self, self.cfgParams.augment_fun_params['fun'])(
                    self.cfgParams.augment_fun_params, macro_idx, last,
                    [itm for sl in tidx for itm in sl],
                    [itm for sl in idxs for itm in sl], new_data)
                for var in self.trainingVar:
                    if not hasattr(self, var):
                        raise ValueError("Variable " + var + " not defined!")
                    # No borrow, since we modify the underlying memory
                    getattr(self, var).set_value(new_data[var], borrow=False)
                # remember current macro batch index
                self.currentMacroBatch = macro_idx

                # swap data
                do_para_swap()
            else:
                # last macro batch is handled separately, as it is padded
                if self.isLastMacroBatch(macro_idx):
                    start_idx = 0
                    end_idx = self.getNumSamplesPerMacroBatch()
                    print(
                        "Loading last macro batch {}, start idx {}, end idx {}"
                        .format(macro_idx, start_idx, end_idx))
                    self.replaceTrainingData(start_idx, end_idx, last=True)
                    # remember current macro batch index
                    self.currentMacroBatch = macro_idx
                else:
                    start_idx = macro_idx * self.getNumSamplesPerMacroBatch()
                    end_idx = min(
                        (macro_idx + 1) * self.getNumSamplesPerMacroBatch(),
                        self.train_data_xDB.shape[0])
                    print("Loading macro batch {}, start idx {}, end idx {}".
                          format(macro_idx, start_idx, end_idx))
                    self.replaceTrainingData(start_idx, end_idx)
                    # remember current macro batch index
                    self.currentMacroBatch = macro_idx

                # swap data
                do_para_swap()
コード例 #53
0
def wraptopi(phi):
    """Wrap angle(s) phi to the interval [-pi, pi)."""
    return np.mod(phi + np.pi, 2 * np.pi) - np.pi
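
# A quick sanity check of the wrap (arbitrary test angles, not from the
# original source): 3*pi/2 maps to -pi/2, -3*pi/2 maps to pi/2 and 2*pi maps
# to 0, so every output lands in [-pi, pi).
import numpy as np
print(wraptopi(np.array([3 * np.pi / 2, -3 * np.pi / 2, 2 * np.pi])))
# approximately [-pi/2, pi/2, 0]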
コード例 #54
0
# load in data set; the loader also returns the dataset's temporal sample
# rate (sample_freq) and spatial step (phase_step)
train_set, dev_set, test_set, train_ans, dev_ans, test_ans, sample_freq, phase_step = md.load_data_rr(path)

filter_time = 0.4  # s
filter_space = 10  # degrees
sum_over_space = True
num_filter = (4, 1)

filter_indices_t = int(np.ceil(filter_time * sample_freq) + 1)
filter_indices_x = int(np.ceil(filter_space / phase_step) + 1)

# filters must have odd length
assert np.mod(filter_indices_t, 2) == 1
assert np.mod(filter_indices_x, 2) == 1
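
# Note: ceil(...) + 1 is only odd for some combinations of filter_time,
# sample_freq, filter_space and phase_step, which is what the asserts above
# guard against. A hedged alternative sketch (next_odd is a hypothetical
# helper, not part of the original script) would round any even length up to
# the next odd value instead of failing:
def next_odd(n):
    # smallest odd integer >= n
    return n if n % 2 == 1 else n + 1
# e.g. filter_indices_t = next_odd(int(np.ceil(filter_time * sample_freq)) + 1)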


# initialize model
m, size_t, size_x, n_c = train_set.shape
model, pad_x, pad_t, learning_rate, batch_size = md.ln_model(input_shape=(size_t, size_x, n_c),
                                                             filter_shape=(filter_indices_t, filter_indices_x),
                                                             sum_over_space=sum_over_space,
                                                             num_filter=num_filter)
#model, pad_x, pad_t, learning_rate, batch_size = md.ln_model_deep(input_shape=(size_t, size_x, n_c),
#                                                             filter_shape=(filter_indices_t, filter_indices_x),
#                                                             sum_over_space=sum_over_space,
#                                                             num_filter=num_filter)
#model, pad_x, pad_t, learning_rate, batch_size = md.hrc_model(input_shape=(size_t, size_x, n_c),
#                                                              filter_shape=(filter_indices_t, filter_indices_x),
コード例 #55
0
ファイル: model.py プロジェクト: bohblue2/MYFOCUS_AnoGAN
    def train(self, config):
        d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
                  .minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
                  .minimize(self.g_loss, var_list=self.g_vars)

        tf.global_variables_initializer().run()

        #summary_op: merge summary
        self.g_sum = merge_summary([
            self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum,
            self.g_loss_sum
        ])
        self.d_sum = merge_summary(
            [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])

        #Create TensorBoard summary writer
        self.writer = SummaryWriter("./logs", self.sess.graph)

        #Create sample benchmarks for monitoring training results: use the same random noise and real images
        sample_z = np.random.uniform(-1, 1, size=(self.sample_num, self.z_dim))

        if config.dataset == 'mnist':
            sample_inputs = self.data_X[0:self.sample_num]
            sample_labels = self.data_y[0:self.sample_num]
        else:
            sample_files = self.data[0:self.sample_num]  #name_list
            sample = [
                get_image(sample_file,
                          input_height=self.input_height,
                          input_width=self.input_width,
                          resize_height=self.output_height,
                          resize_width=self.output_width,
                          crop=self.crop,
                          grayscale=self.grayscale)
                for sample_file in sample_files
            ]

            if (self.grayscale):
                sample_inputs = np.array(sample).astype(np.float32)[:, :, :,
                                                                    None]
            else:
                sample_inputs = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.dataset == 'mnist':
            batch_idxs = min(
                len(self.data_X), config.train_size
            ) // config.batch_size  #config.train_size: default is np.inf
            sample_feed_dict = {
                self.z: sample_z,
                self.inputs: sample_inputs,
                self.y: sample_labels
            }
        else:
            batch_idxs = min(len(self.data),
                             config.train_size) // config.batch_size
            sample_feed_dict = {self.z: sample_z, self.inputs: sample_inputs}

        for epoch in xrange(config.epoch):
            for idx in xrange(0, batch_idxs):

                #Prepare batch data for learning
                if config.dataset == 'mnist':
                    batch_images = self.data_X[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                    batch_labels = self.data_y[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                else:
                    batch_files = self.data[idx * config.batch_size:(idx + 1) *
                                            config.batch_size]
                    batch = [
                        get_image(batch_file,
                                  input_height=self.input_height,
                                  input_width=self.input_width,
                                  resize_height=self.output_height,
                                  resize_width=self.output_width,
                                  crop=self.crop,
                                  grayscale=self.grayscale)
                        for batch_file in batch_files
                    ]
                    if self.grayscale:
                        batch_images = np.array(batch).astype(
                            np.float32)[:, :, :, None]
                    else:
                        batch_images = np.array(batch).astype(np.float32)

                #Prepare batch random noises for learning
                batch_z = np.random.uniform(
                    -1, 1, [config.batch_size, self.z_dim]).astype(np.float32)

                #Make feed dictionary
                if config.dataset == 'mnist':
                    d_feed_dict = {
                        self.inputs: batch_images,
                        self.z: batch_z,
                        self.y: batch_labels
                    }
                    d_fake_feed_dict = {self.z: batch_z, self.y: batch_labels}
                    d_real_feed_dict = {
                        self.inputs: batch_images,
                        self.y: batch_labels
                    }
                    g_feed_dict = {self.z: batch_z, self.y: batch_labels}

                else:
                    d_feed_dict = {self.inputs: batch_images, self.z: batch_z}
                    d_fake_feed_dict = {self.z: batch_z}
                    d_real_feed_dict = {self.inputs: batch_images}
                    g_feed_dict = {self.z: batch_z}

                #Run Optimization and Summary Operation of Discriminator
                _, summary_str = self.sess.run([d_optim, self.d_sum],
                                               feed_dict=d_feed_dict)
                self.writer.add_summary(summary_str, counter)

                #Run Optimization and Summary Operation of Generator
                _, summary_str = self.sess.run([g_optim, self.g_sum],
                                               feed_dict=g_feed_dict)
                self.writer.add_summary(summary_str, counter)

                # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                _, summary_str = self.sess.run([g_optim, self.g_sum],
                                               feed_dict=g_feed_dict)
                self.writer.add_summary(summary_str, counter)

                # Calculate Loss Values of Discriminator and Generator

                errD_fake = self.d_loss_fake.eval(feed_dict=d_fake_feed_dict)
                errD_real = self.d_loss_real.eval(feed_dict=d_real_feed_dict)
                errG = self.g_loss.eval(feed_dict=g_feed_dict)

                counter += 1


                print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
                  % (epoch, idx, batch_idxs, time.time() - start_time, errD_fake+errD_real, errG))

                if np.mod(counter, 100) == 1:
                    samples, d_loss, g_loss = self.sess.run(
                        [self.sampler, self.d_loss, self.g_loss],
                        feed_dict=sample_feed_dict)
                    save_images(
                        samples, image_manifold_size(samples.shape[0]),
                        './{}/train_{:02d}_{:04d}.png'.format(
                            config.sample_dir, epoch, idx))
                    print("[Sample] d_loss: %.8f, g_loss: %.8f" %
                          (d_loss, g_loss))

                if np.mod(counter, 500) == 2:
                    self.save(config.checkpoint_dir, counter)
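
                # The np.mod(counter, N) == k tests above gate periodic side
                # effects: sample images every 100 steps and checkpoint every
                # 500. The offsets (k = 1 and k = 2) keep the two actions on
                # different steps; a standalone sketch of the same cadence:
                #   for counter in range(1, 1001):
                #       if np.mod(counter, 100) == 1: ...  # sample / log
                #       if np.mod(counter, 500) == 2: ...  # save checkpoint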
コード例 #56
0
def wraptohalfpi(phi):
    """Wrap angle(s) phi to the interval [-pi/2, pi/2)."""
    return np.mod(phi + np.pi / 2, np.pi) - np.pi / 2
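
# Quick check (arbitrary test angles): pi wraps to 0 and 3*pi/4 wraps to
# -pi/4, so every output lands in [-pi/2, pi/2).
import numpy as np
print(wraptohalfpi(np.array([np.pi, 3 * np.pi / 4])))  # approximately [0, -pi/4]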
コード例 #57
0
def rolling_window(a, window, axis=-1, pad=False, mode='reflect', **kargs):
    """
        This function produces a rolling window shaped data with
        the rolled data in the last col
        a      :  n-D array of data
        window : integer is the window size
        axis   : integer, axis to move the window over
        default is the last axis.
        pad    : {Boolean} Pad the array to the origanal size
        mode : {str, function} from the function numpy.pad
        One of the following string values or a user supplied function.
        'constant'      Pads with a constant value.
        'edge'          Pads with the edge values of array.
        'linear_ramp'   Pads with the linear ramp between end_value and the
        array edge value.
        'maximum'       Pads with the maximum value of all or part of the
        vector along each axis.
        'mean'          Pads with the mean value of all or part of the
        con  vector along each axis.
        'median'        Pads with the median value of all or part of the
        vector along each axis.
        'minimum'       Pads with the minimum value of all or part of the
        vector along each axis.
        'reflect'       Pads with the reflection of the vector mirrored on
        the first and last values of the vector along each
        axis.
        'symmetric'     Pads with the reflection of the vector mirrored
        along the edge of the array.
        'wrap'          Pads with the wrap of the vector along the axis.
        The first values are used to pad the end and the
        end values are used to pad the beginning.
        <function>      of the form padding_func(vector, iaxis_pad_width,
        iaxis, **kwargs)
        see numpy.pad notes
        **kargs are passed to the function numpy.pad

        Returns:
        an array with shape = np.array(a.shape+(window,))
        and the rolled data on the last axis

        Example:
        import numpy as np
        data = np.random.normal(loc=1,
            scale=np.sin(5*np.pi*np.arange(10000).astype(float)/10000.)+1.1,
            size=10000)
        stddata = rolling_window(data, 400).std(axis=-1)
        """
    if axis == -1:
        axis = a.ndim - 1
    if pad:
        pad_width = []
        for i in range(a.ndim):
            if i == axis:
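                # split the window - 1 pad samples between the two ends of
                # the target axis (the extra sample goes in front for even
                # window sizes) so the output keeps the input's length here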
                pad_width += [(window // 2, window // 2 - 1 + np.mod(window, 2))]
            else:
                pad_width += [(0, 0)]
        a = np.pad(a, pad_width=pad_width, mode=mode, **kargs)
    a1 = np.swapaxes(a, axis, -1)  # move the target axis to the last position
    shape = a1.shape[:-1] + (a1.shape[-1] - window + 1, window)
    strides = a1.strides + (a1.strides[-1],)
    # build the strided window view, then move the rolled axis back to its
    # original position
    return np.lib.stride_tricks.as_strided(a1, shape=shape, strides=strides).swapaxes(-2, axis)
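
# A padded usage sketch (assumes the rolling_window definition above; toy
# data): with pad=True the windowed statistic keeps the input's shape, so it
# aligns sample-for-sample with the original series.
import numpy as np
data = np.random.normal(size=1000)
rolling_std = rolling_window(data, 50, pad=True, mode='reflect').std(axis=-1)
assert rolling_std.shape == data.shape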
コード例 #58
0
ファイル: beso_main.py プロジェクト: HabermannR/beso
    for dno in range(len(domains_from_config) - 1):
        msg += " " + str(FI_violated[i][dno + 1]).rjust(4, " ")
    if len(domains_from_config) > 1:
        msg += " " + str(sum(FI_violated[i])).rjust(4, " ")
    msg += " " + str(FI_mean[i]).rjust(17, " ") + " " + str(FI_mean_without_state0[i]).rjust(18, " ")
    FI_max_all = 0
    for dn in domains_from_config:
        msg += " " + str(FI_max[i][dn]).rjust(17, " ")
        FI_max_all = max(FI_max_all, FI_max[i][dn])
    if len(domains_from_config) > 1:
        msg += " " + str(FI_max_all).rjust(17, " ")
    msg += "\n"
    beso_lib.write_to_log(file_name, msg)

    # export element values
    if save_iteration_results and np.mod(float(i), save_iteration_results) == 0:
        if "csv" in save_resulting_format:
            beso_lib.export_csv(domains_from_config, domains, criteria, FI_step, FI_step_max, file_nameW, cg,
                                elm_states, sensitivity_number)
        if "vtk" in save_resulting_format:
            beso_lib.export_vtk(file_nameW, nodes, Elements, elm_states, sensitivity_number, criteria, FI_step,
                                FI_step_max)

    # relative difference in a mean stress for the last 5 iterations must be < tolerance
    if len(FI_mean) > 5:
        difference_last = []
        for last in range(1, 6):
            difference_last.append(abs(FI_mean[i] - FI_mean[i-last]) / FI_mean[i])
        difference = max(difference_last)
        if check_tolerance:
            print("maximum relative difference in FI_mean for the last 5 iterations = {}" .format(difference))
コード例 #59
0
ファイル: parameters.py プロジェクト: SylvainGuieu/delirium3
    def wrap(self, x, period):
        if period is None:
            return x
        return np.mod(x, period) * 2 * np.pi / period
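
# A quick check of the rescaling above (hypothetical numbers, not from the
# original file): a coordinate of 450 with period 360 wraps to the same phase
# as 90 degrees, i.e. pi/2 radians.
import numpy as np
assert np.isclose(np.mod(450, 360) * 2 * np.pi / 360, np.pi / 2)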
コード例 #60
0
    else:
      suppr_all = dict();
    suppr_all[which_cell-1] = curr_suppr;
    np.save(dataPath + super_name, suppr_all);
  
  return curr_suppr;

 
if __name__ == '__main__':

    cell_num   = int(sys.argv[1]);
    if cell_num < -99: 
      # i.e. 3 digits AND negative, then we'll treat the first two digits as where to start, and the second two as when to stop
      # -- in that case, we'll do this as multiprocessing
      asMulti = 1;
      end_cell = int(np.mod(-cell_num, 100));
      start_cell = int(np.floor(-cell_num/100));
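      # e.g. a hypothetical cell_num of -512 decodes to start_cell = 5 and
      # end_cell = 12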
    else:
      asMulti = 0;
    expDir     = sys.argv[2];

    fitList=None; # TEMPORARY

    if asMulti:
      from functools import partial
      import multiprocessing as mp
      nCpu = mp.cpu_count()-1; # heuristic: request at least one fewer process than there are CPUs
      print('***cpu count: %02d***' % nCpu);

      with mp.Pool(processes = nCpu) as pool:
        sup_perCell = partial(plot_save_superposition, expDir=expDir, use_mod_resp=0, fitType=2, excType=1, useHPCfit=1, conType=None, lgnFrontEnd=None, to_save=0);