Code example #1
    def __convertToFloats__(self, signal, annotation, time):
        """
        method converts all string values in signal, annotation arrays
        into float values;
        here is one assumption: time array is in float format already
        """
        floats = pl.ones(len(signal))
        if annotation is None:
            entities = zip(signal)
        else:
            entities = zip(signal, annotation)
        for idx, values in enumerate(entities):
            for value in values:
                try:
                    pl.float64(value)  # check if it can be converted to float
                except ValueError:
                    floats[idx] = 0  # the value is NOT like float type
                    break

        true_floats = pl.nonzero(floats)  # get indexes of non-zero positions
        signal = signal[true_floats].astype(float)
        if annotation is not None:
            annotation = annotation[true_floats].astype(float)
        if time is not None:
            time = time[true_floats]

        return signal, annotation, time
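
The same filtering idea pulled out of the class for a quick demo (a minimal sketch assuming pl is pylab, which re-exports numpy; the sample arrays are invented):

import pylab as pl

signal = pl.array(['1.5', 'bad', '2.25'])
annotation = pl.array(['0', '1', 'x'])

floats = pl.ones(len(signal))
for idx, values in enumerate(zip(signal, annotation)):
    for value in values:
        try:
            pl.float64(value)
        except ValueError:
            floats[idx] = 0  # row contains a non-numeric entry
            break

keep = pl.nonzero(floats)
print(signal[keep].astype(float))      # [1.5]
print(annotation[keep].astype(float))  # [0.]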
Code example #2
import pylab as pl

def precision_accuracy():
    """
    For each float width, find the exponent of the smallest power of two
    that still changes 1.0 under subtraction, i.e. the last e with
    1.0 - 2**e != 1.0.
    """
    results = []
    for dtype in (pl.float16, pl.float32, pl.float64, pl.longdouble):
        e = 0
        while dtype(1.0) - dtype(2.0 ** e) != dtype(1.0):
            e -= 1
        results.append(e + 1)  # e itself was the first invisible exponent
    return tuple(results)
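
As a cross-check, numpy (re-exported by pylab) publishes the same quantity as finfo(...).negep, the exponent of the smallest value whose subtraction from 1.0 is still visible; the values printed below should line up with what precision_accuracy() returns:

import pylab as pl

for dtype in (pl.float16, pl.float32, pl.float64, pl.longdouble):
    print(dtype.__name__, pl.finfo(dtype).negep)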
Code example #3
def alpha_psp(height, t1, t2, start, offset, time):
    t1 = p.maximum(t1, 0.)
    t2 = p.maximum(t2, 0.)

    tau_frac = t2 / p.float64(t1)
    t = p.maximum((time - start) / p.float64(t1), 0)
    epsilon = 1E-8
    if 1. - epsilon < tau_frac < 1. + epsilon:
        return parpsp_e(height, t) + offset
    else:
        return parpsp(height, tau_frac, t) + offset
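
The epsilon window guards the degenerate case t1 ≈ t2, where the generic double-exponential PSP divides by (tau_frac - 1) and loses precision. parpsp and parpsp_e are defined elsewhere in the source project; the pair below is only a plausible stand-in, assuming the textbook forms normalized so the peak equals height (illustrative, not the project's actual definitions):

import pylab as p

def parpsp(height, tau_frac, t):
    # assumed double-exponential shape, peak-normalized to `height`
    A = tau_frac ** (tau_frac / (tau_frac - 1.0))
    return height * A * (p.exp(-t / tau_frac) - p.exp(-t)) / (tau_frac - 1.0)

def parpsp_e(height, t):
    # tau_1 == tau_2 limit: the alpha function t * exp(1 - t)
    return height * t * p.exp(1.0 - t)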
Code example #4
File: Q_sigma.py  Project: JFernando4/Q_sigma
    def average_q(self, state):
        q = self.get_q(state)
        average_q = float64(0)
        for action in range(0, self.env.get_num_actions()):
            p = (self.optimal_p if action == argmax(q)
                 else self.epsilon / self.env.get_num_actions())
            average_q += p * q[action]
        return average_q
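
average_q is the expected action value under an epsilon-greedy policy. A self-contained numeric restatement, assuming optimal_p = 1 - epsilon + epsilon/n (the usual epsilon-greedy weight; all names here are illustrative):

import numpy as np

q = np.array([1.0, 3.0, 2.0, 0.5])   # toy action values
epsilon = 0.1
n = len(q)
probs = np.full(n, epsilon / n)      # exploration mass on every action
probs[q.argmax()] += 1.0 - epsilon   # greedy action gets the rest
print(probs @ q)                     # expected Q under the policy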
Code example #5
File: psp_shapes.py  Project: electronicvisions/halbe
    def __call__(self, time, height, tau_1, tau_2, start, offset):
        """
        evaluate the psp for the given parameters
        """
        tau_1 = p.maximum(tau_1, 0.)
        tau_2 = p.maximum(tau_2, 0.)

        tau_frac = tau_2 / p.float64(tau_1)
        t = p.maximum((time - start) / p.float64(tau_1), 0)

        self.__shape_switch_limit = 1E-8

        if (1. - self.__shape_switch_limit < tau_frac <
                1. + self.__shape_switch_limit):
            return self.__psp_singular(height, t) + offset
        else:
            return self.__psp_normal(height, tau_frac, t) + offset
Code example #6
File: dave_loaiza_HW3.py  Project: DaveLoaiza/UCBX433
def update_array(X):
    # overwrite out-of-range elements with the module-level `avg`
    for x in range(0, len(X)):
        if X[x] > 9 or X[x] < 2:
            X[x] = avg
    # rescale from [2, 9] to [0, 0.1]; the author notes step 6 was unclear
    # and used https://en.wikipedia.org/wiki/Feature_scaling#Rescaling
    for x in range(0, len(X)):
        X[x] = (plb.float64(X[x]) - 2) * 0.1 / (9 - 2)
    return X
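
A vectorized equivalent of the two loops, for comparison (a sketch assuming plb is pylab and avg is the midpoint value defined alongside the original, as in code example #10 below):

import pylab as plb

def update_array_vec(X, avg):
    X = plb.asarray(X, dtype=plb.float64)
    X[(X < 2) | (X > 9)] = avg        # clamp outliers to the midpoint
    return (X - 2) * 0.1 / (9 - 2)    # rescale [2, 9] -> [0, 0.1]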
Code example #7
    def select_data_pts(self, map_component, lon0_deg, lat0_deg, r_deg,
                        exclude_data):
        """
        drops the data outside a radius r_deg around (lon0_deg, lat0_deg)
        """
        lon0 = angles.deg_to_rad(pl.float64(lon0_deg))  # lambda
        lat0 = angles.deg_to_rad(pl.float64(lat0_deg))  # phi

        # rounding could be an issue...
        longitude_rad = angles.deg_to_rad(map_component['longitude'])
        latitude_rad = angles.deg_to_rad(map_component['latitude'])
        map_component['diff_to_ref_rad'] = angles.ang_dist(
            lon0, lat0, longitude_rad, latitude_rad)

        if exclude_data:
            diff_to_ref_rad = map_component['diff_to_ref_rad']
            invalid = angles.rad_to_deg(diff_to_ref_rad) > r_deg
            map_component.drop(map_component.index[invalid], inplace=True)
        return map_component
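
The angles helpers are not shown in this listing; a minimal stand-in, assuming deg_to_rad is a plain degree conversion and ang_dist returns the central angle via the spherical law of cosines (an assumption about that module's internals):

import pylab as pl

def deg_to_rad(x):
    return pl.deg2rad(x)

def ang_dist(lon0, lat0, lon1, lat1):
    # central angle between two points on the unit sphere, in radians
    return pl.arccos(pl.sin(lat0) * pl.sin(lat1)
                     + pl.cos(lat0) * pl.cos(lat1) * pl.cos(lon1 - lon0))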
Code example #8
File: io.py  Project: JacobDowns/cslvr
  def get_nearest(self, fn):
    """
    returns a dolfin Function object with values given by interpolated
    nearest-neighbor data <fn>.
    """
    #FIXME: get to work with a change of projection.
    # get the dofmap to map from mesh vertex indices to function indices :
    df    = self.func_space.dofmap()
    dfmap = df.vertex_to_dof_map(self.mesh)

    unew  = Function(self.func_space)         # existing dataset projection
    uocom = unew.vector().array()             # mesh indexed main vertex values

    d     = float64(self.data[fn])            # original matlab spec dataset

    # get arrays of x-values for specific domain
    xs    = self.x
    ys    = self.y

    for v in vertices(self.mesh):
      # mesh vertex x,y coordinate :
      i   = v.index()
      p   = v.point()
      x   = p.x()
      y   = p.y()

      # indexes of closest datapoint to specific dataset's x and y domains :
      idx = abs(xs - x).argmin()
      idy = abs(ys - y).argmin()

      # data value for closest value :
      dv  = d[idy, idx]
      if dv > 0:
        dv = 1.0
      uocom[i] = dv

    # set the values of the empty function's vertices to the data values :
    unew.vector().set_local(uocom[dfmap])
    return unew
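
Stripped of the dolfin machinery, the core of get_nearest is plain nearest-neighbour indexing on a data grid; a toy sketch (xs, ys and d mirror the attributes used above, values invented):

import numpy as np

xs = np.linspace(0.0, 10.0, 50)       # grid x coordinates
ys = np.linspace(0.0, 5.0, 25)        # grid y coordinates
d = np.random.rand(len(ys), len(xs))  # gridded data, indexed [row, col]

x, y = 3.3, 1.7                       # query point
idx = np.abs(xs - x).argmin()         # nearest column
idy = np.abs(ys - y).argmin()         # nearest row
value = d[idy, idx]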
Code example #9
    def queryDb(self, file_version = None, db_user = '******'):
        """
        Execute the DB queries for a month or 24 hours depending on self.mode.

        INPUT: None
        OUTPUT: None

        SIDE EFFECTS: populates required arrays for plotting
        """
        if self.db in self.DB.keys():
            import psycopg2 as dbdrv
            hsql="""select count(file_id), sum(uncompressed_file_size)/
            (date_part('epoch', to_timestamp(max(ingestion_date),'YYYY-MM-DD"T"HH24:MI:SS.MS')-
            to_timestamp(min(ingestion_date),'YYYY-MM-DD"T"HH24:MI:SS.MS')) + 10.)/(1024^2) as average,
            max(ingestion_date) as last , min(ingestion_date) as first ,
            sum(uncompressed_file_size)/1024^4 as volume from
            ngas_files where ingestion_date between {0} and {1}"""
            try:
                t = dbpass
            except NameError:
                dbpass = getpass.getpass('%s DB password: ' % self.db)
            # NOTE: the connect call was masked in the source listing; a
            # psycopg2 connection using db_user/dbpass is assumed here
            dbconn = dbdrv.connect(self.DB[self.db], user=db_user,
                                   password=dbpass)
        else:
            import sqlite3 as dbdrv
            hsql = """select count(file_id), sum(uncompressed_file_size)/
            (strftime('%s',max(ingestion_date))-
            strftime('%s',min(ingestion_date)))/1024./1024. as average,
            max(ingestion_date) as last , min(ingestion_date) as first ,
            sum(uncompressed_file_size)/1024/1024/1024./1024. as volume
            from ngas_files where ingestion_date between {0} and {1}"""
            dbconn = dbdrv.connect(self.db)

        if (file_version):
            hsql += " and file_version = %d" % file_version
        cur = dbconn.cursor()
        res = []
        for ii in range(1,self.loop+1):
            if self.mode[0] != 'Weekly':
                ssql = hsql.format(self.fdate % (self.date, (ii-1)), self.fdate % (self.date,ii))
            else:
                ssql = hsql.format(self.drange[ii-1][0], self.drange[ii-1][1])
            cur.execute(ssql)
            r = cur.fetchall()
            res.append(r)
            self.ssql = ssql
        res = pylab.array(res)
        y = res[:,:,1].reshape(len(res))
        y[pylab.where(y < -1)] = 0

        n = res[:,:,0].reshape(len(res))
        n[pylab.where(n < -1)] = 0

        self.y = pylab.float16(y)
        self.n = pylab.float16(n)
        vol = pylab.float16(res[:,:,4])
        self.tvol = pylab.float64(vol)[vol>0].sum()
        self.tfils = pylab.int32(self.n.sum())
        self.res=res
        dbconn.close()
        del dbconn
        print(self.tfils, pylab.int32(self.n.sum()))
        return
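
The "average" column in both queries is an ingest rate: total uncompressed bytes over the ingest time span (the postgres variant adds a 10 s guard against division by zero), scaled to MB/s. Restated outside SQL (an illustrative helper, not part of the original file):

def ingest_rate_mb_per_s(total_bytes, first_epoch, last_epoch):
    # bytes / seconds / 1024**2, with the same +10 s guard as the SQL
    return total_bytes / (last_epoch - first_epoch + 10.0) / 1024 ** 2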
Code example #10
#4. Create an array B with 500 elements bound in the range [-3*pi:2*pi]
B = plb.linspace(-3*plb.pi,2*plb.pi, 500, endpoint=True)

#5. Using if, for or while, create a function that overwrites every element
# in A that falls outside the interval [2:9) with the average of the
# smallest and largest elements in A
#6. Normalize each list element to be bound between [0:0.1]
avg = (A.min() + A.max())/2

def update_array(X):
    # overwrite out-of-range elements with `avg`
    for x in range(0, len(X)):
        if X[x] > 9 or X[x] < 2:
            X[x] = avg
    # rescale from [2, 9] to [0, 0.1]; the author notes step 6 was unclear
    # and used https://en.wikipedia.org/wiki/Feature_scaling#Rescaling
    for x in range(0, len(X)):
        X[x] = (plb.float64(X[x]) - 2) * 0.1 / (9 - 2)
    return X


#7. Return the result from the function to C
C = update_array(A)


#8. Cast C as an array
C = plb.array(C)

#9. Add C to B (think of C as noise) and record the result in D … 
# (watch out: C is of different length. Truncate it)
D = B + C[:len(B)]

#Part 2 - plotting:
Code example #11
File: io.py  Project: JacobDowns/cslvr
  def integrate_field(self, fn_spec, specific, fn_main, r=20, val=0.0):
    """
    Assimilate a field with filename <fn_spec>  from DataInput object
    <specific> into this DataInput's field with filename <fn_main>.  The
    parameter <val> should be set to the specific dataset's value for
    undefined regions, default is 0.0.  <r> is a parameter used to eliminate
    border artifacts from interpolation; increase this value to eliminate edge
    noise.
    """
    s    = "::: integrating %s field from %s :::" % (fn_spec, specific.name)
    print_text(s, self.color)
    # get the dofmap to map from mesh vertex indices to function indices :
    df    = self.func_space.dofmap()
    dfmap = df.vertex_to_dof_map(self.mesh)

    unew  = self.get_projection(fn_main)      # existing dataset projection
    uocom = unew.compute_vertex_values()      # mesh indexed main vertex values

    uspec = specific.get_projection(fn_spec)  # specific dataset projection
    uscom = uspec.compute_vertex_values()     # mesh indexed spec vertex values

    d     = float64(specific.data[fn_spec])   # original matlab spec dataset

    # get arrays of x-values for specific domain
    xs    = specific.x
    ys    = specific.y
    nx    = specific.nx
    ny    = specific.ny

    for v in vertices(self.mesh):
      # mesh vertex x,y coordinate :
      i   = v.index()
      p   = v.point()
      x   = p.x()
      y   = p.y()

      # indexes of closest datapoint to specific dataset's x and y domains :
      idx = abs(xs - x).argmin()
      idy = abs(ys - y).argmin()

      # data value for closest value and square around the value in question :
      dv  = d[idy, idx]
      db  = d[max(0,idy-r) : min(ny, idy+r),  max(0, idx-r) : min(nx, idx+r)]

      # if the vertex is in the domain of the specific dataset, and the value
      # of the dataset at this point is above <val>, set the array value
      # of the main file to this new specific region's value.
      if dv > val:
        #print "found:", x, y, idx, idy, v.index()
        # if the value is not near an edge, make it equal to the nearest
        # specific region's dataset value; otherwise, use the specific
        # region's projected value :
        if all(db > val):
          uocom[i] = uscom[i]
        else:
          uocom[i] = dv

    # set the values of the projected original dataset equal to the assimilated
    # dataset :
    unew.vector().set_local(uocom[dfmap])
    return unew
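
The border test above inspects an r-sized window around the nearest gridpoint; only when the whole window exceeds <val> is the vertex treated as interior. The same test in isolation (a toy sketch with invented values):

import numpy as np

d = np.random.rand(25, 50)            # toy gridded dataset
ny, nx = d.shape
idy, idx, r, val = 12, 30, 5, 0.5

db = d[max(0, idy - r):min(ny, idy + r), max(0, idx - r):min(nx, idx + r)]
interior = bool((db > val).all())     # True only if every neighbour exceeds val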