Example #1
def morph(face1, face1pts, face2, face2pts, warp_frac=0.5, dissolve_frac=0.5):
    # we assume face1pts and face2pts contain the corners of the image
    face1 = np.copy(face1)
    face2 = np.copy(face2)
    avgpts = (1-warp_frac)*face1pts + warp_frac*face2pts
    avgpts[:4] = np.trunc(avgpts[:4])  # snap the corner points to integer coordinates
    face = np.zeros((int(avgpts[3, 0]), int(avgpts[3, 1]), 3))
    delaunay_triangulation = Delaunay(avgpts)
    simplices = delaunay_triangulation.simplices
    triang1 = [[face1pts[s[0]], face1pts[s[1]], face1pts[s[2]]] for s in simplices]
    triang2 = [[face2pts[s[0]], face2pts[s[1]], face2pts[s[2]]] for s in simplices]
    triang = [[avgpts[s[0]], avgpts[s[1]], avgpts[s[2]]] for s in simplices]
    affine_t1 = [compute_transform(triang[i], triang1[i]) for i in range(len(triang1))]
    affine_t2 = [compute_transform(triang[i], triang2[i]) for i in range(len(triang2))]
    for y in range(face.shape[0]):
        for x in range(face.shape[1]):
            # locate the pixel's triangle in the mean-shape triangulation, then
            # map it back into each source image with that triangle's transform
            trinum = tsearch((y, x), triang)
            vec1 = np.trunc(np.dot(affine_t1[trinum], np.array([x, y, 1])))
            vec2 = np.trunc(np.dot(affine_t2[trinum], np.array([x, y, 1])))
            try:
                face[y,x,:] = (1-dissolve_frac)*face1[int(vec1[1])-1, int(vec1[0])-1] + dissolve_frac*face2[int(vec2[1])-1, int(vec2[0])-1]
            except IndexError:
                print((vec1[1]-1, vec1[0]-1), (vec2[1]-1, vec2[0]-1))
    return face
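# The helper compute_transform() is not shown above; this is a minimal sketch of
# what morph() appears to assume (a 2x3 affine matrix sending points of the mean
# triangle onto the corresponding source triangle). Names and conventions here
# are guesses, not the original implementation.
def compute_transform(tri_dst, tri_src):
    # Each destination vertex (x, y, 1) must map to its source vertex (x', y').
    dst = np.hstack([np.asarray(tri_dst, dtype=float), np.ones((3, 1))])  # 3x3
    src = np.asarray(tri_src, dtype=float)                                # 3x2
    # Solve dst @ X = src; then A = X.T satisfies A @ [x, y, 1] = [x', y'].
    return np.linalg.solve(dst, src).T                                    # 2x3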
Example #2
    def set_jds(self, val1, val2):
        self._check_scale(self._scale)  # Validate scale.

        sum12, err12 = two_sum(val1, val2)
        iy_start = np.trunc(sum12).astype(int)
        extra, y_frac = two_sum(sum12, -iy_start)
        y_frac += extra + err12


        imon = np.ones_like(iy_start)
        iday = np.ones_like(iy_start)
        ihr = np.zeros_like(iy_start)
        imin = np.zeros_like(iy_start)
        isec = np.zeros_like(y_frac)

        # Possible enhancement: use np.unique to only compute start, stop
        # for unique values of iy_start.
        scale = self.scale.upper().encode('ascii')
        jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
                                          ihr, imin, isec)
        jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
                                      ihr, imin, isec)

        t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
        t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
        t_frac = t_start + (t_end - t_start) * y_frac

        self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
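# set_jds() relies on two_sum(), astropy's error-free float addition helper
# (astropy.time.utils). A minimal sketch of the idea, following Knuth's
# classic branch-free algorithm:
def two_sum(a, b):
    # Returns (x, err) with a + b == x + err exactly: x is the rounded
    # floating-point sum, err the rounding error it discarded.
    x = a + b
    bv = x - a        # b as actually represented in the sum ("b virtual")
    av = x - bv       # a as actually represented in the sum ("a virtual")
    return x, (a - av) + (b - bv)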
Example #3
    def init_boid(amaze):
        pos = np.zeros(cfg.Dimensions, dtype=np.float32)
        # Start from a random cell
        pos[0] = np.float32(np.trunc(np.random.uniform(0, cfg.maze_width)))
        pos[1] = np.float32(np.trunc(np.random.uniform(0, cfg.maze_height)))

        # Ensure that we are not placing the boid into a wall ---------------------
        # Change position until hit a free cell
        while isinstance(
                amaze.matrix[int(pos[0])][int(pos[1])].shape,
                maze.Wall):
            # While the cell is filled with a wall (==1)
            pos[0] = np.float32(np.trunc(np.random.uniform(0, cfg.maze_width)))
            pos[1] = np.float32(np.trunc(np.random.uniform(0, cfg.maze_height)))
            # Check that we are not placing the boid into the wall

        # Move boid to the center of the cell
        pos[0] += 0.5
        pos[1] += 0.5
        vel = np.zeros(cfg.Dimensions, dtype=np.float32)

        # TODO: change pheromone_level type to uint16
        pheromone_level = np.zeros([1], dtype=np.float32)

        return np.concatenate((pos, vel, pheromone_level))
Example #4
def split_data(dataset):
    '''
    Split the dataset into training, validation, and test sets. The actual split below is 64/16/20.
    :param dataset: the entire dataset containing positive and negative images; each row is a tuple of (data, label)
    :return: a tuple of training, validation, and test inputs and labels
    '''

    # shuffle the data to randomize
    shuffled = np.random.permutation(dataset)

    # get the sizes based on split percentage for each set
    print('total dataset size:', len(shuffled))
    ts = int(np.trunc(len(shuffled) * 0.64))   # train
    print(ts)
    vs = int(np.trunc(len(shuffled) * 0.16))   # valid
    print(vs)
    tts = int(np.trunc(len(shuffled) * 0.20))  # test
    print(tts)

    train_input, train_label = get_input_label(shuffled[:ts])
    valid_input, valid_label = get_input_label(shuffled[ts:ts+vs])
    test_input, test_label = get_input_label(shuffled[ts+vs:])

    return train_input, train_label, valid_input, valid_label, test_input, test_label
Example #5
    def sort_by_cells(self):
        zp, yp, xp = self.zp, self.yp, self.xp
        vz, vy, vx = self.vz, self.vy, self.vx
        Lz, Ly, Lx = self.Lz, self.Ly, self.Lx
        cell       = self.cell
        cell_span  = self.cell_span
        N_cells    = self.N_cells

        Cz = Lz/N_cells
        Cy = Ly/N_cells
        Cx = Lx/N_cells

        zp_cell = np.trunc(zp/Cz)
        yp_cell = np.trunc(yp/Cy)
        xp_cell = np.trunc(xp/Cx)

        cell[:] = xp_cell+yp_cell*N_cells+zp_cell*N_cells**2

        s = cell.argsort()
        zp[:] = zp[s]
        yp[:] = yp[s]
        xp[:] = xp[s]
        vz[:] = vz[s]
        vy[:] = vy[s]
        vx[:] = vx[s]
        cell[:] = cell[s]
        cell_span[:-1] = np.searchsorted(cell, self.cell_vals)
        cell_span[-1]  = self.N
Example #6
def time2hms(time_in_seconds):
    temp = time_in_seconds/3600.0
    hours = np.trunc(temp)
    minutes = np.trunc((temp - hours)*60)
    seconds = (temp - hours - minutes/60)*3600
    
    return (hours, minutes, seconds)
Example #7
def lim_precision_inputs(inputs, bits, precision):
    lim_inputs = np.empty(inputs.shape)
    for index, value in np.ndenumerate(inputs):
        if value >= 0:
            lim_inputs[index] = np.trunc((value * (1 << precision)) % (1 << bits) + 0.5)  # * (1.0 / (1 << precision))
        else:
            lim_inputs[index] = -np.trunc((-value * (1 << precision)) % (1 << bits) + 0.5)  # * (1.0 / (1 << precision))
    return lim_inputs.astype(theano.config.floatX)
Example #8
    def plot(self, database, dsid):
        """Plot positions of all counterparts for all (unique) sources for
        the given dataset.

        The positions of all (unique) sources in the running catalog are
        at the centre, whereas the positions of all their associated
        sources are scattered around the central point.  Axes are in
        arcsec relative to the running catalog position.
        """

        query = """\
SELECT x.id
      ,x.ra
      ,x.decl
      ,3600 * (x.ra - r.wm_ra) as ra_dist_arcsec
      ,3600 * (x.decl - r.wm_decl) as decl_dist_arcsec
      ,x.ra_err/2
      ,x.decl_err/2
      ,r.wm_ra_err/2
      ,r.wm_decl_err/2
  FROM assocxtrsource a
      ,extractedsource x
      ,runningcatalog r
      ,image im1
 WHERE a.runcat = r.id
   AND a.xtrsrc = x.id
   AND x.image = im1.id
   AND im1.dataset = %s

"""
        results = list(zip(*database.db.get(query, dsid)))

        if not results:
            return None
        xtrsrc_id = results[0]
        ra = results[1]
        decl = results[2]
        ra_dist_arcsec = results[3]
        decl_dist_arcsec = results[4]
        ra_err = results[5]
        decl_err = results[6]
        wm_ra_err = results[7]
        wm_decl_err = results[8]

        axes = self.figure.add_subplot(1, 1, 1)
        axes.errorbar(ra_dist_arcsec, decl_dist_arcsec, xerr=ra_err, yerr=decl_err,
                      fmt='+', color='b', label="xtr")
        axes.set_xlabel(r'RA (arcsec)')
        axes.set_ylabel(r'DEC (arcsec)')
        lim = 1 + max(int(numpy.trunc(max(abs(min(ra_dist_arcsec)),
                                          abs(max(ra_dist_arcsec))))),
                      int(numpy.trunc(max(abs(min(decl_dist_arcsec)),
                                          abs(max(decl_dist_arcsec))))))
        axes.set_xlim(-lim, lim)
        axes.set_ylim(-lim, lim)
        axes.grid(False)
        # Shifts plot spacing to ensure that axes labels are displayed
        self.figure.tight_layout()
Example #9
 def bprop_input(self, input, output):
     # gradient gate: sign(trunc(x / thresh)) is 0 where |x| < thresh and
     # +/-1 elsewhere; both ufuncs write into sx in place via the out argument
     if self.thresh != 0:
         sx = input.x / self.thresh
         np.trunc(sx, sx)
         np.sign(sx, sx)
     else:
         sx = np.sign(input.x)
     input.dx += sx * output.dx
Example #10
def trunc(x):
    """
    Truncate values toward zero, discarding the fractional part (no rounding)
    """
    if isinstance(x, UncertainFunction):
        mcpts = np.trunc(x._mcpts)
        return UncertainFunction(mcpts)
    else:
        return np.trunc(x)
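# Note: truncation rounds toward zero, which differs from np.floor for
# negative inputs. A quick illustration:
x = np.array([-1.7, -0.2, 0.2, 1.7])
print(np.trunc(x))  # [-1. -0.  0.  1.]  (toward zero)
print(np.floor(x))  # [-2. -1.  0.  1.]  (toward -inf)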
Example #11
def gather(data, lc):
    # bilinear interpolation: read the four neighbouring grid points,
    # weighted by the fractional position inside the cell
    i = int(numpy.trunc(lc[0]))
    j = int(numpy.trunc(lc[1]))
    di = lc[0] - i
    dj = lc[1] - j
    return (data[i][j]*(1-di)*(1-dj) +
            data[i+1][j]*di*(1-dj) +
            data[i][j+1]*(1-di)*dj +
            data[i+1][j+1]*di*dj)
Example #12
 def truncate(self, h, m, s):
     d = np.trunc(s / self.slim)
     s = s % self.slim
     m += d
     d = np.trunc(m / self.mlim)
     m = m % self.mlim
     h += d
     d = np.trunc(h / self.hlim)
     h = h % self.hlim
     return [int(h), int(m), float(s)]
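# Worked example, assuming the instance limits are slim=60, mlim=60, hlim=24
# (an assumption; they are not shown above). truncate(1, 59, 75.5):
#   s = 75.5 -> carry trunc(75.5/60) = 1 into minutes, leaving s = 15.5
#   m = 59 + 1 = 60 -> carry 1 into hours, leaving m = 0
#   h = 1 + 1 = 2 -> below hlim, no further carry
# result: [2, 0, 15.5]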
Example #13
def scatter(data, lc, value):
    # bilinear interpolation: spread value over the four neighbouring
    # grid points, weighted by the fractional position inside the cell
    i = int(numpy.trunc(lc[0]))
    j = int(numpy.trunc(lc[1]))
    di = lc[0] - i
    dj = lc[1] - j

    data[i][j] += (1-di)*(1-dj)*value
    data[i+1][j] += di*(1-dj)*value
    data[i][j+1] += (1-di)*dj*value
    data[i+1][j+1] += di*dj*value
Example #14
def julian_date(yr, mo, d, hr, minute, sec, leap_sec=False):
    x = (7*(yr + np.trunc((mo + 9)/12)))/4.0
    y = (275*mo)/9.0
    if leap_sec:
        t = 61.0
    else:
        t = 60.0
    z = (sec/t + minute)/60.0 + hr
    jd = 367*yr - np.trunc(x) + np.trunc(y) + d + 1721013.5 + z/24.0
    return jd
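# Quick sanity check against the J2000.0 epoch (2000-01-01 12:00 corresponds
# to JD 2451545.0):
assert julian_date(2000, 1, 1, 12, 0, 0) == 2451545.0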
Example #15
def tens_nort(drnms, labels,
               mza = 100.,
               mzb = 1000.,
               mz_step = 1e-0, # 0.01
               blcleaned = True):

    ulist = []
    llist = []
    elist = []

    for drnm in drnms:
        if blcleaned:
            fnm = drnm + '_blcleaned.npz'
        else:
            fnm = drnm + '.npz'
        f = np.load("./" + drnm + '/' + fnm)

        negsp = f['negsp']
        negmz = f['negmz']

        possp = f['possp']
        posmz = f['posmz']

        label = f['label']

        for k in range(negsp.shape[1]): # must be equal to pos_s.shape[1] // number of samples

            if label[k] not in labels:
                continue
            llist.append(label[k])
            elist.append(drnm)

            # u is a 2D array: (m/z, polarity) = (m/z, 2), but polarity is fixed now

            u = np.zeros((int((mzb - mza) / mz_step), 2))

            for i in range(negsp.shape[0]):
                if negsp[i, k] != 0:
                    if (negmz[i] < mza) or (negmz[i] >= mzb):
                        continue
                    mzind = np.trunc((negmz[i] - mza)/ mz_step)
                    mzind = int(mzind)
                    #if u[mzind, rtind, 0] != 0:
                    u[mzind, 0] = max(u[mzind, 0], negsp[i, k]) # to deal with crossing values
            for i in range(possp.shape[0]):
                if possp[i, k] != 0:
                    if (posmz[i] < mza) or (posmz[i] >= mzb):
                        continue
                    mzind = np.trunc((posmz[i] - mza)/ mz_step)
                    mzind = int(mzind)
                    #if u[mzind, rtind, 0] != 0:
                    u[mzind, 1] = max(u[mzind, 1], possp[i, k]) # to deal with crossing values
            ulist.append(u)
    u = np.array(ulist)
    return u, llist, elist
Example #16
def jd2gregorian(jd):
    t1900 = (jd - 2415019.5)/365.25
    year = 1900 + np.trunc(t1900)
    leap_years = np.trunc((year - 1900 - 1)*0.25)
    days = (jd - 2415019.5) - ((year - 1900)*365.0 + leap_years)
    
    if days < 1.0:
        year = year - 1
        leap_years = np.trunc((year - 1900 - 1)*0.25)
        days = (jd - 2415019.5) - ((year - 1900)*365.0 + leap_years)
        
    (month, day, hours, minutes, seconds) = days2ymdhms(days, year)
    return (year, month, day, hours, minutes, seconds)
Example #17
 def base_link_coords_lethal(self, origin, yaw, x, y):
     x_rot = x*np.cos(yaw) - y*np.sin(yaw)
     y_rot = y*np.cos(yaw) + x*np.sin(yaw)
     x_index = np.trunc(origin[1] + x_rot/self.costmap_info.resolution).astype('i2')
     y_index = np.trunc(origin[0] + y_rot/self.costmap_info.resolution).astype('i2')
     if ( x_index < self.costmap_info.width and
          y_index < self.costmap_info.height and
          x_index >= 0 and
          y_index >= 0 ):
         return self.costmap[y_index, x_index] > self._lethal_threshold
     else:
         rospy.logwarn("SEARCH_AREA_CHECK off costmap! Returning True.")
         return True
Example #18
 def interpolate_at_equidistant_time_steps(self):
     #determine min and max of time series
     min_t=numpy.min(self.ta)/RESAMPLE_TIME_STEP
     max_t=numpy.max(self.ta)/RESAMPLE_TIME_STEP
     # mapping to quantization steps RESAMPLE_TIME_STEP
     min_ti=numpy.trunc(min_t)*RESAMPLE_TIME_STEP
     max_ti=numpy.trunc(max_t)*RESAMPLE_TIME_STEP
     number_of_samples = int(numpy.rint((max_ti-min_ti)/RESAMPLE_TIME_STEP + 1 + EPSILON))
     #print number_of_samples
     self.edta=numpy.linspace(min_ti, max_ti, number_of_samples)
     #print self.edta
     #scipy.interpolate.splrep: Find the B-spline representation of 1-D curve.
     rep = scipy.interpolate.splrep(self.ta,self.ca, k=1)
     self.edca = scipy.interpolate.splev(self.edta, rep)
Example #19
 def _update_fields(self):
     from numpy import trunc
     
     # map mouse location to array index
     frotmp = int(trunc(self.custtool.yval))
     totmp = int(trunc(self.custtool.xval))
     
     # check if within range
     sh = self.matrix_data_ref[self.edge_parameter_name].shape
     # assume matrix whose shape is (# of rows, # of columns)
     if frotmp >= 0 and frotmp < sh[0] and totmp >= 0 and totmp < sh[1]:
         self.fro = self.labels[frotmp]
         self.to = self.labels[totmp]
         self.val = self.matrix_data_ref[self.edge_parameter_name][frotmp, totmp]
Example #20
 def _update_fields(self):
     from numpy import trunc
     
     # map mouse location to array index
     frotmp = int(trunc(self.custtool.yval))
     totmp = int(trunc(self.custtool.xval))
     
     # check if within range
     sh = self.data[self.data_name].shape
     # assume matrix whose shape is (# of rows, # of columns)
     if frotmp >= 0 and frotmp < sh[0] and totmp >= 0 and totmp < sh[1]:
         self.fro = frotmp
         self.to = totmp
         self.val = self.data[self.data_name][self.fro, self.to]
Example #21
def gyf2gms(gyf):
    """Conversion (g.frac) --> (gg,mm,ss.sssss)
    For example:
        gyf2gms(-61.5) = (-61,30,0.0)
        gyf2gms(-0.75) = (0,-45,0.0)"""
    # do the arithmetic
    g = trunc(gyf)
    m = (gyf - g) * 60
    s = (m - trunc(m)) * 60
    # prepare the presentation
    g = int(g)
    m = int(m) if g == 0 else int(abs(m))
    s = abs(s) if m != 0 and g != 0 else s
    return (g, m, s)
Example #22
 def line_blocked(self, userdata):
     
     if self.debug_map_pub.get_num_connections() > 0:
         publish_debug = True
     else:
         publish_debug = False
     
     costmap = self.costmap
     lethal_threshold = 90
     blocked_threshold = 0.30
     check_width = userdata.blocked_check_width/2
     check_dist = userdata.blocked_check_distance
     resolution = costmap.info.resolution
     origin = (np.trunc(costmap.info.width/2),
               np.trunc(costmap.info.height/2))
     yaw = userdata.line_yaw
     ll = self.check_point(origin, check_width, (yaw - math.pi/2), resolution)
     ul = self.check_point(origin, check_width, (yaw + math.pi/2), resolution)
     lr = self.check_point(ll[0], check_dist, yaw, resolution)
     ur = self.check_point(ul[0], check_dist, yaw, resolution)
                
     map_np = np.array(costmap.data, dtype='i1').reshape((costmap.info.height,costmap.info.width))
     
     start_points = bresenham.points(ll, ul)
     end_points = bresenham.points(lr, ur)
     total_count = len(start_points)
     
     #check lines for lethal values
     blocked_count = 0
     for start, end in zip(start_points, end_points):
         line = bresenham.points(start[None,:], end[None,:])
         line_vals = map_np[line[:,1], line[:,0]]
         max_val = (np.amax(line_vals))
         if np.any(line_vals > lethal_threshold):
             blocked_count += 1
             #for debug, mark lethal lines
             if publish_debug: map_np[line[:,1], line[:,0]] = 64
             
     #if anything is subscribing to the test map, publish it
     if publish_debug:
         map_np[start_points[:,1], start_points[:,0]] = 64
         map_np[end_points[:,1], end_points[:,0]] = 64
         costmap.data = list(np.reshape(map_np, -1))
         self.debug_map_pub.publish(costmap)                        
     
     if (blocked_count/float(total_count)) > blocked_threshold:        
         return True
                 
     return False
Example #23
    def contruct_classes(self, trn_idxs, corpus_mtrx_lst, cls_gnr_tgs, bagging_param=None):
        inds_per_gnr = dict()
        # inds = list()
        last_gnr_tag = None

        for gnr_tag in np.unique(cls_gnr_tgs[trn_idxs]):
            inds_per_gnr[self.genres_lst[gnr_tag - 1]] = trn_idxs[
                np.where(cls_gnr_tgs[trn_idxs] == gnr_tag)[0]
            ]

        gnr_classes = dict()
        for g, inds in inds_per_gnr.items():

            if self.bagging and bagging_param:
                shuffled_train_idxs = np.random.permutation(inds)
                # keep bagging_param percent of the shuffled indices
                bg_trn_ptg = int(np.trunc(shuffled_train_idxs.size * bagging_param))
                bag_idxs = shuffled_train_idxs[0:bg_trn_ptg]

            elif not self.bagging and not bagging_param:
                bag_idxs = inds

            else:
                raise Exception(
                    'contruct_classes(): Bagging triggered without a bagging_param argument'
                )

            # Merge All Term-Frequency Dictionaries created by the Raw Texts
            gnr_classes[g] = corpus_mtrx_lst[self.gnrlst_idx[g]][bag_idxs, :].mean(axis=0)

        return gnr_classes
Example #24
def randomPairs(n_records, sample_size):
    """
    Return random combinations of indices for a square matrix of size
    n records
    """

    if n_records < 2:
        raise ValueError("Needs at least two records")
    n = n_records * (n_records - 1) // 2

    # numpy doesn't always throw an overflow error so we need to 
    # check to make sure that the largest number we'll use is smaller
    # than the numpy's maximum unsigned integer
    if 8 * n > numpy.iinfo('uint').max :
        return randomPairsWithReplacement(n_records, sample_size)

    if sample_size >= n:
        if sample_size > n :
            warnings.warn("Requested sample of size %d, only returning %d possible pairs" % (sample_size, n))

        random_indices = numpy.arange(n)
    else :
        random_indices = numpy.random.randint(int(n), size=sample_size)

    random_indices = random_indices.astype('uint')

    b = 1 - 2 * n_records

    x = numpy.trunc((-b - numpy.sqrt(b ** 2 - 8 * random_indices)) / 2)
    y = random_indices + x * (b + x + 2) / 2 + 1

    stacked = numpy.column_stack((x, y)).astype(int)

    return [(p.item(), q.item()) for p, q in stacked]
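# The trunc/sqrt pair above inverts the row-major enumeration of the strict
# upper triangle: pair (x, y) with x < y gets index r = x*(2n - x - 1)/2 +
# (y - x - 1), and solving that quadratic for x gives the expression used.
# A small check of the inversion (illustrative, not part of the original):
n = 6
pairs = [(x, y) for x in range(n) for y in range(x + 1, n)]
r = numpy.arange(n * (n - 1) // 2)
b = 1 - 2 * n
x = numpy.trunc((-b - numpy.sqrt(b ** 2 - 8 * r)) / 2)
y = r + x * (b + x + 2) / 2 + 1
assert [(int(i), int(j)) for i, j in zip(x, y)] == pairs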
Example #25
def transfCartToCol2(m, linx, liny, linu, linv):
    start = time.time()
    lu, lv = len(linu), len(linv)
    print("lu, lv:", lu, lv)
    minx, maxx = linx[0], linx[-1]
    miny, maxy = liny[0], liny[-1]
    dx = 1/abs(linx[1]-linx[0])
    #print minx, miny, maxx, maxy
    U, V = np.meshgrid(linu, linv)
    coord = np.zeros((lv, lu, 2))
    coord[:,:,:] = fromColi(U,V)
    coord[:,:,0] = (np.ceil(dx*coord[:,:,0])-1)/dx
    ## maps -0.8 to -1 and 0.8 to 0: needed because x<0 does not exist
    # (if we mapped -0.8 to 0, the selection below would fail)
    coord[:,:,1] = (np.trunc(dx*coord[:,:,1]))/dx
    ## maps -0.8 to 0 and 0.8 to 0: keeps us from exceeding the maximum y
    mt = np.zeros((lv, lu))


    for u in range(0, lu):
        for v in range(0, lv):
            x, y = coord[v,u,0], coord[v, u, 1]

            if ((x > (maxx)) | (x < (minx)) | (y > (maxy)) | (y < (miny)) ):
                mt[v,u] = 0.0
            else:
                ind_x = np.nonzero(linx==x)[0][0]
                ind_y = np.nonzero(liny==y)[0][0]
                mt[v, u] = m[ind_y,ind_x]#np.sum(m[ind_y-1:ind_y+1,ind_x-1:ind_x+1])/9


    conn_time = time.time()-start
    a = time.gmtime(conn_time)
    print(time.strftime("\ntask time for map transformation: %H:%M:%S", a))
    return(mt)
Example #26
def transfColToCart(m, linx, liny, linu, linv):
    ## picture transformation
    start = time.time()
    lx, ly = len(linx), len(liny)
    #print "lu, lv:", lx, ly
    minu, maxu = linu[0], linu[-1]
    minv, maxv = linv[0], linv[-1]
    du = 1/abs(linu[1]-linu[0])
    #print minx, miny, maxx, maxy
    X, Y = np.meshgrid(linx, liny)
    coord = np.zeros((ly, lx, 2))
    coord[:,:,:] = toColi(X,Y)
    coord[:,:,0] = (np.ceil(du*coord[:,:,0])-1)/du
    coord[:,:,1] = (np.trunc(du*coord[:,:,1]))/du
    coord[np.isnan(coord)] = 0
    mt = np.zeros((ly, lx))

    for x in range(0, lx):
        for y in range(0, ly):
            u, v = coord[y,x,0], coord[y, x, 1]

            if ((u > (maxu)) | (u < (minu)) | (v > (maxv)) | (v < (minv)) ):
                mt[y,x] = 0.0
            else:
                #print u,v
                ind_u = np.nonzero(linu==u)[0][0]

                ind_v = np.nonzero(linv==v)[0][0]
                mt[y, x] = m[ind_v,ind_u]#np.sum(m[ind_y-1:ind_y+1,ind_x-1:ind_x+1])/9

    conn_time = time.time()-start
    a = time.gmtime(conn_time)
    print(time.strftime("\ntask time for map transformation: %H:%M:%S", a))
    return(mt)
Example #27
    def signal(self, fs, atten, caldb, calv):
        if self._filename is None:
            # allow lack of file to not cause error, catch in GUI when necessary?
            logger = logging.getLogger('main')
            logger.warning('Vocalization signal request without a file')
            return np.array([0,0])

        if not self._findFile():
            return np.array([0,0])

        fs_file, wavdata = audioread(self._filename)
        if fs_file != fs:
            print('specified', fs, 'wav file', fs_file)
            raise Exception("specified samplerate does not match wav stimulus")

        # truncate to nearest ms
        duration = float(len(wavdata))/fs
        # print 'duration {}, desired {}'.format(duration, np.trunc(duration*1000)/1000)
        desired_npts = int((np.trunc(duration*1000)/1000)*fs)
        # print 'npts. desired', len(wavdata), desired_npts
        wavdata = wavdata[:desired_npts]

        amp_scale = signal_amplitude(wavdata, fs)

        signal = ((wavdata/amp_scale)*self.amplitude(caldb, calv))

        if self._risefall > 0:
            rf_npts = int(self._risefall * fs) // 2
            wnd = hann(rf_npts*2) # cosine taper
            signal[:rf_npts] = signal[:rf_npts] * wnd[:rf_npts]
            signal[-rf_npts:] = signal[-rf_npts:] * wnd[rf_npts:]
            
        return signal
Example #28
 def get_pointcloud2(self, grid, position, target_frame, transform, range=12.0):
     header = std_msg.Header(0, rospy.Time.now(), target_frame)
     
     grid_np = np.array(grid.data, dtype='u1').reshape((grid.info.height,grid.info.width))
     origin = np.array((grid.info.origin.position.x,
                        grid.info.origin.position.y,
                        grid.info.origin.position.z))
     world2map = lambda x: np.clip(np.trunc((x-origin)/grid.info.resolution),
                                   np.zeros((3,)),
                                   np.r_[grid.info.width-1, grid.info.height-1, 0])
     
     ll = world2map(np.r_[position.x-range, position.y-range, 0])
     ur = world2map(np.r_[position.x+range, position.y+range, 0])
 
     submap = grid_np[ll[1]:ur[1],ll[0]:ur[0]]
     mappts = np.c_[np.where(submap==100)][:,::-1] + ll[:2]
     map2world = lambda x:(x+0.5)*grid.info.resolution+origin[:2]
     wpts = np.array([map2world(x) for x in mappts])
     pcpts=[]
     if len(wpts) > 0:
         for z in np.linspace(0, 1, 11):
             pcpts.append(np.c_[wpts, z*np.ones_like(wpts[:, 0])])
         pcpts = np.vstack(pcpts)
         pcpts = np.einsum("ki,...ji", transform, np.c_[pcpts,np.ones((len(pcpts),1))])[:,:3]   
     else:
         pcpts = []
     pc = pc2.create_cloud_xyz32(header, pcpts)
     return pc
Example #29
def randomPairs(n_records, sample_size, zero_indexed=True):
    """
    Return random combinations of indices for a square matrix of size
    n records
    """

    if n_records < 2:
        raise ValueError("Needs at least two records")
    n = n_records * (n_records - 1) // 2

    if sample_size >= n:
        random_indices = numpy.arange(n)
        numpy.random.shuffle(random_indices)
    else:
        try:
            random_indices = numpy.array(random.sample(range(n), sample_size))
        except OverflowError:
            # If the population is very large relative to the sample
            # size, we'll get very few duplicates by chance
            logging.warning("There may be duplicates in the sample")
            sample = numpy.array([random.sample(range(n_records), 2) for _ in range(sample_size)])
            return numpy.sort(sample, axis=1)
            return numpy.sort(sample, axis=1)

    b = 1 - 2 * n_records

    x = numpy.trunc((-b - numpy.sqrt(b ** 2 - 8 * random_indices)) / 2)
    y = random_indices + x * (b + x + 2) / 2 + 1

    if not zero_indexed:
        x += 1
        y += 1

    return numpy.column_stack((x, y)).astype(int)
Example #30
def gumr(xn, xu):
    z2 = np.trunc(np.log10(xu))+1
    z1 = np.around(xu/(10**z2), 3)
    y1 = np.around(xn*10**(-z2), 2)
    value = y1*10**z2
    uncert = z1*10**z2
    return('%g'%value, '%g'%uncert)
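# Worked example of how gumr() aligns a value with its uncertainty:
# gumr(123.456, 0.0234):
#   z2 = trunc(log10(0.0234)) + 1 = -1 + 1 = 0   (decade of the uncertainty)
#   z1 = around(0.0234 / 10**0, 3) = 0.023
#   y1 = around(123.456 * 10**0, 2) = 123.46
# returns ('123.46', '0.023')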
Example #31
def ImColorNamingTSELabDescriptor(ima, positions=None, patchSize=1):

    # Constants
    numColors = 11  # Number of colors
    numAchromatics = 3  # Number of achromatic colors
    numChromatics = numColors - numAchromatics  # Number of chromatic colors

    # Initializations
    numLevels = np.size(thrL) - 1  # Number of Lightness levels in the model

    # Image conversion: sRGB to CIELab
    Lab = RGB2Lab(ima)
    if positions is not None:
        if patchSize == 1:
            Lab = Lab[positions[:, 0], positions[:, 1], :].reshape((1, -1, 3))
        else:
            LabPatch = np.zeros(
                (positions.shape[0], int(2 * np.trunc(patchSize / 2) + 1)**2, 3))
            padSz = (int(np.trunc(patchSize / 2)),
                     int(np.trunc(patchSize / 2)))
            Lab = np.pad(Lab, (padSz, padSz, (0, 0)), 'symmetric')
            positions += padSz[0]
            c = 0
            for x in range(-padSz[0], padSz[0] + 1):
                for y in range(-padSz[0], padSz[0] + 1):
                    LabPatch[:, c, :] = Lab[positions[:, 0] + y,
                                            positions[:, 1] + x, :]
                    c += 1
            Lab = LabPatch

    S = np.shape(Lab)
    if Lab.ndim == 2:
        L = Lab[:, 0].flatten()
        a = Lab[:, 1].flatten()
        b = Lab[:, 2].flatten()
        nr = S[0]
        nc = 1
        # Image dimensions: rows, columns, and channels
    else:
        L = Lab[:, :, 0].flatten()
        a = Lab[:, :, 1].flatten()
        b = Lab[:, :, 2].flatten()
        nr = S[0]
        nc = S[1]
        # Image dimensions: rows, columns, and channels

    npix = nr * nc  # Number of pixels
    CD = np.zeros((npix, numColors))  # Color descriptor to store results

    # Assignment of the sample to its corresponding level
    m = np.zeros(np.shape(L))
    m[np.where(L == 0)[0]] = 1  # Pixels with L=0 assigned to level 1
    for k in range(1, numLevels + 1):
        m = m + np.double(thrL[k - 1] < L) * np.double(
            L <= thrL[k]) * np.double(k)

    m = m.astype(int) - 1

    # Computing membership values to chromatic categories
    for k in range(numChromatics):
        tx = np.reshape(parameters[k, 0, m], (npix, 1))
        ty = np.reshape(parameters[k, 1, m], (npix, 1))
        alfa_x = np.reshape(parameters[k, 2, m], (npix, 1))
        alfa_y = np.reshape(parameters[k, 3, m], (npix, 1))
        beta_x = np.reshape(parameters[k, 4, m], (npix, 1))
        beta_y = np.reshape(parameters[k, 5, m], (npix, 1))
        beta_e = np.reshape(parameters[k, 6, m], (npix, 1))
        ex = np.reshape(parameters[k, 7, m], (npix, 1))
        ey = np.reshape(parameters[k, 8, m], (npix, 1))
        angle_e = np.reshape(parameters[k, 9, m], (npix, 1))
        #figure;plot(angle_e); show()
        CD[:, k] = (np.double(beta_e != 0.0) *
                    TripleSigmoid_E(np.vstack((a, b)), tx, ty, alfa_x, alfa_y,
                                    beta_x, beta_y, beta_e, ex, ey, angle_e)).T

    # Computing membership values to achromatic categories
    valueAchro = np.squeeze(
        np.maximum(1.0 - np.reshape(np.sum(CD, axis=1), (npix, 1)),
                   np.zeros((npix, 1))))
    CD[:, numChromatics +
       0] = valueAchro * Sigmoid(L, paramsAchro[0, 0], paramsAchro[0, 1])
    CD[:, numChromatics + 1] = valueAchro * Sigmoid(
        L, paramsAchro[1, 0], paramsAchro[1, 1]) * Sigmoid(
            L, paramsAchro[2, 0], paramsAchro[2, 1])
    CD[:, numChromatics +
       2] = valueAchro * Sigmoid(L, paramsAchro[3, 0], paramsAchro[3, 1])

    # Color descriptor with color memberships to all the categories (one color in each plane)
    if positions is None or patchSize > 1:
        CD = np.reshape(CD, (nr, nc, numColors))
    if patchSize > 1:
        CD = np.sum(CD, axis=1)
        CD = CD / np.tile(np.sum(CD, axis=1).reshape(-1, 1), (1, numColors))

    if Lab.ndim == 2:
        CD = np.reshape(CD, (-1, CD.shape[2]))

    CD = CD / np.expand_dims(np.sum(CD, axis=len(CD.shape) - 1),
                             axis=len(CD.shape) - 1)
    return CD
Example #32
# %%
# 36. Extract the integer part of a random array using five different methods (★★☆)
z = np.random.uniform(-10, 10, 10)

# 1. z - z % 1 (round down)
print(z)
print(z - z % 1)
# 2. around (round to nearest)
print(np.around(z, 0))
# 3. ceil (round up)
print(np.ceil(z))
# 4. floor (round down)
print(np.floor(z))
# 5. trunc (round toward zero)
print(np.trunc(z))
# 6. rint (round to nearest)
print(np.rint(z))
# 7. astype
print(z.astype(int))

# %%
'''37. Create a 5x5 matrix where each row's values range from 0 to 4 (★★☆)'''
"""
When the matrix x is very large, computing an explicit loop in Python can be
slow. Note that adding a vector v to each row of the matrix x is equivalent to
forming a matrix vv by stacking multiple copies of v vertically and then
performing an elementwise sum of x and vv.
"""
"""
If the arrays do not have the same rank, prepend 1s to the shape of the
lower-rank array until the two shapes have the same length.
"""
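# A minimal solution to exercise 37 via that broadcasting rule (one assumed
# approach; np.tile or an explicit loop would work too):
z = np.zeros((5, 5))
z += np.arange(5)   # shape (5,) broadcasts against (5, 5), filling each row with 0..4
print(z)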
Example #33
 def get_tax_of_payment_transportation(self, engineer_history):
     if engineer_history:
         return numpy.trunc((self.payment_transportation or 0) * engineer_history.payment_tax.rate_if_exclude_tax)
     else:
         return 0
Example #34
def dftregistration(buf1ft,
                    buf2ft,
                    usfac=1,
                    return_registered=False,
                    return_error=False,
                    zeromean=True,
                    DEBUG=False,
                    maxoff=None,
                    nthreads=1,
                    use_numpy_fft=False):
    """
    translated from matlab:
    http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html

    Efficient subpixel image registration by crosscorrelation. This code
    gives the same precision as the FFT upsampled cross correlation in a
    small fraction of the computation time and with reduced memory 
    requirements. It obtains an initial estimate of the crosscorrelation peak
    by an FFT and then refines the shift estimation by upsampling the DFT
    only in a small neighborhood of that estimate by means of a 
    matrix-multiply DFT. With this procedure all the image points are used to
    compute the upsampled crosscorrelation.
    Manuel Guizar - Dec 13, 2007

    Portions of this code were taken from code written by Ann M. Kowalczyk 
    and James R. Fienup. 
    J.R. Fienup and A.M. Kowalczyk, "Phase retrieval for a complex-valued 
    object by using a low-resolution image," J. Opt. Soc. Am. A 7, 450-458 
    (1990).

    Citation for this algorithm:
    Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup, 
    "Efficient subpixel image registration algorithms," Opt. Lett. 33, 
    156-158 (2008).

    Inputs
    buf1ft    Fourier transform of reference image, 
           DC in (1,1)   [DO NOT FFTSHIFT]
    buf2ft    Fourier transform of image to register, 
           DC in (1,1) [DO NOT FFTSHIFT]
    usfac     Upsampling factor (integer). Images will be registered to 
           within 1/usfac of a pixel. For example usfac = 20 means the
           images will be registered within 1/20 of a pixel. (default = 1)

    Outputs
    output =  [error,diffphase,net_row_shift,net_col_shift]
    error     Translation invariant normalized RMS error between f and g
    diffphase     Global phase difference between the two images (should be
               zero if images are non-negative).
    net_row_shift net_col_shift   Pixel shifts between images
    Greg      (Optional) Fourier transform of registered version of buf2ft,
           the global phase difference is compensated for.
    """

    # this function is translated from matlab, so I'm just going to pretend
    # it is matlab/pylab
    from numpy import conj, abs, arctan2, sqrt, real, imag, shape, zeros, trunc, ceil, floor, fix
    from numpy.fft import fftshift, ifftshift
    fft2, ifft2 = fftn, ifftn = fast_ffts.get_ffts(nthreads=nthreads,
                                                   use_numpy_fft=use_numpy_fft)

    # Compute error for no pixel shift
    if usfac == 0:
        raise ValueError("Upsample Factor must be >= 1")
        CCmax = sum(sum(buf1ft * conj(buf2ft)))
        rfzero = sum(abs(buf1ft)**2)
        rgzero = sum(abs(buf2ft)**2)
        error = 1.0 - CCmax * conj(CCmax) / (rgzero * rfzero)
        error = sqrt(abs(error))
        diffphase = arctan2(imag(CCmax), real(CCmax))
        output = [error, diffphase]

    # Whole-pixel shift - Compute crosscorrelation by an IFFT and locate the
    # peak
    elif usfac == 1:
        [m, n] = shape(buf1ft)
        CC = ifft2(buf1ft * conj(buf2ft))
        if maxoff is None:
            rloc, cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax = CC[rloc, cloc]
        else:
            # set the interior of the shifted array to zero
            # (i.e., ignore it)
            CC[maxoff:-maxoff, :] = 0
            CC[:, maxoff:-maxoff] = 0
            rloc, cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax = CC[rloc, cloc]
        rfzero = sum(abs(buf1ft)**2) / (m * n)
        rgzero = sum(abs(buf2ft)**2) / (m * n)
        error = 1.0 - CCmax * conj(CCmax) / (rgzero * rfzero)
        error = sqrt(abs(error))
        diffphase = arctan2(imag(CCmax), real(CCmax))
        md2 = fix(m / 2)
        nd2 = fix(n / 2)
        if rloc > md2:
            row_shift = rloc - m
        else:
            row_shift = rloc

        if cloc > nd2:
            col_shift = cloc - n
        else:
            col_shift = cloc
        #output=[error,diffphase,row_shift,col_shift];
        output = [row_shift, col_shift]

    # Partial-pixel shift
    else:

        if DEBUG: import pylab
        # First upsample by a factor of 2 to obtain initial estimate
        # Embed Fourier data in a 2x larger array
        [m, n] = shape(buf1ft)
        mlarge = m * 2
        nlarge = n * 2
        CClarge = zeros([mlarge, nlarge], dtype='complex')
        #CClarge[m-fix(m/2):m+fix((m-1)/2)+1,n-fix(n/2):n+fix((n-1)/2)+1] = fftshift(buf1ft) * conj(fftshift(buf2ft));
        CClarge[round(mlarge / 4.):round(mlarge / 4. * 3),
                round(nlarge /
                      4.):round(nlarge / 4. *
                                3)] = fftshift(buf1ft) * conj(fftshift(buf2ft))
        # note that matlab uses fix which is trunc... ?

        # Compute crosscorrelation and locate the peak
        CC = ifft2(ifftshift(CClarge))
        # Calculate cross-correlation
        if maxoff is None:
            rloc, cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax = CC[rloc, cloc]
        else:
            # set the interior of the shifted array to zero
            # (i.e., ignore it)
            CC[maxoff:-maxoff, :] = 0
            CC[:, maxoff:-maxoff] = 0
            rloc, cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax = CC[rloc, cloc]

        if DEBUG:
            pylab.figure(1)
            pylab.clf()
            pylab.subplot(131)
            pylab.imshow(real(CC))
            pylab.title("Cross-Correlation (upsampled 2x)")
            pylab.subplot(132)
            ups = dftups((buf1ft) * conj((buf2ft)), mlarge, nlarge, 2, 0, 0)
            pylab.title("dftups upsampled 2x")
            pylab.imshow(real(((ups))))
            pylab.subplot(133)
            pylab.imshow(real(CC) / real(ups))
            pylab.title("Ratio upsampled/dftupsampled")
            print "Upsample by 2 peak: ", rloc, cloc, " using dft version: ", np.unravel_index(
                abs(ups).argmax(), ups.shape)
            #print np.unravel_index(ups.argmax(),ups.shape)

        # Obtain shift in original pixel grid from the position of the
        # crosscorrelation peak
        [m, n] = shape(CC)
        md2 = trunc(m / 2)
        nd2 = trunc(n / 2)
        if rloc > md2:
            row_shift2 = rloc - m
        else:
            row_shift2 = rloc
        if cloc > nd2:
            col_shift2 = cloc - n
        else:
            col_shift2 = cloc
        row_shift2 = row_shift2 / 2.
        col_shift2 = col_shift2 / 2.
        if DEBUG:
            print "row_shift/col_shift from ups2: ", row_shift2, col_shift2

        # If upsampling > 2, then refine estimate with matrix multiply DFT
        if usfac > 2:
            #%% DFT computation %%%
            # Initial shift estimate in upsampled grid
            zoom_factor = 1.5
            if DEBUG: print(row_shift2, col_shift2)
            row_shift0 = round(row_shift2 * usfac) / usfac
            col_shift0 = round(col_shift2 * usfac) / usfac
            dftshift = trunc(ceil(usfac * zoom_factor) / 2)
            #% Center of output array at dftshift+1
            if DEBUG:
                print('dftshift,rs,cs,zf:', dftshift, row_shift0, col_shift0, usfac * zoom_factor)
            # Matrix multiply DFT around the current shift estimate
            roff = dftshift - row_shift0 * usfac
            coff = dftshift - col_shift0 * usfac
            upsampled = dftups((buf2ft * conj(buf1ft)),
                               ceil(usfac * zoom_factor),
                               ceil(usfac * zoom_factor), usfac, roff, coff)
            #CC = conj(dftups(buf2ft.*conj(buf1ft),ceil(usfac*1.5),ceil(usfac*1.5),usfac,...
            #    dftshift-row_shift*usfac,dftshift-col_shift*usfac))/(md2*nd2*usfac^2);
            CC = conj(upsampled) / (md2 * nd2 * usfac**2)
            if DEBUG:
                pylab.figure(2)
                pylab.clf()
                pylab.subplot(221)
                pylab.imshow(abs(upsampled))
                pylab.title('upsampled')
                pylab.subplot(222)
                pylab.imshow(abs(CC))
                pylab.title('CC upsampled')
                pylab.subplot(223)
                pylab.imshow(
                    np.abs(np.fft.fftshift(np.fft.ifft2(buf2ft *
                                                        conj(buf1ft)))))
                pylab.title('xc')
                yy, xx = np.indices([m * usfac, n * usfac], dtype='float')
                pylab.contour(
                    yy / usfac / 2. - 0.5 + 1, xx / usfac / 2. - 0.5 - 1,
                    np.abs(
                        dftups((buf2ft * conj(buf1ft)), m * usfac, n * usfac,
                               usfac)))
                pylab.subplot(224)
                pylab.imshow(
                    np.abs(
                        dftups((buf2ft * conj(buf1ft)),
                               ceil(usfac * zoom_factor),
                               ceil(usfac * zoom_factor), usfac)))
                pylab.title('unshifted ups')
            # Locate maximum and map back to original pixel grid
            rloc, cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            rloc0, cloc0 = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax = CC[rloc, cloc]
            #[max1,loc1] = CC.max(axis=0), CC.argmax(axis=0)
            #[max2,loc2] = max1.max(),max1.argmax()
            #rloc=loc1[loc2];
            #cloc=loc2;
            #CCmax = CC[rloc,cloc];
            rg00 = dftups(buf1ft * conj(buf1ft), 1, 1,
                          usfac) / (md2 * nd2 * usfac**2)
            rf00 = dftups(buf2ft * conj(buf2ft), 1, 1,
                          usfac) / (md2 * nd2 * usfac**2)
            #if DEBUG: print rloc,row_shift,cloc,col_shift,dftshift
            rloc = rloc - dftshift  #+ 1 # +1 # questionable/failed hack + 1;
            cloc = cloc - dftshift  #+ 1 # -1 # questionable/failed hack - 1;
            #if DEBUG: print rloc,row_shift,cloc,col_shift,dftshift
            row_shift = row_shift0 + rloc / usfac
            col_shift = col_shift0 + cloc / usfac
            #if DEBUG: print rloc/usfac,row_shift,cloc/usfac,col_shift
            if DEBUG:
                print("Off by: ", (0.25 - float(rloc) / usfac) * usfac,
                      (-0.25 - float(cloc) / usfac) * usfac)
            if DEBUG: print("correction was: ", rloc / usfac, cloc / usfac)
            if DEBUG:
                print("Coordinate went from", row_shift2, col_shift2, "to",
                      row_shift0, col_shift0, "to", row_shift, col_shift)
            if DEBUG: print("dftsh - usfac:", dftshift - usfac)
            if DEBUG:
                print(rloc, cloc, row_shift, col_shift, CCmax, dftshift, rloc0, cloc0)

        # If upsampling = 2, no additional pixel shift refinement
        else:
            rg00 = sum(sum(buf1ft * conj(buf1ft))) / m / n
            rf00 = sum(sum(buf2ft * conj(buf2ft))) / m / n
            row_shift = row_shift2
            col_shift = col_shift2
        error = 1.0 - CCmax * conj(CCmax) / (rg00 * rf00)
        error = sqrt(abs(error))
        diffphase = arctan2(imag(CCmax), real(CCmax))
        # If its only one row or column the shift along that dimension has no
        # effect. We set to zero.
        if md2 == 1:
            row_shift = 0
        if nd2 == 1:
            col_shift = 0
        #output=[error,diffphase,row_shift,col_shift];
        output = [row_shift, col_shift]

    if return_error:
        # simple estimate of the precision of the fft approach
        output += [1. / usfac, 1. / usfac]

    # Compute registered version of buf2ft
    if (return_registered):
        if (usfac > 0):
            nr, nc = shape(buf2ft)
            Nr = np.fft.ifftshift(
                np.linspace(-np.fix(nr / 2),
                            np.ceil(nr / 2) - 1, nr))
            Nc = np.fft.ifftshift(
                np.linspace(-np.fix(nc / 2),
                            np.ceil(nc / 2) - 1, nc))
            [Nc, Nr] = np.meshgrid(Nc, Nr)
            Greg = buf2ft * np.exp(
                1j * 2 * np.pi * (-row_shift * Nr / nr - col_shift * Nc / nc))
            Greg = Greg * np.exp(1j * diffphase)
        elif (usfac == 0):
            Greg = buf2ft * np.exp(1j * diffphase)
        output.append(Greg)

    return output
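# A hedged usage sketch (im1/im2 are illustrative, not from the source):
# register a shifted copy of an image and recover the integer shift.
im1 = np.random.rand(64, 64)
im2 = np.roll(im1, (3, -2), axis=(0, 1))   # shift by +3 rows, -2 columns
row_shift, col_shift = dftregistration(np.fft.fft2(im1), np.fft.fft2(im2), usfac=1)
print(row_shift, col_shift)                # expect (-3, 2): the shift registering im2 onto im1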
Example #35
def hep(trange=['2017-03-27', '2017-03-28'],
        datatype='omniflux',
        level='l2',
        suffix='',
        get_support_data=False,
        varformat=None,
        varnames=[],
        downloadonly=False,
        notplot=False,
        no_update=False,
        uname=None,
        passwd=None,
        time_clip=False,
        ror=True,
        version=None):
    """
    This function loads data from the HEP experiment from the Arase mission

    Parameters:
        trange : list of str
            time range of interest [starttime, endtime] with the format
            ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']

        datatype: str
            Data type; Valid options:

        level: str
            Data level; Valid options:

        suffix: str
            The tplot variable names will be given this suffix.  By default,
            no suffix is added.

        get_support_data: bool
            Data with an attribute "VAR_TYPE" with a value of "support_data"
            will be loaded into tplot.  By default, only loads in data with a
            "VAR_TYPE" attribute of "data".

        varformat: str
            The file variable formats to load into tplot.  Wildcard character
            "*" is accepted.  By default, all variables are loaded in.

        varnames: list of str
            List of variable names to load (if not specified,
            all data variables are loaded)

        downloadonly: bool
            Set this flag to download the CDF files, but not load them into
            tplot variables

        notplot: bool
            Return the data in hash tables instead of creating tplot variables

        no_update: bool
            If set, only load data from your local cache

        time_clip: bool
            Time clip the variables to exactly the range specified in the trange keyword

        ror: bool
            If set, print PI info and rules of the road

        version: str
            Set this value to specify the version of cdf files (such as "v01_02", "v01_03", ...)

    Returns:
        List of tplot variables created.

    """

    file_res = 3600. * 24
    prefix = 'erg_hep_'+level+'_'

    if level == 'l2':
        pathformat = 'satellite/erg/hep/'+level+'/'+datatype + \
            '/%Y/%m/erg_hep_'+level+'_'+datatype + '_%Y%m%d_'
        if version is None:
            pathformat += 'v??_??.cdf'
        else:
            pathformat += version + '.cdf'
    if level == 'l3':
        pathformat = 'satellite/erg/hep/'+level + \
            '/pa/%Y/%m/erg_hep_'+level+'_pa_%Y%m%d_'
        if version is None:
            pathformat += 'v??_??.cdf'
        else:
            pathformat += version + '.cdf'

    initial_notplot_flag = False
    if notplot:
        initial_notplot_flag = True

    if ((level == 'l2') and (datatype == 'omniflux')) or (datatype == '3dflux') or (level == 'l3'):
        # to avoid failure of creation plot variables (at store_data.py) of hep
        notplot = True

    loaded_data = load(pathformat=pathformat, trange=trange, level=level, datatype=datatype, file_res=file_res, prefix=prefix, suffix=suffix, get_support_data=get_support_data,
                       varformat=varformat, varnames=varnames, downloadonly=downloadonly, notplot=notplot, time_clip=time_clip, no_update=no_update, uname=uname, passwd=passwd, version=version)

    if (len(loaded_data) > 0) and ror:

        try:
            if isinstance(loaded_data, list):
                if downloadonly:
                    cdf_file = cdflib.CDF(loaded_data[-1])
                    gatt = cdf_file.globalattsget()
                else:
                    gatt = get_data(loaded_data[-1], metadata=True)['CDF']['GATT']
            elif isinstance(loaded_data, dict):
                gatt = loaded_data[list(loaded_data.keys())[-1]]['CDF']['GATT']
                
            # --- print PI info and rules of the road

            print(' ')
            print(
                '**************************************************************************')
            print(gatt["LOGICAL_SOURCE_DESCRIPTION"])
            print('')
            print('PI: ', gatt['PI_NAME'])
            print("Affiliation: "+gatt["PI_AFFILIATION"])
            print('')
            print('- The rules of the road (RoR) common to the ERG project:')
            print(
                '       https://ergsc.isee.nagoya-u.ac.jp/data_info/rules_of_the_road.shtml.en')
            print(
                '- RoR for HEP data: https://ergsc.isee.nagoya-u.ac.jp/mw/index.php/ErgSat/Hep')
            if level == 'l3':
                print(
                    '- RoR for MGF data: https://ergsc.isee.nagoya-u.ac.jp/mw/index.php/ErgSat/Mgf')
            print('')
            print('Contact: erg_hep_info at isee.nagoya-u.ac.jp')
            print(
                '**************************************************************************')
        except:
            print('Printing PI info and rules of the road failed')

    if initial_notplot_flag or downloadonly:
        return loaded_data

    if isinstance(loaded_data, dict):

        if (level == 'l2') and (datatype == 'omniflux'):
            tplot_variables = []
            if prefix + 'FEDO_L' + suffix in loaded_data:
                v_vars_min = loaded_data[prefix + 'FEDO_L' + suffix]['v'][0]
                v_vars_max = loaded_data[prefix + 'FEDO_L' + suffix]['v'][1]
                # log average of energy bins
                v_vars = np.power(
                    10., (np.log10(v_vars_min) + np.log10(v_vars_max)) / 2.)
                store_data(prefix + 'FEDO_L' + suffix, data={'x': loaded_data[prefix + 'FEDO_L' + suffix]['x'],
                                                             'y': loaded_data[prefix + 'FEDO_L' + suffix]['y'],
                                                             'v': v_vars},
                           attr_dict={'CDF':loaded_data[prefix + 'FEDO_L' + suffix]['CDF']})
                tplot_variables.append(prefix + 'FEDO_L' + suffix)

            if prefix + 'FEDO_H' + suffix in loaded_data:
                v_vars_min = loaded_data[prefix + 'FEDO_H' + suffix]['v'][0]
                v_vars_max = loaded_data[prefix + 'FEDO_H' + suffix]['v'][1]
                # log average of energy bins
                v_vars = np.power(
                    10., (np.log10(v_vars_min) + np.log10(v_vars_max)) / 2.)
                store_data(prefix + 'FEDO_H' + suffix, data={'x': loaded_data[prefix + 'FEDO_H' + suffix]['x'],
                                                             'y': loaded_data[prefix + 'FEDO_H' + suffix]['y'],
                                                             'v': v_vars},
                           attr_dict={'CDF':loaded_data[prefix + 'FEDO_H' + suffix]['CDF']})
                tplot_variables.append(prefix + 'FEDO_H' + suffix)

            # remove negative values from the y array
            if prefix + 'FEDO_L' + suffix in tplot_variables:
                clip(prefix + 'FEDO_L' + suffix, 0., 1.0e+10)
            if prefix + 'FEDO_H' + suffix in tplot_variables:
                clip(prefix + 'FEDO_H' + suffix, 0., 1.0e+10)

            # set spectrogram plot option
            options(prefix + 'FEDO_L' + suffix, 'Spec', 1)
            options(prefix + 'FEDO_H' + suffix, 'Spec', 1)

            # set y axis to logscale
            options(prefix + 'FEDO_L' + suffix, 'ylog', 1)
            options(prefix + 'FEDO_H' + suffix, 'ylog', 1)

            # set yrange
            options(prefix + 'FEDO_L' + suffix, 'yrange', [3.0e+01, 2.0e+03])
            options(prefix + 'FEDO_H' + suffix, 'yrange', [7.0e+01, 2.0e+03])

            # set ytitle
            options(prefix + 'FEDO_L' + suffix, 'ytitle',
                    'HEP-L\nomniflux\nLv2\nEnergy')
            options(prefix + 'FEDO_H' + suffix, 'ytitle',
                    'HEP-H\nomniflux\nLv2\nEnergy')

            # set ysubtitle
            options(prefix + 'FEDO_L' + suffix, 'ysubtitle', '[keV]')
            options(prefix + 'FEDO_H' + suffix, 'ysubtitle', '[keV]')

            # set ylim
            if prefix + 'FEDO_L' + suffix in tplot_variables:
                ylim(prefix + 'FEDO_L' + suffix, 30, 1800)
            if prefix + 'FEDO_H' + suffix in tplot_variables:
                ylim(prefix + 'FEDO_H' + suffix, 500, 2048)

            # set z axis to logscale
            options(prefix + 'FEDO_L' + suffix, 'zlog', 1)
            options(prefix + 'FEDO_H' + suffix, 'zlog', 1)

            # set zrange
            options(prefix + 'FEDO_L' + suffix, 'zrange', [1.0e-15, 1.0e+06])
            options(prefix + 'FEDO_H' + suffix, 'zrange', [1.0e-10, 1.0e+5])

            # set ztitle
            options(prefix + 'FEDO_L' + suffix,
                    'ztitle', '[/cm^{2}-str-s-keV]')
            options(prefix + 'FEDO_H' + suffix,
                    'ztitle', '[/cm^{2}-str-s-keV]')

            # set zlim
            if prefix + 'FEDO_L' + suffix in tplot_variables:
                zlim(prefix + 'FEDO_L' + suffix, 1e+0, 1e+5)
            if prefix + 'FEDO_H' + suffix in tplot_variables:
                zlim(prefix + 'FEDO_H' + suffix, 1e+0, 1e+5)

            # change colormap option
            options(prefix + 'FEDO_L' + suffix,  'Colormap', 'jet')
            options(prefix + 'FEDO_H' + suffix,  'Colormap', 'jet')

            return tplot_variables

        if (level == 'l2') and (datatype == '3dflux'):
            tplot_variables = []
            v2_array = [i for i in range(15)]

            if prefix + 'FEDU_L' + suffix in loaded_data:

                store_data(prefix + 'FEDU_L' + suffix, data={'x': loaded_data[prefix + 'FEDU_L' + suffix]['x'],
                                                             'y': loaded_data[prefix + 'FEDU_L' + suffix]['y'],
                                                             'v1': np.sqrt(loaded_data[prefix + 'FEDU_L' + suffix]['v'][0, :] *
                                                                           loaded_data[prefix + 'FEDU_L' + suffix]['v'][1, :]),  # geometric mean for 'v1'
                                                             'v2': v2_array},
                           attr_dict={'CDF':loaded_data[prefix + 'FEDU_L' + suffix]['CDF']})
                tplot_variables.append(prefix + 'FEDU_L' + suffix)
                clip(prefix + 'FEDU_L' + suffix, -1.0e+10, 1.0e+10)

            if prefix + 'FEDU_H' + suffix in loaded_data:

                store_data(prefix + 'FEDU_H' + suffix, data={'x': loaded_data[prefix + 'FEDU_H' + suffix]['x'],
                                                             'y': loaded_data[prefix + 'FEDU_H' + suffix]['y'],
                                                             'v1': np.sqrt(loaded_data[prefix + 'FEDU_H' + suffix]['v'][0, :] *
                                                                           loaded_data[prefix + 'FEDU_H' + suffix]['v'][1, :]),  # geometric mean for 'v1'
                                                             'v2': v2_array},
                           attr_dict={'CDF':loaded_data[prefix + 'FEDU_H' + suffix]['CDF']})
                tplot_variables.append(prefix + 'FEDU_H' + suffix)
                clip(prefix + 'FEDU_H' + suffix, -1.0e+10, 1.0e+10)

            return tplot_variables

        if level == 'l3':  # implementation for level = 'l3'

            tplot_variables = []

            if prefix + 'FEDU_L' + suffix in loaded_data:

                L_energy_array_ave = np.sqrt(loaded_data[prefix + 'FEDU_L' + suffix]['v1'][0, :] *
                                             loaded_data[prefix + 'FEDU_L' + suffix]['v1'][1, :])  # geometric mean for 'v1'

                # get energy [keV] array for ytitle options
                L_energy_array = np.trunc(L_energy_array_ave).astype(int)
                non_negative_y_array = np.where(
                    loaded_data[prefix + 'FEDU_L' + suffix]['y'] < 0., np.nan, loaded_data[prefix + 'FEDU_L' + suffix]['y'])
                store_data(prefix + 'FEDU_L' + suffix, data={'x': loaded_data[prefix + 'FEDU_L' + suffix]['x'],
                                                             'y': non_negative_y_array,
                                                             'v1': L_energy_array_ave,
                                                             'v2': loaded_data[prefix + 'FEDU_L' + suffix]['v2']},
                           attr_dict={'CDF':loaded_data[prefix + 'FEDU_L' + suffix]['CDF']})

                options(prefix + 'FEDU_L' + suffix, 'spec', 1)
                # set ylim
                ylim(prefix + 'FEDU_L' + suffix, 0, 180)
                # set zlim
                zlim(prefix + 'FEDU_L' + suffix, 1e+2, 1e+6)

                tplot_variables.append(prefix + 'FEDU_L' + suffix)

                # make Tplot Variables of erg_hep_l3_FEDU_L_paspec_ene?? (??: 00, 01, 02, ..., 15)
                for i in range(loaded_data[prefix + 'FEDU_L' + suffix]['y'].shape[1]):
                    tplot_name = prefix + 'FEDU_L_paspec_ene' + \
                        str(i).zfill(2) + suffix
                    store_data(tplot_name, data={'x': loaded_data[prefix + 'FEDU_L' + suffix]['x'],
                                                 'y': non_negative_y_array[:, i, :],
                                                 'v': loaded_data[prefix + 'FEDU_L' + suffix]['v2']},
                               attr_dict={'CDF':loaded_data[prefix + 'FEDU_L' + suffix]['CDF']})

                    options(tplot_name, 'spec', 1)
                    # set ylim
                    ylim(tplot_name, 0, 180)
                    # set zlim
                    zlim(tplot_name, 1e+2, 1e+6)
                    # set ytitle
                    options(
                        tplot_name, 'ytitle', f'HEP-L\nEne{str(i).zfill(2)}\n{L_energy_array[i]} keV')

                    tplot_variables.append(tplot_name)

            if prefix + 'FEDU_H' + suffix in loaded_data:

                H_energy_array_ave = np.sqrt(loaded_data[prefix + 'FEDU_H' + suffix]['v1'][0, :] *
                                             loaded_data[prefix + 'FEDU_H' + suffix]['v1'][1, :])  # geometric mean for 'v1'

                # get energy [keV] array for ytitle options
                H_energy_array = np.trunc(H_energy_array_ave).astype(int)
                non_negative_y_array = np.where(
                    loaded_data[prefix + 'FEDU_H' + suffix]['y'] < 0., np.nan, loaded_data[prefix + 'FEDU_H' + suffix]['y'])
                store_data(prefix + 'FEDU_H' + suffix, data={'x': loaded_data[prefix + 'FEDU_H' + suffix]['x'],
                                                             'y': non_negative_y_array,
                                                             'v1': H_energy_array_ave,
                                                             'v2': loaded_data[prefix + 'FEDU_H' + suffix]['v2']},
                           attr_dict={'CDF':loaded_data[prefix + 'FEDU_H' + suffix]['CDF']})

                options(prefix + 'FEDU_H' + suffix, 'spec', 1)
                # set ylim
                ylim(prefix + 'FEDU_H' + suffix, 0, 180)
                # set zlim
                zlim(prefix + 'FEDU_H' + suffix, 1e+1, 1e+4)

                tplot_variables.append(prefix + 'FEDU_H' + suffix)

                # make Tplot Variables of erg_hep_l3_FEDU_H_paspec_ene?? (??: 00, 01, 02, ..., 10)
                for i in range(loaded_data[prefix + 'FEDU_H' + suffix]['y'].shape[1]):
                    tplot_name = prefix + 'FEDU_H_paspec_ene' + \
                        str(i).zfill(2) + suffix
                    store_data(tplot_name, data={'x': loaded_data[prefix + 'FEDU_H' + suffix]['x'],
                                                 'y': non_negative_y_array[:, i, :],
                                                 'v': loaded_data[prefix + 'FEDU_H' + suffix]['v2']},
                               attr_dict={'CDF':loaded_data[prefix + 'FEDU_H' + suffix]['CDF']})

                    options(tplot_name, 'spec', 1)
                    # set ylim
                    ylim(tplot_name, 0, 180)
                    # set zlim
                    zlim(tplot_name, 1e+1, 1e+4)
                    # set ytitle
                    options(
                        tplot_name, 'ytitle', f'HEP-H\nEne{str(i).zfill(2)}\n{H_energy_array[i]} keV')

                    tplot_variables.append(tplot_name)

            # set z axis to logscale
            options(tplot_variables, 'zlog', 1)
            # change colormap option
            options(tplot_variables, 'colormap', 'jet')
            # set ysubtitle
            options(tplot_variables, 'ysubtitle', 'PA [deg]')
            # set ztitle
            options(tplot_variables, 'ztitle', '[/keV/cm^{2}/sr/s]')

            return tplot_variables

    return loaded_data
Beispiel #36
0
def dec2date(indata,
             calendar='standard',
             refdate=None,
             units=None,
             excelerr=True,
             fulldate=None,
             yr=False,
             mo=False,
             dy=False,
             hr=False,
             mi=False,
             sc=False,
             ascii=False,
             en=False,
             eng=False):
    """
    Converts scalar and array_like decimal dates into
    calendar dates. Supported time formats are:
    standard, gregorian, julian, proleptic_gregorian,
    excel1900, excel1904, 365_day, noleap, 366_day, all_leap,
    and 360_day.

    Input is decimal date in units of days.

    Output is year, month, day, hour, minute, second
    or any combination of them. Output in string format is possible.


    Parameters
    ----------
    indata : array_like
        Input decimal dates. Dates must be positive.
    calendar : str, optional
        Calendar of input dates (default: 'standard').

        Possible values are:

        'standard', 'gregorian' = julian calendar from
        01.01.-4712 12:00:00 (BC) until 05.03.1583 00:00:00 and
        gregorian calendar from 15.03.1583 00:00:00 until now.
        The 10 missing days in between do not exist.

        'julian' = julian calendar from 01.01.-4712 12:00:00 (BC)
         until now.

        'proleptic_gregorian' = gregorian calendar from
        01.01.0001 00:00:00 until now.

        'excel1900' = Excel dates with origin at
        01.01.1900 00:00:00.

        'excel1904' = Excel 1904 (Lotus) format.
        Same as excel1900 but with origin at
        01.01.1904 00:00:00.

        '365_day', 'noleap' = 365 days format,
        i.e. common years only (no leap years)
        with origin at 01.01.0001 00:00:00.

        '366_day', 'all_leap' = 366 days format,
        i.e. leap years only (no common years)
        with origin at 01.01.0001 00:00:00.

        '360_day' = 360 days format,
        i.e. years with only 360 days (30 days per month)
        with origin at 01.01.0001 00:00:00.

        'decimal' = decimal year instead of decimal days.

        'decimal360' = decimal year with a year of 360 days, i.e. 12 month with 30 days each.


    Optional Arguments
    ------------------
    refdate : str, optional
        Reference date for 'days since refdate' can be set by user. Input must be a
        string in the format 'yyyy-mm-dd hh:mm:ss'.
        Default values for different `calendars` are set automatically.
    units : str, optional
        Units of the time stamp can be given. Only the following two are supported:

        'day as %Y%m%d.%f', or

        'days since refdate', with refdate in the format 'yyyy-mm-dd hh:mm:ss'.

        If `units='day as %Y%m%d.%f'` then `calendar` is ignored and the dates are returned
        assuming that %f is the fraction of the day, counted from 00:00:00 h.
    excelerr : bool, optional
       In Excel, the year 1900 is normally considered a leap year,
       which it was not. By default, this error is taken into account
       if calendar='excel1900' (default: True).

       1900 is not considered a leap year if excelerr=False.


    Returns
    -------
    list of array_like
        `fulldate` -> output arrays with year, month, day, hour, minute, second

                      The default `fulldate` is overwritten by selecting any of the special outputs `yr`, `mo`, `dy`, `hr`, `mi`, `sc`

        `yr`       -> output array with year

        `mo`       -> output array with month

        `dy`       -> output array with day

        `hr`       -> output array with hour

        `mi`       -> output array with minute

        `sc`       -> output array with second

        `ascii`    -> output array with strings of the format 'dd.mm.yyyy hh:mm:ss'

        `en`      -> output array with strings of the format 'yyyy-mm-dd hh:mm:ss'

        `eng`     -> Same as `en`; obsolete.

    Notes
    -----
    Most versions of `datetime` do not support negative years,
    i.e. Julian days < 1721423.5 = 01.01.0001 00:00.

    There is an issue in `netcdftime` version < 0.9.5 in proleptic_gregorian for dates before year 301:
      dec2date(date2dec(ascii='01.01.0300 00:00:00', calendar='proleptic_gregorian'), calendar='proleptic_gregorian')
        [300, 1, 2, 0, 0, 0]
      dec2date(date2dec(ascii='01.01.0301 00:00:00', calendar='proleptic_gregorian'), calendar='proleptic_gregorian')
        [301, 1, 1, 0, 0, 0]

    Requires `netcdftime.py` from the netcdftime module, available at:
        http://netcdf4-python.googlecode.com

    Examples
    --------
    # Some implementations of datetime have problems with negative years
    >>> import datetime
    >>> if datetime.MINYEAR > 0:
    ...     print('The minimum year in your datetime implementation is ', datetime.MINYEAR)
    ...     print('i.e. it does not support negative years (BC).')
    The minimum year in your datetime implementation is  1
    i.e. it does not support negative years (BC).

    #calendar = 'standard'
    >>> year   = np.array([2000,1810,1630,1510,1271,619,1])
    >>> month  = np.array([1,4,7,9,3,8,1])
    >>> day    = np.array([5,24,15,20,18,27,1])
    >>> hour   = np.array([12,16,10,14,19,11,12])
    >>> minute = np.array([30,15,20,35,41,8,0])
    >>> second = np.array([15,10,40,50,34,37,0])
    >>> from date2dec import date2dec
    >>> decimal = date2dec(calendar='standard', yr=year, mo=month, dy=day, hr=hour, mi=minute, sc=second)
    >>> year1, month1, day1, hour1, minute1, second1 = dec2date(decimal, calendar= 'standard', fulldate=True)
    >>> print(year1)
    [2000 1810 1630 1510 1271  619    1]
    >>> print(month1)
    [1 4 7 9 3 8 1]
    >>> print(day1)
    [ 5 24 15 20 18 27  1]
    >>> print(hour1)
    [12 16 10 14 19 11 12]
    >>> print(minute1)
    [30 15 20 35 41  8  0]
    >>> print(second1)
    [15 10 40 50 34 37  0]

    # calendar = 'julian'
    >>> decimal = date2dec(calendar='julian', yr=year, mo=month, dy=day, hr=hour, mi=minute, sc=second)
    >>> year1 = dec2date(decimal, calendar='julian', yr=True)
    >>> print(year1)
    [2000 1810 1630 1510 1271  619    1]

    # calendar = 'proleptic_gregorian'
    >>> decimal = date2dec(calendar='proleptic_gregorian', yr=year, mo=month, dy=day, hr=hour, mi=minute, sc=second)
    >>> ascii = dec2date(decimal, calendar='proleptic_gregorian', ascii=True)
    >>> print(ascii[::4])
    ['05.01.2000 12:30:15' '18.03.1271 19:41:34']

    # calendar = 'excel1900' WITH excelerr = True -> 1900 considered as leap year
    >>> decimal = date2dec(calendar='excel1900', yr=year, mo=month, dy=day, hr=hour, mi=minute, sc=second)
    >>> year1, day1 = dec2date(decimal, calendar='excel1900', yr=True, dy=True)
    >>> print(year1)
    [2000 1810 1630 1510 1271  619    1]
    >>> print(day1)
    [ 5 24 15 20 18 27  1]

    # calendar = 'excel1900' WITH excelerr = False -> 1900 considered as NO leap year
    # Older versions of netcdftime.py produced unnecessary output (Line 262)
    # >>> decimal = date2dec(calendar='excel1900',yr=year,mo=month,dy=day,hr=hour,mi=minute,sc=second,excelerr=False)
    # >>> if nt.__version__ < '0.9.4':
    # ...     asciidate = dec2date(decimal, calendar='excel1900', ascii = True, excelerr = False)
    # ... elif nt.__version__ == '0.9.4':
    # ...     asciidate = dec2date(decimal, calendar='excel1900', ascii = True, excelerr = False)
    # ...     for i in range(3):
    # ...         print('0 300')
    # ... else:
    # ...     asciidate = dec2date(decimal, calendar='excel1900', ascii = True, excelerr = False)
    # ...     for i in range(7):
    # ...         print('0 300')
    # 0 300
    # 0 300
    # 0 300
    # 0 300
    # 0 300
    # 0 300
    # 0 300

    # >>> print(asciidate[::4])
    # ['05.01.2000 12:30:15' '18.03.1271 19:41:34']

    # calendar = 'excel1904'
    >>> decimal = date2dec(calendar='excel1904', yr=year, mo=month, dy=day, hr=hour, mi=minute, sc=second)
    >>> asciidate = dec2date(decimal, calendar='excel1904', ascii = True)
    >>> print(asciidate[::4])
    ['05.01.2000 12:30:15' '18.03.1271 19:41:34']
    >>> asciidate = dec2date(decimal, calendar='excel1904', ascii=True, refdate='1909-12-31 00:00:00')
    >>> print(asciidate[::4])
    ['05.01.2006 12:30:15' '18.03.1277 19:41:34']
    >>> print(dec2date(decimal[::4], calendar='excel1904', ascii=True, units='days since 1909-12-31 00:00:00'))
    ['05.01.2006 12:30:15' '18.03.1277 19:41:34']
    >>> print(dec2date(decimal[::4], calendar='excel1904', ascii=True, units='days since 1910-01-01 00:00:00'))
    ['06.01.2006 12:30:15' '19.03.1277 19:41:34']

    # check especially 1900 (no) leap year in Excel
    >>> year1   = np.array([1900,1900,1900,1900])
    >>> month1  = np.array([2,2,3,1])
    >>> day1    = np.array([28,29,1,1])
    >>> decimal = date2dec(calendar='excel1900', yr=year1, mo=month1, dy=day1)
    >>> month2, day2 = dec2date(decimal, calendar='excel1900', mo=True, dy=True)
    >>> print(month2)
    [2 2 3 1]
    >>> print(day2)
    [28 29  1  1]
    >>> decimal = date2dec(calendar='excel1900', yr=year1, mo=month1, dy=day1, excelerr=False)
    >>> month2, day2 = dec2date(decimal, calendar='excel1900', mo=True, dy=True, excelerr=False)
    >>> print(month2)
    [2 3 3 1]
    >>> print(day2)
    [28  1  1  1]
    >>> decimal = date2dec(calendar='excel1904', yr=year1, mo=month1, dy=day1)
    >>> month2, day2 = dec2date(decimal, calendar='excel1904', mo=True, dy=True)
    >>> print(month2)
    [2 3 3 1]
    >>> print(day2)
    [28  1  1  1]

    # calendar = '365_day'
    >>> decimal = date2dec(calendar='365_day',yr=year,mo=month,dy=day,hr=hour,mi=minute,sc=second)
    >>> asciidate = dec2date(decimal, calendar='365_day', ascii = True)
    >>> print(asciidate[::4])
    ['05.01.2000 12:30:15' '18.03.1271 19:41:34']

    # calendar = '366_day'
    >>> decimal = date2dec(calendar='366_day',yr=year,mo=month,dy=day,hr=hour,mi=minute,sc=second)
    >>> asciidate = dec2date(decimal, calendar='366_day', ascii = True)
    >>> print(asciidate[::4])
    ['05.01.2000 12:30:15' '18.03.1271 19:41:34']

    # calendar = '360_day'
    >>> decimal = date2dec(calendar='360_day',yr=year,mo=month,dy=day,hr=hour,mi=minute,sc=second)
    >>> asciidate = dec2date(decimal, calendar='360_day', ascii = True)
    >>> print(asciidate[::4])
    ['05.01.2000 12:30:15' '18.03.1271 19:41:34']

    >>> print(dec2date(719644.52101, calendar='proleptic_gregorian', ascii = True))
    28.04.1971 12:30:15
    >>> dec = date2dec(ascii='02.03.1910 03:44:55', calendar='decimal')
    >>> print(dec2date(dec, calendar='decimal', ascii=True))
    02.03.1910 03:44:55
    >>> dec = date2dec(ascii='02.03.1910 03:44:55', calendar='decimal360')
    >>> print(dec2date(dec, calendar='decimal360', ascii=True))
    02.03.1910 03:44:55
    >>> print(dec2date([dec,dec], calendar='decimal360', ascii=True))
    ['02.03.1910 03:44:55', '02.03.1910 03:44:55']
    >>> print(dec2date([[dec,dec],[dec,dec],[dec,dec]], calendar='decimal360', ascii=True)[0])
    ['02.03.1910 03:44:55', '02.03.1910 03:44:55']
    >>> print(dec2date(np.array([dec,dec]), calendar='decimal360', ascii=True))
    ['02.03.1910 03:44:55' '02.03.1910 03:44:55']
    >>> print(dec2date(np.array([[dec,dec],[dec,dec],[dec,dec]]), calendar='decimal360', ascii=True)[0:2,0])
    ['02.03.1910 03:44:55' '02.03.1910 03:44:55']

    >>> absolut = np.array([20070102.0034722, 20070102.0069444])
    >>> print(dec2date(absolut, units='day as %Y%m%d.%f', ascii=True))
    ['02.01.2007 00:05:00' '02.01.2007 00:10:00']
    >>> absolut = [20070102.0034722, 20070102.0069444]
    >>> print(dec2date(absolut, units='day as %Y%m%d.%f', ascii=True))
    ['02.01.2007 00:05:00', '02.01.2007 00:10:00']

    >>> absolut = np.array([200401.5, 200402.5, 201011.5, 201002.5])
    >>> print(dec2date(absolut, units='month as %Y%m.%f', ascii=True))
    ['15.01.2004 12:00:00' '14.02.2004 12:00:00' '15.11.2010 00:00:00' '14.02.2010 00:00:00']

    >>> absolut = np.array([2004.5, 2010.5])
    >>> print(dec2date(absolut, units='year as %Y.%f', ascii=True))
    ['01.07.2004 00:00:00' '01.07.2010 12:00:00']

    # en, eng
    >>> print(dec2date(719644.52101, calendar='proleptic_gregorian', en=True))
    1971-04-28 12:30:15
    >>> print(dec2date(719644.52101, calendar='proleptic_gregorian', eng=True))
    1971-04-28 12:30:15

    History
    -------
    Written  Arndt Piayda,   Jun 2010
    Modified Matthias Cuntz, Feb 2012 - Input can be scalar or array
                                      - Default: fulldate=True
                                      - Changed checks for easier extension
                                      - decimal, decimal360
             Matthias Cuntz, Jun 2012 - former units keyword is now called refdate
                                      - units has now original meaning as in netcdftime
                                      - units='day as %Y%m%d.%f'
             Matthias Cuntz, Feb 2013 - solved Excel leap year problem.
             Matthias Cuntz, Feb 2013 - ported to Python 3
             Arndt Piayda,   May 2013 - solved eng output problem.
             Matthias Cuntz, Oct 2013 - Excel starts at 1 not at 0
             Matthias Cuntz, Oct 2013 - units bugs, e.g. 01.01.0001 was subtracted for Julian calendar even with units
             Matthias Cuntz, May 2016 - units=='month as %Y%m.%f', units=='year as %Y.%f'
             Matthias Cuntz, Oct 2016 - netcdftime provided even with netCDF4 > 1.0.0; make leap always integer
             Matthias Cuntz, May 2020 - numpy docstring format
             Matthias Cuntz, Jul 2020 - en for eng
             Matthias Cuntz, Jul 2020 - use proleptic_gregorian for Excel dates
    """
    #
    # Constants
    calendars = [
        'standard', 'gregorian', 'julian', 'proleptic_gregorian', 'excel1900',
        'excel1904', '365_day', 'noleap', '366_day', 'all_leap', '360_day',
        'decimal', 'decimal360'
    ]
    #
    # Checks
    import netCDF4 as nt
    try:
        # netCDF4 bundles the date handling we need
        tst = nt.date2num
        tst = nt.datetime
    except AttributeError:
        try:
            import netcdftime as nt
            if ((nt.__version__ <= '0.9.2') & (calendar == '360_day')):
                raise ValueError(
                    "dec2date error: Your version of netcdftime.py is equal"
                    " or below 0.9.2. The 360_day calendar does not work with"
                    " arrays here. Please download a newer one.")
        except ImportError:
            import cftime as nt
    #
    calendar = calendar.lower()
    if (calendar not in calendars):
        raise ValueError("dec2date error: Wrong calendar! Choose: " +
                         ''.join([i + ' ' for i in calendars]))
    if refdate and units:
        raise ValueError(
            "dec2date error: either refdate or units can be given.")
    # obsolete eng
    if en and eng:
        raise ValueError(
            "dec2date error: 'eng' was succeeded by 'en'. Only one can be given."
        )
    if eng and (not en): en = eng
    #
    # Default
    if np.sum(np.array([yr, mo, dy, hr, mi, sc])) >= 1:
        ii = True
    else:
        ii = False
    # fulldate=None means "not set by the user": default to False if any
    # specific output (ascii, en, yr, mo, ...) was requested, else True.
    if ((ascii | en | ii) and (not fulldate)):
        fulldate = False
    if ((not (ascii | en | ii)) and (not fulldate)):
        fulldate = True
    if fulldate:
        yr = True
        mo = True
        dy = True
        hr = True
        mi = True
        sc = True
    # Further checks
    if np.sum(np.array([ascii, fulldate, en])) > 1:
        raise ValueError(
            "dec2date error: Only one of ascii, fulldate, or en can be chosen."
        )
    if np.sum(np.array([ascii, en, ii])) > 1:
        raise ValueError(
            "dec2date error: If ascii, fulldate or en then no special selection yr,mo,dy,hr,mi,sc possible."
        )
    #
    # Input size and shape
    islist = type(indata) != type(np.array(indata))
    isarr = np.ndim(indata)
    if (islist & (isarr > 2)):
        raise ValueError("dec2date error: input is list > 2D; Use array input")
    if isarr == 0: indata = np.array([indata])
    else: indata = np.array(indata)
    insize = indata.size
    inshape = indata.shape
    indata = indata.flatten()
    #
    # depending on chosen calendar and optional set of the time refdate
    # calendar date is calculated
    if units == 'day as %Y%m%d.%f':
        fdy = indata % 1.  # day fraction
        indata = indata - fdy
        day = np.rint(indata % 100.).astype(int)
        indata = indata - day
        tmp = indata % 10000.
        month = np.rint(tmp / 100.).astype(int)
        indata = indata - tmp
        year = np.rint(indata / 10000.).astype(int)
        secs = np.rint(fdy * 86400.)
        hour = np.floor(secs / 3600.).astype(int)
        secs = secs - 3600. * hour
        minute = np.floor(secs / 60.).astype(int)
        second = np.rint(secs - 60. * minute).astype(int)
    elif units == 'month as %Y%m.%f':
        fmo = indata % 1.  # month fraction
        indata = indata - fmo
        month = np.rint(indata % 100.).astype(int)
        indata = indata - month
        year = np.rint(indata / 100.).astype(int)
        leap = np.where(
            (((year % 4) == 0) & ((year % 100) != 0)) | ((year % 400) == 0), 1,
            0)
        dim = np.array([[-9, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
                        [-9, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]])
        indata = dim[(leap, month)] * fmo
        fdy = indata % 1.  # day fraction
        indata = indata - fdy
        day = np.rint(indata % 100.).astype(int)
        secs = np.rint(fdy * 86400.)
        hour = np.floor(secs / 3600.).astype(int)
        secs = secs - 3600. * hour
        minute = np.floor(secs / 60.).astype(int)
        second = np.rint(secs - 60. * minute).astype(int)
    elif units == 'year as %Y.%f':
        fyr = indata % 1.  # year fraction
        year = np.rint(indata - fyr).astype(int)
        leap = np.where(
            (((year % 4) == 0) & ((year % 100) != 0)) | ((year % 400) == 0), 1,
            0)
        dsiy = np.array([365, 366])  # days in (common, leap) year
        indata = dsiy[leap] * fyr
        fdy = indata % 1.  # day fraction
        doy = np.rint(indata - fdy).astype(int)
        diy = np.array(
            [[-9, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
             [-9, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]])
        month = np.zeros(insize, dtype=int)
        day = np.zeros(insize, dtype=int)
        for i in range(insize):
            month[i] = np.where(doy[i] > np.squeeze(diy[leap[i], :]))[0][-1]
            day[i] = doy[i] - diy[leap[i], month[i]]
        secs = np.rint(fdy * 86400.)
        hour = np.floor(secs / 3600.).astype(int)
        secs = secs - 3600. * hour
        minute = np.floor(secs / 60.).astype(int)
        second = np.rint(secs - 60. * minute).astype(int)
    else:
        if (calendar == 'standard') or (calendar == 'gregorian'):
            dec0 = 0
            if units:
                unit = units
            elif refdate:
                #unit = 'days since %s' % (refdate)
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 0001-01-01 12:00:00'
                dec0 = 1721424
            timeobj = nt.num2date(indata - dec0, unit, calendar='gregorian')
        elif calendar == 'julian':
            dec0 = 0
            if units:
                unit = units
            elif refdate:
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 0001-01-01 12:00:00'
                dec0 = 1721424
            timeobj = nt.num2date(indata - dec0, unit, calendar='julian')
        elif calendar == 'proleptic_gregorian':
            if units:
                unit = units
            elif refdate:
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 0001-01-01 00:00:00'
            timeobj = nt.num2date(indata, unit, calendar='proleptic_gregorian')
        elif calendar == 'excel1900':
            doerr = False
            if units:
                unit = units
            elif refdate:
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 1899-12-31 00:00:00'
                if excelerr: doerr = True
            if doerr:
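                # Excel wrongly treats 1900 as a leap year: serial number 60
                # is the fictitious 29.02.1900, so dates from 01.03.1900
                # (serial >= 61) are shifted back one day before conversion.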
                indata1 = np.where(indata >= 61., indata - 1, indata)
                timeobj = nt.num2date(indata1,
                                      unit,
                                      calendar='proleptic_gregorian')
            else:
                timeobj = nt.num2date(indata,
                                      unit,
                                      calendar='proleptic_gregorian')
        elif calendar == 'excel1904':
            if units:
                unit = units
            elif refdate:
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 1903-12-31 00:00:00'
            timeobj = nt.num2date(indata, unit, calendar='proleptic_gregorian')
        elif (calendar == '365_day') or (calendar == 'noleap'):
            if units:
                unit = units
            elif refdate:
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 0001-01-01 00:00:00'
            timeobj = nt.num2date(indata, unit, calendar='365_day')
        elif (calendar == '366_day') or (calendar == 'all_leap'):
            if units:
                unit = units
            elif refdate:
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 0001-01-01 00:00:00'
            timeobj = nt.num2date(indata, unit, calendar='366_day')
        elif calendar == '360_day':
            if units:
                unit = units
            elif refdate:
                unit = 'days since {0:s}'.format(refdate)
            else:
                unit = 'days since 0001-01-01 00:00:00'
            timeobj = nt.num2date(indata, unit, calendar='360_day')
        elif calendar == 'decimal':
            fyear = np.trunc(indata)
            year = np.array(fyear, dtype=int)
            leap = ((((year % 4) == 0) & ((year % 100) != 0)) |
                    ((year % 400) == 0)).astype(int)
            fleap = leap.astype(float)
            fract_date = indata - fyear
            days_year = 365.
            # date in hours
            fhoy = fract_date * (days_year + fleap) * 24.
            fihoy = np.trunc(fhoy)
            ihoy = np.array(fihoy, dtype=int)
            # minutes
            fmoy = (fhoy - fihoy) * 60.
            fminute = np.trunc(fmoy)
            minute = np.array(fminute, dtype=int)
            # seconds
            second = np.array(np.trunc((fmoy - fminute) * 60.), dtype=int)
            # months from day-of-year via the cumulative-days table diy
            fdoy = (fihoy / 24.) + 1.
            fidoy = np.trunc(fdoy)
            idoy = np.array(fidoy, dtype=int)
            month = np.zeros(insize, dtype=int)
            diy = np.array([[
                -9, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365
            ], [
                -9, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366
            ]])
            for i in range(insize):
                ii = np.squeeze(
                    np.where(idoy[i] > np.squeeze(diy[leap[i], :])))
                month[i] = ii[-1]
            # days
            fday = np.zeros(insize)
            for i in range(insize):
                fday[i] = np.trunc(fdoy[i] - float(diy[leap[i], month[i]]))
            day = np.array(fday, dtype=int)
            # hours
            hour = ihoy % 24
        elif calendar == 'decimal360':
            fyear = np.trunc(indata)
            year = np.array(fyear, dtype=int)
            fract_date = indata - fyear
            days_year = 360.
            # date in hours
            fhoy = fract_date * days_year * 24.
            fihoy = np.trunc(fhoy)
            ihoy = np.array(fihoy, dtype=int)
            # minutes
            fmoy = (fhoy - fihoy) * 60.
            fminute = np.trunc(fmoy)
            minute = np.array(fminute, dtype=int)
            # seconds
            second = np.array(np.trunc((fmoy - fminute) * 60.), dtype=int)
            # months from day-of-year (twelve 30-day months)
            fdoy = (fihoy / 24.) + 1.
            fidoy = np.trunc(fdoy)
            idoy = np.array(fidoy, dtype=int)
            month = np.zeros(insize, dtype=int)
            diy = np.array([
                -9, 0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360
            ])
            for i in range(insize):
                ii = np.squeeze(np.where(idoy[i] > diy))
                month[i] = ii[-1]
            # days
            fday = np.zeros(insize)
            for i in range(insize):
                fday[i] = np.trunc(fdoy[i] - float(diy[month[i]]))
            day = np.array(fday, dtype=int)
            # hours
            hour = ihoy % 24
        else:
            raise ValueError(
                "dec2date error: calendar not implemented; should have been caught before."
            )

        if (calendar not in ['decimal', 'decimal360']):
            timeobjfl = timeobj.flatten()
            year = np.array([timeobjfl[i].year for i in range(insize)],
                            dtype=int)
            month = np.array([timeobjfl[i].month for i in range(insize)],
                             dtype=int)
            day = np.array([timeobjfl[i].day for i in range(insize)],
                           dtype=int)
            hour = np.array([timeobjfl[i].hour for i in range(insize)],
                            dtype=int)
            minute = np.array([timeobjfl[i].minute for i in range(insize)],
                              dtype=int)
            second = np.array([timeobjfl[i].second for i in range(insize)],
                              dtype=int)
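            # Serial numbers in [60, 61) convert to 01.03.1900 above; force
            # them back to Excel's fictitious 29.02.1900 for round-tripping.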
            if (calendar == 'excel1900') & excelerr:
                ii = np.where((indata >= 60.) & (indata < 61.))[0]
                if np.size(ii) > 0:
                    month[ii] = 2
                    day[ii] = 29
    #
    # Ascii output
    if ascii:
        output = ([
            '%02d.%02d.%04d %02d:%02d:%02d' %
            (day[i], month[i], year[i], hour[i], minute[i], second[i])
            for i in range(insize)
        ])
        output = np.reshape(output, inshape)
        if isarr == 0:
            output = output[0]
    # Ascii english output
    elif en:
        output = ([
            '%04d-%02d-%02d %02d:%02d:%02d' %
            (year[i], month[i], day[i], hour[i], minute[i], second[i])
            for i in range(insize)
        ])
        output = np.reshape(output, inshape)
        if isarr == 0:
            output = output[0]
    else:
        # Individual output
        # if one, some or all of yr, mo, dy, hr, mi or sc is
        # choosen by the user as output, arrays for datetime
        year = np.reshape(year, inshape)
        month = np.reshape(month, inshape)
        day = np.reshape(day, inshape)
        hour = np.reshape(hour, inshape)
        minute = np.reshape(minute, inshape)
        second = np.reshape(second, inshape)
        if isarr == 0:
            year = int(year)
            month = int(month)
            day = int(day)
            hour = int(hour)
            minute = int(minute)
            second = int(second)
        # filling of output list:
        output = []
        if yr: output += [year]
        if mo: output += [month]
        if dy: output += [day]
        if hr: output += [hour]
        if mi: output += [minute]
        if sc: output += [second]
        # return output arrays:
        if len(output) == 1:
            output = output[0]

    if isarr != 0:
        if islist:
            ns = np.size(inshape)
            if ns == 1:
                output = [i for i in output]
            elif ns == 2:
                loutput = [i for i in output[:, 0]]
                for i in range(np.size(output[:, 0])):
                    loutput[i] = list(np.squeeze(output[i, :]))
                output = loutput
            else:
                raise ValueError(
                    "dec2date error: list output > 2D; should have been caught before."
                )

    return output
Beispiel #37
0
def scalar_trunc(x: Number) -> Number:
    """Implement `scalar_trunc`."""
    # `Number` and `_assert_scalar` come from the host library (not shown here).
    _assert_scalar(x)
    return np.trunc(x)
Beispiel #38
0
    def __init__(self, dim, N=4.5):
        # Log-spaced CR values from 1/dim up to dim/dim == 1; the count must
        # be an int for logspace's `num` argument, hence int(trunc(...)).
        self._set_CRs(logspace(0, log10(dim), int(trunc(N * log10(dim) + 1))) / dim)
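
A minimal sketch (not from the original source) of the grid this constructor
builds, assuming numpy and dim = 10: trunc(4.5 * log10(10) + 1) = 5 crossover
rates, log-spaced from 1/dim up to 1.

import numpy as np

dim = 10
n = int(np.trunc(4.5 * np.log10(dim) + 1))    # 5 grid points
crs = np.logspace(0, np.log10(dim), n) / dim  # 0.1, 0.178, 0.316, 0.562, 1.0
print(crs)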
Beispiel #39
0
def slide_window_search(binary_warped, left_current, right_current):
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))

    nwindows = 4
    window_height = int(binary_warped.shape[0] / nwindows)
    nonzero = binary_warped.nonzero()
    nonzero_y = np.array(nonzero[0])
    nonzero_x = np.array(nonzero[1])
    margin = 100
    minpix = 50
    left_lane = []
    right_lane = []
    color = [0, 255, 0]
    thickness = 2

    for w in range(nwindows):
        win_y_low = binary_warped.shape[0] - (w + 1) * window_height
        win_y_high = binary_warped.shape[0] - w * window_height
        win_xleft_low = left_current - margin
        win_xleft_high = left_current + margin
        win_xright_low = right_current - margin
        win_xright_high = right_current + margin

        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), color, thickness)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), color, thickness)
        good_left = ((nonzero_y >= win_y_low) & (nonzero_y < win_y_high) &
                     (nonzero_x >= win_xleft_low) &
                     (nonzero_x < win_xleft_high)).nonzero()[0]
        good_right = ((nonzero_y >= win_y_low) & (nonzero_y < win_y_high) &
                      (nonzero_x >= win_xright_low) &
                      (nonzero_x < win_xright_high)).nonzero()[0]
        left_lane.append(good_left)
        right_lane.append(good_right)

        if len(good_left) > minpix:
            left_current = int(np.mean(nonzero_x[good_left]))
        if len(good_right) > minpix:
            right_current = int(np.mean(nonzero_x[good_right]))

    left_lane = np.concatenate(left_lane)
    right_lane = np.concatenate(right_lane)

    leftx = nonzero_x[left_lane]
    lefty = nonzero_y[left_lane]
    rightx = nonzero_x[right_lane]
    righty = nonzero_y[right_lane]

    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]

    ltx = np.trunc(left_fitx)
    rtx = np.trunc(right_fitx)

    out_img[nonzero_y[left_lane], nonzero_x[left_lane]] = [255, 0, 0]
    out_img[nonzero_y[right_lane], nonzero_x[right_lane]] = [0, 0, 255]

    # Average the two lane lines over their common length; the original else
    # branch averaged rightx/righty with themselves (copy-paste slip).
    if len(leftx) < len(rightx):
        mid_size = len(leftx)
        midx = (rightx[:mid_size] + leftx) // 2
        midy = (righty[:mid_size] + lefty) // 2
    else:
        mid_size = len(rightx)
        midx = (leftx[:mid_size] + rightx) // 2
        midy = (lefty[:mid_size] + righty) // 2

    new_midx = int(np.mean(midx) / 2)
    new_midy = int(np.mean(midy) / 2)
    new_mid = (new_midx, new_midy)

    #   print(new_mid, frame_size)

    ret = {
        'left_fitx': ltx,
        'right_fitx': rtx,
        'ploty': ploty,
        'new_mid': new_mid
    }

    return ret
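
A hypothetical smoke test (not from the original source), assuming numpy and
cv2 are imported as above: build a synthetic warped mask with two vertical
lane lines, seed the search from a column histogram, and run the function.

import numpy as np

binary_warped = np.zeros((120, 400), dtype=np.uint8)
binary_warped[:, 98:102] = 1    # fake left lane line
binary_warped[:, 298:302] = 1   # fake right lane line

# Seed the sliding windows from the column histogram of the lower half
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
midpoint = histogram.shape[0] // 2
left_base = int(np.argmax(histogram[:midpoint]))
right_base = int(np.argmax(histogram[midpoint:]) + midpoint)

ret = slide_window_search(binary_warped, left_base, right_base)
print(ret['new_mid'])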
Beispiel #40
0
ax3.axhline(y=0, color='k')
ax3.legend(("Home", "Away"), loc=0)

ax4 = plt.subplot(2, 3, 4)
ax4.set_title("Average Wins v. Goals in MLS")
ax4.set_xlabel("# of Goals")
ax4.set_ylabel("# of Wins")
ax4.set_xticks(range(0, np.max(hGoals) + 1, 30))
ax4.set_yticks(range(0, np.max(hWins), 10))
ax4.plot(hGoals, hWins, 'o', color='blue')
ax4.plot(vGoals, vWins, 'o', color='red')
ax4.plot(hGoals, modelH.fittedvalues, 'b--', label='Home OLS')
ax4.plot(vGoals, modelV.fittedvalues, 'r--', label='Vis OLS')
ax4.set_ylim(ymin=0)
ax4.axhline(y=0, color='k')
ax4.text(360, 40, ("Home R^2 = {}%".format(np.trunc(100 * modelH.rsquared))))
ax4.text(360, 20, ("Vis R^2 = {}%".format(np.trunc(100 * modelV.rsquared))))
ax4.legend(("Home", "Away"), loc=0)

ax5 = plt.subplot(2, 3, 5)
ax5.set_title("Home v. Away Wins in MLS")
ax5.set_xlabel("Year")
ax5.set_ylabel("# of Wins")
ax5.set_xticks(range(np.min(years), np.max(years) + 1, 2))
ax5.set_yticks(range(0, 300, 50))
ax5.plot(years, hWins, color='blue', linestyle='solid')
ax5.plot(years, vWins, color='red', linestyle='solid')
ax5.set_ylim(ymin=0)
ax5.axhline(y=0, color='k')
ax5.legend(("Home", "Away"), loc=2)
# But home and away goals aren't the whole story. What about goals per number of teams? More teams play each year, so check whether goals per team increases.
Beispiel #41
0
        fh.write('# Pixel values in e-/sec\n')
        fh.write(' '.join(colnames) + '\n')

for time in np.arange(now.secs, now.secs + 3600, 4.1):
    print(time - now.secs)
    t_ccd = get_t_ccd(time)
    scale = dark_cal.dark_temp_scale(t_ccd0, t_ccd_ref=t_ccd)
    pix_readout_electrons = pixels * scale * t_readout

    # Add gaussian count noise
    count_noise = np.sqrt(pix_readout_electrons)
    pix_readout_electrons += np.random.normal(loc=0,
                                              scale=count_noise,
                                              size=128)

    pix_readout_dn = np.trunc(pix_readout_electrons / 5)

    first_image = pix_readout_dn.tolist()[0:64]
    second_image = pix_readout_dn.tolist()[64:]

    for image, pix_filename, slot in zip(
        [first_image, second_image], [opt.pix_filename1, opt.pix_filename2],
        [6, 7]):
        vals = [time, t_ccd, slot, t_readout] + image
        vals = ['{:.2f}'.format(val) for val in vals]
        with open(pix_filename, 'a') as fh:
            fh.write(' '.join(vals) + '\n')

    if opt.delay:
        sleep(opt.delay)
Beispiel #42
0
print("yesterday: ", yesterday)
print("today: ", today)
print("tomorrow: ", tomorrow)

# #### 58. Extract the integer part of a random array using five different methods

# In[67]:

Z = np.random.uniform(0, 10, 10)
print("Original values: ", Z)

print("Method 1: ", Z - Z % 1)
print("Method 2: ", np.floor(Z))
print("Method 3: ", np.ceil(Z) - 1)
print("Method 4: ", Z.astype(int))
print("Method 5: ", np.trunc(Z))

# In[85]:

Z = np.random.uniform(0, 10, 10)
print("origin:", Z)
print("1: ", Z - Z % 1)
print("2: ", np.floor(Z))
print("3: ", np.ceil(Z) - 1)
print("4: ", Z.astype(int))
print("5: ", np.trunc(Z))

# #### 59. Create a 5x5 matrix where each row holds the values 1 to 5

# In[68]:
Beispiel #43
0
#                   cbox = (int((ymin+ymax)/2),int((xmin+xmax)/2)) # center: (Y,X)
#                   scentros.append(cbox)
#                   dbox = (ymax-ymin,xmax-xmin)
#                   sdims.append(dbox) # dimension: (height, width)
#                   #print('Chair ',index,' [Xmin,Ymin,Xmax,Ymax]=',rbox,' Center=',cbox,' Dimensions:',dbox)

s = [[163, 279, 441, 484], [143, 292, 289, 482], [87, 201, 295, 338],
     [108, 323, 276, 500], [107, 198, 252, 305]]
c = [(302, 381), (216, 387), (191, 269), (192, 411), (179, 251)]
d = [(278, 205), (146, 190), (208, 137), (168, 177), (145, 107)]
dmin = [[145, 107]]
#print(s,c,d)
sillas = np.array(s)
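# Box centers (Y, X): truncated midpoints of each [ymin, xmin, ymax, xmax] box.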
scentros = np.trunc(
    np.concatenate(
        (np.expand_dims(np.divide(sillas[:, 0] + sillas[:, 2], 2), axis=1),
         np.expand_dims(np.divide(sillas[:, 1] + sillas[:, 3], 2), axis=1)),
        axis=1))
sdims = np.concatenate((np.expand_dims(sillas[:, 2] - sillas[:, 0], axis=1),
                        np.expand_dims(sillas[:, 3] - sillas[:, 1], axis=1)),
                       axis=1).astype('int32')
#print(sillas,scentros,sdims)
x = [[1., 0.39381477, 0.10023424, 0.26546335, 0.03296719],
     [0.39381477, 1., 0.13562197, 0.58209695, 0.03386873],
     [0.10023424, 0.13562197, 1., 0.04523262, 0.52124019],
     [0.26546335, 0.58209695, 0.04523262, 1., 0.],
     [0.03296719, 0.03386873, 0.52124019, 0., 1.]]
id = [[2., 3.], [1., 3.], [1., 2.], [1., 3.], [1., 2.]]
if True:
    if len(sillas) > 0:
        print('Chairs:\n', sillas)
Beispiel #44
0
def avgstrainfit(V,
                 E,
                 V0,
                 nmax=16,
                 MODE=1,
                 strain='eulerian',
                 LOG=0,
                 nargout=1):
    """avgstrainfit - Fit to an average strain polynomials."""

    import sys
    if (len(V) != len(E)):
        sys.exit('avgstrainfit: V and E must be vectors of the same length!')
    elif (len(V) < 7):
        sys.exit('avgstrainfit: dataset must have at least 7 points!')
    elif MODE != 1 and MODE != 2:
        sys.exit('avgstrainfit: weighting mode must be 1 or 2!')

    ndata = len(V)
    Vrange = [np.amin(V), np.amax(V)]

    # Determine the maximum degree of the polynomials:
    if (nmax < 0):
        MaxDegree = int(np.amin([ndata - 5, np.trunc(ndata / 2.)]))
    else:
        MaxDegree = int(
            np.amin([nmax, np.amin([ndata - 5, np.trunc(ndata / 2.)])]))

    # Some statistics of the averaging process:
    Morder = 0
    npol = 0
    pol = []
    s2 = []
    for n in range(2, MaxDegree + 1):
        c = strainfit(V, E, V0, n, strain, 0, nargout=1)
        sm = strainmin(c, V0, Vrange, strain)
        Efit = strainevalE(c, V0, V, strain)
        SSerr = np.sum((E - Efit)**2)
        pol.append(polynomial())
        pol[npol].c = c
        pol[npol].SSerr = SSerr
        s2.append(SSerr)
        pol[npol].order = n
        pol[npol].data = ndata
        pol[npol].smin = sm
        Morder = np.max([n, Morder])
        npol += 1
        #print(c)
        #print('n,Morder,npol = ',n,Morder,npol)
        #print(sm)
        #print(Efit)
        #print(SSerr)

    #print('s2 = ',s2)
    #print(pol)

    # Get the polynomial weights:
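    # Boltzmann-like weighting: each polynomial is penalized by its misfit
    # relative to the best fit (SSerr/SSmin) and by its complexity
    # (order/data); MODE=2 squares the exponent to sharpen the selection.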
    SSmin = np.amin(s2)
    Q = 0
    if (MODE == 1):
        for k in range(npol):
            w = (pol[k].SSerr / SSmin) * (pol[k].order / pol[k].data)
            pol[k].w = np.exp(-w)
            Q += pol[k].w
    elif (MODE == 2):
        for k in range(npol):
            w = (pol[k].SSerr / SSmin) * (pol[k].order / pol[k].data)
            pol[k].w = np.exp(-w * w)
            Q += pol[k].w
    ##ww = zeros(1,1:npol);
    ww = np.zeros(npol)
    for k in range(npol):
        ww[k] = pol[k].w / Q
        pol[k].w = pol[k].w / Q

    #print(pol)

    # Form the average polynomial:
    cavg = np.zeros(Morder + 1)
    for k in range(npol):
        n = pol[k].order
        cavg[Morder - n:] += pol[k].c * pol[k].w

    #"""
    # Extra output and optional LOG record:
    # If the average of polynomials has a minimum, analyze the equilibrium
    # geometry.
    # Otherwise, analyze the reference volume.
    if (LOG > 0 or nargout > 1):
        avgmin = strainmin(cavg, V0, Vrange, strain)
        if (avgmin.err == 0):
            # Analyze and report the equilibrium geometry:
            if (LOG > 0):
                print(
                    '\n\navgpolyfit: AVERAGE OF {0} STRAIN POLYNOMIALS'.format(
                        strain))
                print('Volume reference (V0): {0:.6f}'.format(V0))
                print('Range of degrees: 2--{0}'.format(MaxDegree))
                print('Number of polynomials: {0}'.format(npol))
                print('\nProperties at the minimum of each polynomial:')
                print(
                    '--i-- npol data order --SSerr-- ---w---- ----Vmin--- -----Emin---  ----Bmin--- ---B1min--- ---B2min---   ---B3min---'
                )
            isrt = np.argsort(ww)[::-1]
            srt = ww[isrt]
            pmean = np.zeros(6)
            pstd = np.zeros(6)
            for k in range(npol):
                k1 = isrt[k]
                ###[smin] = strainmin(pol{k1}.c, V0, Vrange, strain);
                smin = pol[k1].smin
                prop = straineval(pol[k1].c, V0, smin.Vmin, strain)
                if (LOG > 0 and k <= 25):
                    # In the conversion of the bulk modulus and derivatives we
                    # assume the units: volume (bohr^3), energy (Hy).
                    output_string = "{0:4} {1:4} {2:4} {3:4}".format(
                        k, k1 + 1, pol[k1].data, pol[k1].order)
                    output_string += "   {0:9.2e} {1:8.6f}".format(
                        pol[k1].SSerr, pol[k1].w)
                    output_string += " {0:10.6f}".format(smin.Vmin)
                    output_string += " {0:13.6f}".format(prop.E)
                    output_string += "  {0:10.6f}".format(prop.B *
                                                          hybohr3togpa)
                    output_string += "  {0:9.6f}".format(prop.B1p)
                    output_string += "  {0:13.9f}".format(prop.B2p /
                                                          hybohr3togpa)
                    output_string += " {0:13.9f}".format(prop.B3p /
                                                         hybohr3togpa**2)
                    print(output_string)
                pr = np.array(
                    [smin.Vmin, prop.E, prop.B, prop.B1p, prop.B2p, prop.B3p])
                pmean = pmean + pr * pol[k1].w
                pstd = pstd + (pr**2) * pol[k1].w
            pstd = np.lib.scimath.sqrt(pstd - pmean**2)
            if (LOG > 0):
                print('\nAverage properties (weighted polynomials):')
                print(
                    '------ ---volume-- ---energy--   --B-(GPa)-- ----B1p---- B2p-(1/GPa) B3p--(1/GPa^2)'
                )
                print(
                    '-mean- {0:11.6f} {1:11.6f} {2:11.6f} {3:11.6f} {4:11.6f} {5:14.9f}'
                    .format(pmean[0], pmean[1], pmean[2] * hybohr3togpa,
                            pmean[3], pmean[4] / hybohr3togpa,
                            pmean[5] / hybohr3togpa**2))
                print(
                    'stddev {0:11.6f} {1:11.6f} {2:11.6f} {3:11.6f} {4:11.6f} {5:14.9f}\n'
                    .format(float(np.real(pstd[0])), float(np.real(pstd[1])),
                            float(np.real(pstd[2])) * hybohr3togpa,
                            float(np.real(pstd[3])),
                            float(np.real(pstd[4])) / hybohr3togpa,
                            float(np.real(pstd[5])) / hybohr3togpa**2))
        else:
            # Analyze and report the reference geometry:
            if (LOG > 0):
                print(
                    '\n\navgpolyfit: AVERAGE OF {0} STRAIN POLYNOMIALS'.format(
                        strain))
                print('Volume reference (V0): {0:.6f}'.format(V0))
                print('Range of degrees: 2--{0}'.format(MaxDegree))
                print('Number of polynomials: {0}'.format(npol))
                print('\nProperties at the reference volume: {0}'.format(V0))
                print(
                    '--i-- npol data order --SSerr-- ---w---- -----Eref---  ----Bref--- ---B1ref--- ---B2ref---   ---B3ref---'
                )
            isrt = np.argsort(ww)[::-1]
            srt = ww[isrt]
            pmean = np.zeros(6)
            pstd = np.zeros(6)
            for k in range(npol):
                k1 = isrt[k]
                prop = straineval(pol[k1].c, V0, V0, strain)
                if (LOG > 0 and k <= 25):
                    # In the conversion of the bulk modulus and derivatives we
                    # assume the units: volume (bohr^3), energy (Hy).
                    output_string = "{0:4} {1:4} {2:4} {3:4}".format(
                        k, k1 + 1, pol[k1].data, pol[k1].order)
                    output_string += "   {0:9.2e} {1:8.6f}".format(
                        pol[k1].SSerr, pol[k1].w)
                    output_string += " {0:13.6f}".format(prop.E)
                    output_string += "  {0:10.6f}".format(prop.B *
                                                          hybohr3togpa)
                    output_string += "  {0:9.6f}".format(prop.B1p)
                    output_string += "  {0:13.9f}".format(prop.B2p /
                                                          hybohr3togpa)
                    output_string += " {0:13.9f}".format(prop.B3p /
                                                         hybohr3togpa**2)
                    print(output_string)
                pr = np.array(
                    [V0, prop.E, prop.B, prop.B1p, prop.B2p, prop.B3p])
                pmean = pmean + pr * pol[k1].w
                pstd = pstd + (pr**2) * pol[k1].w
            pstd = np.lib.scimath.sqrt(pstd - pmean**2)
            if (LOG > 0):
                print(
                    '\nAverage properties at the ref. volume: {0}'.format(V0))
                print(
                    '------ ---energy--   --B-(GPa)-- ----B1p---- B2p-(1/GPa) B3p--(1/GPa^2)'
                )
                print(
                    '-mean- {0:11.6f} {1:11.6f} {2:11.6f} {3:11.6f} {4:14.9f}'.
                    format(pmean[1], pmean[2] * hybohr3togpa, pmean[3],
                           pmean[4] / hybohr3togpa,
                           pmean[5] / hybohr3togpa**2))
                print(
                    'stddev {0:11.6f} {1:11.6f} {2:11.6f} {3:11.6f} {4:14.9f}\n'
                    .format(float(np.real(pstd[1])),
                            float(np.real(pstd[2])) * hybohr3togpa,
                            float(np.real(pstd[3])),
                            float(np.real(pstd[4])) / hybohr3togpa,
                            float(np.real(pstd[5])) / hybohr3togpa**2))

        Efit = strainevalE(cavg, V0, V, strain)
        SSerr = np.sum((E - Efit)**2)
        SStot = np.sum((E - np.mean(E))**2)
        R2 = 1. - SSerr / SStot
        if (nargout > 1):
            savg = savg_class()
            savg.eqmean = pmean
            savg.eqstd = pstd
            savg.R2 = R2
            savg.Efit = Efit
    #"""
    if (nargout > 1):
        # Putting error bars to the pressure, bulk modulus, etc at all volumes
        savg.Emean, savg.Estd = np.zeros(len(V)), np.zeros(len(V))
        savg.pmean, savg.pstd = np.zeros(len(V)), np.zeros(len(V))
        savg.Bmean, savg.Bstd = np.zeros(len(V)), np.zeros(len(V))
        savg.B1pmean, savg.B1pstd = np.zeros(len(V)), np.zeros(len(V))
        savg.B2pmean, savg.B2pstd = np.zeros(len(V)), np.zeros(len(V))
        savg.B3pmean, savg.B3pstd = np.zeros(len(V)), np.zeros(len(V))
        for k in range(npol):
            k1 = isrt[k]
            prop = straineval(pol[k1].c, V0, V, strain)
            savg.Emean = savg.Emean + prop.E * pol[k1].w
            savg.Estd = savg.Estd + (prop.E**2) * pol[k1].w
            savg.pmean = savg.pmean + prop.p * pol[k1].w
            savg.pstd = savg.pstd + (prop.p**2) * pol[k1].w
            savg.Bmean = savg.Bmean + prop.B * pol[k1].w
            savg.Bstd = savg.Bstd + (prop.B**2) * pol[k1].w
            savg.B1pmean = savg.B1pmean + prop.B1p * pol[k1].w
            savg.B1pstd = savg.B1pstd + (prop.B1p**2) * pol[k1].w
            savg.B2pmean = savg.B2pmean + prop.B2p * pol[k1].w
            savg.B2pstd = savg.B2pstd + (prop.B2p**2) * pol[k1].w
            savg.B3pmean = savg.B3pmean + prop.B3p * pol[k1].w
            savg.B3pstd = savg.B3pstd + (prop.B3p**2) * pol[k1].w
        savg.Estd = np.sqrt(savg.Estd - savg.Emean**2)
        savg.pstd = np.sqrt(savg.pstd - savg.pmean**2)
        savg.Bstd = np.sqrt(savg.Bstd - savg.Bmean**2)
        savg.B1pstd = np.sqrt(savg.B1pstd - savg.B1pmean**2)
        savg.B2pstd = np.sqrt(savg.B2pstd - savg.B2pmean**2)
        savg.B3pstd = np.sqrt(savg.B3pstd - savg.B3pmean**2)

    if nargout == 1:
        return cavg
    elif nargout == 2:
        return cavg, savg
Beispiel #45
0
def point_cloud_to_panorama(points,
                            v_res=0.42,
                            h_res=0.35,
                            v_fov=(-24.9, 2.0),
                            d_range=(0, 100),
                            y_fudge=3):
    """ Takes point cloud data as input and creates a 360 degree panoramic
        image, returned as a numpy array.

    Args:
        points: (np array)
            The numpy array containing the point cloud.
            The shape should be at least Nx4 (the code also reads a
            reflectance column at index 3)
            - Where N is the number of points, and
            - each point is specified by at least 4 values (x, y, z, reflectance)
        v_res: (float)
            vertical angular resolution in degrees. This will influence the
            height of the output image.
        h_res: (float)
            horizontal angular resolution in degrees. This will influence
            the width of the output image.
        v_fov: (tuple of two floats)
            Field of view in degrees (-min_negative_angle, max_positive_angle)
        d_range: (tuple of two floats) (default = (0,100))
            Used for clipping distance values to be within a min and max range.
        y_fudge: (float)
            A hacky fudge factor to use if the theoretical calculations of
            vertical image height do not match the actual data.
    Returns:
        A numpy array representing a 360 degree panoramic image of the point
        cloud.
    """
    # Projecting to 2D
    x_points = points[:, 0]
    y_points = points[:, 1]
    z_points = points[:, 2]
    r_points = points[:, 3]
    d_points = np.sqrt(x_points**2 +
                       y_points**2)  # map distance relative to origin
    #d_points = np.sqrt(x_points**2 + y_points**2 + z_points**2) # abs distance

    # We use map distance rather than absolute distance so the points project
    # onto a cylinder instead of a slice of a sphere.

    # RESOLUTION AND FIELD OF VIEW SETTINGS
    v_fov_total = -v_fov[0] + v_fov[1]

    # CONVERT TO RADIANS
    v_res_rad = v_res * (np.pi / 180)
    h_res_rad = h_res * (np.pi / 180)

    # MAPPING TO CYLINDER
    x_img = np.arctan2(y_points, x_points) / h_res_rad
    y_img = -(np.arctan2(z_points, d_points) / v_res_rad)

    # THEORETICAL MAX HEIGHT FOR IMAGE
    d_plane = (v_fov_total / v_res) / (v_fov_total * (np.pi / 180))
    h_below = d_plane * np.tan(-v_fov[0] * (np.pi / 180))
    h_above = d_plane * np.tan(v_fov[1] * (np.pi / 180))
    y_max = int(np.ceil(h_below + h_above + y_fudge))

    # SHIFT COORDINATES TO MAKE 0,0 THE MINIMUM
    x_min = -360.0 / h_res / 2
    x_img = np.trunc(-x_img - x_min).astype(np.int32)
    x_max = int(np.ceil(360.0 / h_res))

    y_min = -((v_fov[1] / v_res) + y_fudge)
    y_img = np.trunc(y_img - y_min).astype(np.int32)

    # CLIP DISTANCES
    d_points = np.clip(d_points, a_min=d_range[0], a_max=d_range[1])

    # CONVERT TO IMAGE ARRAY
    img = np.zeros([y_max + 1, x_max + 1], dtype=np.uint8)
    img[y_img, x_img] = scale_to_255(d_points, min=d_range[0], max=d_range[1])

    return img
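
A hypothetical usage sketch (not from the original source): synthetic points
generated inside the vertical field of view, plus a minimal stand-in for the
scale_to_255 helper, which this example does not define.

import numpy as np

def scale_to_255(a, min, max, dtype=np.uint8):
    # linear rescale of [min, max] to [0, 255]
    return (((a - min) / float(max - min)) * 255).astype(dtype)

n = 10000
x = np.random.uniform(-50, 50, n)
y = np.random.uniform(-50, 50, n)
d = np.sqrt(x**2 + y**2)
ang = np.radians(np.random.uniform(-24.9, 2.0, n))  # stay inside v_fov
z = d * np.tan(ang)
r = np.random.uniform(0, 1, n)                      # reflectance column
points = np.stack([x, y, z, r], axis=1)

pano = point_cloud_to_panorama(points)
print(pano.shape)  # about (v_fov_total/v_res + fudge, 360/h_res) pixels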
Beispiel #46
0
def trunc(a):
    return np.trunc(a)
Beispiel #47
0
    def direction(self):
        """
        Perform and plot the two main directions of the peaks, considering their previously
        calculated scale ,by calculating the Hessian at different sizes as the combination of
        gaussians and their first and second derivatives

        """
        import pylab
        j = 0
        vals = []
        vects = []
        kpx = self.keypoints.x
        kpy = self.keypoints.y
        sigma = self.keypoints.sigma
        img = self.raw
        pylab.figure()
        pylab.imshow(img, interpolation='nearest')

        for y, x, s in zip(kpy, kpx, sigma):
            # patch size: odd integer >= 3, roughly 2*sigma
            s_patch = int(numpy.trunc(s * 2))

            if s_patch % 2 == 0:
                s_patch += 1

            if s_patch < 3: s_patch = 3

            if (x > s_patch / 2 and x < img.shape[1] - s_patch / 2 - 1
                    and y > s_patch / 2 and y < img.shape[0] - s_patch / 2):

                # integer pixel indices for slicing the patch
                iy, ix = int(y), int(x)
                patch = img[iy - (s_patch - 1) // 2:iy + (s_patch - 1) // 2 + 1,
                            ix - (s_patch - 1) // 2:ix + (s_patch - 1) // 2 + 1]
                x_patch = numpy.arange(s_patch)
                Gx = numpy.exp(-4 * numpy.log(2) *
                               (x_patch - numpy.median(x_patch))**2 / s)
                Gy = Gx[:, numpy.newaxis]
                dGx = -Gx * 4 * numpy.log(2) / s * 2 * (x_patch -
                                                        numpy.median(x_patch))
                dGy = dGx[:, numpy.newaxis]
                d2Gx = -8 * numpy.log(2) / s * (
                    (x_patch - numpy.median(x_patch)) * dGx + Gx)
                d2Gy = d2Gx[:, numpy.newaxis]

                Hxx = d2Gx * Gy
                Hyy = d2Gy * Gx
                Hxy = dGx * dGy

                d2x = (Hxx.ravel() * patch.ravel()).sum()
                d2y = (Hyy.ravel() * patch.ravel()).sum()
                dxy = (Hxy.ravel() * patch.ravel()).sum()
                H = numpy.array([[d2y, dxy], [dxy, d2x]])
                val, vect = numpy.linalg.eig(H)

                # eccentricity-like measure and keypoint counter (kept from
                # the original debugging output)
                e = numpy.abs(val[0] - val[1]) / numpy.abs(val[0] + val[1])
                j += 1
                if numpy.abs(val[1]) < numpy.abs(val[0]):
                    # reorder eigenvalues and eigenvectors (smallest first)
                    val[0], val[1] = val[1], val[0]
                    vect = vect[-1::-1, :]

                pylab.annotate(
                    "",
                    xy=(x + vect[0][0] * val[0], y + vect[0][1] * val[0]),
                    xytext=(x, y),
                    arrowprops=dict(facecolor='red', shrink=0.05),
                )

                pylab.annotate(
                    "",
                    xy=(x + vect[1][0] * val[1], y + vect[1][1] * val[1]),
                    xytext=(x, y),
                    arrowprops=dict(facecolor='red', shrink=0.05),
                )
                pylab.plot(x, y, 'og')
                vals.append(val)
                vects.append(vect)
        return vals, vects
Beispiel #48
0
def check_ltc_residuals(dataframe,
                        ccd,
                        rawy_range=(1, 200),
                        binned=None,
                        filter_select=None,
                        plot_it=True,
                        png_file=None,
                        title=''):
    """Validation for the LTC correction by means of checking the residuals
    
     Parameters
     ----------
        dataframe : dataframe, mandatory 
            the pandas dataframe with the Cu Kalpha fit results (from Michael Smith monitoring run). Produced by `ff_monitoring_work2.ipynb`
        ccd : int, mandatory 
            the EPIC-pn CCD number (from 1 to 12)
        rawy_range : list
            the RAWY range to select, can be (1,200) and then can be (x,x+19) for x in range(1,201,20)
        binned: float, optional
            bin the data grouping with binned years, if None, then no binning
        filter_select : str
            if not None, then a selection on filter wheel is requested, can be one of 'CalClosed', 'CalMedium', 'CalThick', 'CalThin1', 'Closed',
           'Medium', 'Thick', 'Thin1', 'Thin2'. If None, then all are selected.
        plot_it : bool
            if set, will plot the results.
        png_file : str
            is set, then the plotting results will be saved to this file.
        title : str
            Text to append to the end of the plot title, e.g. the version or other comment to apper on the plot title
     Output
     ------
        output: dict
            {'ccd': []}

    Method
    ------
            
    Modification history
    --------------------
    
        Created 17 Mar 2021, Ivan Valchanov, XMM SOC

    """
    #
    ntot = np.count_nonzero((dataframe.ccd == ccd)
                            & (dataframe.rawy0 == rawy_range[0])
                            & (dataframe.rawy1 == rawy_range[1]))
    #
    xtab = select_data(dataframe,
                       ccd,
                       rawy_range=rawy_range,
                       filter_select=filter_select)
    ntab = len(xtab)
    xmode = xtab.xmode
    if (filter_select is not None):
        print(
            f"CCD {ccd}, {xmode} mode, filter {filter_select}: filtered {ntab} results out of {ntot}"
        )
    else:
        print(
            f"CCD {ccd}, {xmode} mode: filtered {ntab} results out of {ntot}")
    #
    #line = dataframe.line
    #
    xin = xtab.delta_time
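    # line0 is the laboratory energy of the Cu K-alpha line in eV; it is not
    # defined in this excerpt and is assumed to exist at module level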
    residual = (xtab.energy - line0)  # in eV
    residual_err = (xtab.energy_err1 + xtab.energy_err2) / 2.0  # in eV
    qmean = np.mean(residual)
    qstd = np.std(residual)
    xstat = stats.sigma_clipped_stats(residual, sigma=3, maxiters=3)
    #
    # initialise the binned quantities so the output list below is defined
    # even when no binning is requested
    year_run = xin_bin = yin_bin = yin_bin_err = np.array([])
    if (binned is not None):
        # add those as columns in the dataframe
        qt = QTable.from_pandas(xtab)
        qt['residual'] = residual
        qt['residual_err'] = residual_err
        #
        year_bin = np.trunc(qt['delta_time'] / binned)
        year_run = np.unique(year_bin)
        dat_grouped = qt.group_by(year_bin)
        #
        dat_binned = dat_grouped.groups.aggregate(np.median)
        dat_binned_std = dat_grouped.groups.aggregate(mad)
        xin_bin = dat_binned['delta_time']
        yin_bin = dat_binned['residual']
        yin_bin_err = dat_binned_std['residual']
    #
    # prepare the output
    #
    output = [
        ccd, xmode, rawy_range[0], rawy_range[1], xstat[0], xstat[2], ntab,
        year_run.data, xin_bin.data, yin_bin.data, yin_bin_err.data
    ]
    #
    # plotting
    #
    if (plot_it):
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.errorbar(xin,
                    residual,
                    yerr=residual_err,
                    fmt='o',
                    label=f'CCDNR {ccd}',
                    zorder=0)
        if (binned is not None):
            ax.step(year_run,
                    yin_bin,
                    where='pre',
                    zorder=2,
                    color='cyan',
                    label='Per bin median')
            #ax.step(xin_bin,yin_bin,where='mid',zorder=2,color='cyan',label='Per bin median')
            ax.errorbar(xin_bin,
                        yin_bin,
                        yerr=yin_bin_err,
                        fmt='o',
                        color='cyan',
                        zorder=1)
        ax.axhline(0.0, linestyle='dashed', linewidth=3, color='red', zorder=1)
        ax.axhline(20.0,
                   linestyle='dotted',
                   linewidth=2,
                   color='red',
                   zorder=1)
        ax.axhline(-20.0,
                   linestyle='dotted',
                   linewidth=2,
                   color='red',
                   zorder=1)
        ax.text(0.1,
                0.9,
                fr'mean={qmean:.1f} eV, st.dev.={qstd:.1f} eV',
                fontsize=14,
                transform=ax.transAxes)
        ax.text(
            0.1,
            0.8,
            fr'mean={xstat[0]:.1f} eV, st.dev.={xstat[2]:.1f} eV (3-$\sigma$ clipped)',
            fontsize=14,
            transform=ax.transAxes)
        ax.set_xlim((0.0, 22.0))
        ax.set_ylim((-100.0, 100.0))
        ax.grid(True)
        ax.legend(loc=3)
        ax.set_title(
            f"Cu-Ka data for EPIC-PN CCD{ccd:02}, mode={xmode}, RAWY in [{rawy_range[0]},{rawy_range[1]}], {title}"
        )
        ax.set_ylabel(r"E$_{corr}$ - E$_{lab}$ (eV)")
        ax.set_xlabel("Time since 2000-01-01 (years)")
        if (png_file is not None):
            plt.savefig(png_file, dpi=100)
            plt.show()
            plt.close()
    return output
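A hypothetical invocation, assuming a dataframe that carries the columns used above (ccd, rawy0, rawy1, delta_time, energy, energy_err1, energy_err2, xmode); the file name is illustrative:

import pandas as pd

df = pd.read_csv('cuka_fit_results.csv')  # hypothetical input file
for ccd in range(1, 13):
    out = check_ltc_residuals(df, ccd, rawy_range=(1, 200), binned=1.0,
                              plot_it=True,
                              png_file=f'ltc_residuals_ccd{ccd:02}.png',
                              title='test run')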
Beispiel #49
0
 def set_fold(self):
     fc_mins = np.trunc(np.min(self.log2fc, axis=0))
     fc_maxs = np.trunc(np.max(self.log2fc, axis=0))
     fold = np.stack([fc_mins, fc_maxs])
     return fold
Beispiel #50
0
    lg = np.zeros((rN, rN, rN, 3))
    local_mass = 0.0e0
    local_g = 0.0e0
    local_v = 0.0e0
    local_sigma0k = np.zeros((rN, rN, rN, 3, 3))
    local_sigmav = np.zeros((rN, rN, rN, 3, 3))
    for t in range(fhstep // atmstep):  # integer division; '/' breaks range() in Python 3
        print('t:', t)
        t_ = fhstep // atmstep * it + t
        pos = infh['coordinates'][t_]
        vel = infh['velocities'][t_]
        box = infh['cell_lengths']
        L = box[0, 0]
        rN = int(L / cutoff)
        # wrap coordinates back into the primary box [0, L)
        pos -= np.trunc(pos / L) * L
        pos += np.round((0.50e0 * L - pos) / L) * L
        pos = cp.asarray(pos, dtype=np.float32)

        # grid parameters
        r_min = 0.0e0
        r_max = L
        dr = (r_max - r_min) / float(rN)
        dV = dr**3
        if t == 0:
            r_ = np.array([r_min + dr * ir for ir in range(rN)])
            rax = np.array([r_, r_, r_])
        # calculate rho, g, v, sigma_ab, tau_ab, S_ab, eta as local variables
        m = cp.asarray(masses, dtype=np.float32)
        lm = cp.zeros((rN, rN, rN), dtype=cp.float32)
        r0 = cp.asarray(pos[:, 0] / dr, dtype=cp.int32)
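A quick numeric check of the two-step wrap used above (np.trunc folds positions into (-L, L), and the rounded shift then moves everything into [0, L)):

import numpy as np

L = 10.0
pos = np.array([-12.3, -0.1, 3.4, 10.2, 25.7])
pos -= np.trunc(pos / L) * L                # fold into (-L, L)
pos += np.round((0.5 * L - pos) / L) * L    # shift into [0, L)
print(pos)  # [7.7 9.9 3.4 0.2 5.7]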
Beispiel #51
0
import numpy as np
import cv2
import pandas as pd
import dataframe

# load the data
data_all = np.loadtxt("Image_all.txt")
data_i = np.unique(data_all[:, 0])
print(data_i)
k = 0
data_i = np.array(data_i)
data_i = np.trunc(data_i)
data_i = data_i.astype('int')
print(len(data_i))
for i in range(len(data_i)):
    data = []
    # collect all rows belonging to the current image id
    for j in range(len(data_all)):
        if data_i[k] == data_all[j, 0]:
            data.append(data_all[j])
        else:
            continue
    k = k + 1
    data = np.array(data)
    data = np.trunc(data)
    data = data.astype('int')
    data3 = data.copy()
    # loop count used to check whether several elements share the same row
    # s = 0

    # sort by the 3rd column (X0), i.e. arrange by height
    data = data[data[:, 2].argsort()]
Beispiel #52
0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np

a = np.array([20, 20, -20, -20])
b = np.array([3, -3, 6, -6])
# true division (three equivalent spellings)
c = np.true_divide(a, b)
c = np.divide(a, b)
c = a / b
print('array:', c)
# floor of each element
d = np.floor(a / b)
print('floor_divide:', d)
# ceil of each element
e = np.ceil(a / b)
print('ceil ndarray:', e)
# truncate each element toward zero
f = np.trunc(a / b)
print('trunc ndarray:', f)
# round each element to the nearest integer
g = np.around(a / b)
print('around ndarray:', g)
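For these inputs the printouts are (a minimal check, values computed by hand):

# array:          [ 6.667 -6.667 -3.333  3.333]  (approximately)
# floor_divide:   [ 6. -7. -4.  3.]
# ceil ndarray:   [ 7. -6. -3.  4.]
# trunc ndarray:  [ 6. -6. -3.  3.]
# around ndarray: [ 7. -7. -3.  3.]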
Beispiel #53
0
         signif_clusters = [clusters[s] for s in signif]
         signif_cluster_pvals = cluster_pvals[signif]
         # plot stats
         for clu, pv in zip(signif_clusters, signif_cluster_pvals):
             '''
             # this index tells direction of tval, hence could be used to
             # decide which color to draw the significant cluster region
             # based on which curve is higher:
             idx = (np.sign(tvals[clu[0][0], 0]).astype(int) + 1) // 2
             '''
             clu = clu[0]
             cluster_ymin = ylim[0] * np.ones_like(t[clu])
             cluster_ymax = np.max(contr_mean[:, clu], axis=0)
             pval_x = t[int(np.mean(clu[[0, -1]]))]
             pval_y = -0.1 * ylim[1]
             pval_ord = np.trunc(np.log10(pv)).astype(int)
             _ = axs[ii].fill_between(t[clu], cluster_ymin, cluster_ymax,
                                      alpha=1, facecolor='0.9', zorder=1,
                                      edgecolor='none')
             if show_pval:
                 pval_txt = '$p < 10^{{{}}}$'.format(pval_ord)
                 _ = axs[ii].text(pval_x, pval_y, pval_txt, ha='center',
                                  va='baseline', fontdict=dict(size=10))
 # set axis limits
 _ = axs[ii].set_ylim(*ylim)
 _ = axs[ii].set_xlim(*xlim)
 # remove yaxis / ticks / ticklabels near bottom
 ytck = [-0.1 * ymax, ymax]
 ytl = axs[ii].yaxis.get_ticklocs()
 _ = axs[ii].spines['left'].set_bounds(*ytck)
 _ = axs[ii].yaxis.set_ticks(ytl[ytl > ytck[0]])
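A quick check of the exponent used for the p-value annotation above: truncating log10 gives the power of ten that the p-value falls below:

import numpy as np

for pv in (0.03, 0.003, 0.0004):
    print(pv, int(np.trunc(np.log10(pv))))  # -1, -2, -3  ->  'p < 10^{k}'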
Beispiel #54
0
plt.ylabel('True label')
plt.xlabel('Predicted label')

classes = ['A-antigen negative', 'A-antigen positive']

plt.grid(False)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)

for i in range(cnf_matrix.shape[0]):
    for j in range(cnf_matrix.shape[1]):
        plt.text(j,
                 i,
                 cnf_matrix[i, j],
                 horizontalalignment="center",
                 color="orangered")

plt.gcf().subplots_adjust(left=0.25, bottom=0.35)
plt.savefig('Images/A_ConfusionWeighed.png', format='png', dpi=300)

coefPaths = justVarPathsNew[idxNZ[1]]

tile_path = np.trunc(coefPaths / (16**5))
tile_step = np.trunc((coefPaths - tile_path * 16**5) / 2)
tile_phase = np.trunc((coefPaths - tile_path * 16**5 - 2 * tile_step))

vtile_path = vhex(tile_path.astype('int'))
vtile_step = vhex(tile_step.astype('int'))
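A round-trip check of the decoding above, which assumes the packing coefPaths = tile_path * 16**5 + 2 * tile_step + tile_phase (the scheme is inferred from the arithmetic, not stated in the source):

import numpy as np

path, step, phase = 7, 1234, 1
coef = path * 16**5 + 2 * step + phase
tile_path = np.trunc(coef / 16**5)
tile_step = np.trunc((coef - tile_path * 16**5) / 2)
tile_phase = np.trunc(coef - tile_path * 16**5 - 2 * tile_step)
print(int(tile_path), int(tile_step), int(tile_phase))  # 7 1234 1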
Beispiel #55
0
def adaPoint(box, pro):
    box_pro = box
    if pro != 1.0:
        box_pro = box / pro
    box_pro = np.trunc(box_pro)
    return box_pro
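A quick illustration with assumed inputs: rescale a detection box from a resized image back to the original resolution and truncate to whole pixels:

import numpy as np

box = np.array([102.7, 54.3, 211.9, 160.2])  # x0, y0, x1, y1 (illustrative)
print(adaPoint(box, 0.5))  # [205. 108. 423. 320.]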
Beispiel #56
0
'''
Created on 18 Aug 2019

@author: lingyulong
'''
import numpy as np

# np.abs(): absolute value of each element
data = np.array([1, 2, -3, 4, -5, 6])
print("original array:", data)
print("after np.abs():", np.abs(data))

data = np.array([[1.1, 1.5, 1.7], [-1.1, -1.5, -1.7], [2.1, 2.6, 3.9]])
print(np.rint(data))  # round each element to the nearest integer
print(np.trunc(data))  # truncate each element toward zero
print(np.isinf(data))
print(np.isfinite(data))
print(np.isnan(data))

# np.add(): element-wise sum of two arrays
a = np.array([[1, 2, 3], [11, 22, 33]])
b = np.array([[1, -1, 2], [2, 3, 1]])
print("element-wise sum of the two arrays:")
c = np.add(a, b)
print(c)

# np.maximum(): element-wise maximum of two arrays
a = np.array([[1, 2, 3], [11, 22, 33]])
b = np.array([[1, 15, 9], [2, 3, 100]])
print("element-wise maximum of the two arrays:")
print(np.maximum(a, b))
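The original print statement mentions both the maximum and the minimum; the matching element-wise minimum, for completeness:

print("element-wise minimum of the two arrays:")
print(np.minimum(a, b))  # [[1 2 3], [2 3 33]]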
Beispiel #57
0
    def plot_gain_circles(self, two_port, gains_db=None, plane='source', \
                          gain_cursor=True, surface=False):
        self.two_port = two_port
        self.gain_cursor = gain_cursor
        self.current_plane = plane

        max_gain = self.two_port.g_max()

        if (self.two_port.kt() < 1).any():
            max_gain = self.two_port.max_double_sided_mismatched_gain()

        if gains_db is None:
            gains_db = np.trunc(
                10 * db(max_gain)[0] * linspace(0.5, 1, 4)) / 10
        if surface:
            filled = False
            circle_alpha = 0.7
            linestyle = 'dashed'
        else:
            filled = True
            circle_alpha = 0.2
            linestyle = 'solid'

        for g in gains_db:
            if plane == 'source':
                center, radius = self.two_port.available_gain_circle(un_db(g))
                self.ax.set_title('Source plane')
            elif plane == 'load':
                center, radius = self.two_port.operating_gain_circle(un_db(g))
                self.ax.set_title('Load plane')

            text = str(g) + ' dB'

            self.plot_circle((center[0].real, center[0].imag), radius[0], text=text, filled=filled,\
                text_color='black', circle_color='orange', circle_alpha=circle_alpha,\
                linestyle=linestyle)

        if not surface:
            self.save_background()

            return

        num_segments = 1000

        i, r = meshgrid(linspace(-1.1, 1.1, num_segments),
                        linspace(-1.1, 1.1, num_segments))
        gamma = i + 1j * r
        term = OnePort(s=gamma.reshape(-1))

        if plane == 'source':
            g = self.two_port.g_a(term)
        elif plane == 'load':
            g = self.two_port.g_p(term)

        g = db(g).reshape(num_segments, num_segments)
        g[abs(gamma) > 1] = nan

        im = self.ax.imshow(g,
                            origin='lower',
                            extent=(-1.1, 1.1, -1.1, 1.1),
                            interpolation='bicubic',
                            alpha=0.9)
        im.set_clim(gains_db[0], db(max_gain))

        self.save_background()
Beispiel #58
0
    def init(self):

        global b1

        # devel06
        b1_fname = '/home/ian/Data/dipy/brain2_scan1_fiber_track_mni.trk'
        # devel07
        #b1_fname='/home/eg01/Data_Backup/Data/PBC/pbc2009icdm/brain2/brain2_scan1_fiber_track_mni.trk'

        b1 = tracks.Tracks(b1_fname, subset=[0, 200])
        b1.angular_speed = 0.
        b1.picking_example = True
        b1.min_length = 20.
        b1.opacity = 0.8
        b1.manycolors = False
        b1.brain_color = [0, 0, 0]
        b1.init()

        global texim

        # devel06
        fname = '/home/ian/Data/dipy/Streaks4.bmp'
        # devel07
        #fname = '/home/eg01/Devel/Fos/fos/core/tests/data/Streaks4.bmp'

        texim = texture.Texture_Demo(fname, red=False, green=False, blue=True)
        #texim.orbit = b1.data[4246]

        # pick 200 random track indices (np.int was removed from NumPy; use int)
        random_inx = np.trunc(len(b1.data) * np.random.rand(200)).astype(int)

        #texim.orbits = [b1.data[4246],b1.data[3000],b1.data[2000],b1.data[1000]]
        texim.orbits = []
        for i in random_inx:
            # keep only the tracks longer than 20
            if tm.length(b1.data[i]) > 20.:
                texim.orbits.append(b1.data[i])

        texim.orbits_index = np.zeros((len(texim.orbits),), int)
        texim.init()

        self.slots = {0: {'actor': texim, 'slot': (0, 800 * MS)}}  #,
Beispiel #59
0
def trunc(x, ndecimals=0):
    """Truncate a floating point number to a given number of decimals."""
    decade = 10**ndecimals
    return np.trunc(x * decade) / decade
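A short usage sketch: truncation keeps the leading decimals without rounding, for negative values too:

print(trunc(3.14159, 2))   # 3.14
print(trunc(-3.14159, 2))  # -3.14
print(trunc(2.999))        # 2.0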
Beispiel #60
0
 def tax_of_payment_confirmation_money(self, engineer_history):
     if engineer_history:
         return numpy.trunc((self.payment_confirmation_money or 0) * engineer_history.payment_tax.rate)
     else:
         return 0