Example #1
def check_orbits(p1, t1, p2, t2, tmn, tmx, tol):
    n1 = t1 + p1 * np.arange(np.floor((tmn-t1)/p1), np.ceil((tmx-t1)/p1))
    n1 = n1[(tmn <= n1) * (n1 <= tmx)]
    n2 = t2 + p2 * np.arange(np.floor((tmn-t2)/p2), np.ceil((tmx-t2)/p2))
    n2 = n2[(tmn <= n2) * (n2 <= tmx)]
    delta = np.fabs(n1[:, None] - n2[None, :])
    return max(len(n1), len(n2)) == np.sum(delta < tol)
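A minimal usage sketch (not part of the original example), assuming numpy is imported as np; the periods, epochs, window and tolerance are made-up values. Identical ephemerides should line up event-for-event, so the check returns True.

import numpy as np

# Hypothetical transit ephemerides: (period, epoch) in days over a 200-day window.
p1, t1 = 3.52, 1.10
p2, t2 = 3.52, 1.10
print(check_orbits(p1, t1, p2, t2, tmn=0.0, tmx=200.0, tol=1e-3))  # True for identical ephemerides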
Example #2
def rand_checkers(n1=100, n2=100, n3=100, n4=100, sigma=0.1):
    """ Sample n1 and n2 points from a noisy checker"""
    nb1 = int(np.floor(n1 / 8))
    nb2 = int(np.floor(n2 / 8))
    nb3 = int(np.floor(n3 / 8))
    nb4 = int(np.floor(n4 / 8))

    xapp = np.reshape(np.zeros((nb1 + nb2 + nb3 + nb4) * 16), 
                      [(nb1 + nb2 + nb3 + nb4) * 8, 2])
    yapp = np.ones((nb1 + nb2 + nb3 + nb4) * 8)
    idx = 0
    nb = 2*nb1
    for i in range(-2, 2):
        for j in range(-2, 2):

            yapp[idx:(idx + nb)] = [(i - j + 100) % 4] * nb
            xapp[idx:(idx + nb), 0] = np.random.rand(nb)
            xapp[idx:(idx + nb), 0] += i + sigma * np.random.randn(nb)
            xapp[idx:(idx + nb), 1] = np.random.rand(nb)
            xapp[idx:(idx + nb), 1] += j + sigma * np.random.randn(nb)
            idx += nb

    ind = np.arange((nb1 + nb2 + nb3 + nb4) * 8)
    np.random.shuffle(ind)
    res = np.hstack([xapp, yapp[:, np.newaxis]])
    return np.array(res[ind, :])
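A short usage sketch with arbitrary sample sizes, assuming numpy is imported as np; each returned row is (x, y, label).

import numpy as np

X = rand_checkers(64, 64, 64, 64, sigma=0.05)
print(X.shape)             # (rows, 3): two coordinates plus a label column
print(np.unique(X[:, 2]))  # the four checkerboard labels 0..3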
Example #3
def shot(
    I, exp=0.1, flux=1e5, sensitivity=1.0, dark_c=None, io_noise=0.0, full_well=2 ** 10 - 1, el_per_ADU=1.0, offset=50.0
):
    """\
    I : intensity distribution  
    flux : overall photon photons per seconds coming in
    exp : exposition time in sec
    io_noise : readout noise rms
    full_well : electron capacity of each pixel
    el_per_ADU : conversion effficiency of electrons to digitally counted units
    dark_curr : electrons per second per pixel on average
    """

    I = np.asarray(I).astype(float)

    if I.sum() != 0.0:
        I = I / I.sum()

    photo_el = np.floor(sensitivity * np.random.poisson(exp * flux * I))
    if dark_c is not None:
        therm_el = np.floor(np.random.poisson(dark_c * exp * np.ones_like(I)))
    else:
        therm_el = np.zeros_like(I)

    el = photo_el + therm_el
    el[el > full_well] = full_well
    out = offset + io_noise * np.random.standard_normal(I.shape) + el / el_per_ADU
    out[out < 0.0] = 0.0
    return out.astype(int)
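A small usage sketch, assuming numpy is imported as np: simulate one noisy detector frame from a Gaussian intensity pattern. All parameter values here are illustrative, not recommendations.

import numpy as np

yy, xx = np.mgrid[-32:32, -32:32]
I = np.exp(-(xx**2 + yy**2) / (2 * 8.0**2))    # Gaussian beam profile
frame = shot(I, exp=0.05, flux=1e6, io_noise=2.0, dark_c=0.1)
print(frame.shape, frame.dtype, frame.max())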
Example #4
def build_X(bids, bot_or_human):
    X = bot_or_human
    X = X.drop("payment_account", 1)
    X = X.drop("address", 1)

    bids["day"] = np.floor((bids["time"] - startt) / one_day)
    bids["week"] = np.floor(bids["day"] / 7.0)

    # print bids.bidder_id[0]
    print "starting ips"
    X = ip(X, bids)
    print "starting bid order"
    X = bid_order(X, bids)
    print "starting dt"
    X = dt(X, bids)
    print "startin day"
    X = day(X, bids)
    print "starting n_bids"
    X = n_bids(X, bids)
    print "starting urls"
    X = urls(X, bids)
    # print 'starting bid order'
    # X = bid_order(X, bids)
    print "starting countries"
    X = user_countries_per_auction(X, bids)
    print "starting merch"
    X = merch(X, bids)

    return X
Example #5
    def __init__(self, filepath, qually, mode='444'):
        '''
        '''
        imOrig = cv2.imread(filepath,1)
        self.filepath = filepath
        self.mode = mode
        # Compression rate and redundancy
        self.CRate = 0; self.Redunc = 0
        self.avgBits = 0
        # Quality
        self.qually = qually
        # Dimensions of the original image
        self.Mo, self.No, self.Do = imOrig.shape
        self.r, self.c = [8, 8]       # BLOCK DIMENSIONS
        # ADJUST THE IMAGE DIMENSIONS
        (self.M, self.N, self.D), self.img = h.adjImg(imOrig)
        # NUMBER OF BLOCKS IN THE VERTICAL AND HORIZONTAL DIRECTIONS
        self.nBlkRows = int(np.floor(self.M/self.r))
        self.nBlkCols = int(np.floor(self.N/self.c))
        # Generate quantization table
        self.Z = h.genQntb(self.qually)
        # CONVERT FROM RGB (BGR) TO YCbCr
        self.Ymg = cv2.cvtColor(self.img, cv2.COLOR_BGR2YCR_CB)
        self.NumBits = 0
        if self.Do == 2:
            self.NCHNL = 1
        elif self.Do == 3:
            self.NCHNL = 3
            
#        self.OUTCOMES = self._run_()
        self._run_()
Example #6
def prepare_data(data_x, data_mask, data_y):
    '''
    Split the data into training, validation and test sets.

    Note: because of the hstack, row vectors become column vectors.
    '''
    data_len = len(data_y)
    train_end = int(numpy.floor(data_len * 0.5))
    test_end = int(numpy.floor(data_len * 0.8))

    if data_x.ndim == 1:
        data_x.resize((data_x.shape[0],1))
    if len(data_mask) != 0  and data_mask.ndim == 1:
        data_mask.resize((data_mask.shape[0],1))
    if data_y.ndim == 1:
        data_y.resize((data_y.shape[0],1))

    if len(data_mask) == 0:
        allData = numpy.concatenate((data_x,data_y), axis=1)
    else:
        allData = numpy.concatenate((data_x,data_mask,data_y), axis=1)

    train_data = allData[:train_end,...]
    test_data = allData[train_end:test_end,...]
    valid_data = allData[test_end:,...]

    return train_data, valid_data, test_data 
Example #7
def svd_example():
    a = np.floor(np.random.rand(4, 4)*20-6)
    logger.info("Matrix A:\n %s", a)
    b = np.floor(np.random.rand(4, 1)*20-6)
    logger.info("Matrix B:\n %s", b)

    u, s, v_t = np.linalg.svd(a) # SVD decomposition of A
    logger.info("Matrix U:\n %s", u)
    logger.info("Matrix S:\n %s", s)
    logger.info("Matrix V(transpose:\n %s", u)

    logger.info("Computing inverse using linalg.pinv")
    # Computing the inverse using pinv
    inv_pinv = np.linalg.pinv(a)
    logger.info("pinv:\n %s", inv_pinv)

    # Computing inverse using matrix decomposition
    logger.info("Computing inverse using svd matrix decomposition")
    inv_svd = np.dot(np.dot(v_t.T, np.linalg.inv(np.diag(s))), u.T)
    logger.info("svd inverse:\n %s", inv_svd)
    logger.info("comparing the results from pinv and svd_inverse:\n %s",
                np.allclose(inv_pinv, inv_svd))

    logger.info("Sol1: Solving x using pinv matrix... x=A^-1 x b")
    result_pinv_x = np.dot(inv_pinv, b)

    logger.info("Sol2: Solving x using svd_inverse matrix... x=A^-1 x b")
    result_svd_x = np.dot(inv_svd, b)

    if not np.allclose(result_pinv_x, result_svd_x):
        raise ValueError('Should have been True')
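A brief standalone check of the same SVD-based inverse idea, assuming numpy is imported as np; the 2x2 matrix is arbitrary but well conditioned.

import numpy as np

a = np.array([[2.0, 1.0], [1.0, 3.0]])
u, s, v_t = np.linalg.svd(a)
inv_svd = v_t.T @ np.linalg.inv(np.diag(s)) @ u.T   # A^-1 = V S^-1 U^T
print(np.allclose(inv_svd, np.linalg.inv(a)))       # expected True for an invertible matrix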
Example #8
 def spectrum(self, shape, surface_point, bound):
     """Returns the counts histogram (bins,counts) for """
     
     wavelengths = []
     key = shape.surface_identifier(surface_point)
     if key not in self.store:
         return None
     
     entries = self.store[key]
     if len(entries) == 0:
         return None
     
     for entry in entries:
         if entry[2] == bound:
             wavelengths.append(float(entry[1]))
     
     if len(wavelengths) == 0:
         return None
     
     wavelengths = np.array(wavelengths)
     min = wavelengths.min()
     max = wavelengths.max()
     
     if len(wavelengths) == 1:
         bins = np.arange(np.floor( wavelengths[0] - 1), np.ceil(wavelengths[0] + 2))
         freq, bins  = np.histogram(wavelengths, bins=bins)
     else:
         bins = np.arange(np.floor( wavelengths.min()-1), np.ceil(wavelengths.max()+2))
         freq, bins  = np.histogram(wavelengths, bins=bins)
     return Spectrum(bins[0:-1], freq)
Example #9
    def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
        '''
        Find how many of each of the tail pieces is necessary.  Flag
        specifies the increment for a flag, barb for a full barb, and half for
        half a barb. Mag should be the magnitude of a vector (ie. >= 0).

        This returns a tuple of:

            (*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)

        *half_flag* is a boolean whether half of a barb is needed,
        since there should only ever be one half on a given
        barb. *empty_flag* flag is an array of flags to easily tell if
        a barb is empty (too low to plot any barbs/flags).
        '''

        #If rounding, round to the nearest multiple of half, the smallest
        #increment
        if rounding:
            mag = half * (mag / half + 0.5).astype(int)

        num_flags = np.floor(mag / flag).astype(int)
        mag = np.mod(mag, flag)

        num_barb = np.floor(mag / full).astype(int)
        mag = np.mod(mag, full)

        half_flag = mag >= half
        empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))

        return num_flags, num_barb, half_flag, empty_flag
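Because _find_tails is a method, here is a standalone sketch of the same flag/barb decomposition with the default increments (half=5, full=10, flag=50); the magnitudes are made up and numpy is assumed to be imported as np.

import numpy as np

mag = np.array([0.0, 4.0, 7.0, 23.0, 65.0])
half, full, flag = 5, 10, 50
m = half * (mag / half + 0.5).astype(int)            # round to nearest multiple of half
num_flags = np.floor(m / flag).astype(int)
m = np.mod(m, flag)
num_barb = np.floor(m / full).astype(int)
m = np.mod(m, full)
half_flag = m >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
print(num_flags, num_barb, half_flag, empty_flag)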
Example #10
def _search_fine(sino, srad, step, init_cen, ratio, drop):
    """
    Fine search for finding the rotation center.
    """
    Nrow, Ncol = sino.shape
    centerfliplr = (Ncol + 1.0) / 2.0 - 1.0

    # Use to shift the sinogram 2 to the raw CoR.
    shiftsino = np.int16(2 * (init_cen - centerfliplr))
    _copy_sino = np.roll(np.fliplr(sino[1:]), shiftsino, axis=1)
    lefttake = 0
    righttake = Ncol - 1
    if init_cen <= centerfliplr:
        lefttake = int(np.ceil(srad + 1))
        righttake = int(np.floor(2 * init_cen - srad - 1))
    else:
        lefttake = int(np.ceil(
            init_cen - (Ncol - 1 - init_cen) + srad + 1))
        righttake = int(np.floor(Ncol - 1 - srad - 1))
    Ncol1 = righttake - lefttake + 1
    mask = _create_mask(2 * Nrow - 1, Ncol1, 0.5 * ratio * Ncol, drop)
    numshift = np.int16((2 * srad + 1.0) / step)
    listshift = np.linspace(-srad, srad, num=numshift)
    listmetric = np.zeros(len(listshift), dtype='float32')
    num1 = 0
    for i in listshift:
        _sino = ndimage.interpolation.shift(
            _copy_sino, (0, i), prefilter=False)
        sinojoin = np.vstack((sino, _sino))
        listmetric[num1] = np.sum(np.abs(np.fft.fftshift(
            pyfftw.interfaces.numpy_fft.fft2(
                sinojoin[:, lefttake:righttake + 1]))) * mask)
        num1 = num1 + 1
    minpos = np.argmin(listmetric)
    return init_cen + listshift[minpos] / 2.0
Example #11
    def connect_composition_III(self):
        synapse_list = []
        Mt3v_list = self.non_columnar_neurons['Mt3v']
        Mt3h_list = self.non_columnar_neurons['Mt3h']
        
        for neuron in Mt3v_list:
            neuron.assign_pos(0., 0.)
        for neuron in Mt3h_list:
            neuron.assign_pos(0., 0.)
        
        rule3synapsesv = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3v']
        rule3synapsesh = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3h']
        
        dtnames = rule3synapsesv.dtype.names
        for cartridge in self.cartridges:
            synapse = Synapse(dict(zip(dtnames, [p.item() for p in rule3synapsesv[0]])))
            mtn = int(np.floor(cartridge.neurons['L2'].ypos / ((self.hexarray.Y[-1][-1]+1)/4)))
            synapse.link(cartridge.neurons['L2'], Mt3v_list[mtn])
            synapse_list.append(synapse)
            synapse = Synapse(dict(zip(dtnames, [p.item() for p in rule3synapsesh[0]])))
            mtn = int(np.floor(cartridge.neurons['L2'].xpos / ((self.hexarray.X[-1][-1]+1)/4)))
            synapse.link(cartridge.neurons['L2'], Mt3h_list[mtn])
            synapse_list.append(synapse)

        self.composition_rules.append({'synapses': synapse_list})
Example #12
def qwtCanvasClip(canvas, canvasRect):
    x1 = int(np.ceil(canvasRect.left()))
    x2 = int(np.floor(canvasRect.right()))
    y1 = int(np.ceil(canvasRect.top()))
    y2 = int(np.floor(canvasRect.bottom()))
    r = QRect(x1, y1, x2-x1-1, y2-y1-1)
    return canvas.borderPath(r)
Example #13
 def getRowCol(self,lat,lon,returnFloat=False):
     """Return data row and column from given geographic coordinates (lat/lon decimal degrees).
     
     :param lat: 
        Input latitude.
     :param lon: 
        Input longitude.
     :param returnFloat: 
        Boolean indicating whether floating point row/col coordinates should be returned.
     :returns: 
        Tuple of row and column.
     """
     ulx = self._geodict['xmin']
     uly = self._geodict['ymax']
     xdim = self._geodict['xdim']
     ydim = self._geodict['ydim']
     #check to see if we're in a scenario where the grid crosses the meridian
     if self._geodict['xmax'] < ulx and lon < ulx:
         lon += 360
     col = (lon-ulx)/xdim
     row = (uly-lat)/ydim
     if returnFloat:
         return (row,col)
     
     return (np.floor(row).astype(int),np.floor(col).astype(int))
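The same row/column mapping can be sketched standalone with a hypothetical geodict (numpy assumed as np); the grid spacing and query point below are made up.

import numpy as np

geodict = {'xmin': -120.0, 'ymax': 42.0, 'xdim': 0.01, 'ydim': 0.01}
lat, lon = 38.75, -117.30
col = (lon - geodict['xmin']) / geodict['xdim']
row = (geodict['ymax'] - lat) / geodict['ydim']
print(int(np.floor(row)), int(np.floor(col)))   # integer grid indices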
Example #14
 def _get_interv_graticule(self,pmin,pmax,dpar,mmin,mmax,dmer,verbose=True):
     def set_prec(d,n,nn=2):
         arcmin=False
         if d/n < 1.:
             d *= 60
             arcmin = True
             nn = 1
         x = d/n
         y = nn*x
         ex = np.floor(np.log10(y))
         z = np.around(y/10**ex)*10**ex/nn
         if arcmin:
             z = 1./np.around(60./z)
         return z
     max_n_par = 18
     max_n_mer = 36
     n_par = (pmax-pmin)/dpar
     n_mer = (mmax-mmin)/dmer
     if n_par > max_n_par:
         dpar = set_prec((pmax-pmin)/dtor,max_n_par/2)*dtor
     if n_mer > max_n_mer:
         dmer = set_prec((mmax-mmin)/dtor,max_n_mer/2,nn=1)*dtor
     if dmer/dpar < 0.2 or dmer/dpar > 5.:
         dmer = dpar = max(dmer,dpar)
     vdeg = np.floor(np.around(dpar/dtor,10))
     varcmin = (dpar/dtor-vdeg)*60.
     if verbose: print "The interval between parallels is %d deg %.2f'."%(vdeg,varcmin)
     vdeg = np.floor(np.around(dmer/dtor,10))
     varcmin = (dmer/dtor-vdeg)*60.
     if verbose: print "The interval between meridians is %d deg %.2f'."%(vdeg,varcmin)
     return dpar,dmer
Example #15
def interp(pic,flow):
    ys=np.arange(pic.shape[0]*pic.shape[1])//pic.shape[1]
    ud=(flow[:,:,0].reshape(-1)+ys)%pic.shape[0]
    xs=np.arange(pic.shape[0]*pic.shape[1])%pic.shape[1]
    lr=(flow[:,:,1].reshape(-1)+xs)%pic.shape[1]

    u=np.int32(np.floor(ud))
    d=np.int32(np.ceil(ud))%pic.shape[0]
    udiffs=ud-u
    udiffs=np.dstack((udiffs,udiffs,udiffs))
    l=np.int32(np.floor(lr))
    r=np.int32(np.ceil(lr))%pic.shape[1]
    ldiffs=lr-l
    ldiffs=np.dstack((ldiffs,ldiffs,ldiffs))

    ul=pic[u,l,:]
    ur=pic[u,r,:]
    dl=pic[d,l,:]
    dr=pic[d,r,:]


    udl=ul*(1-udiffs)+dl*udiffs
    udr=ur*(1-udiffs)+dr*udiffs
    ans=np.zeros(pic.shape)
    ans[ys,xs,:]=udl*(1-ldiffs)+udr*ldiffs
    return ans
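A quick sanity-check sketch, assuming numpy is imported as np: with a zero flow field the bilinear warp should reproduce the input image.

import numpy as np

pic = np.random.rand(4, 5, 3)
flow = np.zeros((4, 5, 2))
print(np.allclose(interp(pic, flow), pic))   # identity warp -> True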
Example #16
def genice_lattice(atoms, box, matchfunc=None):
    global cell, celltype, waters, coord, density
    logger = logging.getLogger()
    filtered = []
    if matchfunc is not None:
        for a in atoms:
            if matchfunc(a[0]):
                filtered.append(a)
    else:
        filtered = atoms
    dmin = shortest_distance(filtered)
    scale = 2.76 / dmin

    celltype = "triclinic"
    if (len(box) == 6):
        cell = np.array([[box[0],0,0],[box[1],box[2],0],[box[3],box[4],box[5]]])
    else:
        cell = np.diag(box)
    volume = np.linalg.det(cell)
    icell  = np.linalg.inv(cell)
    uniques = []
    for name,x,y,z in filtered:
        rpos = np.dot([x,y,z], icell)
        rpos -= np.floor(rpos)
        #Do twice to reduce the floating point uncertainty.
        #(Hint: assume the case when x=-1e-33.)
        rpos -= np.floor(rpos)
        if is_unique(uniques, rpos):
            uniques.append(rpos)
    waters = uniques
    coord = "relative"
    # bondlen = 3
    density = len(filtered)*18.0/(volume*scale**3*1e-24*6.022e23)
Example #17
    def allowable_ref_dividers(self, f_ref, require_integer_n, require_fractional_n):
        """
        given a reference frequency and whether an integer-N solution is needed,
        return a qualified list of reference dividers that are within the limits
        of the PLL
        """
        # first, we establish a minimum and maximum reference divider modulus....
        # if we are forcing an integer-N solution, use those phase-detector frequency limits

        int_n_nref_max = (np.floor(f_ref / self.f_pfd_limits_integer_n[0])).astype(int)
        int_n_nref_min = (np.ceil(f_ref / self.f_pfd_limits_integer_n[1])).astype(int)

        frac_n_nref_max = (np.floor(f_ref / self.f_pfd_limits_fractional_n[0])).astype(int)
        frac_n_nref_min = (np.ceil(f_ref / self.f_pfd_limits_fractional_n[1])).astype(int)

        if require_integer_n:
            ref_divider_min = int_n_nref_min
            ref_divider_max = int_n_nref_max
        elif require_fractional_n:
            ref_divider_min = frac_n_nref_min
            ref_divider_max = frac_n_nref_max
        else:
            ref_divider_min = min(int_n_nref_min, frac_n_nref_min)
            ref_divider_max = max(int_n_nref_max, frac_n_nref_max)

        n_ref_allowed, n_ref_digital_codes = zip(*self.n_ref_data)

        # now, make a list of all the divider moduli in that range from min to max
        # making sure that the PLL can do each one
        allowable_divs = []
        for n_ref_modulus in range(ref_divider_min, ref_divider_max+1):
            if n_ref_modulus in n_ref_allowed:
                allowable_divs.append(n_ref_modulus)
        return allowable_divs
Example #18
def subsample(feature_list, trigger_list, clf, subsampling_rate = 0.75):
	"""
	clf: string -> 'perc' or 'nb'
	"""

	None_indices = [i for (i,trigger) in enumerate(trigger_list) if trigger == u'None']
	All_other_indices = [i for (i,trigger) in enumerate(trigger_list) if trigger != u'None']


	N = len(None_indices)
	N_pick = int(np.floor((1.0 - subsampling_rate) * N))
	#N_pick = len(All_other_indices)

	#now pick N_pick random 'None' samples among all of them.
	random_indices = np.floor(np.random.uniform(0, N , N_pick) )    
	subsample_of_None_indices = [None_indices[int(i)] for i in random_indices]

	# Identify indices of remaining samples after subsampling + randomise them.
	remaining_entries = subsample_of_None_indices + All_other_indices
	perm = np.random.permutation(len(remaining_entries))
	remaining_entries = [remaining_entries[p] for p in perm]

	# Return the subsampled list of samples.
	if clf=='perc':
		subsampled_feature_list = [feature_list[i] for i in remaining_entries ]
		subsampled_trigger_list = [trigger_list[i] for i in remaining_entries ]
		return subsampled_feature_list, subsampled_trigger_list
	elif clf=='nb':
		subsampled_feature_list = feature_list.tocsr()[remaining_entries].tocoo()
		subsampled_trigger_list = np.asarray([trigger_list[i] for i in remaining_entries ])
		return subsampled_feature_list, subsampled_trigger_list
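A toy usage sketch for the 'perc' branch; the features, triggers and rate are fabricated for illustration.

features = [[i] for i in range(10)]
triggers = [u'None'] * 7 + [u'Attack', u'Die', u'Injure']
sub_feats, sub_trigs = subsample(features, triggers, clf='perc', subsampling_rate=0.5)
print(len(sub_feats), sub_trigs)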
Example #19
def test_integer_inputs(data, method, center_data, fit_mean, with_errors,
                        normalization):
    if method == 'scipy' and (fit_mean or with_errors):
        return

    t, y, dy = data

    t = np.floor(100 * t)
    t_int = t.astype(int)

    y = np.floor(100 * y)
    y_int = y.astype(int)

    dy = np.floor(100 * dy)
    dy_int = dy.astype('int32')

    frequency = 1E-2 * (0.8 + 0.01 * np.arange(40))

    if not with_errors:
        dy = None
        dy_int = None

    kwds = dict(center_data=center_data,
                fit_mean=fit_mean,
                normalization=normalization)
    P_float = LombScargle(t, y, dy, **kwds).power(frequency,method=method)
    P_int = LombScargle(t_int, y_int, dy_int,
                        **kwds).power(frequency, method=method)
    assert_allclose(P_float, P_int)
Example #20
def PlotErrBoxPlot(x, y, delta, ax, showXTicks):
  if x.size < 1:
    return
  ids = np.floor((x)/delta).astype(int)
  data = []
  for i in range(ids.min(), ids.max()+1):
    if (ids==i).any():
      data.append(y[ids==i])
  bp = plt.boxplot(data)
#  plt.plot(x,y,'.', color=c1, alpha=0.3)
  # set xticks
  if showXTicks:
    ticks = np.floor((np.arange(ids.min(), ids.max()+1)+0.5)*delta).astype(int)
    if np.unique(ticks).size < ticks.size:
      ticks = np.floor((np.arange(ids.min(), ids.max()+1)+0.5)*delta*10.)/10.
    xtickNames = plt.setp(ax, xticklabels=ticks)
    plt.setp(xtickNames, rotation=45)
  else:
    plt.setp(ax.get_xticklabels(), visible=False) 
  for box in bp["boxes"]:
    box.set(color=c1)
    #box.set(facecolor=c1)
  for whisker in bp["whiskers"]:
    whisker.set(color=c1)
  for cap in bp["caps"]:
    cap.set(color=c1)
  for median in bp["medians"]:
    median.set(color=c2)
  for flier in bp["fliers"]:
    flier.set(color=c3, marker=".", alpha=0.15) #,s=6)
Example #21
def plot_scatter_with_histograms(xvals, yvals, colour='k', oneToOneLine=True, xlabel=None, ylabel=None, title=None):
    gs = gridspec.GridSpec(5, 5)
    xmin = np.floor(min(xvals))
    xmax = np.ceil(max(xvals))
    ymin = np.floor(min(yvals))
    ymax = np.ceil(max(yvals))
    plt.subplot(gs[1:, 0:4])
    plt.plot(xvals, yvals, 'o', color=colour)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if oneToOneLine:
        oneToOneMax = max([max(xvals),max(yvals)])
        plt.plot([0,oneToOneMax],[0,oneToOneMax],'b--')
    plt.xlim(xmin,xmax)
    plt.ylim(ymin,ymax)
    plt.subplot(gs[0, 0:4])
    plt.hist(xvals, np.linspace(xmin,xmax,50))
    plt.axis('off')
    plt.subplot(gs[1:,4])
    plt.hist(yvals, np.linspace(ymin,ymax,50), orientation='horizontal')
    plt.axis('off')
    if title is not None:
        plt.suptitle(title)
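A usage sketch with synthetic data; it assumes the imports the function itself relies on (numpy as np, matplotlib.pyplot as plt and matplotlib's gridspec module).

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec

x = 5 + np.random.randn(300)
y = x + 0.5 * np.random.randn(300)
plot_scatter_with_histograms(x, y, colour='g', xlabel='observed', ylabel='modelled', title='demo')
plt.show()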
Example #22
def get_border_to_nucleus_properties(segment):


	pts = np.array(segment.isotropic_border_coords)
	res = min([segment.xres, segment.yres, segment.zres])
	centroid = np.floor(np.mean(pts,0))

	centroid_with_offset = np.floor(np.mean(segment.border_coords,0) + np.array([segment.bounding_box.xmin, segment.bounding_box.ymin, segment.bounding_box.zmin]))

	centroid_by_res = centroid_with_offset * np.array([segment.xres, segment.yres, segment.zres])

#	if centroid_with_offset[2] == 26:
#		pdb.set_trace()

	dists =  ((pts[:,0] - centroid[0])**2 + (pts[:,1] - centroid[1])**2 + (pts[:,2] - centroid[2])**2 ) **0.5
	max_dist = dists.max()
	dists = dists/max_dist	

	bins = np.arange(0, 1.05, 0.05)
	dist_hist = histogram(dists, bins = bins) [0]

	
	segment.add_feature("centroid_res", tuple(centroid_by_res))
	segment.add_feature("border_to_nucleus_dist_hist", dist_hist)
	segment.add_feature("border_to_nucleus_dist_mean", np.mean(dists))
	segment.add_feature("border_to_nucleus_dist_std", np.std(dists))
	segment.add_feature("distance_to_border_scale_factor", max_dist)


	pts = np.array(segment.border_coords)
	segment.add_feature("centroid", tuple(centroid_with_offset.astype("int")))
Example #23
 def __init__(self, hipparcos, symbad, intervals):
     self.hipparcos = hipparcos
     self.symbad = symbad
     self.ra_min = min(hipparcos['RA_J2000'])
     self.ra_max = max(hipparcos['RA_J2000'])
     self.de_min = min(hipparcos['DE_J2000'])
     self.de_max = max(hipparcos['DE_J2000'])
     self.intervals = intervals
     self.ra_dif = self.ra_max - self.ra_min
     self.de_dif = self.de_max - self.de_min
     self.ra_sp = self.ra_dif/(self.intervals-1)
     self.de_sp = self.de_dif/(self.intervals-1)
     
     # set the position at the space grid for hipparcos catalog
     self.stars = pandas.DataFrame(
                numpy.zeros((len(self.hipparcos),2)), columns=['rec','dec'])
     self.stars['rec'] = numpy.floor(
         (self.hipparcos['RA_J2000'] - self.ra_min) / self.ra_sp) + 1
     self.stars['dec'] = numpy.floor(
         (self.hipparcos['DE_J2000'] - self.de_min) / self.de_sp) + 1
     
     # set the position at the space grid for symbad catalog
     self.symbad['rac'] = numpy.floor(
         (self.symbad['RA_J2000'] - self.ra_min) / self.ra_sp) + 1
     self.symbad['dec'] = numpy.floor(
         (self.symbad['DE_J2000'] - self.de_min) / self.de_sp) + 1
Example #24
def calc_slit_box_aps_1id(slit_box_corners, inclip=(1, 10, 1, 10)):
    """
    Calculate the clip box based on given slip corners.

    Parameters
    ----------
    slit_box_corners : np.ndarray
        Four corners of the slit box as a 4x2 matrix
    inclip : tuple, optional
        Extra inclipping to avoid clipping artifacts

    Returns
    -------
    Tuple:
        Cliping indices as a tuple of four
        (clipFromTop, clipToBottom, clipFromLeft, clipToRight)

    """
    return (
        np.floor(slit_box_corners[:, 0].min()).astype(
            int) + inclip[0],  # clip top    row
        np.ceil(slit_box_corners[:, 0].max()).astype(
            int) - inclip[1],  # clip bottom row
        np.floor(slit_box_corners[:, 1].min()).astype(
            int) + inclip[2],  # clip left   col
        np.ceil(slit_box_corners[:, 1].max()).astype(
            int) - inclip[3],  # clip right  col
    )
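A minimal usage sketch, assuming numpy is imported as np; the corner coordinates are made up.

import numpy as np

corners = np.array([[10.3, 5.6],
                    [10.3, 250.2],
                    [480.7, 5.6],
                    [480.7, 250.2]])
print(calc_slit_box_aps_1id(corners))   # (clipFromTop, clipToBottom, clipFromLeft, clipToRight)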
Example #25
def solver_dt(t0, y0, f, dt, tk, method='expl_RK4', tuning='', v=0, outform='arr'):
	
	step=methods[method]
	#we calculate number of steps
	if np.abs(t0-tk)/dt - np.floor(np.abs(t0-tk)/dt) >= 0.5:
		nos = int(np.ceil(np.abs(t0-tk)/dt))
	else:
		nos = int(np.floor(np.abs(t0-tk)/dt))

	if v==2: print("Number of steps I'll take is "+str(nos))
	
	#in solver_nos above code will be changed!
	
	
	soln = [[t0, y0]]
	
	for i in range(nos-1):
		tmp1, tmp2 = step(f, soln[i][1], soln[i][0], dt, tuning=tuning)
		soln.append([tmp1, tmp2])
	
	if v>=1: print("solved. Took "+str(nos)+" steps to solve on interval ["+str(t0)+";"+str(tk)+"]")
	if v==2: print("solve method was "+method+". Additional tweaks for this solver were:"+tuning)
	if v==2: print("output form was "+outform+" (arr - numpy array such that arr[i] = [t, y1, ..., yn], list - nested list list[i]=[t, array(y)])")
	
	if outform=='arr':
		return np.array([[i[0]]+ ([a for a in i[1]] if type(i[1])==np.ndarray else [i[1]])  for i in soln])
	elif outform=='list':
		return soln
Example #26
def multilook_attributes(atr_dict,lks_az,lks_rg):
    #####
    atr = dict()
    for key, value in atr_dict.items():  atr[key] = str(value)
  
    ##### calculate new data size
    length = int(atr['FILE_LENGTH'])
    width  = int(atr['WIDTH'])
    length_mli = int(np.floor(length/lks_az))
    width_mli  = int(np.floor(width/lks_rg))
  
    ##### Update attributes
    atr['FILE_LENGTH'] = str(length_mli)
    atr['WIDTH']       = str(width_mli)
    try:
        atr['Y_STEP'] = str(lks_az*float(atr['Y_STEP']))
        atr['X_STEP'] = str(lks_rg*float(atr['X_STEP']))
    except: pass
    try:
        atr['AZIMUTH_PIXEL_SIZE'] = str(lks_az*float(atr['AZIMUTH_PIXEL_SIZE']))
        atr['RANGE_PIXEL_SIZE']   = str(lks_rg*float(atr['RANGE_PIXEL_SIZE']))
    except: pass
  
    try:
        atr['ref_y'] = str(int(int(atr['ref_y'])/lks_az))
        atr['ref_x'] = str(int(int(atr['ref_x'])/lks_rg))
    except: pass
    try:
        atr['subset_y0'] = str(int(int(atr['subset_y0'])/lks_az))
        atr['subset_y1'] = str(int(int(atr['subset_y1'])/lks_az))
        atr['subset_x0'] = str(int(int(atr['subset_x0'])/lks_rg))
        atr['subset_x1'] = str(int(int(atr['subset_x1'])/lks_rg))
    except: pass
  
    return atr
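A small usage sketch with a hand-written attribute dictionary; the keys mirror the ones the function reads and the values are illustrative.

atr = {'FILE_LENGTH': 1500, 'WIDTH': 600, 'Y_STEP': 0.000833, 'X_STEP': 0.000833,
       'AZIMUTH_PIXEL_SIZE': 10.0, 'RANGE_PIXEL_SIZE': 5.0}
atr_mli = multilook_attributes(atr, lks_az=5, lks_rg=3)
print(atr_mli['FILE_LENGTH'], atr_mli['WIDTH'])   # '300' '200'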
Example #27
 def xzCrossingTime(self):
   """
     Calculate times of crossing the xz-plane.
     
     This method calculates the times at which
     the xz-plane is crossed by the orbit. This
     is equivalent to finding the times where
     y=0.
     
     Returns
     -------
     Time 1 : float
         First crossing time defined as having POSITIVE
         x position.
     Time 2 : float
         Second crossing time defined as having NEGATIVE
         x position.
   """
   f = -self._w - arctan(tan(self._Omega)/cos(self._i))
   E = 2.*arctan(sqrt((1-self.e)/(1.+self.e)) * tan(f/2.))
   t1 = (E - self.e*sin(E))/self._n + self.tau
   p1 = self.xyzPos(t1)
   f += pi
   E = 2.*arctan(sqrt((1-self.e)/(1.+self.e)) * tan(f/2.))
   t2 = (E - self.e*sin(E))/self._n + self.tau
   
   t1 -= self._per * numpy.floor(t1/self._per)
   t2 -= self._per * numpy.floor(t2/self._per)
   
   if p1[0] >= 0.0:
     # x position of p1 is >= 0
     return (t1, t2)
   else:
     return (t2, t1)
Example #28
def errorScalingFactor(observable, beta):
  """
  Look up the numerical factors to apply to the sky averaged parallax error in order to obtain error
  values for a given astrometric parameter, taking the Ecliptic latitude and the number of transits into
  account.

  Parameters
  ----------

  observable - Name of astrometric observable (one of: alphaStar, delta, parallax, muAlphaStar, muDelta)
  beta       - Values(s) of the Ecliptic latitude.

  Returns
  -------

  Numerical factors to apply to the errors of the given observable.
  """
  if isscalar(beta):
    index=int(floor(abs(sin(beta))*_numStepsSinBeta))
    if index == _numStepsSinBeta:
      return _astrometricErrorFactors[observable][_numStepsSinBeta-1]
    else:
      return _astrometricErrorFactors[observable][index]
  else:
    indices = array(floor(abs(sin(beta))*_numStepsSinBeta), dtype=int)
    indices[(indices==_numStepsSinBeta)] = _numStepsSinBeta-1
    return _astrometricErrorFactors[observable][indices]
Example #29
    def solve(self, pixel=None, solver='cvxopt.coneqp'):
        '''
        Solves minimization problem using quadratic program formulation.
        '''
        n, m = self.image.shape
        if not pixel:
            i, j = int(floor(n / 2)), int(floor(m / 2))
        else:
            i, j = pixel
        r, p, q = self.psf_tensor.shape
        psf_bbox = (max(0, p/2-i), max(0, q/2-j),
                    min(p, p/2-i+n), min(q, q/2-j+m))
        img_bbox = (max(0, i-n/2), max(0, j-q/2),
                    min(n, i+n/2), min(m, j+m/2))
        self.A = self._compute_A(psf_bbox)
        self.normalization_factor = self._compute_normalization(psf_bbox)
        self.y = self._compute_y(img_bbox)

        Q = self._compute_Q()
        p = self._compute_p()
        G = self._compute_G()
        h = self._compute_h()
        result = cvxopt.solvers.qp(Q, p, G, h)

        self.result = result
        self.x = result['x']

        return result['status']
Example #30
    def _drawGraticules(self,m,gd):
        par = np.arange(np.ceil(gd.ymin),np.floor(gd.ymax)+1,1.0)
        mer = np.arange(np.ceil(gd.xmin),np.floor(gd.xmax)+1,1.0)
        merdict = m.drawmeridians(mer,labels=[0,0,0,1],fontsize=10,
                                  linewidth=0.5,color='gray',zorder=GRATICULE_ZORDER)
        pardict = m.drawparallels(par,labels=[1,0,0,0],fontsize=10,
                                  linewidth=0.5,color='gray',zorder=GRATICULE_ZORDER)

        #loop over meridian and parallel dicts, change/increase font, draw ticks
        xticks = []
        for merkey,mervalue in merdict.items():
            merline,merlablist = mervalue
            merlabel = merlablist[0]
            merlabel.set_family('sans-serif')
            merlabel.set_fontsize(12.0)
            xticks.append(merline[0].get_xdata()[0])

        yticks = []
        for parkey,parvalue in pardict.items():
            parline,parlablist = parvalue
            parlabel = parlablist[0]
            parlabel.set_family('sans-serif')
            parlabel.set_fontsize(12.0)
            yticks.append(parline[0].get_ydata()[0])

        #plt.tick_params(axis='both',color='k',direction='in')
        plt.xticks(xticks,())
        plt.yticks(yticks,())
        m.ax.tick_params(direction='out')
Example #31
    def vert_cbar(self, resolution, log_scale, ax, label=None, label_fmt=None):
        r"""Display an image of the transfer function

        This function loads up matplotlib and displays the current transfer function.

        Parameters
        ----------

        Examples
        --------

        >>> tf = TransferFunction( (-10.0, -5.0) )
        >>> tf.add_gaussian(-9.0, 0.01, 1.0)
        >>> tf.show()
        """
        from matplotlib.ticker import FuncFormatter

        if label is None:
            label = ""
        alpha = self.alpha.y
        max_alpha = alpha.max()
        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
        i_data[:, :, 0] = np.outer(self.funcs[0].y, np.ones(self.alpha.x.size))
        i_data[:, :, 1] = np.outer(self.funcs[1].y, np.ones(self.alpha.x.size))
        i_data[:, :, 2] = np.outer(self.funcs[2].y, np.ones(self.alpha.x.size))

        ax.imshow(i_data, origin="lower", aspect="auto")
        ax.plot(alpha, np.arange(self.alpha.y.size), "w")

        # Set TF limits based on what is visible
        visible = np.argwhere(self.alpha.y > 1.0e-3 * self.alpha.y.max())

        # Display colorbar values
        xticks = (
            np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1)
            - self.alpha.x[0]
        )
        xticks *= (self.alpha.x.size - 1) / (self.alpha.x[-1] - self.alpha.x[0])
        if len(xticks) > 5:
            xticks = xticks[:: len(xticks) // 5]

        # Add colorbar limits to the ticks (May not give ideal results)
        xticks = np.append(visible[0], xticks)
        xticks = np.append(visible[-1], xticks)
        # remove dupes
        xticks = list(set(xticks))
        ax.yaxis.set_ticks(xticks)

        def x_format(x, pos):
            val = (
                x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size - 1)
                + self.alpha.x[0]
            )
            if log_scale:
                val = 10 ** val
            if label_fmt is None:
                if abs(val) < 1.0e-3 or abs(val) > 1.0e4:
                    if not val == 0.0:
                        e = np.floor(np.log10(abs(val)))
                        return r"${:.2f}\times 10^{{ {:d} }}$".format(
                            val / 10.0 ** e, int(e)
                        )
                    else:
                        return r"$0$"
                else:
                    return f"{val:.1g}"
            else:
                return label_fmt % (val)

        ax.yaxis.set_major_formatter(FuncFormatter(x_format))

        yticks = np.linspace(0, 1, 2, endpoint=True) * max_alpha
        ax.xaxis.set_ticks(yticks)

        def y_format(y, pos):
            s = f"{y:0.2f}"
            return s

        ax.xaxis.set_major_formatter(FuncFormatter(y_format))
        ax.set_xlim(0.0, max_alpha)
        ax.get_xaxis().set_ticks([])
        ax.set_ylim(visible[0], visible[-1])
        ax.tick_params(axis="y", colors="white", size=10)
        ax.set_ylabel(label, color="white", size=10 * resolution / 512.0)
Example #32
def main(eval_args):
    # ensures that weight initializations are all the same
    logging = utils.Logger(eval_args.local_rank, eval_args.save)

    # load a checkpoint
    logging.info('loading the model at:')
    logging.info(eval_args.checkpoint)
    checkpoint = torch.load(eval_args.checkpoint, map_location='cpu')
    args = checkpoint['args']

    logging.info('loaded the model at epoch %d', checkpoint['epoch'])
    arch_instance = utils.get_arch_cells(args.arch_instance)
    model = AutoEncoder(args, None, arch_instance)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.cuda()

    logging.info('args = %s', args)
    logging.info('num conv layers: %d', len(model.all_conv_layers))
    logging.info('param size = %fM ', utils.count_parameters_in_M(model))

    if eval_args.eval_mode == 'evaluate':
        # load train valid queue
        args.data = eval_args.data
        train_queue, valid_queue, num_classes, test_queue = datasets.get_loaders(args)

        if eval_args.eval_on_train:
            logging.info('Using the training data for eval.')
            valid_queue = train_queue
        if eval_args.eval_on_test:
            logging.info('Using the test data for eval.')
            valid_queue = test_queue

        # get number of bits
        num_output = utils.num_output(args.dataset, args)
        bpd_coeff = 1. / np.log(2.) / num_output

        valid_neg_log_p, valid_nelbo = test(valid_queue, model, num_samples=eval_args.num_iw_samples, args=args, logging=logging)
        logging.info('final valid nelbo %f', valid_nelbo)
        logging.info('final valid neg log p %f', valid_neg_log_p)
        logging.info('final valid nelbo in bpd %f', valid_nelbo * bpd_coeff)
        logging.info('final valid neg log p in bpd %f', valid_neg_log_p * bpd_coeff)

    else:
        bn_eval_mode = not eval_args.readjust_bn
        num_samples = 16
        with torch.no_grad():
            n = int(np.floor(np.sqrt(num_samples)))
            set_bn(model, bn_eval_mode, num_samples=36, t=eval_args.temp, iter=500)
            for ind in range(eval_args.repetition):     # sampling is repeated.
                torch.cuda.synchronize()
                start = time()
                with autocast():
                    logits = model.sample(num_samples, eval_args.temp)
                output = model.decoder_output(logits)
                output_img = output.mean if isinstance(output, torch.distributions.bernoulli.Bernoulli) \
                    else output.sample()
                torch.cuda.synchronize()
                end = time()

                # save to file
                total_name = "{}/data_to_save_{}_{}.pickle".format(eval_args.save, eval_args.name_to_save, ind)
                with open(total_name, 'wb') as handle:
                    pickle.dump(output_img.detach().cpu().numpy(), handle, protocol=pickle.HIGHEST_PROTOCOL)

                output_tiled = utils.tile_image(output_img, n).cpu().numpy().transpose(1, 2, 0)
                logging.info('sampling time per batch: %0.3f sec', (end - start))
                output_tiled = np.asarray(output_tiled * 255, dtype=np.uint8)
                output_tiled = np.squeeze(output_tiled)

                plt.imshow(output_tiled)
                plt.savefig("{}/generation_{}_{}".format(eval_args.save, eval_args.name_to_save, ind))
Example #33
def load_twix_vd(fin, builder):
    twix_id, num_measurements = struct.unpack("II", fin.read(8))
    # vd file can contain multiple measurements, but we only want the MRS
    # assume that the MRS is the last measurement
    measurement_index = num_measurements - 1

    # measurement headers are each 152 bytes at start of file
    fin.seek(8 + 152 * measurement_index)
    meas_id, file_id, offset, length, patient_name, protocol_name = struct.unpack(
        "IIQQ64s64s", fin.read(152))
    # offset points to where the actual data is in the file
    fin.seek(offset)

    # start with the header
    header_size = struct.unpack("I", fin.read(4))[0]
    header = fin.read(header_size - 4)
    header = header.decode('latin-1')
    builder.set_header_string(header)

    # read each scan until we hit the acq_end flag
    while True:

        # read the initial position, combined with DMA_length below that
        # tells us how to get to the start of the next scan
        initial_position = fin.tell()

        # the first four bytes contain some composite information,
        # read in an int and do bit shift magic to get the values
        temp = struct.unpack("I", fin.read(4))[0]
        DMA_length = temp & (2**26 - 1)
        pack_flag = (temp >> 25) & 1
        PCI_rx = temp >> 26
        meas_uid, scan_counter, time_stamp, pmu_time_stamp = struct.unpack(
            "IIII", fin.read(16))
        system_type, ptab_pos_delay, ptab_pos_x, ptab_pos_y, ptab_pos_z, reserved = struct.unpack(
            "HHIIII", fin.read(20))

        # more composite information
        eval_info_mask = struct.unpack("Q", fin.read(8))[0]
        acq_end = eval_info_mask & 1
        rt_feedback = eval_info_mask >> 1 & 1
        hp_feedback = eval_info_mask >> 2 & 1
        sync_data = eval_info_mask >> 5 & 1
        raw_data_correction = eval_info_mask >> 10 & 1
        ref_phase_stab_scan = eval_info_mask >> 14 & 1
        phase_stab_scan = eval_info_mask >> 15 & 1
        sign_rev = eval_info_mask >> 17 & 1
        phase_correction = eval_info_mask >> 21 & 1
        pat_ref_scan = eval_info_mask >> 22 & 1
        pat_ref_ima_scan = eval_info_mask >> 23 & 1
        reflect = eval_info_mask >> 24 & 1
        noise_adj_scan = eval_info_mask >> 25 & 1

        # if acq_end is set, there is no more data
        if acq_end:
            break

        # there are some data frames that contain auxiliary data, we ignore those for now
        if rt_feedback or hp_feedback or phase_correction or noise_adj_scan or sync_data:
            fin.seek(initial_position + DMA_length)
            continue

        num_samples, num_channels = struct.unpack("HH", fin.read(4))
        builder.set_num_channels(num_channels)
        loop_counters = struct.unpack("14H", fin.read(28))
        cut_off_data, kspace_centre_column, coil_select, readout_offcentre = struct.unpack(
            "IHHI", fin.read(12))
        time_since_rf, kspace_centre_line_num, kspace_centre_partition_num = struct.unpack(
            "IHH", fin.read(8))
        slice_position = struct.unpack("7f", fin.read(28))
        ice_program_params = struct.unpack("24H", fin.read(48))
        reserved_params = struct.unpack("4H", fin.read(8))
        fid_start_offset = ice_program_params[4]
        num_dummy_points = reserved_params[0]
        fid_start = fid_start_offset + num_dummy_points
        np = int(2**numpy.floor(numpy.log2(num_samples - fid_start)))
        builder.set_np(np)
        application_counter, application_mask, crc = struct.unpack(
            "HHI", fin.read(8))

        # read the data for each channel in turn
        scan_data = numpy.zeros((num_channels, np), dtype='complex')
        for channel_index in range(num_channels):

            # start with the header
            dma_length, meas_uid, scan_counter, sequence_time, channel_id = struct.unpack(
                "III4xI4xH6x", fin.read(32))

            # now the data itself, which consists of num_samples * 4 (bytes per float) * 2 (two floats per complex)
            raw_data = struct.unpack("<{}f".format(num_samples * 2),
                                     fin.read(num_samples * 4 * 2))

            # we need to massage the list of floats into a numpy array of complex numbers
            data_iter = iter(raw_data)
            complex_iter = (complex(r, -i)
                            for r, i in zip(data_iter, data_iter))
            scan_data[channel_index, :] = numpy.fromiter(
                complex_iter, "complex64",
                num_samples)[fid_start:(fid_start + np)]

        builder.add_scan(loop_counters, scan_data)

        # move the file pointer to the start of the next scan
        fin.seek(initial_position + DMA_length)
Example #34
def load_twix_vb(fin, builder):

    # first four bytes are the size of the header
    header_size = struct.unpack("I", fin.read(4))[0]

    # read the rest of the header minus the four bytes we already read
    header = fin.read(header_size - 4)
    # for some reason the last 24 bytes of the header contain some junk that is not a string
    header = header[:-24].decode('latin-1')
    builder.set_header_string(header)

    # the way that vb files are set up we just keep reading scans until the acq_end flag is set

    while True:
        # start by keeping track of where in the file this scan started
        # this will be used to jump to the start of the next scan
        start_position = fin.tell()

        # the first four bytes contain composite information
        temp = struct.unpack("I", fin.read(4))[0]

        # 25 LSBs contain DMA length (size of this scan)
        DMA_length = temp & (2**26 - 1)
        # next we have the "pack" flag bit and the rest is PCI_rx
        # not sure what either of these are for but break them out in case
        pack_flag = (temp >> 25) & 1
        PCI_rx = temp >> 26

        meas_uid, scan_counter, time_stamp, pmu_time_stamp = struct.unpack(
            "IIII", fin.read(16))

        # next long int is actually a lot of bit flags
        # a lot of them don't seem to be relevant for spectroscopy
        eval_info_mask = struct.unpack("Q", fin.read(8))[0]
        acq_end = eval_info_mask & 1
        rt_feedback = eval_info_mask >> 1 & 1
        hp_feedback = eval_info_mask >> 2 & 1
        sync_data = eval_info_mask >> 5 & 1
        raw_data_correction = eval_info_mask >> 10 & 1
        ref_phase_stab_scan = eval_info_mask >> 14 & 1
        phase_stab_scan = eval_info_mask >> 15 & 1
        sign_rev = eval_info_mask >> 17 & 1
        phase_correction = eval_info_mask >> 21 & 1
        pat_ref_scan = eval_info_mask >> 22 & 1
        pat_ref_ima_scan = eval_info_mask >> 23 & 1
        reflect = eval_info_mask >> 24 & 1
        noise_adj_scan = eval_info_mask >> 25 & 1

        if acq_end:
            break

        # if any of these flags are set then we should ignore the scan data
        if rt_feedback or hp_feedback or phase_correction or noise_adj_scan or sync_data:
            fin.seek(start_position + DMA_length)
            continue

        # now come the actual parameters of the scan
        num_samples, num_channels = struct.unpack("HH", fin.read(4))
        builder.set_num_channels(num_channels)

        # the loop counters are a set of 14 shorts which are used as indices
        # for the parameters an acquisition might loop over, including
        # averaging repetitions, COSY echo time increments and CSI phase
        # encoding steps
        # we have no prior knowledge about which counters might loop in a given
        # scan so we have to read in all scans and then sort out the data shape
        loop_counters = struct.unpack("14H", fin.read(28))

        cut_off_data, kspace_centre_column, coil_select, readout_offcentre = struct.unpack(
            "IHHI", fin.read(12))
        time_since_rf, kspace_centre_line_num, kspace_centre_partition_num = struct.unpack(
            "IHH", fin.read(8))

        ice_program_params = struct.unpack("4H", fin.read(8))
        free_params = struct.unpack("4H", fin.read(8))

        # there are some dummy points before the data starts
        num_dummy_points = free_params[0]

        # we want our np to be the largest power of two within the num_samples - num_dummy_points
        np = int(2**numpy.floor(numpy.log2(num_samples - num_dummy_points)))
        builder.set_np(np)

        slice_position = struct.unpack("7f", fin.read(28))

        # construct a numpy ndarray to hold the data from all the channels in this scan
        scan_data = numpy.zeros((num_channels, np), dtype='complex')

        # loop over all the channels and extract data
        for channel_index in range(num_channels):
            channel_id, ptab_pos_neg = struct.unpack("Hh", fin.read(4))
            raw_data = struct.unpack("<{}f".format(num_samples * 2),
                                     fin.read(num_samples * 4 * 2))
            # turn the raw data into complex pairs
            data_iter = iter(raw_data)
            complex_iter = (complex(r, -i)
                            for r, i in zip(data_iter, data_iter))
            scan_data[channel_index, :] = numpy.fromiter(
                complex_iter, "complex64",
                num_samples)[num_dummy_points:(num_dummy_points + np)]

            # the vb format repeats all the header data for each channel in
            # turn, obviously this is redundant so we read all but the channel
            # index from the next header here
            fin.read(124)

        # pass the data from this scan to the builder
        builder.add_scan(loop_counters, scan_data)

        # go to the next scan and the top of the loop
        fin.seek(start_position + DMA_length)
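The np (points per FID) computation in both loaders simply takes the largest power of two that fits in the usable samples; a tiny sketch with made-up sample counts:

import numpy

num_samples, num_dummy_points = 2080, 4
npts = int(2 ** numpy.floor(numpy.log2(num_samples - num_dummy_points)))
print(npts)   # 2048, the largest power of two not exceeding 2076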
Example #35
######## geometry setup (moritz.huetten@pik) #########

boxWidth = float(boxWidth)
accumrate = float(accumrate)

### CONSTANTS ###

secpera = 31556926.
ice_density = 910.0  # [kg m-3]

yExtent = 2 * boxWidth  # in km
xExtent = 2 * 800  # in km

# grid size: # of boxes

ny = int(np.floor(yExtent / boxWidth / 2) * 2 + 1)  # make it an odd number
nx = int(np.floor(xExtent / boxWidth / 2) * 2 + 1)  # make it an odd number

# grid size: extent in km's, origin (0,0) in the center of the domain

x = np.linspace(-xExtent / 2, xExtent / 2, nx) * 1000.0
y = np.linspace(-yExtent / 2, yExtent / 2, ny) * 1000.0

nxcenter = int(np.floor(0.5 * nx))
nycenter = int(np.floor(0.5 * ny))

thk = np.zeros((ny, nx))
topg = np.zeros((ny, nx))
ice_surface_temp = np.zeros((ny, nx))
precip = np.zeros((ny, nx))
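A quick sketch of the odd-grid-size formula used above, assuming numpy is imported as np; the box widths are illustrative.

import numpy as np

xExtent = 2 * 800   # km, as in the script
for boxWidth in (1.0, 2.0, 5.0):
    nx = int(np.floor(xExtent / boxWidth / 2) * 2 + 1)
    print(boxWidth, nx)   # nx is always odd, so the domain keeps a centre cell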
Example #36
def xmlToStructuredSong(xml_path, datasetToMusic21,
                        datasetToMidiChords, datasetToChordComposition, datasetChords):
    '''
    # Import xml file
    possible_durations = [4, 2, 1, 1/2, 1/4, 1/8, 1/16,
                          3, 3/2, 3/4, 3/8, 
                          1/6, 1/12]

    # Define durations dictionary
    dur_dict = {}
    dur_dict[possible_durations[0]] = 'full'
    dur_dict[possible_durations[1]] = 'half'
    dur_dict[possible_durations[2]] = 'quarter'
    dur_dict[possible_durations[3]] = '8th'
    dur_dict[possible_durations[4]] = '16th'
    dur_dict[possible_durations[5]] = '32th'
    dur_dict[possible_durations[6]] = '64th'
    dur_dict[possible_durations[7]] = 'dot half'
    dur_dict[possible_durations[8]] = 'dot quarter'
    dur_dict[possible_durations[9]] = 'dot 8th'
    dur_dict[possible_durations[10]] = 'dot 16th'
    dur_dict[possible_durations[11]] = 'half note triplet'
    dur_dict[possible_durations[12]] = 'quarter note triplet'
    '''
    
    # Import xml file
    possible_durations = [4, 2, 1, 1/2, 1/4, 1/8,
                          3, 3/2, 3/4,
                          1/6, 1/12]
    
    # Define durations dictionary
    dur_dict = {}
    dur_dict[possible_durations[0]] = 'full'
    dur_dict[possible_durations[1]] = 'half'
    dur_dict[possible_durations[2]] = 'quarter'
    dur_dict[possible_durations[3]] = '8th'
    dur_dict[possible_durations[4]] = '16th'
    dur_dict[possible_durations[5]] = '32th'
    dur_dict[possible_durations[6]] = 'dot half'
    dur_dict[possible_durations[7]] = 'dot quarter'
    dur_dict[possible_durations[8]] = 'dot 8th'
    dur_dict[possible_durations[9]] = 'half note triplet'
    dur_dict[possible_durations[10]] = 'quarter note triplet'    
    
    # invert dict from Wjazz to Music21 chords
    Music21ToWjazz = {v: k for k, v in datasetToMusic21.items()}
    
    s = m21.converter.parse(xml_path)
    
    new_structured_song = {}
    new_structured_song['title'] = xml_path.split('/')[-1].split('.')[0]
    new_structured_song['tempo'] = s.metronomeMarkBoundaries()[0][2].number
    new_structured_song['beat duration [sec]'] = 60 / new_structured_song['tempo']
    
    if not s.hasMeasures():
        s = s.makeMeasures()
    #s.show('text')
    bar_num = 0
    bars = []
    beats = []
    beat_pitch = []
    beat_duration = []
    beat_offset = []
    chord = 'NC'

    for measure in s.getElementsByClass('Measure'):
        #measure.show('text')
        bar_num += 1
        beat_num = 0
        bar_duration = 0
        for note in measure.notesAndRests:
            if 'Rest' in note.classSet:
                # detect rests
                pitch = 'R'
                distance = np.abs(np.array(possible_durations) - note.quarterLength)
                idx = distance.argmin()
                duration = dur_dict[possible_durations[idx]]
                offset = int(bar_duration * 96 / 4)
                # update beat arrays 
                beat_pitch.append(pitch)
                beat_duration.append(duration)
                beat_offset.append(offset)
            
            elif 'ChordSymbol' in note.classSet:
                #chord
                m21chord = note.figure
                if m21chord in Music21ToWjazz.keys():
                    chord = Music21ToWjazz[m21chord]
                else:
                    # add to WjazzToMusic21
                    datasetToMusic21[m21chord] = m21chord
                    # derive chord composition and make it of 4 notes
                    pitchNames = [str(p) for p in note.pitches]
                    # The following bit is added 
                    # just for parameter modeling purposes
                    if len(pitchNames) < 4:
                        hd = m21.harmony.ChordStepModification('add', 7)
                        note.addChordStepModification(hd, updatePitches=True)
                        #chord = m21.chord.Chord(pitchNames)
                        pitchNames = [str(p) for p in note.pitches]   
                    
                    # midi conversion
                    midiChord = []
                    for p in pitchNames:
                        c = m21.pitch.Pitch(p)
                        midiChord.append(c.midi)
                    
                    chord = m21chord
                    NewChord = {}
                    NewChord['Wjazz name'] = chord
                    NewChord['music21 name'] = m21chord
                    NewChord['chord composition'] = pitchNames
                    NewChord['midi chord composition'] = midiChord
                    NewChord['one-hot encoding'] = []
                    datasetChords.append(NewChord)

                    # update dictionaries
                    datasetToMidiChords[chord] = midiChord[:4]
                    datasetToChordComposition[chord] = pitchNames[:4]
            
            # otherwise this is a regular note
            else:
                pitch = note.pitch.midi
                distance = np.abs(np.array(possible_durations) - note.quarterLength)
                idx = distance.argmin()
                duration = dur_dict[possible_durations[idx]]
                offset = int(bar_duration * 96 / 4)
                # update beat arrays 
                beat_pitch.append(pitch)
                beat_duration.append(duration)
                beat_offset.append(offset)

            # update bar duration
            bar_duration += note.quarterLength
            # check if the beat is ended
            if np.floor(bar_duration) != beat_num:
                
                #print(np.floor(bar_duration), beat_num)
                while np.floor(bar_duration) - beat_num > 1:
                    new_beat = {}
                    new_beat['num beat'] = int(beat_num) + 1
                    new_beat['chord'] = chord
                    new_beat['pitch'] = [] 
                    new_beat['duration'] = [] 
                    new_beat['offset'] = []
                    new_beat['scale'] = []
                    new_beat['bass'] = datasetToMidiChords[chord][0]
                    new_beat['this beat duration [sec]'] = new_structured_song['beat duration [sec]']
                    beats.append(new_beat)
                    beat_num += 1
                    
                if not chord:
                    chord = 'NC'
                new_beat = {}
                new_beat['num beat'] = int(beat_num) + 1
                new_beat['chord'] = chord
                new_beat['pitch'] = beat_pitch 
                new_beat['duration'] = beat_duration 
                new_beat['offset'] = beat_offset
                new_beat['scale'] = []
                new_beat['bass'] = datasetToMidiChords[chord][0]
                new_beat['this beat duration [sec]'] = new_structured_song['beat duration [sec]']
                beats.append(new_beat)
                if beat_num == 3:
                    # append bar
                    new_bar = {}
                    new_bar['num bar'] = bar_num # over all song
                    new_bar['beats'] = beats # beats 1,2,3,4
                    bars.append(new_bar)
                    beats = []
                
                beat_num = np.floor(bar_duration)
                beat_pitch = []
                beat_duration = []
                beat_offset = []
    
    # compute chords array
    chord_array = []
    for bar in bars:
        for beat in bar['beats']:
            chord_array.append(beat['chord'])
    
    # compute next chord 
    last_chord = chord_array[0]
    next_chords = []
    for i in range(len(chord_array)):
        if chord_array[i] != last_chord:
            next_chords.append(chord_array[i])
            last_chord = chord_array[i]
    
    # compute array of next chords
    next_chords.append('NC')
    next_chord_array = []
    next_chord_pointer = 0
    last_chord = chord_array[0]
    for i in range(len(chord_array)):
        if chord_array[i] != last_chord:
            last_chord = chord_array[i]
            next_chord_pointer += 1
        next_chord_array.append(next_chords[next_chord_pointer])
    
    
    # compute next chord 
    last_chord = bars[0]['beats'][0]['chord']
    next_chords2 = []
    for bar in bars:
        for beat in bar['beats']:
            if beat['chord'] != last_chord:
                next_chords2.append(beat['chord'])
                last_chord = beat['chord']
    
    # add next chord to the beats
    last_chord = bars[0]['beats'][0]['chord']
    next_chords2.append('NC')
    next_chord_pointer = 0
    for bar in bars:
        for beat in bar['beats']:
            if beat['chord'] != last_chord:
                last_chord = beat['chord']
                next_chord_pointer += 1
            beat['next chord'] = next_chords2[next_chord_pointer]

    new_structured_song['bars'] = bars    

    return new_structured_song, datasetToMusic21, datasetToMidiChords, datasetToChordComposition, datasetChords
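The note-duration quantization inside the measure loop snaps each quarterLength to the nearest entry of possible_durations; a standalone sketch (numpy assumed as np) with a made-up note length:

import numpy as np

possible_durations = [4, 2, 1, 1/2, 1/4, 1/8, 3, 3/2, 3/4, 1/6, 1/12]
quarterLength = 0.7                    # hypothetical note length in quarter notes
distance = np.abs(np.array(possible_durations) - quarterLength)
print(possible_durations[distance.argmin()])   # 0.75, which dur_dict labels 'dot 8th'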
Example #37
0
def plot(newest_changes):
    filelist = os.listdir(
        '/home/maximilianklein/snapshot_data/{}/'.format(newest_changes))
    site_linkss_file = [f for f in filelist if f.startswith('worldmap')][0]
    if newest_changes == 'newest-changes':
        date_range = site_linkss_file.split('worldmap-index-from-')[1].split(
            '.csv')[0].replace('-', ' ')
        print(date_range)
    csv_to_read = '/home/maximilianklein/snapshot_data/{}/{}'.format(
        newest_changes, site_linkss_file)
    df = pandas.read_csv(csv_to_read, index_col=0)  # DataFrame.from_csv was removed from pandas
    major = df[df['total'] > 100]

    table_html = major.sort_values('Score', ascending=False).to_html(max_rows=10)

    # https://github.com/chdoig/pyladiesatx-bokeh-tutorial
    world_countries = wc.data.copy()

    country_xs = [world_countries[code]['lons'] for code in world_countries]
    country_ys = [world_countries[code]['lats'] for code in world_countries]
    country_names = [world_countries[code]['name'] for code in world_countries]

    def lookup_wigi(code):
        try:
            return df.loc[code]['Score']
        except KeyError:
            return -1

    index_vals = np.array([lookup_wigi(code) for code in world_countries])

    def fmt(c):
        # clip to the valid 0-255 channel range (missing countries have index -1)
        return int(np.clip(np.nan_to_num(c), 0, 255))

    colors = [
        "#%02x%02x%02x" % (fmt(r), fmt(g), fmt(b))
        for r, g, b in zip(np.floor(250 * (1 - index_vals)),
                           np.floor(200 *
                                    (1 -
                                     index_vals)), np.floor(100 * index_vals))
    ]

    source = ColumnDataSource(data=dict(
        name=country_names, wigi_index=[str(idx) for idx in index_vals]))

    # setup widgets
    TOOLS = "pan,wheel_zoom,box_zoom,reset,hover,save"
    title_suffix = 'Changes since {}'.format(
        date_range) if newest_changes == 'newest-changes' else 'All Time'

    p = figure(title="Gender by Country {}".format(title_suffix), tools=TOOLS)

    p.patches(country_xs, country_ys, fill_color=colors, source=source)

    hover = p.select(dict(type=HoverTool))
    hover.point_policy = "follow_mouse"
    hover.tooltips = OrderedDict([
        ("wigi", "@wigi_index"),
        ("Country", "@name"),
    ])

    js_filename = "gender_by_country_{}.js".format(newest_changes)
    script_path = "./assets/js/"
    output_path = "./files/assets/js/"

    # generate javascript plot and corresponding script tag
    js, tag = autoload_static(p, CDN, script_path + js_filename)

    with open(output_path + js_filename, 'w') as js_file:
        js_file.write(js)

    return {'plot_tag': tag, 'table_html': table_html}
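For reference, autoload_static is the bokeh embedding helper used above. A standalone sketch with placeholder file names (not the paths from the original module):
from bokeh.plotting import figure
from bokeh.embed import autoload_static
from bokeh.resources import CDN

demo = figure(title="demo")
demo.line([1, 2, 3], [4, 5, 6])
js, tag = autoload_static(demo, CDN, "assets/js/demo.js")  # returns (script body, <script> tag)
with open("demo.js", "w") as js_file:
    js_file.write(js)
# embed `tag` in the page; it loads the script from "assets/js/demo.js"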
Example #38
0
    def detect_image_bboxes(self, image, true_boxes):
        # modified function to also plot ground-truth bboxes along with predictions
        start = timer()

        if self.model_image_size != (None, None):
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                    size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        true_classes = true_boxes[:,-1]
        true_boxes = true_boxes[:,:4]
        
        for i, c in reversed(list(enumerate(true_classes))):
            try:
                predicted_class = self.class_names[c]
            except:
                print('Exception! class:', c)
            box = true_boxes[i]
            score = 1.0

            # label = '{} {:.2f}'.format(predicted_class, score)
            label = 'Box'
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=(255,255,255))
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=(255,255,255))
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        end = timer()
        print(end - start)
        return image
Example #39
0
def country(iso3,
            monthyear,
            proj="EPSG:3395",
            data_country=True,
            keep_data_raw=False,
            fcc_source="gfc",
            perc=50,
            gs_bucket=None):
    """Function formating the country data.

    This function downloads, computes and formats the country data.

    :param iso3: Country ISO 3166-1 alpha-3 code.

    :param proj: Projection definition (EPSG, PROJ.4, WKT) as in
    GDAL/OGR. Default to "EPSG:3395" (World Mercator).

    :param monthyear: Date (month and year) for WDPA data
    (e.g. "Aug2017").

    :param data_country: Boolean for running data_country.sh to
    compute country landscape variables. Default to "True".
    
    :param keep_data_raw: Boolean to keep the data_raw folder. Default
    to "False".

    :param fcc_source: Source for forest-cover change data. Can be
    "gfc" (Global Forest Change 2015 Hansen data) or
    "roadless". Default to "gfc".

    :param perc: Tree cover percentage threshold to define forest
    (only used if fcc_source="gfc").

    :param gs_bucket: Name of the google storage bucket to use.

    """

    # Identify continent and country from iso3
    print("Identify continent and country from iso3")
    # Geofabrik data
    file_geofab = pkg_resources.resource_filename("deforestprob",
                                                  "data/ctry_geofab.csv")
    data_geofab = pd.read_csv(file_geofab, sep=";", header=0)
    # Country
    ctry_link_geofab = data_geofab.ctry_link[data_geofab.iso3 == iso3]
    ctry_link_geofab = ctry_link_geofab.iloc[0]
    # Continent
    continent = data_geofab.continent[data_geofab.iso3 == iso3]
    continent = continent.iloc[0].lower()

    # Create data_raw directory
    print("Create data_raw directory")
    make_dir("data_raw")

    # Download the zipfile from gadm.org
    print("Download data")
    url = "http://biogeo.ucdavis.edu/data/gadm2.8/shp/" + iso3 + "_adm_shp.zip"
    fname = "data_raw/" + iso3 + "_adm_shp.zip"
    urlretrieve(url, fname)

    # Extract files from zip
    print("Extract files from zip")
    destDir = "data_raw"
    f = ZipFile(fname)
    f.extractall(destDir)
    f.close()
    print("Files extracted")

    # Reproject
    cmd = "ogr2ogr -overwrite -s_srs EPSG:4326 -t_srs '" + proj + "' -f 'ESRI Shapefile' \
    -lco ENCODING=UTF-8 data_raw/ctry_PROJ.shp data_raw/" + iso3 + "_adm0.shp"
    os.system(cmd)

    # Compute extent
    print("Compute extent")
    extent_latlong = extent_shp("data_raw/" + iso3 + "_adm0.shp")
    extent_proj = extent_shp("data_raw/ctry_PROJ.shp")

    # Region with buffer of 5km
    print("Region with buffer of 5km")
    xmin_reg = np.floor(extent_proj[0] - 5000)
    ymin_reg = np.floor(extent_proj[1] - 5000)
    xmax_reg = np.ceil(extent_proj[2] + 5000)
    ymax_reg = np.ceil(extent_proj[3] + 5000)
    extent_reg = (xmin_reg, ymin_reg, xmax_reg, ymax_reg)
    extent = " ".join(map(str, extent_reg))

    # Tiles for SRTM data (see http://dwtkns.com/srtm/)
    print("Tiles for SRTM data")
    # SRTM tiles are 5x5 degrees
    # x: -180/+180
    # y: +60/-60
    xmin_latlong = np.floor(extent_latlong[0])
    ymin_latlong = np.floor(extent_latlong[1])
    xmax_latlong = np.ceil(extent_latlong[2])
    ymax_latlong = np.ceil(extent_latlong[3])
    # Compute SRTM tile numbers
    tile_left = int(np.ceil((xmin_latlong + 180.0) / 5.0))
    tile_right = int(np.ceil((xmax_latlong + 180.0) / 5.0))
    if (tile_right == tile_left):
        # Trick to make curl globbing work in data_country.sh
        tile_right = tile_left + 1
    tile_top = int(np.ceil((-ymax_latlong + 60.0) / 5.0))
    tile_bottom = int(np.ceil((-ymin_latlong + 60.0) / 5.0))
    if (tile_bottom == tile_top):
        tile_bottom = tile_top + 1
    # Format variables, zfill is for having 01 and not 1
    tiles_long = str(tile_left).zfill(2) + "-" + str(tile_right).zfill(2)
    tiles_lat = str(tile_top).zfill(2) + "-" + str(tile_bottom).zfill(2)

    # Google EarthEngine task
    if (fcc_source == "gfc"):
        # Check data availability
        data_availability = ee_hansen.check(gs_bucket, iso3)
        # If not available, run GEE
        if data_availability is False:
            print("Run Google Earth Engine")
            task = ee_hansen.run_task(perc=perc,
                                      iso3=iso3,
                                      extent_latlong=extent_latlong,
                                      scale=30,
                                      proj=proj,
                                      gs_bucket=gs_bucket)
            print("GEE running on the following extent:")
            print(str(extent_latlong))

    # Google EarthEngine task
    if (fcc_source == "roadless"):
        # Check data availability
        data_availability = ee_roadless.check(gs_bucket, iso3)
        # If not available, run GEE
        if data_availability is False:
            print("Run Google Earth Engine")
            task = ee_roadless.run_task(iso3=iso3,
                                        extent_latlong=extent_latlong,
                                        scale=30,
                                        proj=proj,
                                        gs_bucket=gs_bucket)
            print("GEE running on the following extent:")
            print(str(extent_latlong))

    # Call data_country.sh
    if (data_country):
        script = pkg_resources.resource_filename("deforestprob",
                                                 "shell/data_country.sh")
        args = [
            "sh ", script, continent, ctry_link_geofab, iso3, "'" + proj + "'",
            "'" + extent + "'", tiles_long, tiles_lat, monthyear
        ]
        cmd = " ".join(args)
        os.system(cmd)

    # Forest computations
    if (fcc_source == "gfc"):
        # Download Google EarthEngine results
        print("Download Google Earth Engine results locally")
        ee_hansen.download(gs_bucket, iso3, path="data_raw")
        # Call forest_country.sh
        print("Forest computations")
        script = pkg_resources.resource_filename("deforestprob",
                                                 "shell/forest_country.sh")
        args = ["sh ", script, "'" + proj + "'", "'" + extent + "'"]
        cmd = " ".join(args)
        os.system(cmd)

    # Forest computations
    if (fcc_source == "roadless"):
        # Download Google EarthEngine results
        print("Download Google Earth Engine results locally")
        ee_roadless.download(gs_bucket, iso3, path="data_raw")
        # Call forest_country.sh
        print("Forest computations")
        script = pkg_resources.resource_filename("deforestprob",
                                                 "shell/forest_country.sh")
        args = ["sh ", script, "'" + proj + "'", "'" + extent + "'"]
        cmd = " ".join(args)
        os.system(cmd)

    # Delete data_raw
    if (keep_data_raw is False):
        for root, dirs, files in os.walk("data_raw", topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
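The SRTM tile arithmetic above can be checked by hand. A worked sketch with a hypothetical lat/long extent (roughly Madagascar); tile columns run 01-72 over -180..+180 deg longitude and rows 01-24 over +60..-60 deg latitude:
import numpy as np
xmin_ll, ymin_ll, xmax_ll, ymax_ll = 43.0, -26.0, 51.0, -11.0
tile_left = int(np.ceil((xmin_ll + 180.0) / 5.0))      # 45
tile_right = int(np.ceil((xmax_ll + 180.0) / 5.0))     # 47
tile_top = int(np.ceil((-ymax_ll + 60.0) / 5.0))       # 15
tile_bottom = int(np.ceil((-ymin_ll + 60.0) / 5.0))    # 18
print(str(tile_left).zfill(2) + "-" + str(tile_right).zfill(2))    # "45-47"
print(str(tile_top).zfill(2) + "-" + str(tile_bottom).zfill(2))    # "15-18"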
  def collect(self, idata, edata, rdata, params):

    #print "  collecting statistics ...",
    #sys.stdout.flush()

    lstats = dict()
    species = len(params.target)

    # co-ordinate meshes
    lstats["XYx"] = self.XYx
    lstats["XYy"] = self.XYy
    lstats["YZy"] = self.YZy
    lstats["YZz"] = self.YZz
    lstats["XZx"] = self.XZx
    lstats["XZz"] = self.XZz

    # allocate storage for the other arrays
    lstats["iidXY"]   = np.zeros((self.xbins, self.ybins))
    lstats["iidXZ"]   = np.zeros((self.xbins, self.zbins))
    lstats["iidYZ"]   = np.zeros((self.ybins, self.zbins))

    lstats["evdXY"]   = np.zeros((species+1, self.xbins, self.ybins))
    lstats["evdXZ"]   = np.zeros((species+1, self.xbins, self.zbins))
    lstats["evdYZ"]   = np.zeros((species+1, self.ybins, self.zbins))

    lstats["rvdXY"]   = np.zeros((species+1, self.xbins, self.ybins))
    lstats["rvdXZ"]   = np.zeros((species+1, self.xbins, self.zbins))
    lstats["rvdYZ"]   = np.zeros((species+1, self.ybins, self.zbins))

    lstats["ridXY"]   = np.zeros((species+1, self.xbins, self.ybins))
    lstats["ridXZ"]   = np.zeros((species+1, self.xbins, self.zbins))
    lstats["ridYZ"]   = np.zeros((species+1, self.ybins, self.zbins))

    lstats["rddXYx"]   = np.zeros((species+1, self.xbins, self.ybins))
    lstats["rddXYy"]   = np.zeros((species+1, self.xbins, self.ybins))
    lstats["rddXZx"]   = np.zeros((species+1, self.xbins, self.zbins))
    lstats["rddXZz"]   = np.zeros((species+1, self.xbins, self.zbins))
    lstats["rddYZy"]   = np.zeros((species+1, self.ybins, self.zbins))
    lstats["rddYZz"]   = np.zeros((species+1, self.ybins, self.zbins))


    # how do implanted ions affect the statistics?
    if idata is not None:
      for entry in idata:

        # extract stored data
        sid  = entry[0]	# species
        pos0 = entry[1]	# final position

        # 2D projections of distributions
        binx = int(np.floor((pos0[0]-self.xMin) / self.dx))
        biny = int(np.floor((pos0[1]-self.yMin) / self.dy))
        binz = int(np.floor((pos0[2]-self.zMin) / self.dz))

        if (binx >=0 and binx < self.xbins and biny >=0 and biny < self.ybins and binz >=0 and binz < self.zbins):

          lstats["iidXY"][binx, biny] += 1
          lstats["iidXZ"][binx, binz] += 1
          lstats["iidYZ"][biny, binz] += 1


    # how do eroded atoms affect the statistics?
    if edata is not None:
      for entry in edata:

        # extract stored data
        sid  = entry[0]	# species ID
        pos0 = entry[1]	# initial position of sputtered atom

        # 2D projections of distributions
        binx = int(np.floor((pos0[0]-self.xMin) / self.dx))
        biny = int(np.floor((pos0[1]-self.yMin) / self.dy))
        binz = int(np.floor((pos0[2]-self.zMin) / self.dz))

        if (binx >=0 and binx < self.xbins and biny >=0 and biny < self.ybins and binz >=0 and binz < self.zbins):

          lstats["evdXY"][sid, binx, biny] += 1
          lstats["evdXZ"][sid, binx, binz] += 1
          lstats["evdYZ"][sid, biny, binz] += 1


    # how do redistributed atoms affect the statistics?
    if rdata is not None:
      for entry in rdata:

        # extract stored data
        sid  = entry[0]	# species ID
        pos0 = entry[1]	# initial position
        pos1 = entry[2]	# final position
        dpos = pos1-pos0	# displacement

        # 2D projections of distributions
        binx = int(np.floor((pos0[0]-self.xMin) / self.dx))
        biny = int(np.floor((pos0[1]-self.yMin) / self.dy))
        binz = int(np.floor((pos0[2]-self.zMin) / self.dz))

        if (binx >=0 and binx < self.xbins and biny >=0 and biny < self.ybins and binz >=0 and binz < self.zbins):

          lstats["rvdXY"][sid, binx, biny] += 1
          lstats["rvdXZ"][sid, binx, binz] += 1
          lstats["rvdYZ"][sid, biny, binz] += 1

          lstats["rddXYx"][sid, binx, biny] += dpos[0]
          lstats["rddXYy"][sid, binx, biny] += dpos[1]
          lstats["rddXZx"][sid, binx, binz] += dpos[0]
          lstats["rddXZz"][sid, binx, binz] += dpos[2]
          lstats["rddYZy"][sid, biny, binz] += dpos[1]
          lstats["rddYZz"][sid, biny, binz] += dpos[2]

        # 2D projections of distributions
        binx = int(np.floor((pos1[0]-self.xMin) / self.dx))
        biny = int(np.floor((pos1[1]-self.yMin) / self.dy))
        binz = int(np.floor((pos1[2]-self.zMin) / self.dz))

        if (binx >=0 and binx < self.xbins and biny >=0 and biny < self.ybins and binz >=0 and binz < self.zbins):

          lstats["ridXY"][sid, binx, biny] += 1
          lstats["ridXZ"][sid, binx, binz] += 1
          lstats["ridYZ"][sid, biny, binz] += 1


      # and update 2D projected distributions (also total of all species in position '0')
      for part1 in ["iid", "evd", "rvd", "rid"]:
        for part2 in ["XY", "XZ", "YZ"]:
          key = "%s%s" % (part1, part2)
          lstats[key][0] = np.sum(lstats[key][1:], axis=0)

      # and update 2D projected distributions (also total of all species in position '0')
      for part1 in ["rdd"]:
        for part2 in ["XYx", "XYy", "XZx", "XZz", "YZy", "YZz"]:
          key = "%s%s" % (part1, part2)
          lstats[key][0] = np.sum(lstats[key][1:], axis=0)


    return lstats
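All the projections above rely on the same position-to-bin mapping. A minimal sketch with hypothetical grid parameters:
import numpy as np
xMin, dx = -10.0, 0.5                          # hypothetical grid origin and spacing
pos0 = np.array([-3.2, 1.7, 4.9])
binx = int(np.floor((pos0[0] - xMin) / dx))    # floor(6.8 / 0.5) = 13
# the index is only used when 0 <= binx < xbins, as in the range checks above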
 def _np_frame(data, window_length, hop_length):
   num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
   shape = (num_frames, window_length)
   strides = (data.strides[0] * hop_length, data.strides[0])
   return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
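Assuming _np_frame is reachable at module level, a toy call shows the overlapping windows that as_strided builds (the frames share memory with the input):
import numpy as np
x = np.arange(10.0)
frames = _np_frame(x, window_length=4, hop_length=2)
# frames.shape == (4, 4); row i is x[i * 2 : i * 2 + 4]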
# Import data
data = pd.read_csv('01_data/data_stocks.csv')

# Drop date variable
data = data.drop(['DATE'], 1)

# Dimensions of dataset
n = data.shape[0]
p = data.shape[1]

# Make data a np.array
data = data.values

# Training and test data
train_start = 0
train_end = int(np.floor(0.8*n))
test_start = train_end + 1
test_end = n
data_train = data[np.arange(train_start, train_end), :]
data_test = data[np.arange(test_start, test_end), :]

# Scale data
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler.fit(data_train)
data_train = scaler.transform(data_train)
data_test = scaler.transform(data_test)

# Build X and y
X_train = data_train[:, 1:]
y_train = data_train[:, 0]
X_test = data_test[:, 1:]
import joblib  # sklearn.externals.joblib has been removed from scikit-learn
from sklearn import preprocessing




if __name__ == '__main__':
    np.random.seed(0)

    patient_data=pd.read_pickle("../../data/df/dataset.pickle").values
    patient_data=preprocessing.scale(patient_data)
    target=pd.read_pickle("../../data/df/target.pickle").values
    indices = np.random.permutation(len(target))

    print "Total dataset size: "+str(patient_data.shape[0])
    num_train=int(np.floor(patient_data.shape[0]*0.75))
    print "Train set size: "+str(num_train)


    patient_data_train=patient_data[indices[0:num_train]]
    target_train=target[indices[0:num_train]]

    patient_data_test=patient_data[indices[num_train:]]
    target_test=target[indices[num_train:]]
    print "Test set size: "+str(target_test.shape[0])


    #keep 95% of variance
    pca = decomposition.PCA(n_components=0.95)
    pca.fit(patient_data_train)
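    # Hedged continuation of the sketch above (not from the original source): transform
    # both splits with the PCA fitted on the training data only.
    patient_data_train_pca = pca.transform(patient_data_train)
    patient_data_test_pca = pca.transform(patient_data_test)
    print("Components kept for 95% variance: " + str(pca.n_components_))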
Example #44
0
def get_boxes_grid(image_height, image_width):
    """
    Return the boxes on image grid.
    calling this function when cfg.IS_MULTISCALE is True, otherwise, calling rdl_roidb.prepare_roidb(imdb) instead.
    """

    # fixed a bug: changed cfg.TRAIN.SCALES to cfg.TRAIN.SCALES_BASE
    # because a ratio around 1.0 is needed here, not the actual size.
    # height and width of the feature map
    if cfg.NET_NAME == 'CaffeNet':
        height = np.floor((image_height * max(cfg.TRAIN.SCALES_BASE) - 1) / 4.0 + 1)
        height = np.floor((height - 1) / 2.0 + 1 + 0.5)
        height = np.floor((height - 1) / 2.0 + 1 + 0.5)

        width = np.floor((image_width * max(cfg.TRAIN.SCALES_BASE) - 1) / 4.0 + 1)
        width = np.floor((width - 1) / 2.0 + 1 + 0.5)
        width = np.floor((width - 1) / 2.0 + 1 + 0.5)
    elif cfg.NET_NAME == 'VGGnet':
        height = np.floor(image_height * max(cfg.TRAIN.SCALES_BASE) / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)
        height = np.floor(height / 2.0 + 0.5)

        width = np.floor(image_width * max(cfg.TRAIN.SCALES_BASE) / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
        width = np.floor(width / 2.0 + 0.5)
    else:
        assert False, 'The network architecture is not supported in utils.get_boxes_grid!'

    # compute the grid box centers
    h = np.arange(height)
    w = np.arange(width)
    y, x = np.meshgrid(h, w, indexing='ij') 
    centers = np.dstack((x, y))
    centers = np.reshape(centers, (-1, 2))
    num = centers.shape[0]

    # compute width and height of grid box
    area = cfg.TRAIN.KERNEL_SIZE * cfg.TRAIN.KERNEL_SIZE
    aspect = cfg.TRAIN.ASPECTS  # height / width
    num_aspect = len(aspect)
    widths = np.zeros((1, num_aspect), dtype=np.float32)
    heights = np.zeros((1, num_aspect), dtype=np.float32)
    for i in xrange(num_aspect):
        widths[0,i] = math.sqrt(area / aspect[i])
        heights[0,i] = widths[0,i] * aspect[i]

    # construct grid boxes
    centers = np.repeat(centers, num_aspect, axis=0)
    widths = np.tile(widths, num).transpose()
    heights = np.tile(heights, num).transpose()

    x1 = np.reshape(centers[:,0], (-1, 1)) - widths * 0.5
    x2 = np.reshape(centers[:,0], (-1, 1)) + widths * 0.5
    y1 = np.reshape(centers[:,1], (-1, 1)) - heights * 0.5
    y2 = np.reshape(centers[:,1], (-1, 1)) + heights * 0.5
    
    boxes_grid = np.hstack((x1, y1, x2, y2)) / cfg.TRAIN.SPATIAL_SCALE

    return boxes_grid, centers[:,0], centers[:,1]
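The per-aspect widths and heights above follow from area = w * h with aspect = h / w. A quick numeric check with hypothetical config values:
import math
area = 16 * 16                  # hypothetical cfg.TRAIN.KERNEL_SIZE = 16
aspect = 2.0                    # height / width
w = math.sqrt(area / aspect)    # ~11.31
h = w * aspect                  # ~22.63, so w * h == area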
Example #45
0
# Set random seed to make sure models are validated on the same validation images.
# So you can compare the results of different models more intuitively.
random.seed(16)
val_indices=random.sample(range(0,len(val_input_names)),num_vals)

# Do the training here
for epoch in range(args.epoch_start_i, args.num_epochs):

    current_losses = []

    cnt=0

    # Equivalent to shuffling
    id_list = np.random.permutation(len(train_input_names))

    num_iters = int(np.floor(len(id_list) / args.batch_size))
    st = time.time()
    epoch_st=time.time()
    for i in range(num_iters):
        # st=time.time()

        input_image_batch = []
        output_image_batch = []

        # Collect a batch of images
        for j in range(args.batch_size):
            index = i*args.batch_size + j
            id = id_list[index]
            input_image = utils.load_image(train_input_names[id])
            output_image = utils.load_image(train_output_names[id])
Example #46
0
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    '''Preprocess true boxes to training input format

    Parameters
    ----------
    true_boxes: array, shape=(m, T, 5)
        Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
    input_shape: array-like, hw, multiples of 32
    anchors: array, shape=(N, 2), wh
    num_classes: integer

    Returns
    -------
    y_true: list of arrays, shaped like yolo_outputs; xywh are relative values

    '''
    assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors)//3 # default setting
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [0,1,2]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]

    m = true_boxes.shape[0]
    grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
    y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
        dtype='float32') for l in range(num_layers)]

    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    valid_mask = boxes_wh[..., 0]>0

    for b in range(m):
        # Discard zero rows.
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh)==0: continue
        # Expand dim to apply broadcasting.
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes

        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)

        # Find best anchor for each true box
        best_anchor = np.argmax(iou, axis=-1)

        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b,t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5+c] = 1

    return y_true
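A toy call (hypothetical anchors and one ground-truth box) showing the expected output shapes for a 416x416 input and three detection scales:
import numpy as np
anchors = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                    [59, 119], [116, 90], [156, 198], [373, 326]])
# one image (m=1), up to two boxes (T=2); the all-zero row is padding
true_boxes = np.array([[[50, 60, 150, 200, 1],
                        [0, 0, 0, 0, 0]]], dtype='float32')
y_true = preprocess_true_boxes(true_boxes, (416, 416), anchors, num_classes=3)
print([y.shape for y in y_true])  # [(1, 13, 13, 3, 8), (1, 26, 26, 3, 8), (1, 52, 52, 3, 8)]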
Example #47
0
####################
# SORT BY RICHNESS #
####################

mock = mock[mock.argsort(order = ('rich', 'id'))[::-1]]
cluster = cluster[cluster.argsort(order = ('rich', 'id'))[::-1]]

###################
# BIN BY RICHNESS #
###################

#set number of richness bins
n_rich_bins = int(math.floor((opts.max_rich_bin - opts.min_rich_bin) / opts.rich_bin_size)) + 1

c_rich_bin_index = np.floor((cluster.rich - opts.min_rich_bin) / opts.rich_bin_size).astype('int')
m_rich_bin_index = np.floor((mock.rich - opts.min_rich_bin) / opts.rich_bin_size).astype('int')

x_rich_vals = (np.arange(n_rich_bins) + 0.5) * opts.rich_bin_size + opts.min_rich_bin

############
# BIN BY Z #
############

#set number of redshift bins
n_z_bins = int(math.floor((opts.max_z_bin - opts.min_z_bin) / opts.z_bin_size)) + 1 

c_z_bin_index = np.floor((cluster.z - opts.min_z_bin) / opts.z_bin_size).astype('int')
m_z_bin_index = np.floor((mock.z - opts.min_z_bin) / opts.z_bin_size).astype('int')

x_z_vals = (np.arange(n_z_bins) + 0.5) * opts.z_bin_size + opts.min_z_bin
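A worked example of the fixed-width binning above, using hypothetical option values:
import numpy as np
min_rich_bin, rich_bin_size = 10.0, 5.0       # hypothetical opts values
rich = np.array([11.0, 23.0, 34.0])
bin_index = np.floor((rich - min_rich_bin) / rich_bin_size).astype('int')   # [0, 2, 4]
bin_centres = (np.arange(5) + 0.5) * rich_bin_size + min_rich_bin           # 12.5 ... 32.5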
Example #48
0
 def calc_xy_index_from_position(self, pos, lower_pos, max_index):
     ind = int(np.floor((pos - lower_pos) / self.resolution))
     if 0 <= ind <= max_index:
         return ind
     else:
         return None
Example #49
0
def create_grid_and_edges(data, drone_altitude, safety_distance):
    """
    Returns a grid representation of a 2D configuration space
    along with Voronoi graph edges given obstacle data and the
    drone's altitude.
    """

    # minimum and maximum north coordinates
    north_min = np.floor(np.min(data[:, 0] - data[:, 3]))
    north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))

    # minimum and maximum east coordinates
    east_min = np.floor(np.min(data[:, 1] - data[:, 4]))
    east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))

    # given the minimum and maximum coordinates we can
    # calculate the size of the grid.
    north_size = int(np.ceil((north_max - north_min)))
    east_size = int(np.ceil((east_max - east_min)))

    # Initialize an empty grid
    grid = np.zeros((north_size, east_size))

    # Define a list to hold Voronoi points
    points = []
    # Populate the grid with obstacles
    for i in range(data.shape[0]):
        north, east, alt, d_north, d_east, d_alt = data[i, :]

        if alt + d_alt + safety_distance > drone_altitude:
            obstacle = [
                int(north - d_north - safety_distance - north_min),
                int(north + d_north + safety_distance - north_min),
                int(east - d_east - safety_distance - east_min),
                int(east + d_east + safety_distance - east_min),
            ]
            grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1

            # add center of obstacles to points list
            points.append([north - north_min, east - east_min])

    # create a voronoi graph based on
    # location of obstacle centres
    graph = Voronoi(points)
    # check each edge from graph.ridge_vertices for collision
    edges = []
    for v in graph.ridge_vertices:
        p1 = graph.vertices[v[0]].astype(int)
        p2 = graph.vertices[v[1]].astype(int)
        # test each pair p1 and p2 for collision using Bresenham
        # If the edge does not hit an obstacle add it to the list
        in_collision = False
        ridgeline = bresenham(p1[0], p1[1], p2[0], p2[1])
        for b in ridgeline:
            # eliminate out of range points in the line
            if b[0] < 0 or b[0] >= grid.shape[0]:
                in_collision = True
                break
            if b[1] < 0 or b[1] >= grid.shape[1]:
                in_collision = True
                break
            # check if grid cell is an obstacle
            if grid[b[0], b[1]] == 1:
                in_collision = True
                break
        # keep ridge points not in collision
        if not in_collision:
            p1 = (p1[0], p1[1])
            p2 = (p2[0], p2[1])
            edges.append((p1, p2))

    return grid, edges
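A toy call with hypothetical obstacle rows (north, east, alt, d_north, d_east, d_alt), assuming scipy.spatial.Voronoi and bresenham are imported as in the module above:
import numpy as np
data = np.array([[10.3, 10.7, 10.0, 2.0, 2.0, 10.0],
                 [10.6, 40.2, 10.0, 2.0, 2.0, 10.0],
                 [40.1, 10.4, 10.0, 2.0, 2.0, 10.0],
                 [40.4, 40.3, 10.0, 2.0, 2.0, 10.0],
                 [25.5, 25.5, 10.0, 2.0, 2.0, 10.0]])
grid, edges = create_grid_and_edges(data, drone_altitude=5.0, safety_distance=1.0)
print(grid.shape, len(edges))   # occupancy grid plus the collision-free Voronoi edges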
    def detect_all_signs(self, image, score_threshold):
        """
        Draws all the bounding boxes and returns the annotated image together with the
        lists of classes, confidence scores and bounding boxes.
        """
        start = timer()

        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')
        if self.gray_scale:
            image_data = rgb_2_gray(image_data)

        # print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        # print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        list_c, list_score, list_box = [], [], []
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]
            if score < score_threshold:
                continue

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            # print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

            list_c.append(c)
            list_score.append(score)
            list_box.append((left, top, right, bottom))  # x_min, y_min, x_max, y_max

        return image, list_c, list_score, list_box  # (returns this if no sign is found)
def MergeDatCol_SigDat(SigDat_clean,
                       DataColDat_Sub,
                       LaneDict,
                       StartVeh=4,
                       EndVeh=14,
                       RunNum=99):
    """
    Merge signal timing data with the data-collection results.
    """
    # Use pandas.merge_asof for merging t_Entry in data collection results to the nearest phase start time.
    # Similar to infimum ~ Greatest lower bound
    # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.merge_asof.html
    # A “backward” search selects the last row in the right DataFrame whose ‘on’ key is less than or equal to the left’s key.
    CombData_Ln_Dict = {}
    for Ln in LaneDict.keys():
        CombData_Ln_Dict[Ln] = pd.merge_asof(
            DataColDat_Sub[DataColDat_Sub.Lane == Ln],
            SigDat_clean,
            left_on="t_Entry",
            right_on="G_st",
            direction="backward",
        )
        # ***********************Reactivate Later**********************************#
        # assert(max(CombData_Ln_Dict[Ln].t_Entry -CombData_Ln_Dict[Ln].G_st) < 50) #Make sure you don't merge vehs to a diff cycle

        # Check the vehicles that enter the intersection during the amber indication
        CombData_Ln_Dict[Ln].loc[:,
                                 "VehinAmber"] = (CombData_Ln_Dict[Ln].t_Entry
                                                  > CombData_Ln_Dict[Ln].G_end)
    CombData = pd.concat(CombData_Ln_Dict.values())
    # #Debug:
    # Ln =1
    # CombData_Ln_Dict[Ln].loc[:,"Debug"] = CombData_Ln_Dict[Ln].t_Entry -CombData_Ln_Dict[Ln].G_st
    # CombData = pd.concat(CombData_Ln_Dict.values())
    # Get the vehicle numbers
    CombData.sort_values(["CycNum", "Lane"], inplace=True)
    # Get length of each group and then use arange
    CombData.loc[:, "VehNum"] = np.hstack(
        CombData.groupby(
            ["CycNum",
             "Lane"])["t_Entry"].apply(lambda x: np.arange(1,
                                                           len(x) + 1)).values)
    CombData = CombData[[
        "CycNum",
        "Lane",
        "LaneDesc",
        "tQueue",
        "t_Entry",
        "G_st",
        "G_end",
        "VehNum",
        "PhaseNum",
    ]]
    ###################         Define conditions for Sat Flow         ###########################################
    mask_SatFlow = (CombData.VehNum >= StartVeh) & (
        CombData.VehNum <= EndVeh
    )  # Start vehicle is 4th (timeStamp for 4th vehicle is used for 5th vehicle)
    # mask_Headway = (CombData.VehNum >= StartVeh+1) & (CombData.VehNum <= EndVeh) # Start vehicle is 5th as we are directly getting the headway

    ##
    CombData_SatFlow = CombData[mask_SatFlow]
    CombData_Headway = CombData.copy()
    CombData_Headway.loc[:, "Headway"] = CombData_Headway.groupby(
        ["CycNum", "Lane"])["t_Entry"].diff()
    # mask2 = CombData_Headway.Headway<=5
    # CombData_Headway = CombData_Headway[mask2]
    CombData_SatFlow = CombData_SatFlow.groupby(["CycNum", "LaneDesc"]).agg({
        "t_Entry": ["min", "max"],
        "VehNum": ["min", "max"]
    })
    CombData_SatFlow.columns = [
        "_".join(col).strip() for col in CombData_SatFlow.columns.values
    ]
    CombData_SatFlow.loc[:, "AvgHeadway"] = (
        CombData_SatFlow.t_Entry_max - CombData_SatFlow.t_Entry_min) / (
            CombData_SatFlow.VehNum_max - CombData_SatFlow.VehNum_min)
    CombData_SatFlow.reset_index(drop=False, inplace=True)
    CombDataSum = CombData_SatFlow.groupby(["LaneDesc"
                                            ])["AvgHeadway"].describe()
    CombDataSum.loc[:, "SatFlow"] = np.floor(3600 / CombDataSum["mean"])
    CombDataSum.loc[:, "RunNum"] = RunNum
    return (CombDataSum, CombData_SatFlow, CombData_Headway)
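The backward merge_asof join above, isolated on toy frames with hypothetical times:
import pandas as pd
left = pd.DataFrame({"t_Entry": [12.0, 55.0, 61.0]})                         # vehicle entry times
right = pd.DataFrame({"G_st": [10.0, 50.0, 60.0], "PhaseNum": [1, 2, 3]})    # phase green starts
merged = pd.merge_asof(left, right, left_on="t_Entry", right_on="G_st", direction="backward")
# each t_Entry is paired with the latest G_st <= t_Entry, i.e. the phase it entered on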
def probability_v_rank(counts, out = None):
	'''
	Plots the probability (normalized frequency) versus rank for an
	arbitrary 1D vector of counts
	'''
	counts = np.sort(counts)
	probs = counts / np.sum(counts)
	ranks = list(range(1, len(probs) + 1))
	ranks.reverse()
	
	# Probability
	fig = plt.figure(figsize=(6,4), dpi = 300)
	ax = plt.gca()
	ax.scatter(ranks, probs, alpha = 0.5)
	ax.set_yscale('log')
	ax.set_xscale('log')
	ax.axis([1, np.power(10, np.ceil(np.log10(max(ranks)))), \
		     np.power(10, np.floor(np.log10(min(probs)))), \
		     np.power(10, np.ceil(np.log10(max(probs))))])
	plt.xlabel('Rank')
	plt.ylabel('Normalized frequency')
	plt.title('Transform templates from {}'.format(collection.name))
	plt.grid(True)
	if out:
		fig.savefig(out + ' prob_rank.png')
		np.savetxt(out + ' probs.txt', sorted(probs, reverse = True))
	# plt.show()

	# Count
	fig = plt.figure(figsize=(6,4), dpi = 300)
	ax = plt.gca()
	ax.scatter(ranks, counts, alpha = 0.5)
	ax.set_yscale('log')
	ax.set_xscale('log')
	ax.axis([1, np.power(10, np.ceil(np.log10(max(ranks)))), \
		     np.power(10, np.floor(np.log10(min(counts)))), \
		     np.power(10, np.ceil(np.log10(max(counts))))])
	plt.xlabel('Rank')
	plt.ylabel('Counts')
	plt.title('Transform templates from {}'.format(collection.name))
	plt.grid(True)
	if out:
		fig.savefig(out + ' count_rank.png')
		np.savetxt(out + ' counts.txt', sorted(counts, reverse = True))
	# plt.show()

	# Coverage
	missing = np.ones_like(probs)
	missing[0] = 0.0
	for i in range(1, len(probs)):
		missing[i] = missing[i-1] + probs[i]
	missing = np.ones_like(missing) - missing
	fig = plt.figure(figsize=(6,4), dpi = 300)
	ax = plt.gca()
	ax.scatter(ranks, missing, alpha = 0.5)
	ax.set_xscale('log')
	ax.axis([1, np.power(10, np.ceil(np.log10(max(ranks)))), \
		     0, 1])
	plt.xlabel('Rank threshold for inclusion')
	plt.ylabel('Estimated minimum coverage')
	plt.title('Transform templates from {}'.format(collection.name))
	plt.grid(True)
	if out:
		fig.savefig(out + ' missing_rank.png')
	# plt.show()

	return
Example #53
0
def i4_sobol(dim_num, seed):
    """
    Parameters:
      Input, integer DIM_NUM, the number of spatial dimensions.
      DIM_NUM must satisfy 1 <= DIM_NUM <= 40.
      Input/output, integer SEED, the "seed" for the sequence.
      This is essentially the index in the sequence of the quasirandom
      value to be generated.  On output, SEED has been set to the
      appropriate next value, usually simply SEED+1.
      If SEED is less than 0 on input, it is treated as though it were 0.
      An input value of 0 requests the first (0-th) element of the sequence.
      Output, real QUASI(DIM_NUM), the next quasirandom vector.
    """
    global atmost
    global dim_max
    global dim_num_save
    global initialized
    global lastq
    global log_max
    global maxcol
    global poly
    global recipd
    global seed_save
    global v

    if 'initialized' not in list(globals().keys()):
        initialized = 0
        dim_num_save = -1

    if not initialized or dim_num != dim_num_save:
        initialized = 1
        dim_max = 40
        dim_num_save = -1
        log_max = 30
        seed_save = -1

        #  Initialize (part of) V.
        v = np.zeros((dim_max, log_max))
        v[0:40, 0] = np.transpose([
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
        ])

        v[2:40, 1] = np.transpose([
            1, 3, 1, 3, 1, 3, 3, 1, 3, 1, 3, 1, 3, 1, 1, 3, 1, 3, 1, 3, 1, 3,
            3, 1, 3, 1, 3, 1, 3, 1, 1, 3, 1, 3, 1, 3, 1, 3
        ])

        v[3:40, 2] = np.transpose([
            7, 5, 1, 3, 3, 7, 5, 5, 7, 7, 1, 3, 3, 7, 5, 1, 1, 5, 3, 3, 1, 7,
            5, 1, 3, 3, 7, 5, 1, 1, 5, 7, 7, 5, 1, 3, 3
        ])

        v[5:40, 3] = np.transpose([
            1, 7, 9, 13, 11, 1, 3, 7, 9, 5, 13, 13, 11, 3, 15, 5, 3, 15, 7, 9,
            13, 9, 1, 11, 7, 5, 15, 1, 15, 11, 5, 3, 1, 7, 9
        ])

        v[7:40, 4] = np.transpose([
            9, 3, 27, 15, 29, 21, 23, 19, 11, 25, 7, 13, 17, 1, 25, 29, 3, 31,
            11, 5, 23, 27, 19, 21, 5, 1, 17, 13, 7, 15, 9, 31, 9
        ])

        v[13:40, 5] = np.transpose([
            37, 33, 7, 5, 11, 39, 63, 27, 17, 15, 23, 29, 3, 21, 13, 31, 25, 9,
            49, 33, 19, 29, 11, 19, 27, 15, 25
        ])

        v[19:40, 6] = np.transpose([
            13, 33, 115, 41, 79, 17, 29, 119, 75, 73, 105, 7, 59, 65, 21, 3,
            113, 61, 89, 45, 107
        ])

        v[37:40, 7] = np.transpose([7, 23, 39])

        #  Set POLY.
        poly = [
            1, 3, 7, 11, 13, 19, 25, 37, 59, 47, 61, 55, 41, 67, 97, 91, 109,
            103, 115, 131, 193, 137, 145, 143, 241, 157, 185, 167, 229, 171,
            213, 191, 253, 203, 211, 239, 247, 285, 369, 299
        ]

        atmost = 2**log_max - 1

        #  Find the number of bits in ATMOST.
        maxcol = i4_bit_hi1(atmost)

        #  Initialize row 1 of V.
        v[0, 0:maxcol] = 1

    #  Things to do only if the dimension changed.
    if dim_num != dim_num_save:

        #  Check parameters.
        if dim_num < 1 or dim_max < dim_num:
            print('I4_SOBOL - Fatal error!')
            print('  The spatial dimension DIM_NUM should satisfy:')
            print('    1 <= DIM_NUM <= %d' % dim_max)
            print('  But this input value is DIM_NUM = %d' % dim_num)
            return

        dim_num_save = dim_num

        #  Initialize the remaining rows of V.
        for i in range(2, dim_num + 1):

            #  The bits of the integer POLY(I) gives the form of polynomial I.
            #  Find the degree of polynomial I from binary encoding.
            j = poly[i - 1]
            m = 0
            j //= 2
            while j > 0:
                j //= 2
                m += 1

            #  Expand this bit pattern to separate components of the logical array INCLUD.
            j = poly[i - 1]
            includ = np.zeros(m)
            for k in range(m, 0, -1):
                j2 = j // 2
                includ[k - 1] = (j != 2 * j2)
                j = j2

            #  Calculate the remaining elements of row I as explained
            #  in Bratley and Fox, section 2.
            for j in range(m + 1, maxcol + 1):
                newv = v[i - 1, j - m - 1]
                l = 1
                for k in range(1, m + 1):
                    l *= 2
                    if includ[k - 1]:
                        newv = np.bitwise_xor(int(newv),
                                              int(l * v[i - 1, j - k - 1]))
                v[i - 1, j - 1] = newv

        #  Multiply columns of V by appropriate power of 2.
        l = 1
        for j in range(maxcol - 1, 0, -1):
            l *= 2
            v[0:dim_num, j - 1] = v[0:dim_num, j - 1] * l

        #  RECIPD is 1/(common denominator of the elements in V).
        recipd = 1.0 / (2 * l)
        lastq = np.zeros(dim_num)

    seed = int(np.floor(seed))

    if seed < 0:
        seed = 0

    l = 1
    if seed == 0:
        lastq = np.zeros(dim_num)

    elif seed == seed_save + 1:

        #  Find the position of the right-hand zero in SEED.
        l = i4_bit_lo0(seed)

    elif seed <= seed_save:

        seed_save = 0
        lastq = np.zeros(dim_num)

        for seed_temp in range(int(seed_save), int(seed)):
            l = i4_bit_lo0(seed_temp)
            for i in range(1, dim_num + 1):
                lastq[i - 1] = np.bitwise_xor(int(lastq[i - 1]),
                                              int(v[i - 1, l - 1]))

        l = i4_bit_lo0(seed)

    elif seed_save + 1 < seed:

        for seed_temp in range(int(seed_save + 1), int(seed)):
            l = i4_bit_lo0(seed_temp)
            for i in range(1, dim_num + 1):
                lastq[i - 1] = np.bitwise_xor(int(lastq[i - 1]),
                                              int(v[i - 1, l - 1]))

        l = i4_bit_lo0(seed)

    #  Check that the user is not calling too many times!
    if maxcol < l:
        print('I4_SOBOL - Fatal error!')
        print('  Too many calls!')
        print('  MAXCOL = %d\n' % maxcol)
        print('  L =      %d\n' % l)
        return

    #  Calculate the new components of QUASI.
    quasi = np.zeros(dim_num)
    for i in range(1, dim_num + 1):
        quasi[i - 1] = lastq[i - 1] * recipd
        lastq[i - 1] = np.bitwise_xor(int(lastq[i - 1]), int(v[i - 1, l - 1]))

    seed_save = seed
    seed += 1

    return [quasi, seed]
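Assuming the companion helpers i4_bit_hi1 and i4_bit_lo0 from the same module are available, drawing the first few 2-D points looks like:
seed = 0
for _ in range(4):
    quasi, seed = i4_sobol(2, seed)
    print(quasi)    # successive quasirandom vectors in [0, 1)^2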