Example No. 1
def nearest_odd(N):
    """
    Get the nearest odd number for each value of N.

    :param N: int / sequence of ints
    :return: int / sequence of ints
    :Example:
    >>> from __future__ import print_function
    >>> print(nearest_odd(range(1, 11)))
    [  1.   3.   3.   5.   5.   7.   7.   9.   9.  11.]
    >>> nearest_odd(0)
    1
    >>> nearest_odd(3)
    3.0
    """
    if hasattr(N, "__iter__"):
        N = np.array(N)
        y = np.floor(N)
        y[np.remainder(y, 2) == 0] = np.ceil(N[np.remainder(y, 2) == 0])
        y[np.remainder(y, 2) == 0] += 1
        return y
    if N % 2 == 0:
        return N + 1
    elif np.floor(N) % 2 == 0:
        return np.ceil(N)
    elif np.floor(N) % 2 != 0:
        return np.floor(N)
    return N
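A minimal sketch of calling the function above, assuming numpy is imported as np (the import is not shown in the snippet); the expected values follow directly from the floor/ceil logic:

import numpy as np

print(nearest_odd(4))      # 5   (an even integer is bumped up)
print(nearest_odd(4.2))    # 5.0 (floor is even, so ceil is returned)
print(nearest_odd(np.arange(1.0, 6.0)))   # [1. 3. 3. 5. 5.]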
Example No. 2
def get_spin_density_file(filename):
# edits a CHGCAR file to have only the spin difference table, "spin_up - spin_down"
	import re
	from StringIO import StringIO
	f = open(filename)
	lines = f.read().splitlines()
	n_atoms = numpy.sum(numpy.genfromtxt(StringIO(lines[6]),dtype=(int)))
	n_points = numpy.product(numpy.genfromtxt(StringIO(lines[9+n_atoms]),dtype=(int)))
	f.close()
	# Remove the first spin table
	#get start line of the potential table for majority spin
	start_line = 10+n_atoms
	#get lastline of the potential table for majority spin
	if numpy.remainder(n_points,5) == 0:
		last_line = 9+n_atoms+n_points/5
	elif numpy.remainder(n_points,5) != 0:
		last_line = 9+n_atoms+n_points/5+1
	del lines[start_line:last_line]

	# delete lines until you next match the "number of grid points line"
	finished = 0;
	count = 0;
	while finished != 1:
		l_match = re.match(lines[9+n_atoms],lines[9+n_atoms+1],0)
		if (l_match):
			finished = 1
		else:
			del lines[9+n_atoms+1]

	del lines[9+n_atoms+1]
	outfile = 'CHGCAR-spin_density'
	fout = open(outfile,'w')
	for line in lines:
		print>>fout, line
	fout.close()
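A brief usage note, assuming a CHGCAR from a spin-polarized VASP run sits in the working directory and that numpy is imported at module level as numpy (the snippet is Python 2, matching its StringIO import and print syntax):

get_spin_density_file('CHGCAR')   # writes 'CHGCAR-spin_density' containing only the spin-difference grid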
Example No. 3
def plot_mwd(RA,Dec,org=0,title='Mollweide projection', projection='mollweide'):
    ''' RA, Dec are arrays of the same length.
    RA takes values in [0,360), Dec in [-90,90],
    which represent angles in degrees.
    org is the origin of the plot, 0 or a multiple of 30 degrees in [0,360).
    title is the title of the figure.
    projection is the kind of projection: 'mollweide', 'aitoff', 'hammer', 'lambert'
    '''
    x = N.remainder(RA+360-org,360) # shift RA values
    ind = x>180
    x[ind] -=360    # scale conversion to [-180, 180]
    x=-x    # reverse the scale: East to the left
    tick_labels = N.array([150, 120, 90, 60, 30, 0, 330, 300, 270, 240, 210])
    tick_labels = N.remainder(tick_labels+360+org,360)
    fig = P.figure(figsize=(10, 5))
    ax = fig.add_subplot(111, projection=projection,)# axisbg ='LightCyan')
    ax.scatter(N.radians(x),N.radians(Dec))  # convert degrees to radians
    ax.set_xticklabels(tick_labels)     # we add the scale on the x axis
    ax.set_title(title)
    ax.title.set_fontsize(15)
    ax.set_xlabel("RA")
    ax.xaxis.label.set_fontsize(12)
    ax.set_ylabel("Dec")
    ax.yaxis.label.set_fontsize(12)
    ax.grid(True)
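A small usage sketch; the function body implies numpy and matplotlib.pyplot are imported as N and P respectively, which is assumed here:

import numpy as N
import matplotlib.pyplot as P

RA = N.random.uniform(0., 360., size=200)
Dec = N.random.uniform(-90., 90., size=200)
plot_mwd(RA, Dec, org=180, title='Random positions on the sky')
P.show()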
Example No. 4
    def __init__(self, x,y,z,sig):
        """Requires periodic BC, box length 1"""
        self.points = np.remainder(array((x,y,z), dtype=float), 1)
        self.sigmas = array(sig, dtype=float)
        self.N = N = len(sig)
        pts = array(list(self.points) + [sig])
        pts1 = np.concatenate([pts.T +(n,m,p,0) for n in [0,-1] for m in [0,-1] for p in [0,-1]], axis=0)
        self.allpoints = pts2 = np.remainder(pts1[:,:3] + 0.5,2)-1
        self.allsigmas = s2 = pts1[:,3]
        d = self.delaunay = Delaunay(pts2)
        
        d.simplices
        triangs = [(d.simplices[:,0], d.simplices[:,1], d.simplices[:,2]),
                   (d.simplices[:,0], d.simplices[:,1], d.simplices[:,3]),
                   (d.simplices[:,0], d.simplices[:,2], d.simplices[:,3]),
                   (d.simplices[:,1], d.simplices[:,2], d.simplices[:,3])]

        triangs = np.concatenate(triangs,axis=1).T
        #print(shape(array(triangs)))

        triangs.sort(1)
        triangs2 = triangs[triangs[:,0] < self.N]
        #print(shape(array(triangs2)))

        trirem = np.remainder(triangs2,N)
        #trirem.sort(1)
        self.triangles = triangs2[unique_rows(trirem)]
Example No. 5
def nmer_neighbors3d(xs,ys,zs,sigmas, nmersize, tol=1e-8):
    """
    Same as neighbors, but for 3D
    """
    xdiff = np.remainder(np.subtract.outer(xs, xs)+.5, 1)-.5
    ydiff = np.remainder(np.subtract.outer(ys, ys)+.5, 1)-.5
    zdiff = np.remainder(np.subtract.outer(zs, zs)+.5, 1)-.5
    sigmadists = np.add.outer(sigmas, sigmas)/2
    dists = np.sqrt((xdiff**2) + (ydiff**2) + (zdiff**2))
    #~ print(dists, sigmadists)
    #~ exit()
    matr = dists - sigmadists < tol
    bigN = len(matr)
    N = bigN // int(nmersize)
    n = int(nmersize)
    smallmatr = np.zeros((N,N))
    for i in range(N):
        li,hi = i*n, (i+1)*n
        for j in range(N):
            lj,hj = j*n, (j+1)*n
            ijcontacts = np.sum(matr[li:hi, lj:hj])
            #~ print('ijcontacts:', i, j, matr[li:hi, lj:hj], ijcontacts)
            #~ print(np.shape(dists), np.shape(sigmadists))
            #~ print('dists - sigmadists:', dists[li:hi, lj:hj] - sigmadists[li:hi, lj:hj])
            if i == j: ijcontacts = 1
            smallmatr[i,j] = ijcontacts
    return smallmatr
Example No. 6
    def generic_test(self, iname):

        _log.info("Testing image output sizes for %s " % iname)
        inst = webbpsf.Instrument(iname)
        pxscale = inst.pixelscale
        fov_arcsec = 5.0

        PSF = inst.calcPSF(nlambda=1, fov_pixels = 100, oversample=1)
        self.assertEqual(PSF[0].data.shape[0], 100)

        PSF = inst.calcPSF(nlambda=1, fov_arcsec = fov_arcsec, oversample=1)
        fov_pix = int(np.round(fov_arcsec / pxscale))
        self.assertEqual(PSF[0].data.shape[0], fov_pix)

        inst.options['parity'] = 'odd'
        PSF = inst.calcPSF(nlambda=1, fov_arcsec = fov_arcsec, oversample=1)
        self.assertTrue( np.remainder(PSF[0].data.shape[0],2) == 1)

        inst.options['parity'] = 'even'
        PSF = inst.calcPSF(nlambda=1, fov_arcsec = fov_arcsec, oversample=1)
        self.assertTrue( np.remainder(PSF[0].data.shape[0],2) == 0)

        # odd array, even oversampling = even
        inst.options['parity'] = 'odd'
        PSF = inst.calcPSF(nlambda=1, fov_arcsec = fov_arcsec, oversample=2)
        self.assertTrue( np.remainder(PSF[0].data.shape[0],2) == 0)

        # odd array, odd oversampling = odd
        inst.options['parity'] = 'odd'
        PSF = inst.calcPSF(nlambda=1, fov_arcsec = fov_arcsec, oversample=3)
        self.assertTrue( np.remainder(PSF[0].data.shape[0],2) == 1)
Example No. 7
def _detect(alpha, beta_1, beta_2, phi_range, tau, As, Au, tau_shift):
	omega_p = np.pi/(2*T)
	phi_p   = omega_p * tau

	tau_1_  = np.remainder(tau, 2*T)
	tau_2_  = np.remainder(tau+tau_shift, 2*T)
	k_tau_1 = int(np.floor(tau / (2*T)))
	k_tau_2 = int(np.floor((tau+tau_shift) / (2*T)))

	bkn_1, bk_1 = shift_indices(beta_1, k_tau_1)
	bkn_2, bk_2 = shift_indices(beta_2, k_tau_2)

	arg1 = np.cos(phi_p) * (tau_1_ * bkn_1 + (2*T - tau_1_) * bk_1)
	arg2 = ((2*T) / np.pi) * np.sin(phi_p) * (bkn_1 - bk_1)
	arg3 = np.sin(phi_p) * (tau_2_ * bkn_2 + (2*T - tau_2_) * bk_2)
	arg4 = ((2*T) / np.pi) * np.cos(phi_p) * (bkn_2 - bk_2)

	PHI_C, ALPHA = np.meshgrid(phi_range, alpha)
	ARG12 = np.meshgrid(phi_range, (arg1 - arg2))[1]
	ARG34 = np.meshgrid(phi_range, (arg3 + arg4))[1]

	# TODO: fix Au in partial overlap! One strong chip can dominate the complete correlation of a symbol ...
	result = (T/2) * ALPHA * As + (Au/4) * (np.cos(PHI_C) * ARG12 - np.sin(PHI_C) * ARG34)

	# modified for Au only transmissions
	if As > 0:
		return result / ((T/2) * As * Au)
	else:
		return result / ((T/2) * Au)
Example No. 8
def coord(x,y,field_of_view=100,window_size=(640,480)): # field of view in m
    "Convert world coordinates to pixel coordinates."
    fov_x = field_of_view
    fov_y = field_of_view/float(window_size[0])*float(window_size[1])
    wrapped_coord_x = np.remainder(x+fov_x/2.,fov_x)
    wrapped_coord_y = np.remainder(y+fov_y/2.,fov_y)
    return int(wrapped_coord_x/fov_x*window_size[0]), int(window_size[1]-wrapped_coord_y/fov_y*window_size[1])
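With the default 100 m field of view and a 640x480 window, the wrapping behaviour can be checked by hand (a quick sketch, assuming numpy is imported as np):

print(coord(0.0, 0.0))     # (320, 240): the world origin maps to the window centre
print(coord(60.0, 10.0))   # (64, 176): x wraps via np.remainder back into the field of view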
Example No. 9
def generate_matching_df(template,color_lookup,top=20,sample=15):
    '''generate_matching_df (worst function name ever)
    this will generate a dataframe with x,y, corr, and png file path for images that most highly match each sampled pixel. The df gets parsed to json that plugs into d3 grid 
    '''
    # Now read in our actual image
    base = Image.open(template)
    width, height = base.size
    pixels = base.load()
    data = []

    count=0
    new_image = pandas.DataFrame(columns=["x","y","corr","png"])

    for x in range(width):
        for y in range(height):
            # And take only every [sample]th pixel
            if np.remainder(x,sample)==0 and np.remainder(y,sample)==0:
                cpixel = pixels[x, y]
                tmp = color_lookup.copy()
                tmp = (tmp-cpixel).abs().sum(axis=1)
                tmp.sort()
                png = choice(tmp.loc[tmp.index[0:top]].index.tolist(),1)[0]
                new_image.loc[count] = [x,y,0,png]
                count+=1

    new_image["x"] = [int(x) for x in (new_image["x"] / sample) * 10]
    new_image["y"] = [int(x) for x in (new_image["y"] / sample) * 10]
    
    return new_image
Example No. 10
    def positionPolygon(self, theta=None):
        """Calculates (R,Z) position at given theta angle by joining points
        by straight lines rather than a spline. This avoids the
        overshoots which can occur with splines.

        Parameters
        ----------
        theta : array_like, optional
            Theta locations to find R, Z at. If None (default), use the
            values of theta stored in the instance

        Returns
        -------
        R, Z : (ndarray, ndarray)
            Value of R, Z at each input theta point
        """
        if theta is None:
            return self.R, self.Z
        n = len(self.R)
        theta = np.remainder(theta, 2.*pi)
        dtheta = 2.*np.pi/n
        ind = np.trunc(theta/dtheta).astype(int)  # cast to integer indices before indexing R and Z
        rem = np.remainder(theta, dtheta)
        indp = (ind+1) % n
        return (rem*self.R[indp] + (1.-rem)*self.R[ind]), (rem*self.Z[indp] + (1.-rem)*self.Z[ind])
Example No. 11
	def create_tag_noleap(self, time, timeunits):
		''' create a datetime object from reference date and ocean_time (noleap version)'''
		# first we need to figure out how many seconds have elapsed between ref_date
		# and start date of the run
		delta_type  = timeunits.split()[0]
		date_string = timeunits.split()[2]
		time_string = timeunits.split()[3]
		ref_string = date_string + ' ' + time_string
		fmt = '%Y-%m-%d %H:%M:%S'
		dateref_dstart = dt.datetime.strptime(ref_string,fmt)

		if delta_type == 'seconds':
			seconds_from_init = float(time)
		elif delta_type == 'days':
			seconds_from_init = float(time) * 86400.

		nyear  = int(np.floor(seconds_from_init / 365 / 86400))
		rm     = np.remainder(seconds_from_init,365*86400)
		ndays  = int(np.floor(rm / 86400))
		rm2    = np.remainder(rm,86400)
		nhours = int(np.floor(rm2 / 3600))
		rm3    = np.remainder(rm2,3600)
		nmin   = int(np.floor(rm3 / 60))
		nsec   = int(np.remainder(rm3,60))

		# pick a year we are sure is not a leap year
		fakeref  = dt.datetime(1901,1,1,0,0)
		fakedate = fakeref + dt.timedelta(days=ndays)
		month    = fakedate.month
		day      = fakedate.day

		tag=dt.datetime(nyear + dateref_dstart.year,month, day, nhours, nmin, nsec)
		return tag
Example No. 12
def edit_LOCPOT_file(filename):
# Removes a spurious set of lines between potential tables for major and minor spins so ASE will read it correctly
	from StringIO import StringIO
	f = open(filename)
	lines = f.read().splitlines()
	n_atoms = numpy.sum(numpy.genfromtxt(StringIO(lines[6]),dtype=(int)))
	n_points = numpy.product(numpy.genfromtxt(StringIO(lines[9+n_atoms]),dtype=(int)))
	f.close()
	#get start line of the potential table for majority spin
	start_line = 10+n_atoms

	#get lastline of the potential table for majority spin
	if numpy.remainder(n_points,5) == 0:
		last_line = 9+n_atoms+n_points/5
	elif numpy.remainder(n_points,5) != 0:
		last_line = 9+n_atoms+n_points/5+1
	#print  "%d %s" % (last_line+1, lines[last_line+1])
	if numpy.remainder(n_atoms,5) == 0:
		n_atom_table_lines = n_atoms/5
	elif numpy.remainder(n_atoms,5) != 0:
		n_atom_table_lines = n_atoms/5+1
	last_atom_table_line = n_atom_table_lines+last_line
	#print  "%d %s" % (last_atom_table_line, lines[last_atom_table_line])
	del lines[last_line+1:last_atom_table_line+1]
	outfile = 'editted_LOCPOT_file'
	fout = open(outfile,'w')
	for line in lines:
		print>>fout, line
	fout.close()
Example No. 13
def createDistanceList(mergedDict, direction='ra'):

    cov = []
    unit = []
    X = 4
    indexDict=mergedDict[direction]
    if direction=='ra':
        for RA_grid, starList in indexDict.items():
            if np.remainder(RA_grid, X)==0:
                for i in range(len(starList)-1):
                    for j in range(i+1, len(starList)):
                        A = starList[i]
                        B = starList[j]
                        cov.append(A.e1*B.e1 + A.e2*B.e2)
                        unit.append(int(abs(B.DEC_grid - A.DEC_grid)))
    if direction=='dec':
        for DEC_grid, starList in indexDict.items():
            if np.remainder(DEC_grid, X)==0:
                for i in range(len(starList)-1):
                    for j in range(i+1, len(starList)):
                        A = starList[i]
                        B = starList[j]
                        cov.append(A.e1*B.e1 + A.e2*B.e2)
                        unit.append(int(abs(B.RA_grid - A.RA_grid)))

    x=list(set(unit))
    mean = [0]*len(x)
    d = [[] for i in range(len(x))]
    for i in range(len(unit)):
        d[unit[i]-1].append(cov[i])

    for j in range(len(d)):
        mean[j] = np.mean(d[j])

    return x, mean
Example No. 14
def get_indices_peak(sequence, ind_max, threshold=0.8):
    """returns the indices of the peak around ind_max,
    with values down to  ``threshold * sequence[ind_max]``
    """
    thres_value = sequence[ind_max] * threshold
    # considering sequence is circular, we put ind_max to the center,
    # and find the bounds:
    lenSeq = sequence.size
    ##midSeq = lenSeq / 2
    ##indices = np.remainder(np.arange(lenSeq) - midSeq + ind_max, lenSeq)
    ##
    #newseq = sequence[]
    indSup = ind_max
    indInf = ind_max
    while sequence[indSup]>thres_value:
        indSup += 1
        indSup = np.remainder(indSup, lenSeq)
    while sequence[indInf]>thres_value:
        indInf -= 1
        indInf = np.remainder(indInf, lenSeq)
        
    indices = np.zeros(lenSeq, dtype=bool)
    if indInf < indSup:
        indices[(indInf+1):indSup] = True
    else:
        indices[:indSup] = True
        indices[(indInf+1):] = True
    
    return indices
Example No. 15
def randgen(key, image_size):
    
    from numpy import zeros, remainder, uint8
    dt = 0.001
    n = image_size
    
    xs = zeros((n + 1,))
    ys = zeros((n + 1,))
    zs = zeros((n + 1,))
    
    xs[0], ys[0], zs[0] = key[0], key[1], key[2]
    
    for i in xrange(n) :

        x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
        xs[i + 1] = xs[i] + (x_dot * dt)
        ys[i + 1] = ys[i] + (y_dot * dt)
        zs[i + 1] = zs[i] + (z_dot * dt)
        
    Xs =  remainder(abs(xs*10**14), 2).astype(uint8)
    Ys =  remainder(abs(ys*10**14), 2).astype(uint8)
    Zs =  remainder(abs(zs*10**14), 2).astype(uint8)
    
    rand_array = Xs ^ Ys ^ Zs
    
    return rand_array
Example No. 16
def largest_odd_factor(var_arr):
    """
    Function that computes the largest odd factor of each integer in an array

    Parameters
    -----------------
    var_arr: numpy array
        Array of integers whose largest odd factors need to be computed

    Returns
    ------------
    odd_d: numpy array
        Array of largest odd factors of each integer in var_arr
    """
    if var_arr.ndim == 1:
        odd_d = np.empty(np.shape(var_arr))
        odd_d[:] = np.NaN

        ind1 = np.where((np.remainder(var_arr, 2) != 0) | (var_arr == 0))[0]
        if np.size(ind1) != 0:
            odd_d[ind1] = var_arr[ind1]

        ind2 = np.where((np.remainder(var_arr, 2) == 0) & (var_arr != 0))[0]
        if np.size(ind2) != 0:
            odd_d[ind2] = largest_odd_factor(var_arr[ind2] / 2.0)
        return odd_d
    else:
        raise Exception('Wrong Input Type')
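A minimal check of the recursion, assuming numpy is imported as np; even entries are halved until an odd factor remains, while odd entries and zeros pass through unchanged:

arr = np.array([12, 7, 0, 40])
print(largest_odd_factor(arr))   # [3. 7. 0. 5.]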
Example No. 17
def compute_inp_params(lattice, sig_type):
    """
    tau and kmax necessary for possible integer quadruple combinations
    are computed

    Parameters
    ----------------
    lattice: Lattice class
        Attributes of the underlying lattice

    sig_type: {'common', 'specific'}

    Returns
    -----------
    tau: float
        tau is a rational number :math:`= \\frac{\\nu}{\\mu}`

    kmax: float
        kmax is an integer that depends on :math:`\\mu \\ , \\nu`
    """
    lat_params = lattice.lat_params
    cryst_ptgrp = proper_ptgrp(lattice.cryst_ptgrp)

    if cryst_ptgrp == 'D3':
        c_alpha = np.cos(lat_params['alpha'])
        tau = c_alpha / (1 + 2 * c_alpha)
        if sig_type == 'specific':
            [nu, mu] = int_man.rat(tau)
            rho = mu - 3 * nu
            kmax = 4 * mu * rho
        elif sig_type == 'common':
            kmax = []

    if cryst_ptgrp == 'D4':
        tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
        if sig_type == 'specific':
            [nu, mu] = int_man.rat(tau)
            kmax = 4 * mu * nu
        if sig_type == 'common':
            kmax = []

    if cryst_ptgrp == 'D6':
        tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
        if sig_type == 'specific':
            [nu, mu] = int_man.rat(tau)
            if np.remainder(nu, 2) == 0:
                if np.remainder(nu, 4) == 0:
                    kmax = 3 * mu * nu
                else:
                    kmax = 6 * mu * nu
            else:
                kmax = 12 * mu * nu
        if sig_type == 'common':
            kmax = []

    if cryst_ptgrp == 'O':
        tau = 1
        kmax = []

    return tau, kmax
Example No. 18
def idx2xyz(idx, xl, yl, zl):
    """Transform a list of indices of 1D array into coordinates of a 3D volume of certain sizes.
    
    Parameters
    ----------
    idx : np.ndarray
        1D array to be converted. An increment of ``idx``
        corresponds to an increment of x. When reaching ``xl``, x is reset and
        y is incremented by one. When reaching ``yl``, x and y are reset and z is
        incremented.
    
    xl, yl, zl : int 
        Sizes for 3D volume.
    
    Returns
    -------
    list 
        List of 3 ``np.ndarray`` objects (for x, y and z), containing coordinate value.

    """
    
    z = np.floor(idx / (xl*yl))
    r = np.remainder(idx, xl*yl)
    y = np.floor(r / xl)
    x = np.remainder(r, xl)
    return x, y, z
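A short worked example for a 4 x 3 x 2 volume (a sketch, assuming numpy is imported as np); the linear index walks along x first, then y, then z:

idx = np.array([0, 1, 12, 23])
x, y, z = idx2xyz(idx, xl=4, yl=3, zl=2)
# e.g. the last index, 23, maps to the far corner: x[-1] == 3, y[-1] == 2, z[-1] == 1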
Example No. 19
def TimestampMerge(min_node,tot,stamp_ID):
    """Merge timestamps files across nodes and write merged list to file"""
    masterlist=[]
    diff=[]
    n=min_node
    max_node = min_node + tot
    while n < max_node:
        mastertime=[]
        j=0
        while j<5:
            fname='node{0}/timestamp{1}.{2}.dat'.format(n,stamp_ID,j)
            try:
                times=man.LoadData(fname)
                year=man.IterativeStrAppend(times,0)
                month=man.IterativeStrAppend(times,1)
                day=man.IterativeStrAppend(times,2)
                hour=man.IterativeStrAppend(times,3)
                minute=man.IterativeStrAppend(times,4)

                seconds=[]
                for i in range(len(times)):
                    a=man.IterativeIntAppend(times,5)
                    b=man.IterativeFloatAppend(times,6)
                    point=a[i]+b[i]
                    if np.remainder(i,100) == 0:
                        print 'Done {0} of {1} seconds'.format(i,len(times))
                    seconds.append(point)

                time=[]
                for i in range(len(times)):
                    point="{0}-{1}-{2} {3}:{4}:{5}".format(year[i],month[i],day[i],hour[i],minute[i],seconds[i])
                    if np.remainder(i,100) == 0:
                        print 'Done {0} of {1} stamps'.format(i,len(times))
                    time.append(point)

                t=Time(time, format='iso',scale='utc')
                mjd=t.mjd
                mjd.sort()
                diff.append(mjd)
                r=Time(mjd,format='mjd',scale='utc')
                iso=r.iso
                for i in range(len(iso)):
                    point=[iso[i],j-1]
                    mastertime.append(point)
                    masterlist.append(point)
                print "Done node{0}/timestamp{1}.{2}.dat".format(n,stamp_ID,j)
                j+=1 
            except IOError:
                print "Missing node{0}/timestamp{1}.{2}.dat".format(n,stamp_ID,j)
                j+=1
                pass

        mastertime.sort()
        name='node{0}/MergedTimeStamp{1}.dat'.format(n,stamp_ID)
        man.WriteFileCols(mastertime,name)
        print 'Done master time stamp node{0}'.format(n)
        n+=1
    masterlist.sort()
    name='MasterTimeStamp{0}.dat'.format(stamp_ID)
Example No. 20
def matrix_mod_exp(n,T,I,mod):
  Accum = I
  while n:
    if n % 2:
      Accum = np.remainder(T.dot(Accum),mod)
    T = np.remainder(T.dot(T),mod)
    n //= 2  # floor division keeps n an integer (also correct under Python 3)
  return Accum
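A quick sanity check via Fibonacci numbers: powers of [[1, 1], [1, 0]] contain Fibonacci numbers, so the [0, 1] entry of the 10th power modulo 1000 should be F(10) = 55 (a sketch, assuming numpy is imported as np):

T = np.array([[1, 1], [1, 0]])
I = np.identity(2, dtype=int)
print(matrix_mod_exp(10, T, I, 1000)[0, 1])   # 55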
Example No. 21
def modrange(lb,ub,x):
  if(x-lb >0):
    denom = ub-lb
    numer = x-lb
    return lb + np.remainder(numer,denom);
  else:
    denom = ub-lb
    numer = ub - x
    return ub - np.remainder(numer,denom);
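A few quick examples of wrapping a value into the interval [lb, ub], assuming numpy is imported as np:

print(modrange(0.0, 2*np.pi, 7.0))      # ~0.7168, i.e. 7 - 2*pi
print(modrange(-180.0, 180.0, 190.0))   # -170.0
print(modrange(-180.0, 180.0, -190.0))  # 170.0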
Example No. 22
    def initialize(self, frame, target_centre, target_shape, context=2,
                   features=no_op, learn_filter=learn_mosse,
                   increment_filter=increment_mosse, response_cov=3,
                   n_perturbations=10, noise_std=0.04, l=0.01,
                   normalize=normalizenorm_vec, mask=True,
                   boundary='constant'):

        self.target_shape = target_shape
        self.learn_filter = learn_filter
        self.increment_filter = increment_filter
        self.features = features
        self.l = l
        self.normalize = normalize
        self.boundary = boundary

        # compute context shape
        self.context_shape = np.round(context * np.asarray(target_shape))
        self.context_shape[0] += (0 if np.remainder(self.context_shape[0], 2)
                                  else 1)
        self.context_shape[1] += (0 if np.remainder(self.context_shape[1], 2)
                                  else 1)

        # compute subframe size
        self.subframe_shape = self.context_shape + 8
        # compute target centre coordinates in subframe
        self.subframe_target_centre = PointCloud((
            self.subframe_shape // 2)[None])

        # extract subframe
        subframe = frame.extract_patches(target_centre,
                                         patch_size=self.subframe_shape)[0]

        # compute features
        subframe = self.features(subframe)

        # obtain targets
        targets = extract_targets(subframe, self.subframe_target_centre,
                                  self.context_shape, n_perturbations,
                                  noise_std)

        # generate gaussian response
        self.response = generate_gaussian_response(self.target_shape[-2:],
                                                   response_cov)

        if mask:
            cy = np.hanning(self.context_shape[0])
            cx = np.hanning(self.context_shape[1])
            self.cosine_mask = cy[..., None].dot(cx[None, ...])

        targets_pp = []
        for j, t in enumerate(targets):
            targets_pp.append(self._preprocess_vec(t))
        targets_pp = np.asarray(targets_pp)

        self.filter, self.num, self.den = self.learn_filter(
            targets_pp, self.response, l=self.l, boundary=self.boundary)
Example No. 23
def neighbors3d(xs,ys,zs,sigmas, tol=1e-8):
    """
    Same as neighbors, but for 3D
    """
    xdiff = np.remainder(np.subtract.outer(xs, xs)+.5, 1)-.5
    ydiff = np.remainder(np.subtract.outer(ys, ys)+.5, 1)-.5
    zdiff = np.remainder(np.subtract.outer(zs, zs)+.5, 1)-.5
    sigmadists = np.add.outer(sigmas, sigmas)/2
    dists = np.sqrt((xdiff**2) + (ydiff**2) + (zdiff**2))
    return dists - sigmadists < tol, xdiff, ydiff, zdiff
Example No. 24
def nearest_odd(N):
    """Get the nearest odd number for each value of N."""
    if isinstance(N, np.ndarray):
        y = np.floor(N)
        y[np.remainder(y, 2) == 0] = np.ceil(N[np.remainder(y, 2) == 0])
        y[np.remainder(y, 2) == 0] += 1
        return y
    if N % 2 == 0:
        return N + 1
    return N
Example No. 25
def readout(mesh, pos, mode="raise", period=None, transform=None, out=None):
    """ CIC approximation, reading out mesh values at pos,
        see document of paint. 
    """
    pos = numpy.array(pos)
    if out is None:
        out = numpy.zeros(len(pos), dtype='f8')
    else:
        out[:] = 0
    chunksize = 1024 * 16 * 4
    Ndim = pos.shape[-1]
    Np = pos.shape[0]
    if transform is None:
        transform = lambda x: x

    neighbours = ((numpy.arange(2 ** Ndim)[:, None] >> \
            numpy.arange(Ndim)[None, :]) & 1)
    for start in range(0, Np, chunksize):
        chunk = slice(start, start+chunksize)

        if mode == 'raise':
            gridpos = transform(pos[chunk])
            rmi_mode = 'raise'
            intpos = numpy.intp(numpy.floor(gridpos))
        elif mode == 'ignore':
            gridpos = transform(pos[chunk])
            rmi_mode = 'raise'
            intpos = numpy.intp(numpy.floor(gridpos))

        for i, neighbour in enumerate(neighbours):
            neighbour = neighbour[None, :]

            targetpos = intpos + neighbour

            kernel = (1.0 - numpy.abs(gridpos - targetpos)).prod(axis=-1)

            if period is not None:
                period = numpy.int32(period)
                numpy.remainder(targetpos, period, targetpos)

            if mode == 'ignore':
                # filter out those outside of the mesh
                mask = (targetpos >= 0).all(axis=-1)
                for d in range(Ndim):
                    mask &= (targetpos[..., d] < mesh.shape[d])
                targetpos = targetpos[mask]
                kernel = kernel[mask]
            else:
                mask = Ellipsis

            if len(targetpos) > 0:
                targetindex = numpy.ravel_multi_index(
                        targetpos.T, mesh.shape, mode=rmi_mode)
                out[chunk][mask] += kernel * mesh.flat[targetindex]
    return out
Example No. 26
def scan_pose_list(FOVV, resV, FOVH, resH, axis=0):
    ''' Create a list of poses that cover the angular region defined by 
    the input arguments, about the defined axis'''
    assert axis in [0,1,2], "axis must be 0, 1 or 2 (x, y or z)"
    print "scan poses based on:"
    print FOVV, resV, FOVH, resH
    assert np.remainder(np.round(FOVV/resV),2) == 0, "FOVV/resV must be an even integer"
    assert np.remainder(np.round(FOVH/resH),2) == 0, "FOVH/resH must be an even integer"
    numV = int(2*np.ceil(FOVV/resV/2)+1)
    numH = int(2*np.ceil(FOVH/resH/2)+1)
    # must be odd so we have a definitive center pixel
    if np.remainder(numV,2) != 1:
        numV = numV + 1
    if np.remainder(numH,2) != 1:
        numH = numH + 1
    numposes = numV*numH
    center = np.floor(numposes/2)
    poses_ang = np.zeros((numposes,3))
    poses_lin = np.zeros((numposes,3))
    # top left
    count = 0
    for i in np.arange(-FOVH/2,0,resH):
        horz_rot = i
        for j in np.arange(0,FOVV/2+resV,resV):
            vert_rot = j
            poses_ang[count,:] = axis_order(horz_rot, vert_rot, axis)
            count = count + 1
        vert_rot = 0
    # top right, center pixel
    for i in np.arange(0,FOVH/2+resH,resH):
        horz_rot = i
        for j in np.arange(0,FOVV/2+resV,resV):
            vert_rot = j
            poses_ang[count,:] = axis_order(horz_rot, vert_rot, axis)
            count = count + 1
        vert_rot = 0
    # bottom left
    for i in np.arange(-FOVH/2,0,resH):
        horz_rot = i
        for j in np.arange(-FOVV/2,0,resV):
            vert_rot = j
            poses_ang[count,:] = axis_order(horz_rot, vert_rot, axis)
            count = count + 1
        vert_rot = 0
    # bottom right
    for i in np.arange(0,FOVH/2+resH,resH):
        horz_rot = i
        for j in np.arange(-FOVV/2,0,resV):
            vert_rot = j
            poses_ang[count,:] = axis_order(horz_rot, vert_rot, axis)
            count = count + 1
        vert_rot = 0
    poses = np.hstack([poses_lin,poses_ang])
    return poses
Example No. 27
def is_leap_year(yr):
    if (np.remainder(yr,4) != 0):
        return False
    else:
        if (np.remainder(yr, 100) == 0):
            if (np.remainder(yr, 400) == 0):
                return True
            else:
                return False
        else:
            return True
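The century rules are easy to verify (a small check, assuming numpy is imported as np):

print([yr for yr in (1900, 2000, 2020, 2023) if is_leap_year(yr)])   # [2000, 2020]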
Example No. 28
def s2hms(secs):
  '''
  convert seconds to integer hours, minutes, and seconds

  from air_sea toolbox (sea-mat)
  '''
  sec = np.round(secs)
  hr  = np.floor(sec/3600.)
  min = np.floor(np.remainder(sec,3600)/60)
  sec = np.round(np.remainder(sec,60))
  return hr,min,sec
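For instance, 3725 seconds split into hours, minutes and seconds (assuming numpy is imported as np):

hr, mn, sec = s2hms(3725.0)
print(hr, mn, sec)   # 1.0 2.0 5.0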
Example No. 29
def odd_even_perm(N,dim):
    # generates an odd-even block row permutation matrix in dense form.
    assert np.remainder(N+1,2) == 0
    P = np.zeros([N*dim,N*dim])
    n = (N-1)/2
    for i in np.arange(N):
        if np.remainder(i,2) == 0: # even-indexed rows go up top
            P[(i/2)*dim:((i/2)+1)*dim,i*dim:(i+1)*dim] = np.identity(dim)
        else:   # odd-indexed rows go on the bottom
            P[(((i+1)/2)+n)*dim:(((i+3)/2)+n)*dim,i*dim:(i+1)*dim] = np.identity(dim)
    return np.matrix(P)
Example No. 30
def neighbors(xs,ys,sigmas, tol=1e-8):
    """
    For a set of particles at xs,ys with diameters sigmas, finds the 
    distance vector matrix (xdiff,ydiff) and the adjacency matrix.
    
    Assumes box size 1, returns (adjacency matrix, xdiff, ydiff)
    """
    xdiff = np.remainder(np.subtract.outer(xs, xs)+.5, 1)-.5
    ydiff = np.remainder(np.subtract.outer(ys, ys)+.5, 1)-.5
    sigmadists = np.add.outer(sigmas, sigmas)/2
    dists = np.sqrt((xdiff**2) + (ydiff**2))
    return dists - sigmadists < tol, xdiff, ydiff
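A small sketch on a three-particle configuration in the unit box, assuming numpy is imported as np; particles near opposite corners end up adjacent because the difference vectors are wrapped with np.remainder:

xs = np.array([0.05, 0.95, 0.50])
ys = np.array([0.05, 0.95, 0.50])
sig = np.array([0.20, 0.20, 0.20])
adj, dx, dy = neighbors(xs, ys, sig)
print(adj)   # particles 0 and 1 touch across the periodic boundary; particle 2 is isolated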
Example No. 31
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
a = np.array([5, 5, -5, -5])
b = np.array([2, -2, 2, -2])
print(a, b, sep='\n')
c = np.true_divide(a, b)
d = np.divide(a, b)
e = a / b
print(c, d, e, sep='\n')
f = np.floor_divide(a, b)
g = a // b
print(f, g, sep='\n')
h = np.ceil(a / b).astype(int)
print(h)
i = np.trunc(a / b).astype(int)
j = (a / b).astype(int)
print(i, j, sep='\n')
k = np.remainder(a, b)
l = np.mod(a, b)
m = a % b
print(k, l, m, sep='\n')
n = np.fmod(a, b)
print(n)
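For reference, the last two print calls above illustrate the sign conventions: np.remainder (and therefore np.mod and %) takes the sign of the divisor, while np.fmod takes the sign of the dividend, so the expected output is roughly:

# k, l, m: [ 1 -1  1 -1]
# n:       [ 1  1 -1 -1]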
Example No. 32
    def fit_concatenated(self, train_sequence, train_cluster_id, args):
        """Fit UISRNN model to concatenated sequence and cluster_id.

    Args:
      train_sequence: the training observation sequence, which is a
        2-dim numpy array of real numbers, of size `N * D`.

        - `N`: summation of lengths of all utterances.
        - `D`: observation dimension.

        For example,
      ```
      train_sequence =
      [[1.2 3.0 -4.1 6.0]    --> an entry of speaker #0 from utterance 'iaaa'
       [0.8 -1.1 0.4 0.5]    --> an entry of speaker #1 from utterance 'iaaa'
       [-0.2 1.0 3.8 5.7]    --> an entry of speaker #0 from utterance 'iaaa'
       [3.8 -0.1 1.5 2.3]    --> an entry of speaker #0 from utterance 'ibbb'
       [1.2 1.4 3.6 -2.7]]   --> an entry of speaker #0 from utterance 'ibbb'
      ```
        Here `N=5`, `D=4`.

        We concatenate all training utterances into this single sequence.
      train_cluster_id: the speaker id sequence, which is 1-dim list or
        numpy array of strings, of size `N`.
        For example,
      ```
      train_cluster_id =
        ['iaaa_0', 'iaaa_1', 'iaaa_0', 'ibbb_0', 'ibbb_0']
      ```
        'iaaa_0' means the entry belongs to speaker #0 in utterance 'iaaa'.

        Note that the order of entries within an utterance are preserved,
        and all utterances are simply concatenated together.
      args: Training configurations. See `arguments.py` for details.

    Raises:
      TypeError: If train_sequence or train_cluster_id is of wrong type.
      ValueError: If train_sequence or train_cluster_id has wrong dimension.
    """
        # check type
        if (not isinstance(train_sequence, np.ndarray)
                or train_sequence.dtype != float):
            raise TypeError(
                'train_sequence should be a numpy array of float type.')
        if isinstance(train_cluster_id, list):
            train_cluster_id = np.array(train_cluster_id)
        if (not isinstance(train_cluster_id, np.ndarray)
                or not train_cluster_id.dtype.name.startswith(
                    ('str', 'unicode'))):
            raise TypeError(
                'train_cluster_id should be a numpy array of strings.')
        # check dimension
        if train_sequence.ndim != 2:
            raise ValueError('train_sequence must be 2-dim array.')
        if train_cluster_id.ndim != 1:
            raise ValueError('train_cluster_id must be 1-dim array.')
        # check length and size
        train_total_length, observation_dim = train_sequence.shape
        if observation_dim != self.observation_dim:
            raise ValueError(
                'train_sequence does not match the dimension specified '
                'by args.observation_dim.')
        if train_total_length != len(train_cluster_id):
            raise ValueError('train_sequence length is not equal to '
                             'train_cluster_id length.')

        if args.batch_size < 1:
            args.batch_size = None

        self.rnn_model.train()
        optimizer = self._get_optimizer(optimizer=args.optimizer,
                                        learning_rate=args.learning_rate)

        sub_sequences, seq_lengths = utils.resize_sequence(
            sequence=train_sequence,
            cluster_id=train_cluster_id,
            num_permutations=args.num_permutations)

        # For batch learning, pack the entire dataset.
        if args.batch_size is None:
            packed_train_sequence, rnn_truth = utils.pack_sequence(
                sub_sequences, seq_lengths, args.batch_size,
                self.observation_dim, self.device, args.max_seq_len)
            batch_size = len(sub_sequences)
            print('No batch_size given, using all training data')
        train_loss = []
        for num_iter in range(args.train_iteration):
            optimizer.zero_grad()
            # For online learning, pack a subset in each iteration.
            if args.batch_size is not None:
                batch_size = args.batch_size
                packed_train_sequence, rnn_truth = utils.pack_sequence(
                    sub_sequences, seq_lengths, batch_size,
                    self.observation_dim, self.device, args.max_seq_len)

            hidden = self.rnn_init_hidden.repeat(1, batch_size, 1)
            mean, _ = self.rnn_model(packed_train_sequence, hidden)
            # use mean to predict
            mean = torch.cumsum(mean, dim=0)
            mean_size = mean.size()
            mean = torch.mm(
                torch.diag(
                    1.0 /
                    torch.arange(1, mean_size[0] + 1).float().to(self.device)),
                mean.view(mean_size[0], -1))
            mean = mean.view(mean_size)

            # Likelihood part.
            loss1 = loss_func.weighted_mse_loss(
                input_tensor=(rnn_truth != 0).float() * mean[:-1, :, :],
                target_tensor=rnn_truth,
                weight=1 / (2 * self.sigma2))

            # Sigma2 prior part.
            weight = (((rnn_truth != 0).float() * mean[:-1, :, :] -
                       rnn_truth)**2).view(-1, observation_dim)
            num_non_zero = torch.sum((weight != 0).float(), dim=0).squeeze()
            loss2 = loss_func.sigma2_prior_loss(num_non_zero, args.sigma_alpha,
                                                args.sigma_beta, self.sigma2)

            # Regularization part.
            loss3 = loss_func.regularization_loss(self.rnn_model.parameters(),
                                                  args.regularization_weight)

            loss = loss1 + loss2 + loss3
            loss.backward()
            nn.utils.clip_grad_norm_(self.rnn_model.parameters(),
                                     args.grad_max_norm)
            optimizer.step()
            # avoid numerical issues
            self.sigma2.data.clamp_(min=1e-6)

            if (np.remainder(num_iter, 10) == 0
                    or num_iter == args.train_iteration - 1):
                self.logger.print(
                    2, 'Iter: {:d}  \t'
                    'Training Loss: {:.4f}    \n'
                    '    Negative Log Likelihood: {:.4f}\t'
                    'Sigma2 Prior: {:.4f}\t'
                    'Regularization: {:.4f}'.format(num_iter, float(loss.data),
                                                    float(loss1.data),
                                                    float(loss2.data),
                                                    float(loss3.data)))
            train_loss.append(float(
                loss1.data))  # only save the likelihood part
        self.logger.print(
            1, 'Done training with {} iterations'.format(args.train_iteration))
Example No. 33
def CalcSatPos(satData, transTime):

    satPos = np.zeros(3)

    # Define constants
    mu = 3.986005e14
    # meters^3/sec^2 - WGS-84 value of the Earth's universal gravitational parameter
    omegaE = 7.2921151467e-5
    # rad/sec − WGS-84 value of the Earth's rotation rate
    piVal = 3.1415926535898
    F = -4.442807633e-10
    # Constant, [sec/(meter)^(1/2)]

    #--- Find time difference ---------------------------------------------
    dt = transTime - satData.Toc
    dt = CheckTimeWrap(dt)

    #--- Calculate clock correction ---------------------------------------
    clkCorr = satData.af2 * dt**2 + satData.af1 * dt + satData.af0 - satData.Tgd
    time = transTime - clkCorr

    ## Find satellite's position ----------------------------------------------

    # Restore semi-major axis
    a = satData.sqrtA**2

    # Time correction
    tk = time - satData.Toe
    tk = CheckTimeWrap(tk)

    # Initial mean motion
    n0 = np.sqrt(mu / np.power(a, 3))
    # Mean motion
    n = n0 + satData.deltaN

    # Mean anomaly
    M = satData.M0 + n * tk
    # Reduce mean anomaly to between 0 and 360 deg
    M = np.remainder(M + 2 * piVal, 2 * piVal)

    #Initial guess of eccentric anomaly
    E = M

    #--- Iteratively compute eccentric anomaly ----------------------------
    for ndx in range(0, 10):
        E_old = E
        E = M + satData.e * np.sin(E)
        dE = np.remainder(E - E_old, 2 * piVal)

        if (abs(dE) < 1.e-12):
            # Necessary precision is reached, exit from the loop
            break

    #Reduce eccentric anomaly to between 0 and 360 deg
    E = np.remainder(E + 2 * piVal, 2 * piVal)

    #Compute relativistic correction term
    dtr = F * satData.e * satData.sqrtA * np.sin(E)

    #Calculate the true anomaly
    nu = np.arctan2(
        np.sqrt(1 - satData.e**2) * np.sin(E),
        np.cos(E) - satData.e)

    #Compute angle phi
    phi = nu + satData.omegaSmall
    # <--------------------------------------- NOT SURE
    #Reduce phi to between 0 and 360 deg
    phi = np.remainder(phi, 2 * piVal)

    #Correct argument of latitude
    u = phi + satData.Cuc * np.cos(2 * phi) + satData.Cus * np.sin(2 * phi)
    #Correct radius
    r = a * (1 - satData.e * np.cos(E)) + satData.Crc * np.cos(
        2 * phi) + satData.Crs * np.sin(2 * phi)
    #Correct inclination
    i = satData.i0 + satData.IDOT * tk + satData.Cic * np.cos(
        2 * phi) + satData.Cis * np.sin(2 * phi)

    #Compute the angle between the ascending node and the Greenwich meridian
    Omega = satData.omegaBig + (satData.omegaDot -
                                omegaE) * tk - omegaE * satData.Toe
    #Reduce to between 0 and 360 deg
    Omega = np.remainder(Omega + 2 * piVal, 2 * piVal)

    #--- Compute satellite coordinates ------------------------------------
    satPos[0] = np.cos(u) * r * np.cos(Omega) - np.sin(u) * r * np.cos(
        i) * np.sin(Omega)
    satPos[1] = np.cos(u) * r * np.sin(Omega) + np.sin(u) * r * np.cos(
        i) * np.cos(Omega)
    satPos[2] = np.sin(u) * r * np.sin(i)

    # Include relativistic correction in clock correction --------------------
    clkCorr = satData.af2 * dt**2 + satData.af1 * dt + satData.af0 - satData.Tgd + dtr

    return (satPos, clkCorr)
Example No. 34
    def genAsGrid(self, shape=[1, 1024, 1024], start=[0, 0, 0]) -> np.ndarray:
        """
        Generates noise according to the set properties along a rectilinear 
        (evenly-spaced) grid.  

        Args:
            shape: Tuple[int]
                the shape of the output noise volume. 
            start: Tuple[int]
                the starting coordinates for generation of the grid.
                I.e. the coordinates are essentially `start: start + shape`

        Example::

            import numpy as np
            import pyfastnoisesimd as fns 
            noise = fns.Noise()
            result = noise.genAsGrid(shape=[256,256,256], start=[0,0,0])
            nextResult = noise.genAsGrid(shape=[256,256,256], start=[256,0,0])
        """
        if isinstance(shape, (int, np.integer)):
            shape = (shape, )

        # There is a minimum array size before we bother to turn on futures.
        size = np.product(shape)
        # size needs to be evenly divisible by ext.SIMD_ALIGNMENT
        if np.remainder(size, ext.SIMD_ALIGNMENT /
                        np.dtype(np.float32).itemsize) != 0.0:
            raise ValueError(
                'The size of the array (in bytes) must be evenly divisible by the SIMD vector length'
            )

        result = empty_aligned(shape)

        # Shape could be 1 or 2D, so we need to expand it with singleton
        # dimensions for the FillNoiseSet call
        if len(start) == 1:
            start = [start[0], 0, 0]
        elif len(start) == 2:
            start = [start[0], start[1], 1]
        else:
            start = list(start)
        start_zero = start[0]

        if self._num_workers <= 1 or size < _MIN_CHUNK_SIZE:
            # print('Grid single-threaded')
            if len(shape) == 1:
                shape = (*shape, 1, 1)
            elif len(shape) == 2:
                shape = (*shape, 1)
            else:
                shape = shape

            self._fns.FillNoiseSet(result, *start, *shape)
            return result

        # else run in threaded mode.
        n_chunks = np.minimum(self._num_workers, shape[0])
        # print(f'genAsGrid using {n_chunks} chunks')

        # print('Grid multi-threaded')
        workers = []
        for I, (chunk,
                consumed) in enumerate(aligned_chunks(result, n_chunks,
                                                      axis=0)):
            # print(f'{I}: Got chunk of shape {chunk.shape} with {consumed} consumed')

            if len(chunk.shape) == 1:
                chunk_shape = (*chunk.shape, 1, 1)
            elif len(chunk.shape) == 2:
                chunk_shape = (*chunk.shape, 1)
            else:
                chunk_shape = chunk.shape

            start[0] = start_zero + consumed
            # print('len start: ', len(start), ', len shape: ', len(chunk_shape))
            peon = self._asyncExecutor.submit(self._fns.FillNoiseSet, chunk,
                                              *start, *chunk_shape)
            workers.append(peon)

        for peon in workers:
            peon.result()
        # For memory management we have to tell NumPy it's ok to free the memory
        # region when it is dereferenced.
        # self._fns._OwnSplitArray(noise)
        return result
Example No. 35
file = path + "train_data.csv"
df_train = pd.read_csv(file,
                       usecols=[
                           'custid', 'platform', 'currency', 'groupid',
                           'timestamp', 'invest'
                       ])  #,nrows=50000)
days = 182

# In[ ]:

## add time of day column
mintimestamp = np.min(df_train['timestamp'])
df_train['timestamp'] = df_train['timestamp'] - mintimestamp
divide = 3600.  # 1 hour
df_train['time_hr'] = (np.rint(
    np.remainder(df_train['timestamp'], 3600 * 24) / (divide)))
# print df_train.head()

# In[ ]:

## groupby currency, time of day
grouped2 = df_train[df_train['invest'] > 2.0].groupby(['currency', 'time_hr'])
grouped10 = df_train[df_train['invest'] > 10.0].groupby(
    ['currency', 'time_hr'])
grouped = df_train.groupby(['currency', 'time_hr'])
bin_hr_currency = pd.DataFrame({
    'meanInv.curHr':
    grouped['invest'].mean().round(decimals=2),
    'rInvGT2.curHr':
    (grouped2['invest'].count() / grouped['invest'].count()).round(decimals=3),
    'rInvGT10.curHr': (grouped10['invest'].count() /
Example No. 36
def deg_modulus(x):
    return np.remainder(x, 360)
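A one-line check (assuming numpy is imported as np):

print(deg_modulus(np.array([370.0, -30.0, 720.0])))   # [ 10. 330.   0.]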
Example No. 37
winlength = tausec - 1

ts = time1

tfr = np.zeros((n_fbins, ts.shape[0]), dtype=complex)

taulens = np.min(np.c_[np.arange(signal.shape[0]),
                       signal.shape[0] - np.arange(signal.shape[0]) - 1,
                       winlength * np.ones(ts.shape)],
                 axis=1)

conj_signal = np.conj(signal)
for icol in range(0, len(time1)):
    taumax = taulens[icol]
    tau = np.arange(-taumax, taumax + 1).astype(int)
    indices = np.remainder(n_fbins + tau, n_fbins).astype(int)

    tfr[indices, icol] = signal[icol + tau] * conj_signal[icol - tau]

    if (icol <= signal.shape[0] - tausec) and (icol >= tausec + 1):

        tfr[tausec, icol] = signal[icol + tausec - 1] * np.conj(
            signal[icol - tausec - 1]
        )  #+ signal[icol - tausec-1] * conj_signal[icol + tausec-1]

tfr = np.fft.fft(tfr, axis=0)
tfr = np.real(tfr)
print(tfr.shape)
print(Y.shape)
print(X.shape)
Example No. 38
lossSumT = tf.summary.scalar("TrnLoss", lossT)

with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())

    feedDict = {orgP: trnOrg, atbP: trnAtb, maskP: trnMask, csmP: trnCsm}
    sess.run(iterator.initializer, feed_dict=feedDict)
    savedFile = saver.save(sess, sessFileName)
    print("Model meta graph saved in::%s" % savedFile)

    writer = tf.summary.FileWriter(directory, sess.graph)
    for step in tqdm(range(nSteps)):
        try:
            tmp, _, _ = sess.run([loss, update_ops, opToRun])
            totalLoss.append(tmp)
            if np.remainder(step + 1, nBatch) == 0:
                ep = ep + 1
                avgTrnLoss = np.mean(totalLoss)
                lossSum = sess.run(lossSumT, feed_dict={lossT: avgTrnLoss})
                writer.add_summary(lossSum, ep)
                totalLoss = []  # after each epoch empty the list of total loss
        except tf.errors.OutOfRangeError:
            break
    savedfile = saver.save(sess,
                           sessFileName,
                           global_step=ep,
                           write_meta_graph=True)
    writer.close()

end_time = time.time()
print('Training completed in minutes ', ((end_time - start_time) / 60))
Example No. 39
def analyze_figure_ground(datafiles,
                          stimfile,
                          retfile=None,
                          frame_adjust=None,
                          rg=None,
                          nbefore=4,
                          nafter=4):
    nbydepth = get_nbydepth(datafiles)
    #trialwise,ctrialwise,strialwise,dfof,straces = ut.gen_precise_trialwise(datafiles,frame_adjust=frame_adjust)
    trialwise, ctrialwise, strialwise, dfof, straces, dtrialwise, proc1 = ut.gen_precise_trialwise(
        datafiles,
        rg=rg,
        frame_adjust=frame_adjust,
        nbefore=nbefore,
        nafter=nafter)
    trialwise_t_offset = proc1['trialwise_t_offset']
    raw_trialwise = proc1['raw_trialwise']
    neuropil_trialwise = proc1['neuropil_trialwise']
    print(strialwise.shape)
    zstrialwise = sst.zscore(strialwise.reshape(
        (strialwise.shape[0], -1)).T).T.reshape(strialwise.shape)

    result = sio.loadmat(stimfile, squeeze_me=True)['result'][()]

    infofile = sio.loadmat(datafiles[0][:-12] + '.mat', squeeze_me=True)
    frame = infofile['info'][()]['frame'][()]
    if frame_adjust:
        print('adjusted')
        frame = frame_adjust(frame)
    if np.remainder(frame.shape[0], 2):
        print('deleted one')
        frame = frame[:-1]

    data = strialwise  #[:,:,nbefore:-nafter]

    try:
        dxdt = sio.loadmat(datafiles[1], squeeze_me=True)['dxdt']
    except:
        with h5py.File(datafiles[1], mode='r') as f:
            dxdt = f['dxdt'][:].T

    trialrun = np.zeros(frame[0::2].shape)
    for i in range(len(trialrun)):
        trialrun[i] = dxdt[frame[0::2][i]:frame[1::2][i]].mean()
    runtrial = trialrun > 100

    pval = np.zeros(strialwise.shape[0])
    for i in range(strialwise.shape[0]):
        _, pval[i] = sst.ttest_rel(strialwise[i, :, nbefore - 1],
                                   strialwise[i, :, nbefore + 1])

    stimparams = result['stimParams']

    order = ['ctrl', 'fig', 'grnd', 'iso', 'cross']
    norder = len(order)
    ori = stimparams[0]
    sz = stimparams[1]
    figContrast = stimparams[-2]
    grndContrast = stimparams[-1]

    paramdict = {}
    paramdict['ctrl'] = np.logical_and(figContrast == 0, grndContrast == 0)
    paramdict['fig'] = np.logical_and(figContrast == 1, grndContrast == 0)
    paramdict['grnd'] = np.logical_and(
        np.logical_and(figContrast == 0, grndContrast == 1), sz > 0)
    paramdict['iso'] = sz == 0
    paramdict['cross'] = np.logical_and(figContrast == 1, grndContrast == 1)

    indexlut, stimp = np.unique(stimparams, axis=1, return_inverse=True)

    angle = stimparams[0]
    size = stimparams[1]
    contrast = stimparams[4]

    ucontrast = np.unique(contrast)
    uangle = np.unique(angle)
    usize = np.unique(size)
    ncontrast = len(ucontrast)
    nangle = len(uangle)
    nsize = len(usize)

    angle180 = np.remainder(angle, 180)
    uangle180 = np.unique(angle180)
    nangle180 = len(uangle180)

    Smean = np.zeros(
        (strialwise.shape[0], norder, nangle180, strialwise.shape[2]))
    Stavg = np.zeros((strialwise.shape[0], norder, nangle180,
                      int(strialwise.shape[1] / nangle / norder)))

    Strials = {}
    Sspont = {}
    #print(runtrial.shape)
    #for i,name in enumerate(order):
    #    for j,theta in enumerate(uangle180):
    #        lkat = np.logical_and(runtrial,np.logical_and(angle180==theta,paramdict[name]))
    #        if lkat.sum()==1:
    #            print('problem')
    #        Smean[:,i,j,:] = data[:,lkat,:].mean(1)
    #        Strials[(i,j)] = data[:,lkat,nbefore:-nafter].mean(2)
    #        Sspont[(i,j)] = data[:,lkat,:nbefore].mean(2)

    lb = np.zeros((strialwise.shape[0], norder, nangle180))
    ub = np.zeros((strialwise.shape[0], norder, nangle180))

    #for i in range(norder):
    #    print(i)
    #    for j in range(nangle180):
    #        lb[:,i,j],ub[:,i,j] = ut.bootstrap(Strials[(i,j)],np.mean,axis=1,pct=(16,84))
    # mn[:,i,j,k] = np.nanmean(Strials[(i,j,k)],axis=1)

    pval_fig = np.zeros((strialwise.shape[0], nangle180))
    #for j,theta in enumerate(uangle180):
    #    print(theta)
    #    figind = int(np.where(np.array([x=='fig' for x in order]))[0])
    #    _,pval_fig[:,j] = sst.ttest_rel(Sspont[(figind,j)],Strials[(figind,j)],axis=1)
    #
    pval_grnd = np.zeros((strialwise.shape[0], nangle180))
    #for j,theta in enumerate(uangle180):
    #    print(theta)
    #    grndind = int(np.where(np.array([x=='grnd' for x in order]))[0])
    #    _,pval_grnd[:,j] = sst.ttest_rel(Sspont[(grndind,j)],Strials[(grndind,j)],axis=1)

    Savg = np.nanmean(np.nanmean(Smean[:, :, :, nbefore:-nafter], axis=-1),
                      axis=2)

    Storiavg = Stavg.mean(1)
    # _,pval = sst.ttest_ind(Storiavg[:,0,-1].T,Storiavg[:,0,0].T)

    #suppressed = np.logical_and(pval<0.05,Savg[:,0,-1]<Savg[:,0,0])
    #facilitated = np.logical_and(pval<0.05,Savg[:,0,-1]>Savg[:,0,0])
    proc = {}
    proc['Smean'] = Smean
    proc['lb'] = lb
    proc['ub'] = ub
    proc['pval_fig'] = pval_fig
    proc['pval_grnd'] = pval_grnd
    proc['trialrun'] = trialrun
    proc['strialwise'] = strialwise
    proc['dtrialwise'] = dtrialwise
    proc['trialwise'] = trialwise
    proc['dfof'] = dfof
    proc['trialwise_t_offset'] = trialwise_t_offset
    proc['raw_trialwise'] = raw_trialwise
    proc['neuropil_trialwise'] = neuropil_trialwise
    proc['order'] = order
    proc['angle'] = angle
    proc['paramdict'] = paramdict
    proc['Sspont'] = Sspont

    #return Savg,Smean,lb,ub,pval_fig,pval_grnd,trialrun
    return Savg, proc
Example No. 40
 def CMP2D_timestep(self):
     """A single timestep"""
     if np.remainder(self.t,self.pace_rate)==0:
         self.SinusRhythm()
     self.Relaxing()
     self.Conduct()
Example No. 41
 def wrapToPi(rad_list):
     xwrap = np.remainder(rad_list, 2 * np.pi)
     mask = np.abs(xwrap) > np.pi
     xwrap[mask] -= 2 * np.pi * np.sign(xwrap[mask])
     return xwrap
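A quick sketch, calling it as a plain function and assuming numpy is imported as np; angles outside (-pi, pi] are folded back by a full turn:

angles = np.array([3*np.pi/2, -3*np.pi/2, 0.1])
print(wrapToPi(angles))   # approximately [-1.5708  1.5708  0.1]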
Example No. 42
def fold(fh,
         comm,
         samplerate,
         fedge,
         fedge_at_top,
         nchan,
         nt,
         ntint,
         ngate,
         ntbin,
         ntw,
         dm,
         fref,
         phasepol,
         dedisperse='incoherent',
         do_waterfall=True,
         do_foldspec=True,
         verbose=True,
         progress_interval=100,
         rfi_filter_raw=None,
         rfi_filter_power=None,
         return_fits=False):
    """
    FFT data, fold by phase/time and make a waterfall series

    Folding is done from the position the file is currently in

    Parameters
    ----------
    fh : file handle
        handle to file holding voltage timeseries
    comm: MPI communicator or None
        will use size, rank attributes
    samplerate : Quantity
        rate at which samples were originally taken and thus double the
        band width (frequency units)
    fedge : float
        edge of the frequency band (frequency units)
    fedge_at_top: bool
        whether edge is at top (True) or bottom (False)
    nchan : int
        number of frequency channels for FFT
    nt, ntint : int
        total number nt of sets, each containing ntint samples in each file
        hence, total # of samples is nt*ntint, with each sample containing
        a single polarisation
    ngate, ntbin : int
        number of phase and time bins to use for folded spectrum
        ntbin should be an integer fraction of nt
    ntw : int
        number of time samples to combine for waterfall (does not have to be
        integer fraction of nt)
    dm : float
        dispersion measure of pulsar, used to correct for ism delay
        (column number density)
    fref: float
        reference frequency for dispersion measure
    phasepol : callable
        function that returns the pulsar phase for time in seconds relative to
        start of the file that is read.
    dedisperse : None or string (default: incoherent).
        None, 'incoherent', 'coherent', 'by-channel'.
        Note: None really does nothing
    do_waterfall, do_foldspec : bool
        whether to construct waterfall, folded spectrum (default: True)
    verbose : bool or int
        whether to give some progress information (default: True)
    progress_interval : int
        Ping every progress_interval sets
    return_fits : bool (default: False)
        return a subint fits table for rank == 0 (None otherwise)

    """
    assert dedisperse in (None, 'incoherent', 'by-channel', 'coherent')
    need_fine_channels = dedisperse in ['by-channel', 'coherent']
    assert nchan % fh.nchan == 0
    if dedisperse in ['incoherent', 'by-channel'] and fh.nchan > 1:
        oversample = nchan // fh.nchan
        assert ntint % oversample == 0
    else:
        oversample = 1

    if dedisperse == 'coherent' and fh.nchan > 1:
        raise ValueError("Cannot coherently dedisperse channelized data.")

    if comm is None:
        mpi_rank = 0
        mpi_size = 1
    else:
        mpi_rank = comm.rank
        mpi_size = comm.size

    npol = getattr(fh, 'npol', 1)
    assert npol == 1 or npol == 2
    if verbose > 1 and mpi_rank == 0:
        print("Number of polarisations={}".format(npol))

    # initialize folded spectrum and waterfall
    # TODO: use estimated number of points to set dtype
    if do_foldspec:
        foldspec = np.zeros((ntbin, nchan, ngate, npol**2), dtype=np.float32)
        icount = np.zeros((ntbin, nchan, ngate), dtype=np.int32)
    else:
        foldspec = None
        icount = None

    if do_waterfall:
        nwsize = nt * ntint // ntw // oversample
        waterfall = np.zeros((nwsize, nchan, npol**2), dtype=np.float64)
    else:
        waterfall = None

    if verbose and mpi_rank == 0:
        print('Reading from {}'.format(fh))

    nskip = fh.tell() // fh.blocksize
    if nskip > 0:
        if verbose and mpi_rank == 0:
            print('Starting {0} blocks = {1} bytes out from start.'.format(
                nskip, nskip * fh.blocksize))

    dt1 = (1. / samplerate).to(u.s)
    # need 2*nchan real-valued samples for each FFT
    if fh.telescope == 'lofar':
        dtsample = fh.dtsample
    else:
        dtsample = nchan // oversample * 2 * dt1
    tstart = dtsample * ntint * nskip

    # pre-calculate time delay due to dispersion in coarse channels
    # for channelized data, frequencies are known

    tb = -1. if fedge_at_top else +1.
    if fh.nchan == 1:
        if getattr(fh, 'data_is_complex', False):
            # for complex data, really each complex sample consists of
            # 2 real ones, so multiply dt1 by 2.
            freq = fedge + tb * fftfreq(nchan, 2. * dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * fftfreq(nchan * ntint, 2. * dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        else:  # real data
            freq = fedge + tb * rfftfreq(nchan * 2, dt1)
            if dedisperse == 'coherent':
                fcoh = fedge + tb * rfftfreq(ntint * nchan * 2, dt1)
                fcoh.shape = (-1, 1)
            elif dedisperse == 'by-channel':
                fcoh = freq + tb * fftfreq(ntint, dtsample)[:, np.newaxis]
        freq_in = freq

    else:
        # Input frequencies may not be the ones going out.
        freq_in = fh.frequencies
        if oversample == 1:
            freq = freq_in
        else:
            freq = freq_in[:, np.newaxis] + tb * fftfreq(oversample, dtsample)

        fcoh = freq_in + tb * fftfreq(ntint, dtsample)[:, np.newaxis]

    # print('fedge_at_top={0}, tb={1}'.format(fedge_at_top, tb))
    # By taking only up to nchan, we remove the top channel at the Nyquist
    # frequency for real, unchannelized data.
    ifreq = freq[:nchan].ravel().argsort()

    # pre-calculate time offsets in (input) channelized streams
    dt = dispersion_delay_constant * dm * (1. / freq_in**2 - 1. / fref**2)

    if need_fine_channels:
        # pre-calculate required turns due to dispersion.
        #
        # set frequency relative to which dispersion is coherently corrected
        if dedisperse == 'coherent':
            _fref = fref
        else:
            _fref = freq_in[np.newaxis, :]
        # (check via eq. 5.21 and following in
        # Lorimer & Kramer, Handbook of Pulsar Astronomy)
        dang = (dispersion_delay_constant * dm * fcoh *
                (1. / _fref - 1. / fcoh)**2) * u.cycle
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            dd_coh = np.exp(dang * 1j).conj().astype(np.complex64)

        # add dimension for polarisation
        dd_coh = dd_coh[..., np.newaxis]

    # Calculate the part of the whole file this node should handle.
    size_per_node = (nt - 1) // mpi_size + 1
    start_block = mpi_rank * size_per_node
    end_block = min((mpi_rank + 1) * size_per_node, nt)
    for j in range(start_block, end_block):
        if verbose and j % progress_interval == 0:
            print('#{:4d}/{:4d} is doing {:6d}/{:6d} [={:6d}/{:6d}]; '
                  'time={:18.12f}'.format(
                      mpi_rank, mpi_size, j + 1, nt, j - start_block + 1,
                      end_block - start_block,
                      (tstart +
                       dtsample * j * ntint).value))  # time since start

        # Just in case numbers were set wrong -- break if file ends;
        # better keep at least the work done.
        try:
            raw = fh.seek_record_read(int((nskip + j) * fh.blocksize),
                                      fh.blocksize)
        except (EOFError, IOError) as exc:
            print("Hit {0!r}; writing data collected.".format(exc))
            break
        if verbose >= 2:
            print("#{:4d}/{:4d} read {} items".format(mpi_rank, mpi_size,
                                                      raw.size),
                  end="")

        if npol == 2 and raw.dtype.fields is not None:
            raw = raw.view(list(raw.dtype.fields.values())[0][0])

        if fh.nchan == 1:  # raw.shape=(ntint*npol)
            raw = raw.reshape(-1, npol)
        else:  # raw.shape=(ntint, nchan*npol)
            raw = raw.reshape(-1, fh.nchan, npol)

        if dedisperse == 'incoherent' and oversample > 1:
            raw = ifft(raw, axis=1, **_fftargs).reshape(-1, nchan, npol)
            raw = fft(raw, axis=1, **_fftargs)

        if rfi_filter_raw is not None:
            raw, ok = rfi_filter_raw(raw)
            if verbose >= 2:
                print("... raw RFI (zap {0}/{1})".format(
                    np.count_nonzero(~ok), ok.size),
                      end="")

        if np.can_cast(raw.dtype, np.float32):
            vals = raw.astype(np.float32)
        else:
            assert raw.dtype.kind == 'c'
            vals = raw

        # For pre-channelized data, data are always complex,
        # and should have shape (ntint, nchan, npol).
        # For baseband data, we wish to get to the same shape for
        # incoherent or by_channel, or just to fully channelized for coherent.
        if fh.nchan == 1:
            # If we need coherent dedispersion, do FT of whole thing,
            # otherwise to output channels, mimicking pre-channelized data.
            if raw.dtype.kind == 'c':  # complex data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan
                vals = fft(vals.reshape(-1, nsamp, npol), axis=1, **_fftargs)
            else:  # real data
                nsamp = len(vals) if dedisperse == 'coherent' else nchan * 2
                vals = rfft(vals.reshape(-1, nsamp, npol), axis=1, **_rfftargs)
                # Sadly, the way data are stored depends on what FFT routine
                # one is using.  We cannot deal with scipy's.
                if vals.dtype.kind == 'f':
                    raise TypeError("Can no longer deal with scipy's format "
                                    "for storing FTs of real data.")

        if fedge_at_top:
            # take complex conjugate to ensure by-channel de-dispersion is
            # applied correctly.
            # This needs to be done for ARO data, since we are in 2nd Nyquist
            # zone; not clear it is needed for other telescopes.
            np.conj(vals, out=vals)

        # Now we coherently dedisperse, either all of it or by channel.
        if need_fine_channels:
            # for by_channel, we have vals.shape=(ntint, nchan, npol),
            # and want to FT over ntint to get fine channels;
            if vals.shape[0] > 1:
                fine = fft(vals, axis=0, **_fftargs)
            else:
                # for coherent, we just reshape:
                # (1, ntint*nchan, npol) -> (ntint*nchan, 1, npol)
                fine = vals.reshape(-1, 1, npol)

            # Dedisperse.
            fine *= dd_coh

            # Still have fine.shape=(ntint, nchan, npol),
            # w/ nchan=1 for coherent.
            if fine.shape[1] > 1 or raw.dtype.kind == 'c':
                vals = ifft(fine, axis=0, **_fftargs)
            else:
                vals = irfft(fine, axis=0, **_rfftargs)

            if fine.shape[1] == 1 and nchan > 1:
                # final FT to get requested channels
                if vals.dtype.kind == 'f':
                    vals = vals.reshape(-1, nchan * 2, npol)
                    vals = rfft(vals, axis=1, **_rfftargs)
                else:
                    vals = vals.reshape(-1, nchan, npol)
                    vals = fft(vals, axis=1, **_fftargs)
            elif dedisperse == 'by-channel' and oversample > 1:
                vals = vals.reshape(-1, oversample, fh.nchan, npol)
                vals = fft(vals, axis=1, **_fftargs)
                vals = vals.transpose(0, 2, 1, 3).reshape(-1, nchan, npol)

            # vals[time, chan, pol]
            if verbose >= 2:
                print("... dedispersed", end="")

        if npol == 1:
            power = vals.real**2 + vals.imag**2
        else:
            p0 = vals[..., 0]
            p1 = vals[..., 1]
            power = np.empty(vals.shape[:-1] + (4, ), np.float32)
            power[..., 0] = p0.real**2 + p0.imag**2
            power[..., 1] = p0.real * p1.real + p0.imag * p1.imag
            power[..., 2] = p0.imag * p1.real - p0.real * p1.imag
            power[..., 3] = p1.real**2 + p1.imag**2

        if verbose >= 2:
            print("... power", end="")

        # current sample positions and corresponding time in stream
        isr = j * (ntint // oversample) + np.arange(ntint // oversample)
        tsr = (isr * dtsample * oversample)[:, np.newaxis]

        if rfi_filter_power is not None:
            power = rfi_filter_power(power, tsr.squeeze())
            print("... power RFI", end="")

        # correct for delay if needed
        if dedisperse in ['incoherent', 'by-channel']:
            # tsample.shape=(ntint/oversample, nchan_in)
            tsr = tsr - dt

        if do_waterfall:
            # # loop over corresponding positions in waterfall
            # for iw in range(isr[0]//ntw, isr[-1]//ntw + 1):
            #     if iw < nwsize:  # add sum of corresponding samples
            #         waterfall[iw, :] += np.sum(power[isr//ntw == iw],
            #                                    axis=0)[ifreq]
            iw = np.round(
                (tsr / dtsample / oversample).to(1).value / ntw).astype(int)
            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iwk = iw[:, (0 if iw.shape[1] == 1 else kfreq // oversample)]
                iwk = np.clip(iwk, 0, nwsize - 1, out=iwk)
                iwkmin = iwk.min()
                iwkmax = iwk.max() + 1
                for ipow in range(npol**2):
                    waterfall[iwkmin:iwkmax, k,
                              ipow] += np.bincount(iwk - iwkmin,
                                                   power[:, kfreq, ipow],
                                                   iwkmax - iwkmin)
            if verbose >= 2:
                print("... waterfall", end="")

        if do_foldspec:
            ibin = (j * ntbin) // nt  # bin in the time series: 0..ntbin-1

            # times and cycles since start time of observation.
            tsample = tstart + tsr
            phase = (phasepol(tsample.to(u.s).value.ravel()).reshape(
                tsample.shape))
            # corresponding PSR phases
            iphase = np.remainder(phase * ngate, ngate).astype(int)

            for k, kfreq in enumerate(ifreq):  # sort in frequency while at it
                iph = iphase[:, (0 if iphase.shape[1] == 1 else kfreq //
                                 oversample)]
                # sum and count samples by phase bin
                for ipow in range(npol**2):
                    foldspec[ibin, k, :,
                             ipow] += np.bincount(iph, power[:, kfreq, ipow],
                                                  ngate)
                icount[ibin,
                       k, :] += np.bincount(iph, power[:, kfreq, 0] != 0.,
                                            ngate).astype(np.int32)

            if verbose >= 2:
                print("... folded", end="")

        if verbose >= 2:
            print("... done")

    #Commented out as workaround, this was causing "Referenced before assignment" errors with JB data
    #if verbose >= 2 or verbose and mpi_rank == 0:
    #    print('#{:4d}/{:4d} read {:6d} out of {:6d}'
    #          .format(mpi_rank, mpi_size, j+1, nt))

    if npol == 1:
        if do_foldspec:
            foldspec = foldspec.reshape(foldspec.shape[:-1])
        if do_waterfall:
            waterfall = waterfall.reshape(waterfall.shape[:-1])

    return foldspec, icount, waterfall
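
The core of the folding step above, reduced to a minimal self-contained sketch (the linear phase polynomial, gate count, and random power values are invented stand-ins, not pipeline values):

import numpy as np

ngate = 8
t = np.linspace(0., 1., 1000)          # seconds since start (made up)
power = np.random.rand(t.size)         # stand-in for detected power

def phasepol(ts):
    return 2.5 * ts                    # pretend 2.5 pulses per second

# Map each sample to a phase bin with np.remainder, then accumulate
# power and sample counts per bin with np.bincount.
iphase = np.remainder(phasepol(t) * ngate, ngate).astype(int)
folded = np.bincount(iphase, weights=power, minlength=ngate)
counts = np.bincount(iphase, minlength=ngate)
profile = folded / np.maximum(counts, 1)   # mean power per phase bin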
Exemplo n.º 43
    def plot_mwd(RA,
                 Dec,
                 observed_flag,
                 org=0,
                 title='Mollweide projection',
                 projection='mollweide',
                 observed_plot=0):
        ''' 
        Plots targets on the sky in a 'Mollweide' projection.
        RA, Dec are arrays of the same length.
        RA takes values in [0,360), Dec in [-90,90],
        which represent angles in degrees.
        org is the origin of the plot, 0 or a multiple of 30 degrees in [0,360).
        title is the title of the figure.
        projection is the kind of projection: 'mollweide', 'aitoff', 'hammer', 'lambert'
        '''

        x = np.remainder(RA + 360 - org, 360)  # shift RA values
        ind = x > 180
        x[ind] -= 360  # scale conversion to [-180, 180]
        x = -x  # reverse the scale: East to the left
        x_tick_labels = np.array(
            [150, 120, 90, 60, 30, 0, 330, 300, 270, 240,
             210])  #Label in degrees
        #x_tick_labels = np.array([150,140,130,120,110,100,90,80,70,60,50,40,30,20,10,0,350,340,330,320,310,300,290,280,270,260,250,240,230,220,210]) #FinerLabel in degrees

        x_tick_labels = np.remainder(x_tick_labels + 360 + org, 360)
        # x_tick_labels = np.array([150, 120, 90, 60, 30, 0, 330, 300, 270, 240, 210])/15 #Label in hours
        # x_tick_labels = np.remainder(x_tick_labels+24+org/15,24)
        x_tick_labels = [int(i) for i in x_tick_labels]
        fig = plt.figure(figsize=(15 * .8, 7 * .8))
        ax = fig.add_subplot(111, projection=projection)
        #ax.scatter(np.radians(x),np.radians(Dec),color=color,alpha=0.4,zorder=1, label='Targets')  # convert degrees to radians
        for i in range(len(x)):
            if np.array(observed_flag)[i] == 0:
                color = 'k'
            else:
                color = 'k'
                if observed_plot == 1:
                    color = 'g'  #Turn on observed targets plotting.
            ax.scatter(np.radians(x[i]),
                       np.radians(Dec[i]),
                       color=color,
                       alpha=0.4,
                       zorder=1,
                       s=25)
        ax.set_yticklabels([
            str(int(i)) + '$^\circ$'
            for i in np.round(ax.get_yticks() * 180 / np.pi)
        ],
                           fontsize=15)
        ax.title.set_fontsize(20)
        ax.set_xlabel('RA')
        ax.xaxis.label.set_fontsize(20)
        ax.set_ylabel("Dec")
        ax.yaxis.label.set_fontsize(20)
        ax.set_xticklabels([], fontsize=16)  # we add the scale on the x axis
        ax.grid(True, alpha=0.3)
        month_texts = [
            'Sep', 'Aug', 'Jul', 'Jun', 'May', 'Apr', 'Mar', 'Feb', 'Jan',
            'Dec', 'Nov', 'Oct'
        ]
        for i in range(len(month_texts)):
            ax.text(-180 * np.pi / 180 + 15 * np.pi / 180 +
                    30 * np.pi / 180 * i,
                    -35 * np.pi / 180,
                    month_texts[i],
                    ha='center',
                    va='center',
                    fontsize=14)
        for i in range(len(x_tick_labels)):
            ax.text(-150 * np.pi / 180 + 30 * np.pi / 180 * i,
                    -22.5 * np.pi / 180,
                    str(x_tick_labels[i]) + '$^\circ$',
                    ha='center',
                    va='center',
                    fontsize=15)

        #Plot monsoon season.
        monsoon_x_vertices = np.array([-150, -150, -90, -90, -150
                                       ]) * np.pi / 180
        monsoon_y_vertices = np.array([-90, 90, 90, -90, -90]) * np.pi / 180
        monsoon_polygon = Polygon(np.array(
            [[monsoon_x_vertices[i], monsoon_y_vertices[i]]
             for i in range(len(monsoon_x_vertices))]),
                                  color='r',
                                  alpha=0.15,
                                  label='Flagstaff monsoon season')
        ax.add_patch(monsoon_polygon)
        plt.show()
        return ax
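
A minimal sketch of the RA re-centering used in plot_mwd above (the origin and RA values are made-up):

import numpy as np

org = 180                                   # plot origin in degrees
RA = np.array([0., 90., 180., 270., 359.])  # example RA values (degrees)
x = np.remainder(RA + 360 - org, 360)       # shift by origin, wrap to [0, 360)
x[x > 180] -= 360                           # fold into [-180, 180]
x = -x                                      # East to the left
print(x)   # approximately [-180.   90.   -0.  -90. -179.]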
Exemplo n.º 44
def observed_sample_plots(upload=True):
    def plot_mwd(RA,
                 Dec,
                 observed_flag,
                 org=0,
                 title='Mollweide projection',
                 projection='mollweide',
                 observed_plot=0):
        ''' 
        Plots targets on the sky in a 'Mollweide' projection.
        RA, Dec are arrays of the same length.
        RA takes values in [0,360), Dec in [-90,90],
        which represent angles in degrees.
        org is the origin of the plot, 0 or a multiple of 30 degrees in [0,360).
        title is the title of the figure.
        projection is the kind of projection: 'mollweide', 'aitoff', 'hammer', 'lambert'
        '''

        x = np.remainder(RA + 360 - org, 360)  # shift RA values
        ind = x > 180
        x[ind] -= 360  # scale conversion to [-180, 180]
        x = -x  # reverse the scale: East to the left
        x_tick_labels = np.array(
            [150, 120, 90, 60, 30, 0, 330, 300, 270, 240,
             210])  #Label in degrees
        #x_tick_labels = np.array([150,140,130,120,110,100,90,80,70,60,50,40,30,20,10,0,350,340,330,320,310,300,290,280,270,260,250,240,230,220,210]) #FinerLabel in degrees

        x_tick_labels = np.remainder(x_tick_labels + 360 + org, 360)
        # x_tick_labels = np.array([150, 120, 90, 60, 30, 0, 330, 300, 270, 240, 210])/15 #Label in hours
        # x_tick_labels = np.remainder(x_tick_labels+24+org/15,24)
        x_tick_labels = [int(i) for i in x_tick_labels]
        fig = plt.figure(figsize=(15 * .8, 7 * .8))
        ax = fig.add_subplot(111, projection=projection)
        #ax.scatter(np.radians(x),np.radians(Dec),color=color,alpha=0.4,zorder=1, label='Targets')  # convert degrees to radians
        for i in range(len(x)):
            if np.array(observed_flag)[i] == 0:
                color = 'k'
            else:
                color = 'k'
                if observed_plot == 1:
                    color = 'g'  #Turn on observed targets plotting.
            ax.scatter(np.radians(x[i]),
                       np.radians(Dec[i]),
                       color=color,
                       alpha=0.4,
                       zorder=1,
                       s=25)
        ax.set_yticklabels([
            str(int(i)) + '$^\circ$'
            for i in np.round(ax.get_yticks() * 180 / np.pi)
        ],
                           fontsize=15)
        ax.title.set_fontsize(20)
        ax.set_xlabel('RA')
        ax.xaxis.label.set_fontsize(20)
        ax.set_ylabel("Dec")
        ax.yaxis.label.set_fontsize(20)
        ax.set_xticklabels([], fontsize=16)  # we add the scale on the x axis
        ax.grid(True, alpha=0.3)
        month_texts = [
            'Sep', 'Aug', 'Jul', 'Jun', 'May', 'Apr', 'Mar', 'Feb', 'Jan',
            'Dec', 'Nov', 'Oct'
        ]
        for i in range(len(month_texts)):
            ax.text(-180 * np.pi / 180 + 15 * np.pi / 180 +
                    30 * np.pi / 180 * i,
                    -35 * np.pi / 180,
                    month_texts[i],
                    ha='center',
                    va='center',
                    fontsize=14)
        for i in range(len(x_tick_labels)):
            ax.text(-150 * np.pi / 180 + 30 * np.pi / 180 * i,
                    -22.5 * np.pi / 180,
                    str(x_tick_labels[i]) + '$^\circ$',
                    ha='center',
                    va='center',
                    fontsize=15)

        #Plot monsoon season.
        monsoon_x_vertices = np.array([-150, -150, -90, -90, -150
                                       ]) * np.pi / 180
        monsoon_y_vertices = np.array([-90, 90, 90, -90, -90]) * np.pi / 180
        monsoon_polygon = Polygon(np.array(
            [[monsoon_x_vertices[i], monsoon_y_vertices[i]]
             for i in range(len(monsoon_x_vertices))]),
                                  color='r',
                                  alpha=0.15,
                                  label='Flagstaff monsoon season')
        ax.add_patch(monsoon_polygon)
        plt.show()
        return ax

    '''Plots the current sample as given in 'PINES sample.xlsx' on Google drive and uploads to the PINES website.'''
    pines_path = pines_dir_check()
    sample_path = pines_path / ('Misc/PINES Sample.xlsx')
    print('Make sure an up-to-date copy of PINES Sample.xlsx exists in {}.'.
          format(pines_path / 'Misc/'))
    print('Download from the PINES Google Drive.\n')

    df = pd.read_excel(sample_path)
    df = df.dropna(how='all')  #Remove rows that are all NaNs.

    good_locs = np.where(df['Good'] == 1)[0]  #Get only "good" targets
    ra = np.array(df['RA (deg)'][good_locs])
    dec = np.array(df['Dec (deg)'][good_locs])
    group_ids = df['Group ID'][good_locs]
    observed_flag = df['Observed?'][good_locs]
    observed_groups = np.unique(
        np.array(group_ids)[np.where(
            observed_flag != 0)[0]])  #Get the groups that have been observed.
    number_observed = len(np.array(group_ids)[np.where(observed_flag != 0)[0]])

    #Plot 1: Sky map of good targets based on group.
    print('Updating sky plot...')
    ax = plot_mwd(ra,
                  dec,
                  observed_flag,
                  org=180,
                  projection='mollweide',
                  observed_plot=1)
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = dict(zip(labels, handles))
    ax.legend(by_label.values(),
              by_label.keys(),
              loc=1,
              bbox_to_anchor=(1.1, 1.1),
              fontsize=16)
    ax.grid(alpha=0.2)

    group_id_inds = np.arange(0, max(group_ids) + 1)

    #Now loop over group_id inds, and draw boundaries around each group.
    for i in group_id_inds:
        targs_in_group = np.where(group_ids == i)[0]
        try:
            cluster_coords = np.array([[ra[i], dec[i]]
                                       for i in targs_in_group])
        except:
            pdb.set_trace()
        hull = ConvexHull(cluster_coords)
        for s in range(len(hull.simplices)):
            simplex = hull.simplices[s]
            x = np.remainder(cluster_coords[simplex, 0] + 360 - 180,
                             360)  # shift RA values
            ind = x > 180
            x[ind] -= 360  # scale conversion to [-180, 180]
            x = -x  # reverse the scale: East to the left
            if i in observed_groups:
                color = 'g'
                ax.plot(x * np.pi / 180,
                        cluster_coords[simplex, 1] * np.pi / 180,
                        color=color,
                        lw=2,
                        zorder=0,
                        alpha=0.6,
                        label='Observed')
            else:
                color = 'k'
                ax.plot(x * np.pi / 180,
                        cluster_coords[simplex, 1] * np.pi / 180,
                        color=color,
                        lw=2,
                        zorder=0,
                        alpha=0.6,
                        label='Not yet observed')

    ax.grid(alpha=0.4)
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = dict(zip(labels, handles))

    ax.legend(by_label.values(),
              by_label.keys(),
              loc=1,
              bbox_to_anchor=(0.65, 0.225))
    ax.set_title('PINES sample \n ' + str(int(max(group_ids) + 1)) +
                 ' groups, ' + str(len(good_locs)) + ' targets',
                 fontsize=20)
    plt.tight_layout()
    sky_map_output_path = pines_path / ('Misc/updated_sky_plot.png')
    plt.savefig(sky_map_output_path, dpi=300)
    plt.close()

    ntargs = len(df)
    #Now do magnitude/SpT histograms
    print('Updating target histograms...')
    mags = np.zeros(ntargs)
    observed_SpTs = []
    observed_mags = []
    SpT = []
    for i in range(ntargs):
        try:
            #mags[i] = float(df['2MASS H'][i][0:6])
            mags[i] = float(df['2MASS J'][i][0:6])
            SpT.append(df['SpT'][i])
            if df['Observed?'][i] != 0:
                observed_SpTs.append(df['SpT'][i])
                observed_mags.append(mags[i])
        except:  #Some values don't follow the normal +/- convention (they were upper limits in the Gagne sheet), so have to read them in differently.
            #mags[i] = float(df['2MASS H'][i])
            mags[i] = float(df['2MASS J'][i])
            SpT.append(df['SpT'][i])
            if df['Observed?'][i] != 0:
                observed_SpTs.append(df['SpT'][i])
                observed_mags.append(mags[i])

    mags = mags[good_locs]
    SpT = np.array(SpT)
    observed_SpTs = np.array(observed_SpTs)
    observed_mags = np.array(observed_mags)
    SpT = SpT[good_locs]

    SpT_number = np.zeros(ntargs)
    observed_SpT_numbers = []
    for i in range(ntargs):
        if df['SpT'][i][0] == 'L':
            SpT_number[i] = float(df['SpT'][i][1:])
            if df['Observed?'][i] != 0:
                observed_SpT_numbers.append(SpT_number[i])
        else:
            SpT_number[i] = 10 + float(df['SpT'][i][1:])
            if df['Observed?'][i] != 0:
                observed_SpT_numbers.append(SpT_number[i])
    SpT_number = SpT_number[good_locs]
    SpT_number = np.array(SpT_number)
    observed_SpT_numbers = np.array(observed_SpT_numbers)

    median_mag = np.median(mags)

    scale_factor = 0.5
    fig, ax = plt.subplots(nrows=2,
                           ncols=1,
                           figsize=(18 * scale_factor, 15 * scale_factor))
    bins = np.array([
        11.25, 11.75, 12.25, 12.75, 13.25, 13.75, 14.25, 14.75, 15.25, 15.75,
        16.25, 16.75
    ]) - 0.25
    ax[0].hist(mags,
               bins=bins,
               histtype='step',
               lw=3,
               ls='--',
               label='Full sample')
    ax[0].hist(observed_mags,
               bins=bins,
               histtype='bar',
               label='Observed sample',
               color='tab:blue')
    ax[0].axvline(median_mag,
                  color='r',
                  label='Median $m_J$ = {:2.1f}'.format(median_mag))
    ticks = [11, 11.5, 12, 12.5, 13, 13.5, 14, 14.5, 15, 15.5, 16, 16.5]
    ax[0].plot()
    ax[0].set_xticks(ticks)
    ax[0].set_xticklabels([str(i) for i in ticks])
    ax[0].set_xlabel('$m_J$', fontsize=20)
    ax[0].set_ylabel('Number of targets', fontsize=20)
    ax[0].tick_params(axis='both', which='major', labelsize=16)
    ax[0].legend(fontsize=16, loc='upper left')
    #ax[0].grid(alpha=0.2)

    ax[1].hist(SpT_number,
               bins=np.arange(-0.5,
                              max(SpT_number) + 0.5, 1),
               histtype='step',
               lw=3,
               color='orange',
               ls='--',
               label='Full sample')
    ax[1].hist(observed_SpT_numbers,
               bins=np.arange(-0.5,
                              max(SpT_number) + 0.5, 1),
               histtype='bar',
               lw=3,
               color='orange',
               label='Observed sample')
    ticks = np.arange(0, max(SpT_number), 1)
    ax[1].set_xticks(ticks)
    ax[1].set_xticklabels([
        'L0', 'L1', 'L2', 'L3', 'L4', 'L5', 'L6', 'L7', 'L8', 'L9', 'T0', 'T1',
        'T2', 'T3', 'T4', 'T5', 'T6', 'T7'
    ])
    ax[1].set_xlabel('Spectral Type', fontsize=20)
    ax[1].set_ylabel('Number of targets', fontsize=20)
    ax[1].tick_params(axis='both', which='major', labelsize=16)
    ax[1].legend(fontsize=16, loc='upper right')
    #ax[1].grid(alpha=0.2)

    plt.tight_layout()
    histogram_output_path = pines_path / 'Misc/target_histograms.png'
    plt.savefig(histogram_output_path, dpi=300)
    plt.close()

    #Edit the observing.html page to update the number of observed targets.
    print('Updating observing.html...')
    if not (pines_path / 'Misc/observing.html').exists():
        print('Grabbing copy of observing.html from the PINES server.')
        sftp = pines_login()
        sftp.chdir('/web')
        remote_path = '/web/observing.html'
        local_path = pines_path / ('Misc/observing.html')
        sftp.get(remote_path, local_path)
        sftp.close()

    with open(str(pines_path / ('Misc/observing.html')), 'r') as f:
        lines = f.readlines()

    edit_line_ind = np.where(
        ['To date, PINES has observed' in i for i in lines])[0][0]
    edit_line = lines[edit_line_ind]
    edit_line = edit_line.replace(
        edit_line.split('<u>')[1].split('</u>')[0], str(number_observed))
    lines[edit_line_ind] = edit_line
    with open(str(pines_path / ('Misc/observing.html')), 'w') as f:
        f.writelines(lines)

    if upload:
        sftp = pines_login()
        print('Uploading plots and observing.html to the PINES server.')
        sftp.chdir('/web/images')
        sftp.put(sky_map_output_path, '/web/images/updated_sky_plot.png')
        sftp.put(histogram_output_path, '/web/images/target_histograms.png')
        sftp.chdir('/web')
        sftp.put(pines_path / ('Misc/observing.html'), '/web/observing.html')
        print('PINES website updated!')
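
A minimal sketch of the group-outline step above: build a convex hull around a handful of (RA, Dec) points and walk its simplices to get the boundary segments (the coordinates are invented):

import numpy as np
from scipy.spatial import ConvexHull

pts = np.array([[10., 5.], [12., 7.], [11., 9.], [14., 6.], [13., 8.]])
hull = ConvexHull(pts)
for simplex in hull.simplices:
    seg = pts[simplex]            # the two endpoints of one boundary edge
    print(seg[:, 0], seg[:, 1])   # x and y arrays, as passed to ax.plot(...)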
Exemplo n.º 45
Arquivo: lmp.py Projeto: changks/tamoc
def calculate(t0, q0, q0_local, profile, p, particles, derivs, dt_max, sd_max):
    """
    Integrate the Lagrangian plume solution
    
    Compute the solution tracking along the centerline of the plume until 
    the plume reaches the water surface, reaches a neutral buoyancy level 
    within the intrusion layer, or propagates a given maximum number of
    nozzle diameters downstream.
    
    Parameters
    ----------
    t0 : float
        Initial time (s)
    q0 : ndarray
        Initial values of the state space vector
    q0_local : `bent_plume_model.LagElement`
        Object containing the numerical solution at the initial condition
    profile : `ambient.Profile` object
        The ambient CTD object used by the simulation.
    p : `ModelParams` object
        Object containing the fixed model parameters for the bent
        plume model.
    particles : list of `Particle` objects
        List of `bent_plume_model.Particle` objects containing the dispersed 
        phase local conditions and behavior.
    derivs : function handle
        Pointer to the function where the derivatives of the ODE system are
        stored.  Should be `lmp.derivs`.
    dt_max : float
        Maximum step size to use in the simulation (s).  The ODE solver 
        in `calculate` is set up with adaptive step size integration, so 
        this value determines the largest step size in the output data, but 
        not the numerical stability of the calculation.
    sd_max : float
        Maximum number of nozzle diameters to compute along the plume 
        centerline (s/D)_max.  This is the only stop criterion that is user-
        selectable.
    
    Returns
    -------
    t : ndarray
        Vector of times when the plume solution is obtained (s).
    y : ndarray
        Matrix of the plume state space solutions.  Each row corresponds to
        a time in `t`.
    
    See Also
    --------
    derivs, bent_plume_model.Model
        
    """
    # Create an integrator object:  use "vode" with "backward
    # differentiation formula" for stiff ODEs
    r = integrate.ode(derivs).set_integrator('vode',
                                             method='bdf',
                                             atol=1.e-6,
                                             rtol=1e-3,
                                             order=5,
                                             max_step=dt_max)

    # Push the initial state space to the integrator object
    r.set_initial_value(q0, t0)

    # Make a copy of the q1_local object needed to evaluate the entrainment
    q1_local = deepcopy(q0_local)
    q0_hold = deepcopy(q1_local)

    # Create vectors (using the list data type) to store the solution
    t = [t0]
    q = [q0]

    # Integrate a finite number of time steps
    k = 0
    psteps = 30.
    stop = False
    neutral_counter = 0
    top_counter = 0
    while r.successful() and not stop:

        # Print progress to the screen
        if np.remainder(np.float(k), psteps) == 0.:
            print('    Distance:  %g (m), time:  %g (s), k:  %d' % \
                (q[-1][10], t[-1], k))

        # Perform one step of the integration
        r.set_f_params(q0_local, q1_local, profile, p, particles)
        r.integrate(t[-1] + dt_max, step=True)
        q1_local.update(r.t, r.y, profile, p, particles)

        # Correct the temperature
        r = correct_temperature(r, particles)

        # Remove particle solution for particles outside the plume
        r = correct_particle_tracking(r, particles)

        # Store the results
        t.append(r.t)
        q.append(r.y)

        # Update the Lagrangian elements for the next time step
        q0_local = q0_hold
        q0_hold = deepcopy(q1_local)

        # Check if the plume has reached a maximum rise height yet
        if np.sign(q0_local.Jz) != np.sign(q1_local.Jz):
            top_counter += 1

        # Check if the plume is at neutral buoyancy in an intrusion layer
        # (e.g., after the top of the plume)
        if top_counter > 0:
            if np.sign(q0_local.rho_a - q0_local.rho) != \
                np.sign(q1_local.rho_a - q1_local.rho):
                # Update neutral buoyancy level counter
                neutral_counter += 1

        # Evaluate the stop criteria
        if neutral_counter >= 1:
            # Passed through the second neutral buoyancy level
            stop = True
        if q[-1][10] / q1_local.D > sd_max:
            # Progressed desired distance along the plume centerline
            stop = True
        if k >= 50000:
            # Stop after specified number of iterations; used to protect
            # against problems with the solution becoming stuck
            stop = True
        if q[-1][9] <= 0.:
            # Reached a location at or above the free surface
            stop = True
        if q[-1][10] == q[-2][10]:
            # Progress of motion of the plume has stopped
            stop = True

        # Update the index counter
        k += 1

    # Convert solution to numpy arrays
    t = np.array(t)
    q = np.array(q)

    # Show user the final calculated point and return the solution
    print('    Distance:  %g (m), time:  %g (s), k:  %d' % \
                (q[-1,10], t[-1], k))
    return (t, q)
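
A minimal sketch of the integration pattern used in calculate() above: a "vode"/BDF integrator stepped one adaptive step at a time until a stop criterion fires (the exponential-decay derivative below is a toy stand-in for lmp.derivs):

import numpy as np
from scipy import integrate

def derivs(t, q):
    return -0.5 * q                      # toy right-hand side

r = integrate.ode(derivs).set_integrator('vode', method='bdf',
                                         atol=1.e-6, rtol=1e-3,
                                         max_step=0.1)
r.set_initial_value(np.array([1.0]), 0.0)

t, q = [0.0], [np.array([1.0])]
while r.successful() and q[-1][0] > 0.01 and len(t) < 10000:
    r.integrate(t[-1] + 0.1, step=True)  # one adaptive step at a time
    t.append(r.t)
    q.append(r.y.copy())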
Exemplo n.º 46
def _window_smoothing(data=None,
                      maps=None,
                      b=3,
                      l=5,
                      max_iterations=1000,
                      thresh=1e-6):
    """
    The seg_smoothing and window_smoothing functions are adapted from Poulsen et al. (2018) [2].
    Originally, window_smoothing is described in Pascual-Marqui et al. (1995) [1].
    
    Implementation of the Segmentation Smoothing Algorithm, as described in
    Table II of [1]. Smoothes using the interval t-b to t+b excluding t.
    Note, that temporary allocation of labels (denoted with Lambda in [1])
    is not necessary in this implementation, and steps 3 and 6 are therefore
    left out.
  
    Reference:
    [1] - Pascual-Marqui, R. D., Michel, C. M., & Lehmann, D. (1995).
          Segmentation of brain electrical activity into microstates: model
          estimation and validation. IEEE Transactions on Biomedical
          Engineering.
    [2] - Poulsen, A. T., Pedroni, A., Langer, N., &  Hansen, L. K.
          (unpublished manuscript). Microstate EEGlab toolbox: An
          introductory guide.
    """

    ## Initialisation (step 1 to 4)
    n_chans, n_samples = data.shape
    n_states, __ = maps.shape
    const = sum(sum(data**2))

    # Step 1
    sig2_old = 0
    sig2 = float('inf')

    # Step 2
    activation = maps.dot(data)
    seg = np.argmax(np.abs(activation), axis=0)
    seg_orig = seg
    #Check to avoid the loop getting caught and switching one label back and
    # forth between iterations.
    L_old = np.zeros((3, np.size(seg)))

    # Step 4
    e = (const - sum(sum(np.multiply(maps.T[:, seg], data))**2)) / (np.dot(
        n_samples, (n_chans - 1)))

    # Defining constant for step 5b
    const_5b = (np.tile(sum(data**2),
                        (n_states, 1)) - activation**2) / (2 * e *
                                                           (n_chans - 1))

    ## Iterations (step 5 to 8)
    ind = 0
    while abs(sig2_old -
              sig2) >= (thresh * sig2) and max_iterations > ind and np.mean(
                  L_old[np.remainder(ind, 2) + 1] == seg) != 1:
        ind = ind + 1
        sig2_old = sig2
        L_old[abs(np.remainder(ind, 2) - 2)] = seg
        Nbkt_tmp = np.zeros((n_states, n_samples))

        for k in range(n_states):
            Nbkt_tmp[k, :] = (seg == k)

        #using filter to count the number of labels equal to k before (tmp1)
        #and after (tmp2) a given timepoint.
        tmp1 = lfilter([0, *np.ones(b)], 1, Nbkt_tmp, 1)
        tmp2 = lfilter([0, *np.ones(b)], 1, Nbkt_tmp[:, ::-1], 1)
        Nbkt = tmp1 + tmp2[:, ::-1]

        # Step 5b
        seg = np.argmin((const_5b - l * Nbkt), axis=0)

        # Step 7
        sig2 = (const - sum(sum(np.multiply(maps.T[:, seg], data))**2)) / (
            np.dot(n_samples, (n_chans - 1)))

    seg_smooth = seg

    # Remove the single-sample map occurrences which are a result of the smoothing
    for i in range(len(seg_smooth) - 2):
        if (seg_smooth[i] != seg_smooth[i - 1]) and (
                seg_smooth[i] != seg_smooth[i + 1]
                and seg_smooth[i + 1] == seg_smooth[i - 1]):
            seg_smooth[i] = seg_smooth[i - 1]
            seg_smooth[i + 1] = seg_smooth[i + 2]

    # Step 10 - un-checked. Only Matlab --> Python translated
    # sig2_D = const / (N*(C-1));
    # R2 = 1 - sig2/sig2_D;
    # activations = zeros(size(Z));
    # for n=1:N; activations(L(n),n) = Z(L(n),n); end # setting to zero
    # MSE = mean(mean((X-A*activations).^2));
    return seg_smooth, seg_orig
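
A minimal sketch of the windowed label count from step 5b above: for each time point, count how often each state label occurs in the b samples before and after it (excluding the point itself) by running a causal FIR filter forwards and backwards (the toy label sequence and b are made-up):

import numpy as np
from scipy.signal import lfilter

b = 2
seg = np.array([0, 0, 1, 1, 1, 0, 2, 2])        # toy label sequence
n_states = 3
indicator = np.stack([(seg == k).astype(float) for k in range(n_states)])

before = lfilter([0, *np.ones(b)], 1, indicator, axis=1)
after = lfilter([0, *np.ones(b)], 1, indicator[:, ::-1], axis=1)[:, ::-1]
window_counts = before + after       # shape (n_states, n_samples)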
Exemplo n.º 47
    lenA = qA + (kA - 1) * (i - k)
    for j in range(0, lenA):
        sumA = np.zeros((t, aa), dtype=float)
        for rr in range(0, kA):
            if j >= (i - k) * rr and j <= (i - k) * rr + qA - 1:
                sumA = sumA + Wa1[kB * rr, j - (i - k) * rr] * R_A[rr, i - k]
        Wa1[i, j] = sumA

Wbb = {}
for i in range(0, kB):
    for j in range(0, qB):
        Wbb[i, j] = Wb[i * qB + j]
Wb1 = {}
for i in range(0, k):
    for j in range(0, qB):
        Wb1[i, j] = Wbb[np.remainder(i, kB), j]

for i in range(k, n):
    lenB = qB + (kB - 1) * (i - k)
    for j in range(0, lenB):
        sumB = np.zeros((t, bb), dtype=float)
        for rr in range(0, kB):
            if j >= (i - k) * rr and j <= (i - k) * rr + qB - 1:
                sumB = sumB + Wbb[rr, j - (i - k) * rr] * R_B[rr, i - k]
        Wb1[i, j] = sumB

M1 = {}
for i in range(0, kA):
    coding_matrixa = np.zeros((qA, DeltaA), dtype=float)
    coding_matrixa[:, i * qA:(i + 1) * qA] = np.identity(qA, dtype=float)
    M1[i] = coding_matrixa
Exemplo n.º 48
def generate_system_machines(config_path,
                             num_machines,
                             magnitude='giga',
                             heterogeneity=None,
                             specrange=None,
                             system_bandwidth=1.0,
                             seed=20):
    # TODO move the necessary information from generate_graph_costs here for
    #  system configuration
    """
    :param seed:
    :param config_path:
    :param num_machines:
    :param magnitude:
    :param heterogeneity:
    :param specrange: List of ranges of FLOP/s provided by
    each separate machine (aligns with heterogeneity list),
    relative to specified magnitude
    :return:
    """
    if heterogeneity is None:
        heterogeneity = [1.0]
    random.seed(seed)
    multiplier = 1  # default is Giga flops
    max_data_rate = 10  # (10 Gigabytes/sec)
    if magnitude == 'giga':
        pass
    elif magnitude == 'tera':
        multiplier *= 10
    elif magnitude == 'peta':
        multiplier *= 10 * 10
    else:
        print('Provided magnitude', magnitude, 'is not supported')
        sys.exit()

    if sum(heterogeneity) != 1:
        sys.exit('System heterogeneity does not sum to 100%!')
    heterogeneity = np.array(heterogeneity)

    if len(specrange) != len(heterogeneity):
        sys.exit('FLOP/s range list provided does not match heterogeneity'
                 ' (List is wrong size)')

    machines = []
    for p in heterogeneity:
        tmp = num_machines * p
        machines.append(tmp)

    for m in machines:
        if 0 < np.remainder(m, 1) < 1:
            sys.exit(
                'Number of machines specified ({0}) and percentage split on'
                'system ({1}) not compatible'.format(num_machines,
                                                     heterogeneity))

    machine_categ = {}
    y = 0
    for i, m in enumerate(machines):
        # nd = int(random.uniform(10, 20))
        lwr, upr = specrange[i][0], specrange[i][1]
        # TODO rewrite this as a for loop for each machine category, add a new machine
        # machine_categ['cat{0}'.format(i)] = {}
        # Calc data transfer rates
        rnd = random.uniform(1, max_data_rate)
        rate = round((rnd - (rnd % multiplier)) / 5) * 5
        # Calculate flops
        rnd = random.uniform(lwr * multiplier, upr * multiplier)
        for x in range(int(m)):
            machine_categ['cat{0}_m{1}'.format(i, y)] = {
                'flops': math.ceil(rnd - (rnd % multiplier)),
                'rates': rate
            }
            y += 1

    system = {
        "header": {
            "time": 'false',
            "gen_specs": {
                "file": config_path,
                "seed": seed,
                "range": str(specrange),
                "heterogeneity": str(heterogeneity),
                "multiplier": multiplier
            }
        },
        'system': {
            'resources': machine_categ,
            # 'rates': data_rates
            'bandwidth': system_bandwidth
        }
    }

    with open(config_path, 'w+') as jfile:
        json.dump(system, jfile, indent=2)

    return config_path
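
A hypothetical call of generate_system_machines() above, with made-up machine counts, FLOP/s ranges, and output path (it assumes the module's own imports such as json, math, random, and numpy are available):

generate_system_machines('system_config.json',
                         num_machines=10,
                         magnitude='giga',
                         heterogeneity=[0.5, 0.5],
                         specrange=[[1, 5], [5, 10]],
                         system_bandwidth=1.0,
                         seed=20)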
Exemplo n.º 49
    def arrayReshaping(self, array, bandRemoving):
        """
        Reshapes array to desired shape.
        
        Params:
    
            - array: array to be reshaped (must be 2D)
            
            - bandRemoving: two-element list indicating how many elements one wants 
                            to remove from left and right sides of the image. Usually
                            images have black bands left and right or fat deposits that
                            are useless and can be removed (list of 2 int)

        
        Returns:
            
            - reshaped: reshaped array with desired shape (2D) (array)
        
        """

        array = array[:, bandRemoving[0]:
                      -bandRemoving[1], :]  # Removal of left and right bands

        flag1 = flag2 = 0  # Parameters to control if array has odd dimensions

        # If dimensions of original array are odd, make them even for a simpler processing

        if np.remainder(self.matrixSize[0], 2) == 1:

            # Odd desired shape

            flag1 = 1  # Remove later one row

            # Add 1 to make it even

            self.matrixSize[0] += 1

        if np.remainder(self.matrixSize[1], 2) == 1:

            # Odd desired shape

            flag2 = 1  # Remove later one column

            # Add 1 to make it even

            self.matrixSize[1] += 1

        if np.remainder(array.shape[0], 2) == 1:

            # Odd shape of input array

            # Add extra row

            array = np.concatenate(
                [array, np.zeros((1, array.shape[1], array.shape[2]))], axis=0)

        if np.remainder(array.shape[1], 2) == 1:

            # Odd shape of input array

            # Add extra column

            array = np.concatenate(
                [array, np.zeros((array.shape[0], 1, array.shape[2]))], axis=1)

        center = (np.array((array.shape)) // 2).astype(
            int)  # Central coordinate of input array

        half_shape = (np.array(
            (self.matrixSize)) // 2).astype(int)  # Half-shape of input array

        if (array.shape[0] > self.matrixSize[0]
                or array.shape[0] == self.matrixSize[0]) and (
                    (array.shape[1] > self.matrixSize[1]
                     or array.shape[1] == self.matrixSize[1])):

            # Cropping both horizontally and vertically

            reshaped = array[center[0] - half_shape[0]:center[0] +
                             half_shape[0], center[1] -
                             half_shape[1]:center[1] + half_shape[1], :]

        elif (array.shape[0] < self.matrixSize[0]) and (array.shape[1] <
                                                        self.matrixSize[1]):

            # Zero-padding both horizontally and vertically

            reshaped = self.zero_padding(
                array,
                [self.matrixSize[0], self.matrixSize[1], array.shape[2]])

        elif (array.shape[0] < self.matrixSize[0]) and (
            (array.shape[1] > self.matrixSize[1]
             or array.shape[1] == self.matrixSize[1])):

            # Zero-padding horizontally and cropping vertically

            dif = np.array(self.matrixSize[0]) - np.array(
                array.shape[0])  # Number of rows to add

            half_dif = (np.floor(dif / 2)).astype(
                int)  # Number of rows to be added up and down

            if np.remainder(dif, 2) == 1:

                # Add odd number of rows

                reshaped = np.concatenate([
                    np.zeros(
                        (half_dif, array.shape[1], array.shape[2])), array,
                    np.zeros((half_dif + 1, array.shape[1], array.shape[2]))
                ],
                                          axis=0)

            else:

                # Add even number of rows

                reshaped = np.concatenate([
                    np.zeros(
                        (half_dif, array.shape[1], array.shape[2])), array,
                    np.zeros((half_dif, array.shape[1], array.shape[2]))
                ],
                                          axis=0)

            # Crop vertically

            reshaped = reshaped[:, center[1] - half_shape[1]:center[1] +
                                half_shape[1]]

        elif (array.shape[0] > self.matrixSize[0] or array.shape[0]
              == self.matrixSize[0]) and (array.shape[1] < self.matrixSize[1]):

            # Cropping horizontally and zero-padding vertically

            dif = np.array(self.matrixSize[1]) - np.array(
                array.shape[1])  # Number of columns to add

            half_dif = (np.floor(dif / 2)).astype(
                int)  # Number of columns to be added left and right

            if np.remainder(dif, 2) == 1:

                # Add odd number of columns

                reshaped = np.concatenate([
                    np.zeros(
                        (array.shape[0], half_dif, array.shape[2])), array,
                    np.zeros((array.shape[0], half_dif + 1, array.shape[2]))
                ],
                                          axis=1)

            else:

                # Add even number of columns

                reshaped = np.concatenate([
                    np.zeros(
                        (array.shape[0], half_dif, array.shape[2])), array,
                    np.zeros((array.shape[0], half_dif, array.shape[2]))
                ],
                                          axis=1)

            # Crop horizontally

            reshaped = reshaped[center[0] - half_shape[0]:center[0] +
                                half_shape[0], :, :]

        # Remove extra row or column added if 2D array had some odd dimension

        if flag1 == 1 and flag2 == 0:  # Array has an odd number of rows, but it is now with an extra row

            reshaped = np.delete(reshaped, -1, axis=0)  # Delete extra row

        if flag1 == 0 and flag2 == 1:  # Array has an odd number of columns, but it is now with an extra column

            reshaped = np.delete(reshaped, -1, axis=1)  # Delete extra column

        return reshaped
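
A minimal sketch of the crop-or-pad idea in arrayReshaping above for a single 2D slice: center-crop when the image is larger than the target, symmetrically zero-pad when it is smaller (the helper name and even-shape assumption are illustrative):

import numpy as np

def crop_or_pad(img, target):
    out = np.zeros(target, dtype=img.dtype)
    cy, cx = np.array(img.shape) // 2
    ty, tx = np.array(target) // 2
    # crop (or take the full extent) around the centre of the input
    crop = img[max(cy - ty, 0):cy + ty, max(cx - tx, 0):cx + tx]
    oy, ox = (np.array(target) - np.array(crop.shape)) // 2
    out[oy:oy + crop.shape[0], ox:ox + crop.shape[1]] = crop
    return out

print(crop_or_pad(np.ones((6, 4)), (4, 8)).shape)   # (4, 8)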
Exemplo n.º 50
    def toImage(self):
        t = time.time()

        tWAIT = time.time()
        self._arrayreq.wait()
        tWAIT = 1000.0 * (time.time() - tWAIT)

        tAR = time.time()
        a = self._arrayreq.getResult()
        tAR = 1000.0 * (time.time() - tAR)

        assert a.ndim == 2

        if a.dtype == np.bool_:
            a = a.view(np.uint8)

        if self._normalize and self._normalize[0] < self._normalize[1]:
            nmin, nmax = self._normalize
            if nmin:
                a = a - nmin
            scale = old_div((len(self._colorTable) - 1),
                            float(nmax - nmin + 1e-35))  # 1e-35 avoids division by zero if max == min
            if scale != 1.0:
                a = a * scale
            if len(self._colorTable) <= 2**8:
                a = np.asanyarray(a, dtype=np.uint8)
            elif len(self._colorTable) <= 2**16:
                a = np.asanyarray(a, dtype=np.uint16)
            elif len(self._colorTable) <= 2**32:
                a = np.asanyarray(a, dtype=np.uint32)

        # Use vigra if possible (much faster)
        tImg = None
        if _has_vigra and hasattr(vigra.colors, 'applyColortable'):
            tImg = time.time()
            img = QImage(a.shape[1], a.shape[0], QImage.Format_ARGB32)
            if not issubclass(a.dtype.type, np.integer):
                raise NotImplementedError()
                #FIXME: maybe this should be done in a better way using an operator before the colortable request which properly handles
                #this problem
                warnings.warn(
                    "Data for colortable layers cannot be float, casting",
                    RuntimeWarning)
                a = np.asanyarray(a, dtype=np.uint32)

            # If we have a masked array with a non-trivial mask, ensure that mask is made transparent.
            _colorTable = self._colorTable
            if np.ma.is_masked(a):
                # Add transparent color at the beginning of the colortable as needed.
                if (_colorTable[0, 3] != 0):
                    # If label 0 is unused, it can be transparent. Otherwise, the transparent color must be inserted.
                    if (a.min() == 0):
                        # If it will overflow simply promote the type. Unless we have reached the max VIGRA type.
                        if (a.max() == np.iinfo(a.dtype).max):
                            a_new_dtype = np.min_scalar_type(
                                np.iinfo(a.dtype).max + 1)
                            if a_new_dtype <= np.dtype(np.uint32):
                                a = np.asanyarray(a, dtype=a_new_dtype)
                            else:
                                assert (np.iinfo(a.dtype).max >= len(_colorTable)), \
                                       "This is a very large colortable. If it is indeed needed, add a transparent" + \
                                       " color at the beginning of the colortable for displaying masked arrays."

                                # Try to wrap the max value to a smaller value of the same color.
                                a[a == np.iinfo(a.dtype).max] %= len(
                                    _colorTable)

                        # Insert space for transparent color and shift labels up.
                        _colorTable = np.insert(_colorTable, 0, 0, axis=0)
                        a[:] = a + 1
                    else:
                        # Make sure the first color is transparent.
                        _colorTable = _colorTable.copy()
                        _colorTable[0] = 0

                # Make masked values transparent.
                a = np.ma.filled(a, 0)

            if a.dtype in (np.uint64, np.int64):
                # FIXME: applyColortable() doesn't support 64-bit, so just truncate
                a = a.astype(np.uint32)

            a = vigra.taggedView(a, 'xy')
            vigra.colors.applyColortable(a, _colorTable, byte_view(img))
            tImg = 1000.0 * (time.time() - tImg)

        # Without vigra, do it the slow way
        else:
            raise NotImplementedError()
            if _has_vigra:
                # If this warning is annoying you, try this:
                # warnings.filterwarnings("once")
                warnings.warn(
                    "Using slow colortable images.  Upgrade to VIGRA > 1.9 to use faster implementation."
                )

            #make sure that a has values in range [0, colortable_length)
            a = np.remainder(a, len(self._colorTable))
            #apply colortable
            colortable = np.roll(
                np.fliplr(self._colorTable), -1,
                1)  # self._colorTable is BGRA, but array2qimage wants RGBA
            img = colortable[a]
            img = array2qimage(img)

        if self.logger.isEnabledFor(logging.DEBUG):
            tTOT = 1000.0 * (time.time() - t)
            self.logger.debug(
                "toImage (%dx%d) took %f msec. (array req: %f, wait: %f, img: %f)"
                % (img.width(), img.height(), tTOT, tAR, tWAIT, tImg))

        return img
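
A minimal sketch of the fallback colortable lookup near the end of toImage() above: label values are wrapped into [0, len(colortable)) with np.remainder and then used as indices (the 4-entry RGBA table and labels are made-up):

import numpy as np

colortable = np.array([[0, 0, 0, 255],
                       [255, 0, 0, 255],
                       [0, 255, 0, 255],
                       [0, 0, 255, 255]], dtype=np.uint8)
labels = np.array([[0, 1, 5], [7, 2, 3]])
idx = np.remainder(labels, len(colortable))
rgba = colortable[idx]     # shape (2, 3, 4): one RGBA pixel per label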
Exemplo n.º 51
    def genFromCoords(self, coords: np.ndarray) -> np.ndarray:
        """
        Generate noise from supplied coordinates, rather than a rectilinear grid.
        Useful for complicated shapes, such as tesselated surfaces.

        Args:
            coords: 3-D coords as generated by ``fns.empty_coords()`` 
                and filled with relevant values by the user.
            
        Returns: 
            noise: a shape (N,) array of the generated noise values.

        Example::

            import numpy as np
            import pyfastnoisesimd as fns 
            numCoords = 256
            coords = fns.empty_coords(3,numCoords)
            # <Set the coordinate values, it is a (3, numCoords) array
            coords[0,:] = np.linspace(-np.pi, np.pi, numCoords)
            coords[1,:] = np.linspace(-1.0, 1.0, numCoords)
            coords[2,:] = np.zeros(numCoords)
            noise = fns.Noise()
            result = noise.genFromCoords(coords)

        """

        if not isinstance(coords, np.ndarray):
            raise TypeError(
                '`coords` must be of type `np.ndarray`, not type: ',
                type(coords))
        if coords.ndim != 2:
            raise ValueError('`coords` must be a 2D array')
        shape = coords.shape
        if shape[0] != 3:
            raise ValueError('`coords.shape[0]` must equal 3')
        if not check_alignment(coords):
            raise ValueError('Memory alignment of `coords` is not valid')
        if coords.dtype != np.float32:
            raise ValueError('`coords` must be of dtype `np.float32`')
        if np.remainder(coords.shape[1], ext.SIMD_ALIGNMENT /
                        np.dtype(np.float32).itemsize) != 0.0:
            raise ValueError(
                'The number of coordinates must be evenly divisible by the SIMD vector length'
            )

        itemsize = coords.dtype.itemsize
        result = empty_aligned(shape[1])
        if self._num_workers <= 1 or shape[1] < _MIN_CHUNK_SIZE:
            self._fns.NoiseFromCoords(result, coords[0, :], coords[1, :],
                                      coords[2, :], shape[1], 0)
            return result

        n_chunks = np.minimum(self._num_workers,
                              shape[1] * itemsize / ext.SIMD_ALIGNMENT)

        workers = []
        # for I, ((result_chunk, r_offset), (coord_chunk, offset)) in enumerate(zip(
        #             aligned_chunks(result, self._num_workers, axis=0),
        #             aligned_chunks(coords, self._num_workers, axis=1))):
        for I, (result_chunk, offset) in enumerate(
                aligned_chunks(result, self._num_workers, axis=0)):

            # aligned_size = int(vect_len*np.ceil(result_chunk.size/vect_len))
            # print(f'    {I}: Got chunk of length {result_chunk.size}, AlignedSize would be: {aligned_size}')
            # print('    Offset: ', offset, ', offset error: ', offset % 8)

            # zPtr = (coords[0,:].ctypes.data + offset) % 8
            # yPtr = (coords[1,:].ctypes.data + offset) % 8
            # xPtr = (coords[2,:].ctypes.data + offset) % 8
            # print(f'    Pointer alignment: {zPtr, yPtr, xPtr}')
            # peon = self._asyncExecutor.submit(self._fns.NoiseFromCoords, result,
            #     coords[0,:], coords[1,:], coords[2,:], aligned_size, offset)
            peon = self._asyncExecutor.submit(self._fns.NoiseFromCoords,
                                              result, coords[0, :],
                                              coords[1, :], coords[2, :],
                                              result_chunk.size, offset)
            workers.append(peon)

        for peon in workers:
            peon.result()

        return result
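The divisibility check above is the part most likely to trip callers up. A hedged helper sketch follows; the SIMD vector length used here (8 float32 values, i.e. a 32-byte register) is an assumption for illustration, whereas the real value comes from ext.SIMD_ALIGNMENT:

import numpy as np

SIMD_VECTOR_LEN = 8  # assumed: 32-byte alignment / 4-byte float32

def padded_coord_count(num_coords: int) -> int:
    # round num_coords up to the next multiple of the SIMD vector length
    rem = np.remainder(num_coords, SIMD_VECTOR_LEN)
    return int(num_coords if rem == 0 else num_coords + SIMD_VECTOR_LEN - rem)

print(padded_coord_count(256))  # 256, already a multiple of 8
print(padded_coord_count(250))  # 256, rounded up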
Exemplo n.º 52
0
def validateAngleRanges(angList, startAngs, stopAngs, ccw=True):
    """
    A better way to go: find out whether an angle lies in the range
    from start to stop, either CCW or CW.

    There is, of course, an ambiguity if the start and stop angles are
    the same; we treat that as an interval spanning the full 2*pi.
    """
    # Prefer ravel over flatten because flatten never skips the copy
    angList   = np.asarray(angList).ravel()   # needs to have len
    startAngs = np.asarray(startAngs).ravel() # needs to have len
    stopAngs  = np.asarray(stopAngs).ravel()  # needs to have len

    n_ranges = len(startAngs)
    assert len(stopAngs) == n_ranges, "length of min and max angular limits must match!"

    # to avoid warnings in >=, <= later down, mark nans;
    # need these to trick output to False in the case of nan input
    nan_mask = np.isnan(angList)

    reflInRange = np.zeros(angList.shape, dtype=bool)

    # anonymous func for z-projection
    zProj = lambda x, y: np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y)

    # bin length for chunking
    binLen = np.pi / 2.

    # in plane vectors defining wedges
    x0 = np.vstack([np.cos(startAngs), np.sin(startAngs)])
    x1 = np.vstack([np.cos(stopAngs), np.sin(stopAngs)])

    # dot products
    dp = np.sum(x0 * x1, axis=0)
    if np.any(dp >= 1. - sqrt_epsf) and n_ranges > 1:
        # ambiguous case
        raise RuntimeError("Improper usage; at least one of your ranges is already 360 degrees!")
    elif dp[0] >= 1. - sqrt_epsf and n_ranges == 1:
        # trivial case!
        reflInRange = np.ones(angList.shape, dtype=bool)
        reflInRange[nan_mask] = False
    else:
        # solve for arc lengths
        # ...note: no zeros should have made it here
        a   = x0[0, :]*x1[1, :] - x0[1, :]*x1[0, :]
        b   = x0[0, :]*x1[0, :] + x0[1, :]*x1[1, :]
        phi = np.arctan2(b, a)

        arclen = 0.5*np.pi - phi          # these are clockwise
        cw_phis = arclen < 0
        arclen[cw_phis] = 2*np.pi + arclen[cw_phis]   # all positive (CW) now
        if not ccw:
            arclen= 2*np.pi - arclen

        if sum(arclen) > 2*np.pi:
            raise RuntimeWarning("Specified angle ranges sum to > 360 degrees, which is suspect...")

        # split each angular range into subranges no longer than 'binLen'
        for i in range(n_ranges):
            # number of subranges using 'binLen'
            numSubranges = int(np.ceil(arclen[i]/binLen))

            # check remainder
            binrem = np.remainder(arclen[i], binLen)
            if binrem == 0:
                finalBinLen = binLen
            else:
                finalBinLen = binrem

            # if clockwise, negate bin length
            if not ccw:
                binLen      = -binLen
                finalBinLen = -finalBinLen

            # Create sub ranges on the fly to avoid ambiguity in dot product
            # for wedges >= 180 degrees
            subRanges = np.array(\
                [startAngs[i] + binLen*j for j in range(numSubranges)] + \
                    [startAngs[i] + binLen*(numSubranges - 1) + finalBinLen])

            for k in range(numSubranges):
                zStart = zProj(angList, subRanges[k])
                zStop  = zProj(angList, subRanges[k + 1])
                if ccw:
                    zStart[nan_mask] =  999.
                    zStop[nan_mask]  = -999.
                    reflInRange = reflInRange | np.logical_and(zStart <= 0, zStop >= 0)
                else:
                    zStart[nan_mask] = -999.
                    zStop[nan_mask]  =  999.
                    reflInRange = reflInRange | np.logical_and(zStart >= 0, zStop <= 0)
    return reflInRange
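A hedged usage sketch (purely illustrative; angles are in radians, as the trigonometric calls above imply): test which angles fall inside the CCW wedge from 0 to pi/2.

import numpy as np

angles = np.radians([10.0, 80.0, 200.0, np.nan])
in_range = validateAngleRanges(angles, [0.0], [np.pi / 2], ccw=True)
# expected: [True, True, False, False]; NaN inputs are mapped to False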
Exemplo n.º 53
0
    def frft(f, a):
        """
        Fractional Fourier transform, as it appears in:
        -https://nalag.cs.kuleuven.be/research/software/FRFT/
        -https://github.com/audiolabs/frft/
        Args:
            f       : (array) Input data
            a       : (float) Alpha factor
        Returns:
            ret    : (array) Complex valued analysed data

        """
        # use the builtin complex; np.complex is deprecated/removed in recent NumPy
        ret = np.zeros_like(f, dtype=complex)
        f = f.copy().astype(complex)
        N = len(f)
        shft = np.fmod(np.arange(N) + np.fix(N / 2), N).astype(int)
        sN = np.sqrt(N)
        a = np.remainder(a, 4.0)

        # Special cases
        if a == 0.0:
            return f
        if a == 2.0:
            return np.flipud(f)
        if a == 1.0:
            ret[shft] = np.fft.fft(f[shft]) / sN
            return ret
        if a == 3.0:
            ret[shft] = np.fft.ifft(f[shft]) * sN
            return ret

        # reduce to interval 0.5 < a < 1.5
        if a > 2.0:
            a = a - 2.0
            f = np.flipud(f)
        if a > 1.5:
            a = a - 1
            f[shft] = np.fft.fft(f[shft]) / sN
        if a < 0.5:
            a = a + 1
            f[shft] = np.fft.ifft(f[shft]) * sN

        # the general case for 0.5 < a < 1.5
        alpha = a * np.pi / 2
        tana2 = np.tan(alpha / 2)
        sina = np.sin(alpha)
        f = np.hstack((np.zeros(N - 1), TimeFrequencyDecomposition.sincinterp(f), np.zeros(N - 1))).T

        # chirp premultiplication
        chrp = np.exp(-1j * np.pi / N * tana2 / 4 * np.arange(-2 * N + 2, 2 * N - 1).T ** 2)
        f = chrp * f

        # chirp convolution
        c = np.pi / N / sina / 4
        ret = fftconvolve(np.exp(1j * c * np.arange(-(4 * N - 4), 4 * N - 3).T ** 2), f)
        ret = ret[4 * N - 4:8 * N - 7] * np.sqrt(c / np.pi)

        # chirp post multiplication
        ret = chrp * ret

        # normalizing constant
        ret = np.exp(-1j * (1 - a) * np.pi / 4) * ret[N - 1:-N + 1:2]

        return ret
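A hedged sanity check of the special cases handled at the top of frft, assuming the enclosing class (referred to as TimeFrequencyDecomposition above) exposes it as a static method; this is an illustration, not the library's test suite:

import numpy as np

x = np.random.randn(64)

assert np.allclose(TimeFrequencyDecomposition.frft(x, 0.0), x)             # identity
assert np.allclose(TimeFrequencyDecomposition.frft(x, 2.0), np.flipud(x))  # time reversal

# a = 1.0 is a unitary DFT up to the index shuffle applied via shft,
# so the signal energy is preserved
X = TimeFrequencyDecomposition.frft(x, 1.0)
assert np.isclose(np.linalg.norm(X), np.linalg.norm(x))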
Exemplo n.º 54
0
    the larger nsample is, the slower the program runs;
    the smaller nsample is, the more likely a cluster ends up with zero members
'''
nsample = 10000

fig, ax = plt.subplots(3, 2)
ax[0, 0].imshow(raw_img)
ax[0, 0].set_title('original', fontsize=8)
ax[0, 0].get_xaxis().set_visible(False)
ax[0, 0].get_yaxis().set_visible(False)
for ii in range(5):
    startt = time.time()
    img = Kmedoid(raw_img, k_mesh[ii], dist_choice, nsample)
    endt = time.time()

    ax[int((ii + 1) / 2), np.remainder(ii + 1, 2)].imshow(img)
    ax[int((ii + 1) / 2),
       np.remainder(ii + 1, 2)].set_title('k=' + str(k_mesh[ii]), fontsize=8)
    ax[int((ii + 1) / 2),
       np.remainder(ii + 1, 2)].get_xaxis().set_visible(False)
    ax[int((ii + 1) / 2),
       np.remainder(ii + 1, 2)].get_yaxis().set_visible(False)

    run_time.append(endt - startt)
fig.tight_layout(pad=1.0)
fig.suptitle('Kmedoids results (' + dist_choice + ')')
fig.subplots_adjust(top=0.85)
plt.savefig('kmedoid_result.pdf', dpi=300)

print('Kmedoid with distance ' + dist_choice)
print('sub-sample size: ' + str(nsample))
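The subplot bookkeeping above maps the flat counter ii+1 onto a 3x2 grid; a small sketch showing that divmod gives the same (row, col) pair as the int()/np.remainder() combination used in the loop:

import numpy as np

for ii in range(5):
    row, col = divmod(ii + 1, 2)
    assert row == int((ii + 1) / 2)
    assert col == np.remainder(ii + 1, 2)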
Exemplo n.º 55
0
    def fit(self, train_sequence, train_cluster_id, args):
        """Fit UISRNN model.

    Args:
      train_sequence: 2-dim numpy array of real numbers, size: N * D
        - the training observation sequence.
        N - summation of lengths of all utterances
        D - observation dimension
        For example, train_sequence =
        [[1.2 3.0 -4.1 6.0]    --> an entry of speaker #0 from utterance 'iaaa'
         [0.8 -1.1 0.4 0.5]    --> an entry of speaker #1 from utterance 'iaaa'
         [-0.2 1.0 3.8 5.7]    --> an entry of speaker #0 from utterance 'iaaa'
         [3.8 -0.1 1.5 2.3]    --> an entry of speaker #0 from utterance 'ibbb'
         [1.2 1.4 3.6 -2.7]]   --> an entry of speaker #0 from utterance 'ibbb'
        Here N=5, D=4.
        We concatenate all training utterances into a single sequence.
      train_cluster_id: 1-dim list or numpy array of strings, size: N
        - the speaker id sequence.
        For example, train_cluster_id =
        ['iaaa_0', 'iaaa_1', 'iaaa_0', 'ibbb_0', 'ibbb_0']
        'iaaa_0' means the entry belongs to speaker #0 in utterance 'iaaa'.
        Note that the order of entries within an utterance are preserved,
        and all utterances are simply concatenated together.
      args: Training configurations. See arguments.py for details.

    Raises:
      TypeError: If train_sequence or train_cluster_id is of wrong type.
      ValueError: If train_sequence or train_cluster_id has wrong dimension.
    """
        # check type
        if (not isinstance(train_sequence, np.ndarray)
                or train_sequence.dtype != float):
            raise TypeError(
                'train_sequence should be a numpy array of float type.')
        if isinstance(train_cluster_id, list):
            train_cluster_id = np.array(train_cluster_id)
        if (not isinstance(train_cluster_id, np.ndarray)
                or not train_cluster_id.dtype.name.startswith('str')):
            raise TypeError(
                'train_cluster_id should be a numpy array of strings.')
        # check dimension
        if train_sequence.ndim != 2:
            raise ValueError('train_sequence must be 2-dim array.')
        if train_cluster_id.ndim != 1:
            raise ValueError('train_cluster_id must be 1-dim array.')
        # check length and size
        train_total_length, observation_dim = train_sequence.shape
        if observation_dim != self.observation_dim:
            raise ValueError(
                'train_sequence does not match the dimension specified '
                'by args.observation_dim.')
        if train_total_length != len(train_cluster_id):
            raise ValueError('train_sequence length is not equal to '
                             'train_cluster_id length.')

        self.rnn_model.train()
        optimizer = self._get_optimizer(optimizer=args.optimizer,
                                        learning_rate=args.learning_rate)

        sub_sequences, seq_lengths, transition_bias = utils.resize_sequence(
            sequence=train_sequence,
            cluster_id=train_cluster_id,
            num_permutations=args.num_permutations)
        num_clusters = len(seq_lengths)
        sorted_seq_lengths = np.sort(seq_lengths)[::-1]
        permute_index = np.argsort(seq_lengths)[::-1]
        if self.transition_bias is None:
            self.transition_bias = transition_bias
        if args.batch_size is None:
            # Packing sequences.
            rnn_input = np.zeros(
                (sorted_seq_lengths[0], num_clusters, self.observation_dim))
            for i in range(num_clusters):
                rnn_input[1:sorted_seq_lengths[i],
                          i, :] = sub_sequences[permute_index[i]]
            rnn_input = autograd.Variable(
                torch.from_numpy(rnn_input).float()).to(self.device)
            packed_train_sequence, rnn_truth = utils.pack_seq(
                rnn_input, sorted_seq_lengths)

        train_loss = []
        for t in range(args.train_iteration):
            optimizer.zero_grad()
            if args.batch_size is not None:
                mini_batch = np.sort(
                    np.random.choice(num_clusters, args.batch_size))
                mini_batch_rnn_input = np.zeros(
                    (sorted_seq_lengths[mini_batch[0]], args.batch_size,
                     self.observation_dim))
                for i in range(args.batch_size):
                    mini_batch_rnn_input[1:sorted_seq_lengths[mini_batch[i]],
                                         i, :] = sub_sequences[permute_index[
                                             mini_batch[i]]]
                mini_batch_rnn_input = autograd.Variable(
                    torch.from_numpy(mini_batch_rnn_input).float()).to(
                        self.device)
                packed_train_sequence, rnn_truth = utils.pack_seq(
                    mini_batch_rnn_input, sorted_seq_lengths[mini_batch])

            hidden = self.rnn_init_hidden.repeat(1, args.batch_size, 1)
            mean, _ = self.rnn_model(packed_train_sequence, hidden)
            # use mean to predict
            mean = torch.cumsum(mean, dim=0)
            mean_size = mean.size()
            mean = torch.mm(
                torch.diag(
                    1.0 /
                    torch.arange(1, mean_size[0] + 1).float().to(self.device)),
                mean.view(mean_size[0], -1))
            mean = mean.view(mean_size)

            # Likelihood part.
            loss1 = utils.weighted_mse_loss(
                input_tensor=(rnn_truth != 0).float() * mean[:-1, :, :],
                target_tensor=rnn_truth,
                weight=1 / (2 * self.sigma2))

            weight = (((rnn_truth != 0).float() * mean[:-1, :, :] -
                       rnn_truth)**2).view(-1, observation_dim)
            num_non_zero = torch.sum((weight != 0).float(), dim=0).squeeze()
            loss2 = ((2 * args.sigma_alpha + num_non_zero + 2) /
                     (2 * num_non_zero) * torch.log(self.sigma2)).sum() + (
                         args.sigma_beta / (self.sigma2 * num_non_zero)).sum()
            # regularization
            l2_reg = 0
            for param in self.rnn_model.parameters():
                l2_reg += torch.norm(param)
            loss3 = args.regularization_weight * l2_reg

            loss = loss1 + loss2 + loss3
            loss.backward()
            nn.utils.clip_grad_norm_(self.rnn_model.parameters(), 5.0)
            # nn.utils.clip_grad_norm_(self.sigma2, 1.0)
            optimizer.step()
            # avoid numerical issues
            self.sigma2.data.clamp_(min=1e-6)

            if np.remainder(t, 10) == 0:
                print('Iter: {:d}  \t'
                      'Training Loss: {:.4f}    \n'
                      '    Negative Log Likelihood: {:.4f}\t'
                      'Sigma2 Prior: {:.4f}\t'
                      'Regularization: {:.4f}'.format(t, float(loss.data),
                                                      float(loss1.data),
                                                      float(loss2.data),
                                                      float(loss3.data)))
            train_loss.append(float(
                loss1.data))  # only save the likelihood part
        print('Done training with {} iterations'.format(args.train_iteration))
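A hedged usage sketch built only from the docstring above; the model construction lines are commented out because they follow the public uis-rnn package's pattern and are an assumption here, not part of this snippet:

import numpy as np

train_sequence = np.array(
    [[1.2, 3.0, -4.1, 6.0],
     [0.8, -1.1, 0.4, 0.5],
     [-0.2, 1.0, 3.8, 5.7],
     [3.8, -0.1, 1.5, 2.3],
     [1.2, 1.4, 3.6, -2.7]], dtype=float)          # N=5 observations, D=4
train_cluster_id = ['iaaa_0', 'iaaa_1', 'iaaa_0', 'ibbb_0', 'ibbb_0']

# model_args.observation_dim would have to equal 4 for this data
# model = UISRNN(model_args)
# model.fit(train_sequence, train_cluster_id, training_args)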
Exemplo n.º 56
0
    def serveQueueDQN(self):
        #self.Autoencode()#Encodes Input Vector Giving Current state
        #curr_state=self.inputvector
        # self.AutoEncoderLoopDef()

        self.act_dist = stats.rv_discrete(name='act_dist',
                                          values=([1, 2, 3], self.action_prob))
        if np.remainder(self.service_vecs[0], self.retransmit_no) == 0:
            self.queue_decision = self.act_dist.rvs(size=1)
            if self.queue_decision == 2:
                self.loopElement()
            elif self.queue_decision == 3:
                self.deferElement()
        old_state = self.curr_state
        self.LoopDefState = np.array([])
        self.AutoEncoderLoopDef()
        self.curr_state = self.LoopDefState
        if self.enable_ddpg == 1:
            self.DDQNA.remember(old_state, self.ddpg_action_prob, self.reward,
                                self.curr_state, False)

            if (self.meta_loop == 0):
                self.ddpg_action_prob = self.DDQNA.get_action(self.curr_state)
                self.transmit_power = self.action_vector[self.ddpg_action_prob]
                self.powerVecsPolicy = np.append(self.powerVecsPolicy,
                                                 self.transmit_power)
            else:
                self.ddpg_action_prob = self.DDQNA.get_meta_action(
                    self.curr_state)
                self.transmit_power = self.action_vector[self.ddpg_action_prob]
                self.powerVecsPolicy = np.append(self.powerVecsPolicy,
                                                 self.transmit_power)

        self.action = 0

        self.thresholdPowerIndex(
            self.action)  #Identify servicable users and calculate reward
        self.InstRewardLoopDef(
        )  #Gets instantatneous reward for action using servicable users
        self.InstRewardSch()  #Reward for Scheduling
        if (self.meta_loop == 1):
            self.InstRewardMeta()
        self.makeElementInService(self.action)  #Include only servicable users
        self.deleteUsers(self.action)  #Delete only serviced users
        self.service_vecs[self.action] += 1

        if (self.enable_ddpg == 1):

            self.time += 1
            # self.reward=1*np.mean(self.reward_window)-self.power_beta*self.transmit_power
            self.reward = 1 * np.mean(self.reward_window)
            self.reward_window.clear()
            self.power_window.clear()
            # self.DDPGA.cumul_reward+=self.reward
            # self.reward_array=np.append(self.reward_array, self.DDPGA.cumul_reward/self.time)

            # self.DDPGA.cumul_reward+=self.reward
            # power_grad=0
            if self.time > 300:  # start experience replay after a 300-step warm-up
                # self.DDPGA.critic.tau=np.max([self.tau_ddpg/(self.time-300)**0,0])
                # self.DDPGA.actor.tau=np.max([self.tau_ddpg/(self.time-300)**0,0])
                # self.DDPGA.train()

                if (self.meta_loop == 0):
                    self.DDQNA.replay()
                    if self.time % 50 == 0:
                        self.DDQNA.update_global_weight()
                    if (self.time % self.meta_interval == 0):
                        self.meta_loop = self.enable_meta
                        print("\n Starting Meta Loop")
                else:
                    self.DDQNA.penalty_step()
                    if self.meta_reward_counter == self.metaWindow:
                        self.reward_meta = np.mean(self.reward_window_meta)
                        self.reward_window_meta.clear()
                        self.meta_reward_counter = 0
                        self.DDQNA.meta_remember(self.meta_parameter,
                                                 self.reward_meta)
                        self.DDQNA.meta_replay(
                            np.min([len(self.DDQNA.meta_memory), 50]))
                        self.meta_parameter = self.DDQNA.meta_step(
                            self.meta_parameter)
                        self.DDQNA.update_meta_actor(
                            self.DDQNA.get_meta_actor_weight(
                                self.meta_parameter))
                        self.meta_loop_counter = self.meta_loop_counter + 1
                    #print(self.ThreadName,self.meta_loop)
                    if (self.meta_loop_counter == self.meta_loop_max):
                        self.meta_loop_counter = 0
                        self.meta_loop = 0
                        print("\n Out of Meta Loop...")

                # if np.remainder(self.time,1)==0:
                # if np.abs(np.mean(self.powerVecs[-300:])-self.avg_power_constraint)>.01:
                #     power_grad=np.mean(self.powerVecs[-300:])-self.avg_power_constraint
                # else:
                #     power_grad=0

                # power_grad=np.mean(self.powerVecs[-300:])-self.avg_power_constraint
                # self.power_beta=np.clip(self.power_beta+self.eta_beta*(power_grad)**0,-self.total_good_users/self.avg_power_constraint,self.total_good_users/self.avg_power_constraint)
                # self.power_beta=self.AdamOpt.AdamOptimizer(self.power_beta,power_grad,1/(self.time-300)**0)
                # self.power_beta=np.clip(self.AdamOpt.AdamOptimizer(self.power_beta,power_grad,1/(self.time-300)**0), -self.total_good_users/self.avg_power_constraint,self.total_good_users/self.avg_power_constraint)
        self.reward_array = np.append(
            self.reward_array,
            np.mean(
                self.
                sojournTimes[-np.min([2000, self.sojournTimes.__len__()]):]))
        self.reward_array = self.reward_array[~np.isnan(self.reward_array)]

        if (np.remainder(self.service_vecs[0], self.schWindow)
                == 0) & (self.enable_sch == 1):

            self.schTime += 1
            self.reward_sch = 1 * np.mean(self.reward_window_sch)
            self.reward_window_sch.clear()
            if self.schTime >= 10:
                if (np.sum(np.abs(self.action_prob - self.state_memory[-2])) >
                        0):
                    self.state_memory.append(self.action_prob)
                    self.target_memory.append([self.reward_sch])
                    self.stop_sch_training = 0
                else:
                    self.stop_sch_training += 0
            else:
                self.state_memory.append(self.action_prob)
                self.target_memory.append([self.reward_sch])
            if self.schTime < 20:
                self.action_prob = self.starting_vector[self.schTime]
                samp_ind = np.random.randint(0, self.target_memory.__len__(),
                                             50)
                tar = (np.array(self.target_memory)[samp_ind]).reshape(
                    50, 1, 1)
                stat = (np.array(self.state_memory)[samp_ind]).reshape(
                    50, 1, 3)
                # tar=(np.array(self.target_memory)[-100:]).reshape(100,1,1)
                # stat=(np.array(self.state_memory)[-100:]).reshape(100,1,3)

                [self.DNN.train_on_batch(stat, tar) for i in range(0, 1)]
            # elif self.schTime==20:
            #     self.action_prob=np.array([1,1,1])/3
            else:  # after the 20-step warm-up, refit the DNN from replayed samples and follow its gradient
                if self.stop_sch_training < 10:
                    samp_ind = np.random.randint(0,
                                                 self.target_memory.__len__(),
                                                 50)
                    tar = (np.array(self.target_memory)[samp_ind]).reshape(
                        50, 1, 1)
                    stat = (np.array(self.state_memory)[samp_ind]).reshape(
                        50, 1, 3)
                    # tar=(np.array(self.target_memory)[-100:]).reshape(100,1,1)
                    # stat=(np.array(self.state_memory)[-100:]).reshape(100,1,3)

                    [self.DNN.train_on_batch(stat, tar) for i in range(0, 10)]
                grad = self.DNN.approx_gradient(self.action_prob)
                # decay_dnn=1/(1+0.00001*(self.schTime-19)*(np.log10(10+np.log10(10+self.schTime-19))))
                decay_dnn = 1
                self.action_prob = np.clip(
                    self.DNN.AdamOpt.AdamOptimizer(self.action_prob, grad,
                                                   decay_dnn) + .0005 *
                    (.99**(self.schTime - 0)) *
                    np.random.uniform(0, 1, size=3), 0,
                    1)  # To avoid zero gradients in the beginning
                # self.action_prob=np.clip(self.DNN.AdamOpt.AdamOptimizer(self.action_prob,grad,1/(self.schTime-9)**0)+.0005*(.99**(self.schTime-0))*np.random.uniform(0,1,size=3),0,1) # To avoid zero gradients in the beginning
                # self.action_prob=np.clip((self.action_prob-.01*grad)+.00005*(.9**(self.schTime-100))*np.random.uniform(0,1,size=3),0,1) # To avoid zero gradients in the beginning
                # self.action_prob=np.clip(self.DNN.AdamOpt.AdamOptimizer(self.action_prob,grad),0,1)
                self.action_prob = self.action_prob / np.sum(self.action_prob)
                self.actionProbVec = np.append(self.actionProbVec,
                                               self.action_prob)
        # if np.remainder(self.time,100)==0:
        if np.remainder(self.service_vecs[0], 100) == 0:
            if self.reward_array.size:
                #self.live_plotter(np.arange(0,self.reward_array.size),self.reward_array)
                print(
                    self.ThreadName, self.reward_array[-1],
                    self.meta_parameter, self.action_prob, self.transmit_power,
                    self.DDQNA.penalty_lambda, [
                        np.min(self.powerVecsPolicy[
                            -np.min([1000, self.powerVecs.__len__()]):]),
                        np.mean(self.powerVecs[
                            -np.min([1000, self.powerVecs.__len__()]):]),
                        np.max(self.powerVecsPolicy[
                            -np.min([1000, self.powerVecs.__len__()]):])
                    ],
                    np.std(self.powerVecsPolicy[
                        -np.min([1000, self.powerVecs.__len__()]):]))
Exemplo n.º 57
0
def fft_apply_pattern(signal, pattern):
    res = np.dot(signal, pattern)
    res = np.remainder(np.abs(res), [10])
    return res
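A hedged usage sketch of fft_apply_pattern: pattern is assumed to be a square matrix of +1/0/-1 coefficients with one column per output element (the values below are illustrative only):

import numpy as np

signal = np.array([1, 2, 3, 4])
pattern = np.array([[ 1,  0, -1,  0],
                    [ 0,  1,  1,  0],
                    [ 0,  0,  1,  1],
                    [ 0,  0,  0,  1]])
print(fft_apply_pattern(signal, pattern))   # |signal . column| mod 10 -> [1 2 4 7]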
Exemplo n.º 58
0
print(a)
b = np.array([10, 10, 10])
print(np.add(a, b))
print(np.subtract(a, b))
print(np.multiply(a, b))
print(np.divide(a, b))

a = np.array([10, 100, 1000])
print(np.power(a, 2))
b = np.array([1, 2, 3])
print(np.power(a, b))

a = np.array([10, 20, 30])
b = np.array([3, 5, 7])
print(np.mod(a, b))
print(np.remainder(a, b))
"""十一.统计函数"""
a = np.array([[3, 7, 5], [8, 4, 3], [2, 4, 9]])
print(a)
print(np.min(a))
print(np.amin(a, 1))
print(np.amin(a, 0))
print(np.amax(a))
print(np.amax(a, axis=0))

a = np.array([[30, 65, 70], [80, 95, 10], [50, 90, 60]])
print(np.median(a))
print(np.median(a, axis=0))
print(np.median(a, axis=1))

print(np.mean(a))
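Worth noting alongside the mod/remainder prints above: np.mod is simply an alias of np.remainder (the result takes the sign of the divisor), while np.fmod follows the sign of the dividend, like C's fmod:

import numpy as np

a = np.array([-7, 7])
b = np.array([3, -3])
print(np.remainder(a, b))   # [ 2 -2]
print(np.mod(a, b))         # [ 2 -2]
print(np.fmod(a, b))        # [-1  1]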
Exemplo n.º 59
0
    def generate_data(self):
        dt = time.time() - max(self._t0, self._t1)

        noof_syncs = self.rates * dt
        sn_fltr = (self.pq_sn > self.cur_pq_sn) & (self.pq_sn <=
                                                   self.cur_pq_sn + noof_syncs)

        noof_events = np.sum(sn_fltr)
        if noof_events == 0:
            return np.zeros(0, dtype=np.uint32)
        sn = self.pq_sn[sn_fltr]
        sp = self.pq_sp[sn_fltr]
        st = self.pq_st[sn_fltr]
        tt = self.pq_tt[sn_fltr]
        tt_rem = np.remainder(tt, T2_WRAPAROUND)
        ch = self.pq_ch[sn_fltr]
        total_time = tt[-1] - self.cur_pq_tt
        noof_overflows = int(np.floor(total_time / T2_WRAPAROUND))
        true_noof_syncs = sn[-1] - self.cur_pq_sn

        new_data_len = true_noof_syncs + noof_events + noof_overflows
        ttime = np.zeros(new_data_len, dtype=np.uint32)
        channel = np.zeros(new_data_len, dtype=np.uint32)
        special = np.zeros(new_data_len, dtype=np.uint32)
        d_idx = 0
        for i in range(len(sn)):
            # first insert overflow and sync records to account for idle time without events
            ovfls = int(np.floor((tt[i] - self.cur_pq_tt) / T2_WRAPAROUND))
            special[d_idx:d_idx + ovfls] = 1
            channel[d_idx:d_idx + ovfls] = 63
            d_idx += ovfls
            syncs = max(sn[i] - self.cur_pq_sn - 1, 0)
            special[d_idx:d_idx + syncs] = 1
            channel[d_idx:d_idx + syncs] = 0
            d_idx += syncs

            #now comes the sync corresponding to the event
            if sn[i] > self.cur_pq_sn:
                channel[d_idx] = 0
                special[d_idx] = 1
                sync_time = int(tt_rem[i]) - int(st[i])
                if sync_time > 0:
                    # this will produce some errors when the event comes directly
                    # after a T2 wraparound, i.e. the wraparound falls between
                    # the sync and the event
                    ttime[d_idx] = sync_time
                else:
                    #raise Exception('Wrong sync generated')
                    logging.error('Wrong sync generated')
                    ttime[d_idx] = 1
                d_idx += 1

            #now comes the event
            channel[d_idx] = ch[i]
            special[d_idx] = sp[i]
            ttime[d_idx] = tt_rem[i]
            d_idx += 1

            self.cur_pq_tt = tt[i]
            self.cur_pq_sn = sn[i]

        self._t1 = time.time()

        return PQ_encode(ttime, channel, special)
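A small sketch of the wraparound bookkeeping used above; the T2_WRAPAROUND period below is an assumed value purely for illustration:

import numpy as np

T2_WRAPAROUND = 2**25   # assumed period, for illustration only
tt = np.array([10, 2**25 + 5, 3 * 2**25 + 7], dtype=np.int64)

tt_rem = np.remainder(tt, T2_WRAPAROUND)                      # wrapped timetags: [10, 5, 7]
overflows = int(np.floor((tt[-1] - tt[0]) / T2_WRAPAROUND))   # overflow records needed to span the gap: 2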
Exemplo n.º 60
0
    def _update_peak_assignments(self, randseed):
        """
        Update y / r indicator variables assigning peaks->topics/subregions.

        Parameters
        ----------
        randseed : :obj:`int`
            Random seed for this iteration.
        """
        # Seed random number generator
        np.random.seed(randseed)  # pylint: disable=no-member

        # Retrieve p(x|r,y) for all subregions
        peak_probs = self._get_peak_probs(self)

        # Iterate over all peaks x, and sample a new y and r assignment for each
        for i_ptoken in range(len(self.data["ptoken_doc_idx"])):
            doc = self.data["ptoken_doc_idx"][i_ptoken]
            topic = self.topics["peak_topic_idx"][i_ptoken]
            region = self.topics["peak_region_idx"][i_ptoken]

            # Decrement count in Subregion x Topic count matrix
            self.topics["n_peak_tokens_region_by_topic"][region, topic] -= 1

            # Decrement count in Document x Topic count matrix
            self.topics["n_peak_tokens_doc_by_topic"][doc, topic] -= 1

            # Retrieve the probability of generating current x from all
            # subregions: [R x T] array of probs
            p_x_subregions = (peak_probs[i_ptoken, :, :]).transpose()

            # Compute the probabilities of all subregions given doc
            #     p(r|d) ~ p(r|t) * p(t|d)
            # Counts of subregions per topic + prior: p(r|t)
            p_region_g_topic = self.topics[
                "n_peak_tokens_region_by_topic"] + self.params["delta"]

            # Normalize the columns such that each topic's distribution over
            # subregions sums to 1
            p_region_g_topic = p_region_g_topic / np.sum(p_region_g_topic,
                                                         axis=0)

            # Counts of topics per document + prior: p(t|d)
            p_topic_g_doc = (
                self.topics["n_peak_tokens_doc_by_topic"][doc, :] +
                self.params["alpha"])

            # Reshape from (ntopics,) to (nregions, ntopics) with duplicated rows
            p_topic_g_doc = np.array([p_topic_g_doc] *
                                     self.params["n_regions"])

            # Compute p(subregion | document): p(r|d) ~ p(r|t) * p(t|d)
            # [R x T] array of probs
            p_region_g_doc = p_topic_g_doc * p_region_g_topic

            # Compute the multinomial probability: p(z|y)
            # Need the current vector of all z and y assignments for current doc
            # The multinomial from which z is sampled is proportional to number
            # of y assigned to each topic, plus constant \gamma
            doc_y_counts = self.topics["n_peak_tokens_doc_by_topic"][
                doc, :] + self.params["gamma"]
            doc_z_counts = self.topics["n_word_tokens_doc_by_topic"][doc, :]
            p_peak_g_topic = self._compute_prop_multinomial_from_zy_vectors(
                doc_z_counts, doc_y_counts)

            # Reshape from (ntopics,) to (nregions, ntopics) with duplicated rows
            p_peak_g_topic = np.array([p_peak_g_topic] *
                                      self.params["n_regions"])

            # Get the full sampling distribution:
            # [R x T] array containing the proportional probability of all y/r combinations
            probs_pdf = p_x_subregions * p_region_g_doc * p_peak_g_topic

            # Convert from a [R x T] matrix into a [R*T x 1] array we can sample from
            probs_pdf = probs_pdf.transpose().ravel()

            # Normalize the sampling distribution
            probs_pdf = probs_pdf / np.sum(probs_pdf)

            # Sample a single element (corresponding to a y_i and c_i assignment
            # for the peak token) from the sampling distribution
            # Returns a [1 x R*T] vector with a '1' in location that was sampled
            vec = np.random.multinomial(1, probs_pdf)  # pylint: disable=no-member
            sample_idx = np.where(vec)[0][
                0]  # Extract linear index value from vector

            # Transform the linear index of the sampled element into the
            # subregion/topic (r/y) assignment indices
            # Subregion sampled (r)
            region = np.remainder(sample_idx, self.params["n_regions"])  # pylint: disable=no-member
            topic = int(np.floor(
                sample_idx / self.params["n_regions"]))  # Topic sampled (y)

            # Update the indices and the count matrices using the sampled y/r assignments
            # Increment count in Subregion x Topic count matrix
            self.topics["n_peak_tokens_region_by_topic"][region, topic] += 1
            # Increment count in Document x Topic count matrix
            self.topics["n_peak_tokens_doc_by_topic"][doc, topic] += 1
            self.topics["peak_topic_idx"][
                i_ptoken] = topic  # Update y->topic assignment
            self.topics["peak_region_idx"][
                i_ptoken] = region  # Update y->subregion assignment
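A small sketch of the index unflattening at the end of the loop: because probs_pdf is transposed before ravel(), the linear index runs over subregions fastest, so divmod by n_regions recovers the same (topic, region) pair as the remainder/floor combination above:

import numpy as np

n_regions = 3
sample_idx = 7

topic, region = divmod(sample_idx, n_regions)
assert region == np.remainder(sample_idx, n_regions)
assert topic == int(np.floor(sample_idx / n_regions))
# sample_idx = 7 with 3 subregions -> region 1, topic 2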