Example #1
 def assertDataAlmostEqual(self, data, reference_filename, **kwargs):
     reference_path = self.get_result_path(reference_filename)
     if self._check_reference_file(reference_path):
         kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
         with open(reference_path, 'r') as reference_file:
             stats = json.load(reference_file)
             self.assertEqual(stats.get('shape', []), list(data.shape))
             self.assertEqual(stats.get('masked', False),
                              ma.is_masked(data))
             nstats = np.array((stats.get('mean', 0.), stats.get('std', 0.),
                                stats.get('max', 0.), stats.get('min', 0.)),
                               dtype=np.float_)
             if math.isnan(stats.get('mean', 0.)):
                 self.assertTrue(math.isnan(data.mean()))
             else:
                 data_stats = np.array((data.mean(), data.std(),
                                        data.max(), data.min()),
                                       dtype=np.float_)
                 self.assertArrayAllClose(nstats, data_stats, **kwargs)
     else:
         self._ensure_folder(reference_path)
         stats = collections.OrderedDict([
             ('std', np.float_(data.std())),
             ('min', np.float_(data.min())),
             ('max', np.float_(data.max())),
             ('shape', data.shape),
             ('masked', ma.is_masked(data)),
             ('mean', np.float_(data.mean()))])
         with open(reference_path, 'w') as reference_file:
             reference_file.write(json.dumps(stats))
Example #2
def SmartGen(device_id, k, device_user, generate_device_user_likelihood):
	# NOTE: AllCookie and HandleCookie are assumed to be module-level dicts;
	# the k parameter is immediately shadowed by the search loop below.
	user_candidate = []
	users = device_user.get(device_id)
	# The original bound likelihood and estimation_list to the *same* list
	# object; they must be distinct lists for the algorithm to work.
	likelihood = []
	estimation_list = []
	for user in users:
		likelihood.append(generate_device_user_likelihood.get(user))
	# indices of users sorted by descending likelihood (the original used
	# `~np.argsort(...)`, which bit-inverts the indices instead of reversing)
	order = np.argsort(likelihood)[::-1]
	for k in range(1, len(users)):
		# reset the accumulators for each candidate size k
		candidates_n = candidates_l = estimation = 0.0
		for i in order[:k]:
			user = users[i]
			user_candidate.append(user)
			candidates_n += len(AllCookie.get(user))
			candidates_l += generate_device_user_likelihood.get(user)
		for i in order[:k]:
			user = users[i]
			precision = np.float_(len(AllCookie.get(user))) / np.float_(candidates_n)
			estimation += (generate_device_user_likelihood.get(user) / candidates_l) * ((1.25*precision) / (0.25*precision + 1.0))
		estimation_list.append(estimation)
	# best k is the one with the largest estimation (list index 0 is k=1)
	k_final = int(np.argmax(estimation_list)) + 1
	cookie_final = []
	for i in order[:k_final]:
		user = users[i]
		for cookie in HandleCookie.get(user):
			cookie_final.append(cookie)
	return cookie_final
Example #3
 def matrix_mul(X1, X2, shard_size=5000):
   """ Calculate matrix multiplication for big matrix,
   X1 and X2 are sliced into pieces with shard_size rows(columns)
   then multiplied together and concatenated to the proper size
   """
   X1 = np.float_(X1)
   X2 = np.float_(X2)
   X1_shape = X1.shape
   X2_shape = X2.shape
   assert X1_shape[1] == X2_shape[0]
   X1_iter = X1_shape[0] // shard_size + 1
   X2_iter = X2_shape[1] // shard_size + 1
   all_result = np.zeros((1,))
   for X1_id in range(X1_iter):
     result = np.zeros((1,))
     for X2_id in range(X2_iter):
       partial_result = np.matmul(
           X1[X1_id * shard_size:min((X1_id + 1) *
                                     shard_size, X1_shape[0]), :],
           X2[:, X2_id * shard_size:min((X2_id + 1) *
                                        shard_size, X2_shape[1])])
       # calculate the matrix product of the current pair of slices
       if result.size == 1:
         result = partial_result
       else:
         result = np.concatenate((result, partial_result), axis=1)
       # concatenate the slices together
       del partial_result
     if all_result.size == 1:
       all_result = result
     else:
       all_result = np.concatenate((all_result, result), axis=0)
     del result
   return all_result
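A quick way to sanity-check the sharding logic is to compare against np.matmul directly; a minimal sketch, assuming the function above is defined at module level, with shard_size shrunk so several shards are actually exercised:

import numpy as np

X1 = np.random.rand(12, 7)
X2 = np.random.rand(7, 9)
# shard_size=5 forces both shard loops to run more than once
assert np.allclose(matrix_mul(X1, X2, shard_size=5), np.matmul(X1, X2))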
Example #4
    def __init__(self, eps_par=numpy.float_(0.),
        mask_var=numpy.float_(1),xlabel=None,ylabel=None):
        super(MyEstimator, self).__init__()

        # self.params=dict()
        # self.params['']=
        # self.params['eps_par']=eps_par
        # self.params['mask_var']=mask_var

        #these are the parameters
        self.eps_par=eps_par
        self.mask_var=mask_var

        self.catastrophe=None
        self.dm=None
        self.max_distance=None
        self.mask_scale=None

        self.outlier_cut=0.95
        self.optimize_frac = 0.1
        self.xlabel=xlabel
        self.ylabel=ylabel

        self.zmin = 0.6
        self.zmax=1.6

        self.oiimin=6e-17
Example #5
def pval_KalZtest(n1,N1,n2,N2):
    """Compute p-value using Kal Z-test for count data.
    
    Compute pval using Z-test, as published in
    Kal et al, 1999, Mol Biol Cell 10:1859.
    
    Z = (p1-p2) / sqrt( p0 * (1-p0) * (1/N1 + 1/N2) )
    where p1 = n1/N1, p2=n2/N2, and p0=(n1+n2)/(N1+N2)
    You reject if |Z| > Z_a/2 where a is sig lev.  Here
    we return the p-value itself.
    
    """
    if n1==0 and n2==0:
        return 1.0
    
    n1 = np.float_(n1)
    N1 = np.float_(N1)
    n2 = np.float_(n2)
    N2 = np.float_(N2)
    
    p0 = (n1+n2)/(N1+N2)
    p1 = n1/N1
    p2 = n2/N2
    
    Z = (p1-p2) / np.sqrt( p0 * (1-p0) * ((1/N1) + (1/N2)) )
    
    pval = 2 * sp.stats.norm.cdf(-1*abs(Z))
    
    return pval
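A worked usage sketch, assuming the snippet's sp alias is scipy (as the sp.stats call implies): 20 tagged counts out of 1000 versus 40 out of 1000 gives Z ≈ -2.62 and a two-sided p-value of roughly 0.009.

import numpy as np
import scipy as sp
import scipy.stats

print(pval_KalZtest(20, 1000, 40, 1000))  # ~0.009, significant at the 5% level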
Example #6
def linearinterpolation(nstep,ndata,dt):
    """Used for linear interpolation between transportation matrices.
    Returns weights alpha, beta and indices of matrices.
    Parameters
    ----------
    nstep   : int Number of timesteps
    ndata   : int Number of matrices
    dt      : float Width of one timestep (as a fraction of the period)
    Returns
    -------
    alpha,beta : array
                    coefficients for interpolation
    jalpha,jbeta : array
                    indices for interpolation
    """

    # fractional position of each timestep within one period
    # (the original also pre-allocated alpha and beta with np.array(nstep, ...),
    # which creates 0-d arrays and was dead code; those lines are dropped)
    t = np.fmod(np.arange(nstep, dtype=np.float_) * dt, 1.0)

    w       = t * ndata + 0.5
    beta    = np.float_(np.fmod(w, 1.0))
    alpha   = np.float_(1.0 - beta)
    jalpha  = np.fmod(np.floor(w) + ndata - 1.0, ndata).astype(int)
    jbeta   = np.fmod(np.floor(w), ndata).astype(int)

    return alpha,beta,jalpha,jbeta
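A minimal usage sketch with the same arguments Example #8 below passes (2880 timesteps per year, 12 monthly matrices, dt = 1/2880):

import numpy as np

alpha, beta, jalpha, jbeta = linearinterpolation(2880, 12, 1.0 / 2880)
# each step's matrix is blended as alpha[i] * A[jalpha[i]] + beta[i] * A[jbeta[i]]
assert np.allclose(alpha + beta, 1.0)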
Example #7
def rotatev_aroundx(p,a):
    x=p[0]
    y=p[1]
    z=p[2]
    cs=np.float_(np.cos(a))
    cn=np.float_(np.sin(a))
    return np.float_(np.array([x,cs*y-cn*z,cn*y+cs*z]))
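A quick check of the rotation, assuming the angle is in radians: rotating the y unit vector by 90 degrees about x should yield the z unit vector.

import numpy as np

v = rotatev_aroundx(np.array([0., 1., 0.]), np.pi / 2)
assert np.allclose(v, [0., 0., 1.])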
Example #8
        def Init(self):
                #boundary and domain condition
                self.lat  = io.read_PETSc_vec(self.config["-Metos3DBoundaryConditionInputDirectory"][0] + self.config["-Metos3DLatitudeFileFormat"][0])
                dz        = io.read_PETSc_vec(self.config["-Metos3DDomainConditionInputDirectory"][0] + self.config["-Metos3DLayerHeightFileFormat"][0])
                z         = io.read_PETSc_vec(self.config["-Metos3DDomainConditionInputDirectory"][0] + self.config["-Metos3DLayerDepthFileFormat"][0])
                self.lsm  = io.read_PETSc_mat(self.config["-Metos3DProfileInputDirectory"][0] + self.config["-Metos3DProfileMaskFile"][0])
                self.fice = np.zeros((self.profiles,np.int_(self.config["-Metos3DIceCoverCount"][0])),dtype=np.float_)
                for i in range(np.int_(self.config["-Metos3DIceCoverCount"][0])):
                        self.fice[:,i] = io.read_PETSc_vec(self.config["-Metos3DBoundaryConditionInputDirectory"][0] + (self.config["-Metos3DIceCoverFileFormat"][0] % i))

                self.bc         = np.zeros(2,dtype=np.float_)
                self.dc         = np.zeros((self.ny,2),dtype=np.float_)
                self.dc[:,0]    = z
                self.dc[:,1]    = dz

                self.u          = np.array(self.config["-Metos3DParameterValue"],dtype=np.float_)
                self.dt         = np.float_(self.config["-Metos3DTimeStep"][0])
                self.nspinup    = np.int_(self.config["-Metos3DSpinupCount"][0])
                self.ntimestep  = np.int_(self.config["-Metos3DTimeStepCount"][0])


                self.matrixCount  = np.int_(self.config["-Metos3DMatrixCount"][0])
                self.U_PODN        = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'N/'+ self.config["-Metos3DMatrixPODFileFormat"][0])
                self.U_PODDOP       = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'DOP/'+ self.config["-Metos3DMatrixPODFileFormat"][0])
                self.U_DEIMN       = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'N/'+ self.config["-Metos3DMatrixDEIMFileFormat"][0])
                self.U_DEIMDOP       = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'DOP/'+ self.config["-Metos3DMatrixDEIMFileFormat"][0])
                self.DEIM_IndicesN = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'N/'+ self.config["-Metos3DDEIMIndicesFileFormat"][0])
                self.DEIM_IndicesDOP = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'DOP/'+ self.config["-Metos3DDEIMIndicesFileFormat"][0])

                

                self.AN = np.ndarray(shape=(self.matrixCount,self.U_PODN.shape[1],self.U_PODN.shape[1]), dtype=np.float_, order='C')
                self.ADOP = np.ndarray(shape=(self.matrixCount,self.U_PODDOP.shape[1],self.U_PODDOP.shape[1]), dtype=np.float_, order='C')

                for i in range(0,self.matrixCount):
                        self.AN[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'N/'+ self.config["-Metos3DMatrixReducedFileFormat"][0] % i)
                        self.ADOP[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'DOP/'+ self.config["-Metos3DMatrixReducedFileFormat"][0] % i)
        
                self.PN = np.ndarray(shape=(self.matrixCount,self.U_PODN.shape[1],self.U_DEIMN.shape[1]), dtype=np.float_, order='C')
                self.PDOP = np.ndarray(shape=(self.matrixCount,self.U_PODDOP.shape[1],self.U_DEIMDOP.shape[1]), dtype=np.float_, order='C')
                for i in range(0,self.matrixCount):
                        self.PN[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'N/'+ self.config["-Metos3DMatrixReducedDEINFileFormat"][0] % i)
                        self.PDOP[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] +'DOP/'+ self.config["-Metos3DMatrixReducedDEINFileFormat"][0] % i)

                #precomputing the interpolation indices for a year
                [self.interpolation_a,self.interpolation_b,self.interpolation_j,self.interpolation_k] = util.linearinterpolation(2880,12,0.0003472222222222)

                self.yN     = np.ones(self.ny,dtype=np.float_) * np.float_(self.config["-Metos3DTracerInitValue"])[0]
                self.yDOP     = np.ones(self.ny,dtype=np.float_) * np.float_(self.config["-Metos3DTracerInitValue"])[1]
                self.y_redN = np.dot(self.U_PODN.T,self.yN)
                self.y_redDOP = np.dot(self.U_PODDOP.T,self.yDOP)

                self.qN     = np.zeros(self.DEIM_IndicesN.shape[0],dtype=np.float_)
                self.qDOP     = np.zeros(self.DEIM_IndicesDOP.shape[0],dtype=np.float_)

                self.J,self.PJ = util.generateIndicesForNonlinearFunction(self.lsm,self.profiles,self.ny)

                self.out_pathN     = self.config["-Metos3DTracerOutputDirectory"][0] +self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] +self.config["-Metos3DTracerOutputFile"][0]
                self.out_pathDOP     = self.config["-Metos3DTracerOutputDirectory"][0] +self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] +self.config["-Metos3DTracerOutputFile"][1]
                self.monitor_path = self.config["-Metos3DTracerMointorDirectory"][0] +self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] +self.config["-Metos3DTracerOutputFile"][0]
Example #9
def loadDevices(trainfile,DictHandle,DictDevice,DictDevType,DictDevOs,DictCountry,DictAnnC1,DictAnnC2):

    NumRows = 0
    with open(trainfile,'rb') as csvfile:
        spamreader=csv.reader(csvfile,delimiter=',')
        spamreader.next()
        for row in spamreader:
            NumRows = NumRows + 1

    XDevices = np.zeros((NumRows,11))
    
    NumRows = 0
    with open(trainfile,'rb') as csvfile:
        spamreader=csv.reader(csvfile,delimiter=',')
        spamreader.next()
        for row in spamreader:
            XDevices[NumRows,0]=DictHandle[row[0]]
            XDevices[NumRows,1]=DictDevice[row[1]]
            XDevices[NumRows,2]=DictDevType[row[2]]
            XDevices[NumRows,3]=DictDevOs[row[3]]
            XDevices[NumRows,4]=DictCountry[row[4]]
            XDevices[NumRows,5]=np.float_(row[5])
            XDevices[NumRows,6]=DictAnnC1[row[6]]
            XDevices[NumRows,7]=DictAnnC2[row[7]]
            XDevices[NumRows,8]=np.float_(row[8])
            XDevices[NumRows,9]=np.float_(row[9])
            XDevices[NumRows,10]=np.float_(row[10])
            
            NumRows = NumRows + 1

    return XDevices
Example #10
 def readSnap(self,f):
     snap = Snap()
     snap.time = 0
     snap.box = []
     snap.atoms = []
     snap.natoms = 0
     for i, line in enumerate(f):
         if i > 8 and i < 8 + snap.natoms:
             snap.atoms.append(line.split())
         elif i == 3:
             snap.natoms = int(line.split()[0])
         elif i == 5 or i == 6 or i == 7:
             snap.box.append(np.float_(line.split()))
         elif i == 4:
             if len(line.split()) == 3:
                 snap.boundary = []
             else:
                 snap.boundary = line.split()[3:]
         elif i == 8:
             snap.descriptor = line.split()[2:]
         elif i == 1:
             snap.time = int(line.split()[0])
         elif i == 8 + snap.natoms:
             snap.atoms.append(line.split())
             break
     snap.atoms = np.float_(snap.atoms)
     snap.box = np.array(snap.box)
     return snap
Example #11
def rotatev_aroundz(p,a):
    x=p[0]
    y=p[1]
    z=p[2]    
    cs=np.float_(np.cos(a))
    cn=np.float_(np.sin(a))
    return np.float_(np.array([cs*x-cn*y,cn*x+cs*y,z]))
Example #12
def compute_edf_distance(support1, support2):

    bin_edges = numpy.empty((support1.shape[0] + support2.shape[0] + 2,))
    bin_edges[0] = -numpy.inf
    bin_edges[-1] = numpy.inf
    bin_edges[1 : 1 + support1.shape[0]] = support1
    bin_edges[1 + support1.shape[0] : 1 + support1.shape[0] + support2.shape[0]] = support2
    bin_edges = numpy.unique(bin_edges)  # unique() also sorts the edges

    bin_counts1_i, bins1 = numpy.histogram(support1, bin_edges)
    bin_counts2_i, bins2 = numpy.histogram(support2, bin_edges)

    bin_counts1 = numpy.float_(bin_counts1_i)
    bin_counts2 = numpy.float_(bin_counts2_i)

    sum_counts1 = numpy.cumsum(bin_counts1) / numpy.sum(bin_counts1)
    sum_counts2 = numpy.cumsum(bin_counts2) / numpy.sum(bin_counts2)

    delta = numpy.abs(sum_counts1 - sum_counts2)

    dist = numpy.max(delta)

    return dist
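The returned value is the Kolmogorov-Smirnov-style maximum gap between the two empirical CDFs; a small sketch with synthetic samples:

import numpy

a = numpy.random.normal(0.0, 1.0, 500)
b = numpy.random.normal(0.5, 1.0, 500)
print(compute_edf_distance(a, a))  # 0.0 for identical samples
print(compute_edf_distance(a, b))  # > 0 for the shifted sample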
Example #13
def slidingMax(x,y,dx):
    x = np.float_(x)
    y = np.float_(y)
    LX = len(x)
    ymax = np.ones(LX)*y.min()
    code=\
    """
    int j;
    int i;
    int j0;
    int inloop;
    j0=1;
    
    for (i=0; i<LX; i++){
        j=j0;
        inloop=0;
        while ((x(j)<=x(i)+dx/2) && (j<LX) ) {
            if ((x(j)>=x(i)-dx/2) && (x(j)<=x(i)+dx/2)) {
                if (y(j)>ymax(i)) {
                    ymax(i) = y(j);
                }
                inloop=1;
            }
            if (inloop==0) {
                j0=j; // memorize where we started before
                }
            j++;
        }
    }
    """
    err = weave.inline(code,
                       ['x', 'y', 'dx','LX','ymax'],
                       type_converters=converters.blitz,
                       compiler = 'gcc')
    return ymax
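scipy.weave only exists on Python 2, so the snippet above no longer runs as written; a minimal pure-NumPy sketch of the same windowed maximum (for each i, the maximum of y over the window |x - x[i]| <= dx/2; the weave loop additionally assumes x is sorted ascending, which this version does not need):

import numpy as np

def sliding_max_numpy(x, y, dx):
    x = np.float_(x)
    y = np.float_(y)
    ymax = np.empty_like(y)
    for i in range(len(x)):
        # the window always contains the point i itself, so it is never empty
        inside = np.abs(x - x[i]) <= dx / 2
        ymax[i] = y[inside].max()
    return ymax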
Example #14
    def check_numpy_scalar_argument_return_generic(self):
        f = PyCFunction('foo')
        f += Variable('a1', numpy.int_, 'in, out')
        f += Variable('a2', numpy.float_, 'in, out')
        f += Variable('a3', numpy.complex_, 'in, out')
        foo = f.build()
        args = 2, 1.2, 1+2j
        results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1+2j]
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1,2]
        assert_equal(foo(*args),results)

        f = PyCFunction('foo')
        f += Variable('a1', 'npy_int', 'in, out')
        f += Variable('a2', 'npy_float', 'in, out')
        f += Variable('a3', 'npy_complex', 'in, out')
        foo = f.build()
        args = 2, 1.2, 1+2j
        results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1+2j]
        assert_equal(foo(*args),results)
        args = [2], [1.2], [1,2]
        assert_equal(foo(*args),results)
Example #15
def from_edf(fname, compression=None, below_water=False, lon=None, lat=None):
    """
    DataFrame constructor to open XBT EDF ASCII format.

    Examples
    --------
    >>> from ctd import DataFrame
    >>> cast = DataFrame.from_edf('../test/data/XBT.EDF.gz',
    ...                           compression='gzip')
    >>> fig, ax = cast['temperature'].plot()
    >>> ax.axis([20, 24, 19, 0])
    >>> ax.grid(True)
    """
    f = read_file(fname, compression=compression)
    header, names = [], []
    for k, line in enumerate(f.readlines()):
        line = line.strip()
        if line.startswith("Serial Number"):
            serial = line.strip().split(":")[1].strip()
        elif line.startswith("Latitude"):
            hemisphere = line[-1]
            lat = line.strip(hemisphere).split(":")[1].strip()
            lat = np.float_(lat.split())
            if hemisphere == "S":
                lat = -(lat[0] + lat[1] / 60.0)
            elif hemisphere == "N":
                lat = lat[0] + lat[1] / 60.0
            else:
                raise ValueError("Latitude not recognized.")
        elif line.startswith("Longitude"):
            hemisphere = line[-1]
            lon = line.strip(hemisphere).split(":")[1].strip()
            lon = np.float_(lon.split())
            if hemisphere == "W":
                lon = -(lon[0] + lon[1] / 60.0)
            elif hemisphere == "E":
                lon = lon[0] + lon[1] / 60.0
            else:
                raise ValueError("Longitude not recognized.")
        else:
            header.append(line)
            if line.startswith("Field"):
                col, unit = [l.strip().lower() for l in line.split(":")]
                names.append(unit.split()[0])
        if line == "// Data":
            skiprows = k + 1
            break

    f.seek(0)
    cast = read_table(
        f, header=None, index_col=None, names=names, dtype=float, skiprows=skiprows, delim_whitespace=True
    )
    f.close()

    cast.set_index("depth", drop=True, inplace=True)
    cast.index.name = "Depth [m]"
    name = basename(fname)[1]
    if below_water:
        cast = remove_above_water(cast)
    return CTD(cast, longitude=lon, latitude=lat, serial=serial, name=name, header=header)
Example #16
def allan(freq,noise, t):
    #first, convert to phase noise
    phase=np.float_(noise*1.934*(10**14)/2./(freq**2.))
    #now integrate over all values to get allan variance
    transferFunc = np.float_(np.sin(np.pi*phase*t)**4/(np.pi*t*phase)**2)
    out = 2*integrate.trapz(phase*transferFunc,freq)
    return np.sqrt(out)
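A toy usage sketch, assuming the snippet's integrate alias is scipy.integrate (note integrate.trapz was removed in SciPy 1.14; newer versions spell it trapezoid): a flat phase-noise spectrum evaluated at tau = 1 s.

import numpy as np
from scipy import integrate

freq = np.linspace(1.0, 1e3, 1000)   # Hz
noise = np.full_like(freq, 1e-12)    # flat noise floor
print(allan(freq, noise, 1.0))       # Allan deviation at tau = 1 s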
Example #17
def smooth_spreadsheet_with_rwr(restart, network_sparse, run_parameters):
    """ Simulates a random walk with restarts.
                                        alpha=0.7, max_iteration=100, tol=1.e-4
    Args:
        restart: restart array of any size.
        network_sparse: adjancy matrix stored in sparse format.
        run_parameters: parameters dictionary with alpha, restart_tolerance, 
            number_of_iteriations_in_rwr and

        max_iteration: maximum number of random walap_vals. (default = 100)
        tol: convergence tolerance. (default = 1.e-4)

    Returns:
        smooth_1: smoothed restart data.
        step: number of iterations used
    """
    tol = np.float_(run_parameters["restart_tolerance"])
    alpha = np.float_(run_parameters["restart_probability"])
    smooth_0 = restart
    smooth_r = (1. - alpha) * restart
    for step in range(0, int(run_parameters["number_of_iteriations_in_rwr"])):
        smooth_1 = alpha * network_sparse.dot(smooth_0) + smooth_r
        deltav = LA.norm(smooth_1 - smooth_0, 'fro')
        if deltav < tol:
            break
        smooth_0 = smooth_1

    return smooth_1, step
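A minimal sketch, assuming LA is numpy.linalg: a three-node chain with a column-normalized sparse adjacency matrix, restarting on node 0. The parameter values may be strings, since the function casts them with np.float_.

import numpy as np
import scipy.sparse
from numpy import linalg as LA

adj = np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
network_sparse = scipy.sparse.csr_matrix(adj / adj.sum(axis=0))
restart = np.eye(3)[:, [0]]  # restart vector concentrated on node 0
params = {"restart_tolerance": "1.e-4",
          "restart_probability": "0.7",
          "number_of_iteriations_in_rwr": "100"}
smooth, steps = smooth_spreadsheet_with_rwr(restart, network_sparse, params)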
Example #18
	def read_data (self):
		"""Reads in data required for the given input_type."""

		re_int    = '\d+'
		re_ints   = '\\b\d+\\b'
		re_floats = '[+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?'

		f_name = self.input_path + "l2errs+convergence_standard.txt"

		var_names = list()

		with open(f_name) as f:
			for line in f:
				if ('n_vars' in line):
					n_vars = np.int_(re.search(re_int,line).group())
				if ('ml_max' in line):
					ml_max = np.int_(re.search(re_int,line).group())
				if ('p_max' in line):
					p_max  = np.int_(re.search(re_int,line).group())

					# Initialize np arrays now that dimensions are known
					block_size2 = (ml_max+1,p_max+1)
					block_size3 = (ml_max+1,p_max+1,n_vars)

					cases_run   = np.int_(  np.zeros(block_size2))
					h           = np.float_(np.zeros(block_size2))
					l2_errors   = np.float_(-float('inf')+np.zeros(block_size3))
					conv_orders = np.float_(-float('inf')+np.zeros(block_size3))

				if ('Cases Run' in line):
					for i in range(0,ml_max+1):
						line = f.readline()
						cases_run[i,:] = np.int_([int(s) for s in re.findall(re_ints, line)])

				if ('Mesh Size' in line):
					assign_block(h,cases_run,f)

				if ('L2 Errors' in line):
					for k in range(0,n_vars):
						skip_lines(1,f)
						line = f.readline()
						var_names.append(line.replace('\n',''))

						assign_block(l2_errors[:,:,k],cases_run,f)

				if ('Convergence Orders' in line):
					for k in range(0,n_vars):
						skip_lines(2,f)
						assign_block(conv_orders[:,:,k],cases_run,f)

			self.n_vars = n_vars
			self.ml_max = ml_max
			self.p_max  = p_max

			self.var_names   = var_names
			self.cases_run   = cases_run
			self.h           = h
			self.l2_errors   = l2_errors
			self.conv_orders = conv_orders
Example #19
 def initialize_constants(self):
     """
     Initialize constants that depend on the instance.
     """
     # maximum pheromone value
     self.PH_MAX = np.float_(self.num_vars / (1.0 - self.PH_REDUCE_FACTOR))
     # minimum pheromone value
     self.PH_MIN = np.float_(self.PH_MAX / self.num_lits)
Example #20
def gray2real01(img):
    x = img.shape[0]
    y = img.shape[1]
    fxy = np.zeros((x,y),dtype=np.float_)
    for i in xrange(0,x):
        for j in xrange(0,y):
            fxy[i,j] = np.float_(img[i][j])/np.float_(255)
    return fxy
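The double loop is equivalent to a single vectorized cast and divide; a sketch of the same mapping to [0, 1]:

import numpy as np

def gray2real01_vec(img):
    # np.float_(img) returns a float64 array, matching the per-pixel casts above
    return np.float_(img) / 255.0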
Example #21
 def __init__(self, length=0, angle=0, order=1, rotate=0, name=None, **kwargs):
     self.name   = name
     self._type   = 'bend'
     self._length = _np.float_(length)
     self._order  = int(order)
     self._angle  = _np.float_(angle)
     self.rotate  = rotate
     self._kwargs = kwargs
Example #22
def snack_raw_pitch_python(wav_fn, frame_shift, window_size, max_pitch, min_pitch):
    """Implement snack_raw_pitch() by calling Snack through Python's tkinter
       library

    Note this method can only be used if the user's machine is setup, so that
    Tcl/Tk can be accessed through Python's tkinter library.

    The vectors returned here are the raw Snack output, without padding.
    For more info, see documentation for snack_raw_pitch().
    """
    try:
        import tkinter
    except ImportError:
        try:
            import Tkinter as tkinter
        except ImportError: # pragma: no cover
            print("Need Python library tkinter. Is it installed?")

    # HACK: Need to replace single backslash with two backslashes,
    #       so that the Tcl shell reads the file path correctly on Windows
    if sys.platform == 'win32' or sys.platform == 'cygwin': # pragma: no cover
        wav_fn = wav_fn.replace('\\', '\\\\')

    # XXX I'm assuming Hz for pitch; the docs don't actually say that.
    # http://www.speech.kth.se/snack/man/snack2.2/tcl-man.html#spitch
    tcl = tkinter.Tcl()
    try:
        # XXX This will trigger a message 'cannot open /dev/mixer' on the
        # console if you don't have a /dev/mixer.  You don't *need* a mixer to
        # snack the way we are using it, but there's no practical way to
        # suppress the message without modifying the snack source.  Fortunately
        # most people running opensauce will in fact have a /dev/mixer.
        tcl.eval('package require snack')
    except tkinter.TclError as err: # pragma: no cover
        log.critical('Cannot load snack (is it installed?): %s', err)
        return
    tcl.eval('snack::sound s')
    tcl.eval('s read {}'.format(wav_fn))
    cmd = ['s pitch -method esps']
    cmd.extend(['-framelength {}'.format(frame_shift / 1000)])
    cmd.extend(['-windowlength {}'.format(window_size / 1000)])
    cmd.extend(['-maxpitch {}'.format(max_pitch)])
    cmd.extend(['-minpitch {}'.format(min_pitch)])
    # Run Snack pitch command
    tcl.eval('set data [{}]'.format(' '.join(cmd)))
    # XXX check for errors here and log and abort if there is one.  Result
    # string will start with ERROR:.
    # Collect results and save in return variables
    num_frames = int(tcl.eval('llength $data'))
    F0_raw = np.empty(num_frames)
    V_raw = np.empty(num_frames)
    # snack returns four values per frame, we only care about the first two.
    for i in range(num_frames):
        values = tcl.eval('lindex $data ' + str(i)).split()
        F0_raw[i] = np.float_(values[0])
        V_raw[i] = np.float_(values[1])

    return F0_raw, V_raw
Example #23
def parseProcess():
    global trainData, trainLabel, testData, testLabel
    
    trainData = np.array([],dtype=float)
    procInfoHex = []
    #trainLabel = np.array([],dtype=int)
    testData = np.array([], dtype=float)
    #testLabel = np.array([], dtype=int)

    #entry[0] = ['USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS', 'TTY', 'STAT', 'START', 'TIME', 'COMMAND']
    #USER(0), %CPU(2), %MEM(3), VSZ(4), RSS(5), TIME(9), CMD(10) are important attribute
    init = 1
    iteration = 0

    while iteration < NUM_ITR: #100 iteration, about 2000 samples
        shDump = commands.getoutput("ps aux --sort=-pcpu") # list of all processes in stream
        entry = shDump.split('\n') #split the stream into arrays
        #print len(entry)

        for indx in range(len(entry)):
            hexList = []
            if indx != 0:
                procInfo = entry[indx].split() #split array into elements
                
                #remove meaningless contents
                procInfo.pop(1);
                procInfo.pop(5);
                procInfo.pop(6);
                #procInfo = ['USER', '%CPU', '%MEM', 'VSZ', 'RSS','STAT','TIME', 'COMMAND','param0', 'param1', 'param2', 'param3', 'param4', .... 'paramN']
                #convert each plain-text string into 8-byte hex elements
                for s in procInfo:
                    #if an attribute is more than 8 bytes, we split it
                    procInfoHex = split_hex(s.encode('hex'))
                    procInfoHex = procInfoHex.split()
                    hexList= hexList + procInfoHex
                
                
                #the buffer is 60 doubles wide, so padding the remainders with ' '
                while len(hexList) < NUM_COLS:
                    hexList.append(' '.encode('hex'))
                
                #convert hex to double
                procInfoHash = [ struct.unpack('<d', s.zfill(16).decode('hex'))[0] for s in hexList]

                if indx == 1 and init == 1:
                    init=0 #only init numpy once
                    
                    #procInfo[7] is the command, use it as label
                    trainData=np.hstack((trainData, np.float_(procInfoHash)));
                    #trainLabel=np.append(trainLabel, np.int_(procInfoHash[7]));

                else:
                    trainData=np.vstack((trainData, np.float_(procInfoHash)));
                    #trainLabel=np.append(trainLabel, np.int_(procInfoHash[7]));
        iteration += 1
    
    # Split into train and test
    trainData, testData= sklearn.cross_validation.train_test_split(trainData)
Example #24
    def change_E(self, old_gamma, new_gamma):
        """
        Scale the setpoint energy of the magnet from the old energy *old_gamma* to new energy *new_gamma*.

        Because the magnet's B-field stays the same, a different beam energy changes the bend angle; this method rescales the stored angle accordingly.
        """
        old_gamma = _np.float_(old_gamma)
        new_gamma = _np.float_(new_gamma)
        self._angle *= old_gamma / new_gamma
Example #25
def norm(data):
    n = np.zeros((len(data),1),dtype=np.float_)
    for j in range(0,len(data)):
        s=np.float_(0.0)
        for i in range(0,len(data[0])):
                s+=data[j,i]*data[j,i]
        n[j,0] = np.float_(1/np.sqrt(s))
#    return (np.array(data,dtype=np.float_)*n)
    return (n)
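A vectorized equivalent of the loop above: the reciprocal Euclidean length of each row, returned as a column vector.

import numpy as np

def norm_vec(data):
    return 1.0 / np.sqrt(np.sum(np.float_(data) ** 2, axis=1, keepdims=True))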
Example #26
    def __init__(self, timber_variable, press_gauge):
        if type(timber_variable) is str:
            dict_timber = tm.parse_timber_file(timber_variable, verbose=True)
            timber_press_gauge = dict_timber[press_gauge]
        elif type(timber_variable) is dict:
            timber_press_gauge = timber_variable[press_gauge]

        self.t_stamps = np.float_(np.array(timber_press_gauge.t_stamps))
        self.values = np.squeeze(np.float_(np.array(timber_press_gauge.values)))
Example #27
def pvals_logRatioMC(counts1, counts2, B=1e6, pseudocount=1, verbose=False):
    """Compute component-wise p-values of difference between two count vectors
    using Monte Carlo sampling of log ratios.
    
    Null hypothesis is that data is from same multinomial.  Parameters estimated
    by combining both count vectors.  Zeros are handled by adding pseudocount to
    each element.
    
    The test statistic is log Ratio, which is computed for each component.
    
    Two random count vectors are generated, and the component-wise log ratio
    is computed.  For each component, it is recorded whether the abs random log
    ratio was greater than or less than the abs test statistic value.  This is
    performed B times.  The absolute value makes the test two-sided and symmetric.
    
    The achieved significance level (ASL) is returned for each component.
    
    """
    if len(counts1) != len(counts2): raise ValueError, "Counts vectors have different lengths."
    
    counts1 = np.asarray(counts1, dtype=np.float)
    counts2 = np.asarray(counts2, dtype=np.float)
    
    total1 = int(np.round(np.sum(counts1)))
    total2 = int(np.round(np.sum(counts2)))
    
    countsMLE = counts1 + counts2 + pseudocount
    counts1 = counts1 + pseudocount     # note: counts1 and counts2 are changed at this point
    counts2 = counts2 + pseudocount
    
    normcounts1 = counts1 / np.sum(counts1)
    normcounts2 = counts2 / np.sum(counts2)
    
    testabslogratios = np.abs(np.log10(normcounts2 / normcounts1))
    
    probvec = countsMLE / np.sum(countsMLE)
    
    atleastasextreme = np.zeros(len(counts1))
    
    for i in xrange(B):
        if verbose and i % 10 == 0:
            sys.stdout.write("%i " % i)
            sys.stdout.flush()
        
        randcounts1 = np.float_(np.random.multinomial(total1, probvec)) + pseudocount
        randcounts2 = np.float_(np.random.multinomial(total2, probvec)) + pseudocount
        
        normrandcounts1 = randcounts1 / np.sum(randcounts1)
        normrandcounts2 = randcounts2 / np.sum(randcounts2)
        
        randabslogratios = np.abs(np.log10(normrandcounts2 / normrandcounts1))
        
        atleastasextreme += np.float_(randabslogratios >= testabslogratios)
    
    ASL = atleastasextreme / B
    
    return ASL
Example #28
def sdds_to_dict(in_complete_path):
	us_string = in_complete_path.split('.')[-2]

	try:
		temp = sddsdata(in_complete_path, endian='little', full=True)
	except IndexError:
		print 'Failed to open data file. (save_bct_mat)'
		return
	data = temp.data[0]

	cycleTime = data['cycleTime'].tostring()
	t_stamp_unix = time.mktime(time.strptime(cycleTime.replace('"', '').replace('\n','').split('.')[0], '%Y/%m/%d %H:%M:%S'))

	beamID = np.int_(data['beamID'])
	deviceName = ((data['deviceName'].tostring()).split('\n')[0]).split('SPS.')[-1]
	sbfIntensity = np.float_(data['sbfIntensity'])
	acqState = np.int_(data['acqState'])
	totalIntensity = data['totalIntensity']
	acqTime = data['acqTime'].tostring()
	propType = np.int_(data['propType'])
	totalIntensity_unitExponent = np.int_(data['totalIntensity_unitExponent'])
	measStamp_unitExponent = np.float_(data['measStamp_unitExponent'])
	samplingTime = np.int_(data['samplingTime'])
	measStamp_unit = np.int_(data['measStamp_unit'])
	observables = np.int_(data['observables'])
	nbOfMeas = np.int_(data['nbOfMeas'])
	superCycleNb = np.int_(data['superCycleNb'])
	acqMsg = data['acqMsg'].tostring()
	measStamp = data['measStamp']
	acqDesc = data['acqDesc'].tostring()
	totalIntensity_unit = np.int_(data['totalIntensity_unit'])

	dict_meas = {
		'beamID':beamID,
		'deviceName':deviceName,
		'sbfIntensity':sbfIntensity,
		'acqState':acqState,
		'totalIntensity':totalIntensity,
		'acqTime':acqTime,
		'propType':propType,
		'totalIntensity_unitExponent':totalIntensity_unitExponent,
		'measStamp_unitExponent':measStamp_unitExponent,
		'samplingTime':samplingTime,
		'measStamp_unit':measStamp_unit,
		'observables':observables,
		'nbOfMeas':nbOfMeas,
		'superCycleNb':superCycleNb,
		'cycleTime':cycleTime,
		'acqMsg':acqMsg,
		'measStamp':measStamp,
		'acqDesc':acqDesc,
		'totalIntensity_unit':totalIntensity_unit,
		'SPSuser':us_string,
		't_stamp_unix':t_stamp_unix
			}

	return dict_meas
Example #29
    def __init__(self, timber_variable, beam=0):

        if type(timber_variable) is dict:
            timber_variable_BCT = timber_variable[get_variable_dict(beam)['BEAM_INTENSITY']]           
        
        self.beam=beam
        self.t_stamps = np.float_(np.array(timber_variable_BCT[0]))
        self.t_str=[datetime.datetime.fromtimestamp(self.t_stamps[ii]) for ii in np.arange(len(self.t_stamps))]
        self.values = np.squeeze(np.float_(np.array(timber_variable_BCT[1])))
Example #30
def quat2rpy2(Q):
    w = Q[0]
    x = Q[1]
    y = Q[2]
    z = Q[3]    
    r = np.float_(np.arctan2(2*y*w - 2*x*z, 1 -2*y*y - 2*z*z))
    p = np.float_(np.arctan2(2*x*w - 2*y*z, 1 -2*x*x - 2*z*z))
    y = np.float_(np.arcsin(2*x*y + 2*z*w))
    return np.float_([r,p,y])
Example #31
        fill_dict.update(tm.CalsVariables_from_h5(
            data_folder_fill + ('/fill_basic_data_h5s/'
                'basic_data_fill_%d.h5'%filln_offset)))
        fill_dict.update(tm.CalsVariables_from_h5(
            data_folder_fill + ('/fill_heatload_data_h5s/'
                'heatloads_fill_%d.h5'%filln_offset)))

    if args.use_recalc:
        fill_dict.update(qf.get_fill_dict(filln_offset,
             h5_storage=H5_storage(recalc_h5_folder),
             use_dP=True))


    dict_offsets={}
    for kk in hl_varlist:
        dict_offsets[kk] = np.interp(t_zero_unix, np.float_(np.array(fill_dict[kk].t_stamps)), fill_dict[kk].float_values())


pl.close('all')
ms.mystyle_arial(fontsz=fontsz, dist_tick_lab=9)
fig = pl.figure(1, figsize=figsz)
fig.patch.set_facecolor('w')
ax1 = fig.add_subplot(311)
ax11 = ax1.twinx()
ax2 = fig.add_subplot(312, sharex=ax1)
ax3 = fig.add_subplot(313, sharex=ax1)
ms.sciy()

N_fills = len(fill_list)

t_for_integrated = []
Example #32
else:
    raise ValueError, "Must give a single argument that is a timeseries data file"

data = timeseries.load_timeseries(inhandle)
labels = data['labels']
times = data['times']
timeseriesmatrix = data['matrix']

try:
    sums = data['sums']
except KeyError:
    sums = timeseriesmatrix.sum(axis=0)

# normalize if desired
if options.normalize:
    timeseriesmatrix = np.float_(timeseriesmatrix) / np.asarray(sums)

# define which time series to plot
if options.threshold:
    idxs = np.sum(timeseriesmatrix >= options.threshold,
                  axis=1) > 0  # breaks threshold at least once
else:
    idxs = np.asarray([True] * timeseriesmatrix.shape[0])
# idxs = np.sum(time_series_freqs>0,axis=1)>2 # seen at least twice
# idxs_bool = np.logical_and(idxs_bool_1,idxs_bool_2)
# idxs_bool = np.array([False]*len(reference_clones))
print "Number of lines plotted: %i" % np.sum(idxs)

# ==================
# = Make the plots =
# ==================
Example #33
def cross_entropy(Y, P):
    Y = np.float_(Y)
    P = np.float_(P)
    return - np.sum(Y * np.log(P) + (1 - Y) * np.log(1 - P))
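A small usage sketch with hand-picked labels and probabilities (confident, correct predictions give a low loss):

import numpy as np

Y = [1, 0, 1, 1]          # true labels
P = [0.9, 0.2, 0.8, 0.7]  # predicted probabilities
print(cross_entropy(Y, P))  # ~0.91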
Example #34
import glob
import numpy as np
import matplotlib.pyplot as plt
import sklearn.cluster

images = glob.glob("./imagenes/*.png")

data = []
for i in range(len(images)):
    d = np.float_(plt.imread(images[i]).flatten())
    data.append(d)

data = np.array(data)

n_clusters = np.arange(1, 21, 1)
inertia_array = []

for nc in n_clusters:
    k_means = sklearn.cluster.KMeans(n_clusters=nc)
    k_means.fit(data)

    # compute which cluster each pixel belongs to
    cluster = k_means.predict(data)

    # assign each pixel the position of its cluster center
    data_centered = data.copy()
    for i in range(nc):
        ii = cluster == i
        data_centered[ii, :] = np.int_(k_means.cluster_centers_[i])
Example #35
def test_recursively_convert_to_json_serializable():
    asset = ge.dataset.PandasDataset({
        "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    })
    asset.expect_column_values_to_be_in_set("x", [1, 2, 3, 4, 5, 6, 7, 8, 9],
                                            mostly=0.8)

    part = ge.dataset.util.partition_data(asset.x)
    asset.expect_column_kl_divergence_to_be_less_than("x", part, 0.6)

    # Dumping this JSON object verifies that everything is serializable
    json.dumps(expectationSuiteSchema.dump(asset.get_expectation_suite()),
               indent=2)

    x = {
        "w": ["aaaa", "bbbb", 1.3, 5, 6, 7],
        "x": np.array([1, 2, 3]),
        "y": {
            "alpha": None,
            "beta": np.nan,
            "delta": np.inf,
            "gamma": -np.inf
        },
        "z": {1, 2, 3, 4, 5},
        "zz": (1, 2, 3),
        "zzz": [
            datetime.datetime(2017, 1, 1),
            datetime.date(2017, 5, 1),
        ],
        "np.bool": np.bool_([True, False, True]),
        "np.int_": np.int_([5, 3, 2]),
        "np.int8": np.int8([5, 3, 2]),
        "np.int16": np.int16([10, 6, 4]),
        "np.int32": np.int32([20, 12, 8]),
        "np.uint": np.uint([20, 5, 6]),
        "np.uint8": np.uint8([40, 10, 12]),
        "np.uint64": np.uint64([80, 20, 24]),
        "np.float_": np.float_([3.2, 5.6, 7.8]),
        "np.float32": np.float32([5.999999999, 5.6]),
        "np.float64": np.float64([5.9999999999999999999, 10.2]),
        # 'np.complex64': np.complex64([10.9999999 + 4.9999999j, 11.2+7.3j]),
        # 'np.complex128': np.complex128([20.999999999978335216827+10.99999999j, 22.4+14.6j]),
        # 'np.complex256': np.complex256([40.99999999 + 20.99999999j, 44.8+29.2j]),
        "np.str": np.unicode_(["hello"]),
        "yyy": decimal.Decimal(123.456),
    }
    if platform.system() != "Windows":
        x["np.float128"] = np.float128([5.999999999998786324399999999, 20.4])

    x = ge.data_asset.util.recursively_convert_to_json_serializable(x)
    assert isinstance(x["x"], list)

    assert isinstance(x["np.bool"][0], bool)
    assert isinstance(x["np.int_"][0], int)
    assert isinstance(x["np.int8"][0], int)
    assert isinstance(x["np.int16"][0], int)
    assert isinstance(x["np.int32"][0], int)

    assert isinstance(x["np.uint"][0], int)
    assert isinstance(x["np.uint8"][0], int)
    assert isinstance(x["np.uint64"][0], int)

    assert isinstance(x["np.float32"][0], float)
    assert isinstance(x["np.float64"][0], float)
    if platform.system() != "Windows":
        assert isinstance(x["np.float128"][0], float)
    # self.assertEqual(type(x['np.complex64'][0]), complex)
    # self.assertEqual(type(x['np.complex128'][0]), complex)
    # self.assertEqual(type(x['np.complex256'][0]), complex)
    assert isinstance(x["np.float_"][0], float)

    # Make sure nothing is going wrong with precision rounding
    if platform.system() != "Windows":
        assert np.allclose(
            x["np.float128"][0],
            5.999999999998786324399999999,
            atol=10**(-sys.float_info.dig),
        )

    # TypeError when non-serializable numpy object is in dataset.
    with pytest.raises(TypeError):
        y = {"p": np.DataSource()}
        ge.data_asset.util.recursively_convert_to_json_serializable(y)

    try:
        x = unicode("abcdefg")
        x = ge.data_asset.util.recursively_convert_to_json_serializable(x)
        assert isinstance(x, unicode)
    except NameError:
        pass
Example #36
# By Luca
#### DEMO
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import make_lupton_rgb
from astropy.visualization import *

### READING
b = fits.open("data/"+"M66-Blue.fts")[0].data
r = fits.open("data/"+"M66-Red.fts")[0].data 
g = fits.open("data/"+"M66-Green.fts")[0].data


### CASTING
r = np.array(r, dtype=np.float_)
g = np.array(g, dtype=np.float_)
b = np.array(b, dtype=np.float_)

rgb_default = make_lupton_rgb(r, g, b, minimum=1000, stretch=900, Q=100, filename="prova2finale.png")
plt.imshow(rgb_default, origin='lower')
plt.show()
Example #37

def wfits(name, data, hdr):
    if ptt.exists(name) == False:
        wfit(name, data, hdr)
    else:
        sycall("rm " + name)
        wfit(name, data, hdr)


sys.argv = filter(None, sys.argv)
if len(sys.argv) < 9:
    print "USE: plot_maps.pl map.fits min max bright contrast Label factor dev [MASK_MAP]"
    sys.exit(0)
mapfile = sys.argv[1]
min = float_(sys.argv[2])
max = float_(sys.argv[3])
bright = sys.argv[4]
contrast = sys.argv[5]
label = sys.argv[6]
factor = float_(sys.argv[7])
dev = sys.argv[8]
id_mask = 0
if len(sys.argv) == 10:
    mask_file = sys.argv[9]
    id_mask = 1

print dev
if dev == "null":
    import matplotlib.pyplot as plt
    #    from matplotlib import gridspec
Example #38
import random
import sys

import numpy
from six.moves import zip as izip
from six.moves import xrange

from . import _minpack
from . import _minim
from . import _saoopt

from sherpa.utils import parallel_map
from sherpa.utils._utils import sao_fcmp

#
# Use FLT_EPSILON as default tolerance
#
EPSILON = numpy.float_(numpy.finfo(numpy.float32).eps)

#
# Maximum callback function value, used to indicate that the optimizer
# has exceeded parameter boundaries.  All the optimizers expect double
# precision arguments, so we use numpy.float_ instead of SherpaFloat.
#
# As of numpy 0.9.5, 'max' is a 0-D array, which seems like a bug.
#
FUNC_MAX = numpy.float_(numpy.finfo(numpy.float_).max)


def _check_args(x0, xmin, xmax):
    x = numpy.array(x0, numpy.float_)  # Make a copy
    xmin = numpy.asarray(xmin, numpy.float_)
    xmax = numpy.asarray(xmax, numpy.float_)
Example #39
#
# print(r_eci)
# print(v_eci)

json_list = []

print('Parsing file and converting to ECI...')

with open(sys.argv[1], "r") as f:

    for line in f:

        temp = line.split()

        epoch_utc = Time(temp[4], scale='utc', format='isot')
        v = CartesianDifferential(list(np.float_(temp[17:20]))*(u.m/u.s))
        r = CartesianRepresentation(list(np.float_(temp[14:17]))*u.m, differentials={'s': v})
        r_ecef = ITRS(r, obstime=epoch_utc)
        r_eci_temp = r_ecef.transform_to(GCRS(obstime=epoch_utc))
        v_eci = r_eci_temp.cartesian.differentials['s'].d_xyz  # this may be off from Jonathan's initial notebook test
        r_eci = r_eci_temp.cartesian.xyz
        utc_ms = temp[4][:-3]  # FIXME -- orbdetpy can handle 6 digits of precision in time so need to remove this

        json_object = {"Time": utc_ms + "Z",  # add this to maintain ISO8601 format for orbdetpy
                       "PositionVelocity": [
                           r_eci[0].value,
                           r_eci[1].value,
                           r_eci[2].value,
                           v_eci[0].value*1000,  # not sure why this changed to km/s
                           v_eci[1].value*1000,
                           v_eci[2].value*1000
Example #40
	ret2, labels2, stats2, centroids2 = cv2.connectedComponentsWithStats(dst2)
	criteria2 = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
	corners2 = cv2.cornerSubPix(imagem2,np.float32(centroids2),(5,5),(-1,-1),criteria2)
	matriz2=[]
	for variavel in corners2:
		array=np.array([variavel])
		matriz2.append(array)
	kp2 = cv2.KeyPoint_convert(matriz2)
	kp2, des2 = brief.compute(img2, kp2)

	mat = bf.match(des1,des2)
	mat = sorted(mat, key = lambda x:x.distance)
	matches = mat[0:150]
	with open("../../imgTeste/img"+str(quantidadeImagens)+".txt",'r') as f:
		texto=f.readlines()
	posicao_x= np.float_(texto[0:4])
	posicao_y = np.float_(texto[4:8])
	min_x = float(min(posicao_x))
	max_x = float(max(posicao_x))
	min_y = float(min(posicao_y))
	max_y = float(max(posicao_y))

	if len(matches)>10:
		    src_pts = np.float32([ kp1[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
		    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)
		    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
		    h,w = img1.shape
		    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
		    dst_comp = cv2.perspectiveTransform(pts,M)
		    img2 = cv2.polylines(img2,[np.int32(dst_comp)],True,255,3, cv2.LINE_AA)
		    for pos in dst_pts:
Example #41
        sycall("rm " + name1)
        wfit(name, data, hdr)


sys.argv = filter(None, sys.argv)
if len(sys.argv) < 5:
    print "USE: spec_cube_plot.py INPUT_FILE.CUBE.FITS SN_map.fits signalmap.fits noisemap.fits [WMIN,WMAX]"
    sys.exit(0)

input = sys.argv[1]
sn_map = sys.argv[2]
signal_map = sys.argv[3]
noise_map = sys.argv[4]
if len(sys.argv) == 6:
    data = sys.argv[5].split(',')
    wmin = float_(data[0])
    wmax = float_(data[1])
else:
    wmin = 0.0
    wmax = 0.0

y_min = 1e12
y_max = -1e12
n = 0
[pdl, hdr] = gdata(input, 0, header=True)
[nz, nx, ny] = pdl.shape
crval = hdr["CRVAL3"]
cdelt = hdr["CDELT3"]
crpix = hdr["CRPIX3"]

if cdelt == 0:
Example #42
def to_float(_int):
    try:
        return np.float_(_int)
    except OverflowError:
        return np.nan
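A quick usage sketch: Python ints too large for a 64-bit float raise OverflowError, which this helper maps to NaN.

print(to_float(3))          # 3.0
print(to_float(10 ** 400))  # nan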
Example #43
def resize_array_example(Array_in, Array_example, method=1):
    """
    This function resizes an array so it has the same size as an example array
    The extend of the array must be the same

    Keyword arguments:
    Array_in -- []
        Array: 2D or 3D array
    Array_example -- []
        Array: 2D or 3D array
    method: -- 1 ... 5
        int: Resampling method
    """

    # Create old raster
    Array_out_shape = np.int_(Array_in.shape)
    Array_out_shape[-1] = Array_example.shape[-1]
    Array_out_shape[-2] = Array_example.shape[-2]

    if method == 1:
        interpolation_method = 'nearest'
        interpolation_number = 0
    if method == 2:
        interpolation_method = 'bicubic'
        interpolation_number = 3
    if method == 3:
        interpolation_method = 'bilinear'
        interpolation_number = 1
    if method == 4:
        interpolation_method = 'cubic'
        interpolation_number = 3
    if method == 5:
        interpolation_method = 'lanczos'
        # NOTE: skimage.transform.resize has no lanczos kernel; order 3
        # (cubic) is used as a fallback so the Python 3 branch still runs.
        interpolation_number = 3

    if len(Array_out_shape) == 3:
        Array_out = np.zeros(Array_out_shape)

        for i in range(0, Array_out_shape[0]):
            Array_in_slice = Array_in[i, :, :]
            size = tuple(Array_out_shape[1:])

            if sys.version_info[0] == 2:
                import scipy.misc as misc
                Array_out_slice = misc.imresize(np.float_(Array_in_slice),
                                                size,
                                                interp=interpolation_method,
                                                mode='F')
            if sys.version_info[0] == 3:
                import skimage.transform as transform
                Array_out_slice = transform.resize(np.float_(Array_in_slice),
                                                   size,
                                                   order=interpolation_number)

            Array_out[i, :, :] = Array_out_slice

    elif len(Array_out_shape) == 2:

        size = tuple(Array_out_shape)
        if sys.version_info[0] == 2:
            import scipy.misc as misc
            Array_out = misc.imresize(np.float_(Array_in),
                                      size,
                                      interp=interpolation_method,
                                      mode='F')
        if sys.version_info[0] == 3:
            import skimage.transform as transform
            Array_out = transform.resize(np.float_(Array_in),
                                         size,
                                         order=interpolation_number)
    else:
        print('only 2D or 3D dimensions are supported')
    return (Array_out)
Example #44
    pres = f.variables['ps'][:] * 1e-2  #[:ix,...]*1e-2 # Converting to hPa
    #     pres=f.variables['ps'][:ix,...]*1e-2 # Converting to hPa
    f.close()

    ### Some specific humidity values are < 0,
    ### possibly an interpolation issue.

    q[q < 0] = 0.0
    qps[qps < 0] = 0.0

    pbl_top = pres - 100  ## The sub-cloud layer is 100 mb thick ##
    low_top = np.zeros_like(pres)
    low_top[:] = 500
    #     low_top=pbl_top-400 ## The next layer extends to 500 mb ##

    pbl_top = np.float_(pbl_top.flatten())
    low_top = np.float_(low_top.flatten())
    lev = np.float_(lev)

    print('PREPPING LAYER AVERAGING-2')

    pbl_ind = np.zeros(pbl_top.size, dtype=np.int64)
    low_ind = np.zeros(low_top.size, dtype=np.int64)

    find_closest_index(pbl_top, lev, pbl_ind)
    find_closest_index(low_top, lev, low_ind)

    pres_3d = np.zeros_like(t)
    pres_3d[:] = pres[:, None, :, :]

    levels = np.zeros_like(t)
Example #45
 def __init__(self, wave, flux, error=None):
     #Could be np.asarray, but change fit_spectral_lines.py
     self.wave = np.float_(wave)
     self.flux = np.float_(flux)
     # np.float_(None) raises TypeError, so keep the default as None
     self.error = np.float_(error) if error is not None else None
Example #46
 def __init__(self, delta_t_recent, leap_dates, leap_offsets):
     self.delta_t_table = build_delta_t_table(delta_t_recent)
     self.leap_dates, self.leap_offsets = leap_dates, leap_offsets
     self.J2000 = Time(self, float_(T0))
     self.B1950 = Time(self, float_(B1950))
Example #47
def error_formula(y, y_hat):
    y = np.float_(y)
    y_hat = np.float_(y_hat)
    return -np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))
Example #48
    # CF_score_dict = {} # score of CF
    combined_score = {} # ensemble score...
    for i in range(len(triplets)):
        score_dict[triplets[i,2]] = score[i][0]

    # for idx in del_idx:
    #     del score_dict[idx]


    # get score from PaperRank here...
    PaperRank_score = all_PaperRank_score[p]
    for cited_paper in score_dict.keys():
        if str(cited_paper) not in PaperRank_score:
            PaperRank_score[str(cited_paper)] = 0.0
    # print('len after adding cited paper:', len(PaperRank_score))
    PaperRank_score_val = list(np.float_(list(PaperRank_score.values())))    # list(np.float_(list_name))

    PaperRank_score_key = list(PaperRank_score.keys())
    PaperRank_score_normalized_val = (PaperRank_score_val - np.min(PaperRank_score_val)) / np.ptp(PaperRank_score_val) if np.ptp(PaperRank_score_val) != 0 else [0]*len(PaperRank_score_val)# score of PaperRank is normalized here...

    for i in range(0,len(PaperRank_score)):
        PaperRank_score[PaperRank_score_key[i]] = PaperRank_score_normalized_val[i]

    # print(PaperRank_score)
    # print('final len:',len(PaperRank_score_val))
    # exit(0)

    # calculate ensemble score here (alpha*ConvCN + (1-alpha)*CF)
    # loop through score_dict
    for cited_paper in score_dict.keys():
        # print('ConvCN score:',score_dict[cited_paper], 'CF score', CF_score.get(str(cited_paper),0))
Example #49
 def center(X):
     # NOTE: both comprehensions below build arrays that are immediately
     # discarded, so this function returns X unchanged.
     np.array([np.float_(x) for x in X.T])
     np.array([np.float_(1) for _ in X.T])
     return X
Example #50
 def _decode(cls, outputs):
     return np.float_(outputs[cls.out_t])
Example #51
def fit_spec(spec,
             faxis,
             Jupp=12,
             K_fit=7,
             cutoff=0.009,
             varyf=2,
             interactive=True,
             mode='single'):
    """
	Fit the hyperfine lines of CH3CN, derive best-fitted Trot and Ntot.
	Input:
		spec: the spectra
		faxis: the frequency axis
		Jupp: J of the upper level
		K_fit: the number of K transitions to fit (e.g. K=0-6, then K_fit=7)
		cutoff: not used...
		varyf: number of channels to vary after you select the Vlsr
		interactive: true or false
		mode: single or double, components along the line of sight (to be done...)
	"""
    # Define the J, K numbers as global variables:
    # (Not recommended by Python experts...)
    global J
    global Kladder
    J = Jupp
    Kladder = K_fit

    if interactive:
        plt.ion()
        f = plt.figure(figsize=(14, 8))
        ax = f.add_subplot(111)

    unsatisfied = True
    while unsatisfied:
        if interactive:
            f.clear()
            plt.ion()
            plt.plot(faxis, spec, 'k-', label='Spectrum')
            cutoff_line = [cutoff] * len(faxis)
            cutoff_line_minus = [-1.0 * cutoff] * len(faxis)
            plt.plot(faxis, cutoff_line, 'r-')
            plt.plot(faxis, cutoff_line_minus, 'r-')
            plt.xlabel(r'Sky Frequency (GHz)', fontsize=20, labelpad=20)
            plt.ylabel(r'$T_{\nu}$ (K)', fontsize=20)
            plt.text(0.02,
                     0.92,
                     sourcename,
                     transform=ax.transAxes,
                     color='r',
                     fontsize=15)
            #plt.ylim([-10,60])
            #clickvalue = []
            if mode == 'single':
                cid = f.canvas.mpl_connect('button_press_event', onclick)
                raw_input('Click on the plot to select a Vlsr...')
                #print clickvalue
                if len(clickvalue) >= 1:
                    print 'Please select at least one velocity! The last one will be used.'
                    vlsr1 = clickvalue[-1]
                    vlsr1 = c * (1 - vlsr1 / ch3cn_info['frest'][0]) / 1e5
                elif len(clickvalue) == 0:
                    vlsr1 = 0.0
                print 'Or input one velocity manually:'
                manualv = raw_input()
                manualv = manualv.split()
                if len(manualv) == 1:
                    vlsr1 = np.float_(manualv)
                else:
                    print 'Invalid input...'
                print 'The Vlsr is %0.2f km/s' % vlsr1
                raw_input('Press any key to start fitting...')
                f.canvas.mpl_disconnect(cid)
                vlsr2 = 0.0
            elif mode == 'double':
                cid = f.canvas.mpl_connect('button_press_event', onclick)
                raw_input('Click on the plot to select Vlsrs...')
                print clickvalue
                if len(clickvalue) >= 2:
                    print 'Please select at least two velocities! The last two will be used.'
                    vlsr1, vlsr2 = clickvalue[-2], clickvalue[-1]
                elif len(clickvalue) == 1:
                    vlsr1 = clickvalue[-1]
                    vlsr2 = 0.0
                elif len(clickvalue) == 0:
                    vlsr1, vlsr2 = 0.0, 0.0
                print 'Or input two velocities manually:'
                manualv = raw_input()
                manualv = manualv.split()
                if len(manualv) == 2:
                    vlsr1, vlsr2 = np.float_(manualv)
                else:
                    print 'Invalid input...'
                print 'The two Vlsrs are %0.2f km/s and %0.2f km/s.' % (vlsr1,
                                                                        vlsr2)
                raw_input('Press Enter to start fitting...')
                f.canvas.mpl_disconnect(cid)
            else:
                vlsr1, vlsr2 = 0.0, 0.0
        else:
            if mode == 'single':
                if spec.max() >= cutoff:
                    print 'Reserved space...'
                    vlsr1 = 0.0  # placeholder until auto-detection is implemented
                else:
                    vlsr1 = 0.0
                vlsr2 = 0.0
            elif mode == 'double':
                vlsr1, vlsr2 = 86.0, 88.0
            else:
                vlsr1, vlsr2 = 0.0, 0.0

        if interactive:
            plt.text(0.02,
                     0.85,
                     r'V$_{lsr}$=%.1f km/s' % vlsr1,
                     transform=ax.transAxes,
                     color='r',
                     fontsize=15)
        fsky_init = ch3cn_info['frest'][0] * (1 - vlsr1 * 1e5 / c)

        # Add 4 parameters:
        params = Parameters()
        if vlsr1 != 0:
            params.add('Ntot', value=1e15, min=0, max=1e25)
            params.add('T', value=100, min=10)
            #params.add('sigma', value=0.0035, vary=False)
            params.add('sigma', value=0.0027, min=0, max=0.04)
            if varyf > 0:
                params.add('fsky', value=fsky_init, min=fsky_init-varyf*chanwidth, \
                max=fsky_init+varyf*chanwidth)
            elif varyf == 0:
                params.add('fsky', value=fsky_init, vary=False)
        if vlsr2 != 0:
            print 'Reserved for two-component fitting.'

        # Run the non-linear minimization:
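        # lmfit.minimize returns a MinimizerResult whose .residual holds the
        # last objective evaluation; assuming __model_11__ returns
        # (model - data), adding it back to the spectrum below reconstructs
        # the best-fitted model.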
        if vlsr1 != 0 and vlsr2 != 0:
            result = minimize(__model_11_2c__, params, args=(faxis, spec))
        elif vlsr1 != 0 or vlsr2 != 0:
            result = minimize(__model_11__, params, args=(faxis, spec))
        else:
            unsatisfied = False
            continue

        final = spec + result.residual
        #report_fit(params)

        if interactive:
            plt.plot(faxis, final, 'r', label='Best-fitted model')
            if vlsr1 != 0 and vlsr2 != 0:
                print 'Reserved for two-component fitting.'
            elif vlsr1 != 0 or vlsr2 != 0:
                plt.text(0.02,
                         0.80,
                         r'T$_{rot}$=%.1f($\pm$%.1f) K' %
                         (result.params['T'].value, result.params['T'].stderr),
                         transform=ax.transAxes,
                         color='r',
                         fontsize=15)
                plt.text(0.02,
                         0.75,
                         r'N$_{tot}$=%.2e($\pm$%.2e) cm$^{-2}$' %
                         (result.params['Ntot'].value,
                          result.params['Ntot'].stderr),
                         transform=ax.transAxes,
                         color='r',
                         fontsize=15)
                plt.text(
                    0.02,
                    0.70,
                    r'FWHM=%.2f($\pm$%.2f) km/s' %
                    (c * result.params['sigma'].value / ch3cn_info['frest'][0]
                     / 1e5 * 2.355, c * result.params['sigma'].stderr /
                     ch3cn_info['frest'][0] / 1e5 * 2.355),
                    transform=ax.transAxes,
                    color='r',
                    fontsize=15)
                plt.text(0.02,
                         0.65,
                         r'$\tau_\mathrm{K=0}$=%.1e' %
                         (__tau__(result.params['Ntot'].value,
                                  result.params['sigma'].value,
                                  result.params['T'].value, 0)),
                         transform=ax.transAxes,
                         color='r',
                         fontsize=15)
            plt.legend()
            plt.draw()
            print 'Is the fitting ok? y/n'
            yn = raw_input()
            if yn == 'y':
                unsatisfied = False
                currentT = time.strftime("%Y-%m-%d_%H:%M:%S")
                plt.savefig('CH3CN_fitting_' + currentT + '.png')
            else:
                unsatisfied = True
            #raw_input('Press any key to continue...')
            f.clear()
        else:
            unsatisfied = False
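
For reference, the FWHM reported by the fit converts the best-fit frequency-domain sigma into a velocity width, with 2.355 ~ sqrt(8 ln 2). A minimal sketch of that conversion, assuming (as the code suggests) that c is the speed of light in cm/s and that frequencies are in GHz:

import numpy as np

C_CGS = 2.99792458e10  # speed of light in cm/s

def fwhm_kms(sigma_ghz, frest_ghz):
    # Gaussian sigma in frequency -> Doppler velocity FWHM in km/s
    return C_CGS * sigma_ghz / frest_ghz / 1e5 * np.sqrt(8.0 * np.log(2.0))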
Exemplo n.º 52
0
def get_estimator_boundingbox(tiff_file,
                              channel=0,
                              outputdir='.',
                              w0=1,
                              w1=100,
                              h0=10,
                              h1=1000,
                              acut=0.9,
                              aratio_min=2.,
                              aratio_max=100.,
                              border_pad=5,
                              emin=1.0e-4,
                              debug=False,
                              threshold=None):
    """
    INPUT:
      * file to a tiff image.
      * (w0,w1): minimum and maximum width for bounding box in pixels.
      * (l0,l1): minimum and maximum length for bounding box in pixels.
      * acut: minimum area/rectangle bounding box ratio.
      * threshold is a lower threshold (everything below is set to zero). Value must be a float between 0 and 1. 1 is the maximum, eg. 255 or 65535.
    OUTPUT:
      * 2D matrix of weights corresponding to the probability that a pixel belongs to a cell.

    USEFUL DOCUMENTATION:
      * https://docs.opencv.org/3.4.3/dd/d49/tutorial_py_contour_features.html
      * https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=blur#blur
      * https://docs.opencv.org/3.1.0/da/d22/tutorial_py_canny.html
      * https://docs.opencv.org/3.4/d7/d4d/tutorial_py_thresholding.html
      * https://docs.opencv.org/3.4/db/d5c/tutorial_py_bg_subtraction.html
    """

    bname = os.path.splitext(os.path.basename(tiff_file))[0]

    ## read the input tiff_file
    img = get_tiff2ndarray(tiff_file, channel=channel)
    #img0 = np.copy(img)

    # rescale dynamic range linearly (important for OTSU)
    amin = np.min(img)
    amax = np.max(img)
    print "amin = {:.1g}    amax = {:.1g}".format(amin, amax)
    img = (np.float_(img) - amin) / (amax - amin)

    ## thresholding to binary mask
    norm8 = float(2**8 - 1)
    norm16 = float(2**16 - 1)
    if threshold is None:
        print "OTSU thresholding"
        # convert to 8-bit image (OpenCV requirement for OTSU)
        img = np.array(255 * img, np.uint8)
        img8 = np.copy(img)
        # OTSU threshold
        ret, img = cv2.threshold(img, 0, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        thres1 = float(ret) / norm8 * (amax - amin) + amin
        thres8 = np.uint8(thres1 * norm8)
        thres16 = np.uint16(thres1 * norm16)
    else:
        thres1 = threshold
        thres8 = np.uint8(thres1 * norm8)
        thres16 = np.uint16(thres1 * norm16)
        ret = max((thres1 - amin) / (amax - amin), 0)  # value in rescaled DNR
        ret = np.uint8(255 * ret)  # uint8
        print "thres1 = {:.1g}    threshold_rescaled_DNR_uint8 = {:d}".format(
            thres1, ret)
        # convert to 8-bit image (OpenCV requirement)
        img = np.array(255 * img, np.uint8)
        img8 = np.copy(img)
        # apply the user-supplied threshold
        ret1, img = cv2.threshold(img, ret, 255, cv2.THRESH_BINARY)
    print "thres1 = {:.1g}    thres8 = {:d}    thres16 = {:d}".format(
        thres1, thres8, thres16)
    img_bin = np.copy(img)

    ## opening/closing operations
    kernel = np.ones((3, 3), np.uint8)  # smoothing kernel
    img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    #img_opening = np.copy(img)
    img = cv2.erode(img, kernel, iterations=1)
    #img_closing = np.copy(img)
    img = cv2.dilate(img, kernel, iterations=1)
    img_morph = np.copy(img)

    ## find connected components
    ncomp, labels = cv2.connectedComponents(img)
    print "Found {:d} objects".format(ncomp)

    ## compute the bounding box for the identified labels
    #for n in range(ncomp):
    height, width = img.shape
    Y, X = np.mgrid[0:height, 0:width]
    boundingboxes = []
    boundingboxes_upright = []
    pointsperbox = []
    for n in np.arange(ncomp):
        idx = (labels == n)
        # pixels coordinates for the label
        points = np.transpose([X[idx], Y[idx]])
        pointsperbox.append(len(points))

        # upright rectangles
        bb = cv2.boundingRect(points)
        boundingboxes_upright.append(bb)

        # rotated rectangles
        bb = cv2.minAreaRect(points)
        boundingboxes.append(bb)

    # estimator matrix
    ## compute scores
    scores = []
    for n in np.arange(ncomp):
        bb = boundingboxes[n]
        bb_upright = boundingboxes_upright[n]
        area = pointsperbox[n]

        # get bounding box width and height
        xymid, wh, angle = bb
        w, h = wh
        if w > h:
            w, h = h, w  # ensure w is the short side

        area_rect = w * h
        aval = area / area_rect
        aratio = h / w
        #        print "w={:.1f}    h={:.1f}  aval={:.2e}  aratio={:.1f}".format(w,h,aval,aratio)

        # computing score
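        # each factor min(1, exp(x)) equals 1 while the constraint is
        # satisfied and decays exponentially as soon as it is violated, so the
        # score is a soft, multiplicative version of hard cutoffs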
        score = 1.
        score *= min(1, np.exp(w - w0))  # penalize w < w0
        score *= min(1, np.exp(w1 - w))  # penalize w > w1
        score *= min(1, np.exp(h - h0))  # penalize h < h0
        score *= min(1, np.exp(h1 - h))  # penalize h > h1
        score *= min(1, np.exp(aval - acut))  # penalize area/rect < acut
        score *= min(1, np.exp(aratio - aratio_min))  # penalize aratio < aratio_min
        score *= min(1, np.exp(aratio_max - aratio))  # penalize aratio > aratio_max

        # check that box corners of an upright bounding box are within the image plus some pad
        x, y, ww, hh = bb_upright
        x0 = x - border_pad
        y0 = y - border_pad
        x1 = x + ww + border_pad
        y1 = y + hh + border_pad
        #        print "a0 = {:d}  b0 = {:d}".format(x,y)
        #        print "a1 = {:d}  b1 = {:d}".format(x+ww,y+hh)
        #        print "x0 = {:d}  y0 = {:d}".format(x0,y0)
        #        print "x1 = {:d}  y1 = {:d}".format(x1,y1)
        if not (x0 >= 0 and x1 < width and y0 >= 0 and y1 < height):
            score = 0.

        # discard small values
        if score < emin:
            score = 0.
        scores.append(score)

    # estimator matrix
    eimg = np.zeros(img.shape, dtype=np.float_)
    for n in np.arange(ncomp):
        idx = (labels == n)
        eimg[idx] = scores[n]
    nz = np.sum(eimg > 0.)
    ntot = len(np.ravel(eimg))
    print "nz = {:d} / {:d}    sparcity index = {:.2e}".format(
        nz, ntot,
        float(nz) / float(ntot))
    efname = bname
    #efile = os.path.join(outputdir,efname+'.txt')
    #efile = os.path.join(outputdir,efname+'.pkl')
    efile = os.path.join(outputdir, efname + '.npz')
    # scipy.sparse.save_npz manages the file itself, so no need to open it first
    ssp.save_npz(efile, ssp.coo_matrix(eimg), compressed=False)
    print "{:<20s}{:<s}".format('est. file', efile)

    if debug:
        debugdir = os.path.join(outputdir, 'debug')
        if not os.path.isdir(debugdir):
            os.makedirs(debugdir)
        # plots
        import matplotlib.pyplot as plt
        import matplotlib.patches
        from matplotlib.path import Path
        import matplotlib.collections
        from matplotlib.gridspec import GridSpec

        ## rescale dynamic range linearly
        img_base = np.array(img8, dtype=np.float_) / 255.
        img_base = (img_base - np.min(img_base)) / (np.max(img_base) -
                                                    np.min(img_base))
        img_base = np.array(img_base * 255, dtype=np.uint8)
        ncolors = (20 - 1)
        labels_iterated = np.uint8(labels -
                                   np.int_(labels / ncolors) * ncolors) + 1
        images = [img_base, img_bin, img_morph, labels_iterated, eimg]
        titles = [
            'original', 'binary (thres = {:d})'.format(thres8),
            'closing/opening', 'bounding box', 'estimator'
        ]
        cmaps = ['gray', 'gray', 'gray', 'tab20c', 'viridis']
        nfig = len(images)
        nrow = int(np.ceil(np.sqrt(nfig)))
        ncol = nfig / nrow
        if (ncol * nrow < nfig): ncol += 1
        fig = plt.figure(num=None, figsize=(ncol * 4, nrow * 3))
        gs = GridSpec(nrow, ncol, figure=fig)
        axes = []
        for r in range(nrow):
            for c in range(ncol):
                ind = r * ncol + c
                if not (ind < nfig):
                    break
                ax = fig.add_subplot(gs[r, c])
                axes.append(ax)
                ax.set_title(titles[ind].upper())
                cf = ax.imshow(images[ind], cmap=cmaps[ind])
                ax.set_xticks([]), ax.set_yticks([])

                if titles[ind] == 'bounding box':
                    # draw bounding boxes
                    rects = []
                    rects_upright = []
                    codes = [
                        Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
                        Path.CLOSEPOLY
                    ]
                    for n in range(ncomp):
                        bb_upright = boundingboxes_upright[n]
                        bb = boundingboxes[n]
                        # upright rect
                        xlo, ylo, w, h = bb_upright
                        rect = matplotlib.patches.Rectangle((xlo, ylo),
                                                            width=w,
                                                            height=h,
                                                            fill=False)
                        rects_upright.append(rect)

                        # rect
                        verts = cv2.boxPoints(bb)
                        verts = np.concatenate((verts, [verts[0]]))
                        path = Path(verts, codes)
                        rect = matplotlib.patches.PathPatch(path)
                        rects.append(rect)

                    col = matplotlib.collections.PatchCollection(
                        rects_upright,
                        edgecolors='k',
                        facecolors='none',
                        linewidths=0.5)
                    ax.add_collection(col)
                    col = matplotlib.collections.PatchCollection(
                        rects,
                        edgecolors='r',
                        facecolors='none',
                        linewidths=0.5)
                    ax.add_collection(col)

                if titles[ind] == 'estimator':
                    fig.colorbar(cf, ax=ax)

        fname = "{}_estimator_debug".format(bname)
        fileout = os.path.join(debugdir, fname + '.png')
        gs.tight_layout(fig, w_pad=0)
        plt.savefig(fileout, dpi=300)
        print "{:<20s}{:<s}".format('debug file', fileout)
        plt.close('all')
    # from agarpad code: end """
    """ canny: start
    #test canny filtering
    print np.min(img), np.max(img)
    maxthres=2500
    minthres=100
    edges = cv2.Canny(img,threshold1=minthres,threshold2=maxthres,apertureSize=5)
    print np.unique(edges)
    print "START TEST"
    import matplotlib.pyplot as plt
    fig = plt.figure(num=None,figsize=(8,4))
    plt.subplot(121)
    plt.imshow(img, cmap='gray')
    plt.xticks([]),plt.yticks([])
    plt.subplot(122)
    plt.imshow(edges, cmap='gray')
    plt.xticks([]),plt.yticks([])
    fileout = os.path.join(os.getcwd(),'test_get_estimator_contours.png')
    fig.tight_layout()
    plt.savefig(fileout,dpi=300)
    print "writing: ", fileout
    plt.close('all')

    fig = plt.figure(num=None,figsize=(4,4))
    hist,hedges = np.histogram(np.ravel(img), bins='auto')
    plt.bar(hedges[:-1], hist, np.diff(hedges), facecolor='blue', lw=0)
    fileout = os.path.join(os.getcwd(),'test_get_estimator_contours_histogram.png')
    fig.tight_layout()
    plt.savefig(fileout,dpi=300)
    print "writing: ", fileout
    plt.close('all')
    sys.exit()
    print "END TEST"
    #test
    # canny: start """

    return os.path.realpath(efile)
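
A minimal usage sketch (the file names and parameter values below are purely illustrative):

# hypothetical call: score cell-like objects in 'cells.tif' and write the
# sparse estimator matrix under 'estimators/'
efile = get_estimator_boundingbox('cells.tif', channel=0, outputdir='estimators',
                                  w0=2, w1=20, h0=10, h1=200, debug=True)
print "estimator written to: ", efile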
Exemplo n.º 53
0
def create_scaled_direct_coordinates(xyz, reciprocal_k, k):
    # the factor 10 converts nm to angstrom
    out = np.float_(k) * np.einsum('ijk,lk->ijl', xyz * 10.0, reciprocal_k)
    return np.mod(out, np.float_(k))
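
A minimal usage sketch (shapes and values are illustrative): xyz is an (n_frames, n_atoms, 3) array of positions in nm, reciprocal_k a 3x3 reciprocal-cell matrix in 1/angstrom, and k the number of grid points per dimension; the result is wrapped into [0, k).

import numpy as np

xyz = np.random.rand(2, 5, 3)        # 2 frames, 5 atoms, positions in nm
reciprocal_k = np.eye(3) / 25.0      # toy reciprocal cell in 1/angstrom
scaled = create_scaled_direct_coordinates(xyz, reciprocal_k, 32)
print(scaled.shape)                  # (2, 5, 3), all values in [0, 32)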
Exemplo n.º 54
0
            atomtype_form[iatype], dq_form = read_aff(atype_name[iatype])
        elif "cm" in args.aff:
            atomtype_form[iatype], dq_form = gen_aff_cm(
                atype_name[iatype], args.cm_endq, args.cm_dq)
        if dq_form != check_dq_form:
            raise IOError(
                " inconsistent form factor dq's {} for {}-th atomtype (name {}) (ref. initial dq's {})"
                .format(dq_form, iatype, str(atype_name[iatype]),
                        check_dq_form))

# count the number of atoms of each atom type (needed to normalize s(q))
#  written by TingTing and edited by Hyuntae
natom_type_kind = np.empty(n_atom_type, dtype=np.int_)
for i_type in range(n_atom_type):
    natom_type_kind[i_type] = len(np.where(alist == atype_name[i_type])[0])
frac_natom_type_kind = np.float_(natom_type_kind) / np.float_(
    n_atoms_wo_drudes)
print("Done: reading form factor files and make atom type list")


##########################################################################
#***********************************************
# this function calculates B-splines, which are used in PME as interpolating
# functions.  B-splines are calculated recursively, and therefore it is a good
# idea to grid them
#************************************************
def b_spline(u, n):
    # define the spline table mn for n-1 values
    mn = np.zeros((n, n))
    for i in range(1, n):
        ui = u - np.float_(i - 1)
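
The b_spline fragment above is truncated; for context, here is a minimal self-contained sketch of the cardinal B-spline recursion used in PME (Essmann et al. 1995), which a gridded implementation like the one above presumably tabulates:

def cardinal_bspline(u, n):
    """M_n(u) on [0, n]: M_2(u) = 1 - |u - 1| for 0 <= u <= 2, and
    M_n(u) = u/(n-1) * M_{n-1}(u) + (n-u)/(n-1) * M_{n-1}(u-1)."""
    if n == 2:
        return max(0.0, 1.0 - abs(u - 1.0))
    return (u * cardinal_bspline(u, n - 1)
            + (n - u) * cardinal_bspline(u - 1.0, n - 1)) / (n - 1.0)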
Exemplo n.º 55
0
import pandas as pd
import numpy as np

if __name__ == '__main__':
    # First, read football.dat into a pandas dataframe
    # using any contiguous block of whitespace as the delimiter between columns.
    # Upon observing the data, I see that there is a column of hyphens between the 'for' and 'against'
    # columns, and this column has no header.
    # To account for this, I set header=0 to discard the top row from the file,
    # thus allowing me to use my own set of column headers.
    # I specify the headers in the 'names' parameter.
    table = pd.read_table('football.dat', delim_whitespace=True, header=0, \
                names=['number', 'name', 'p', 'w', 'l', 'd', 'for', 'dash', 'against', 'pts'])
    # convert the 'for' and 'against' series into numpy float arrays
    pts_agnst, pts_for = np.float_(table['against']), np.float_(table['for'])
    # create a numpy float array that contains the absolute values of the differences between 'against' and 'for'
    spread = abs(pts_agnst - pts_for)
    # Now, get the array index of the smallest spread value that is not == nan
    # Ignoring nan is necessary to account for the row of hyphens.  Another option would have been to
    # skip this row in pd.read_table, but the approach I have used is more general.
    # numpy.nanargmin does exactly this, finding the index of a float array for the minimum value in
    # that array which is not nan
    answer_idx = np.nanargmin(spread)
    # Print the value in the name column for the row with the smallest spread
    print(table['name'][answer_idx])
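
For comparison, a more pandas-native variant of the same lookup (illustrative; it coerces the non-numeric hyphen row to NaN explicitly and relies on idxmin skipping NaN by default):

spread = (pd.to_numeric(table['for'], errors='coerce')
          - pd.to_numeric(table['against'], errors='coerce')).abs()
print(table['name'][spread.idxmin()])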
Exemplo n.º 56
0
    def pre_sev(self,
                earth_radius=6378.0,
                vertical_resolution=0.1,
                cloud_top_height=1.0,
                cloud_bottom_height=0.5):

        self.lay = {}
        self.lev = {}

        f = SD(self.fname_h4, SDC.READ)

        # lon lat
        lon0 = f.select('Longitude')
        lat0 = f.select('Latitude')
        cot0 = f.select('Cloud_Optical_Thickness_16')
        cer0 = f.select('Cloud_Effective_Radius_16')
        cot_pcl0 = f.select('Cloud_Optical_Thickness_16_PCL')
        cer_pcl0 = f.select('Cloud_Effective_Radius_16_PCL')
        cth0 = f.select('Cloud_Top_Height')

        if 'actual_range' in lon0.attributes():
            lon_range = lon0.attributes()['actual_range']
            lat_range = lat0.attributes()['actual_range']
        else:
            lon_range = [-180.0, 180.0]
            lat_range = [-90.0, 90.0]

        lon = lon0[:]
        lat = lat0[:]
        cot = np.float_(cot0[:])
        cer = np.float_(cer0[:])
        cot_pcl = np.float_(cot_pcl0[:])
        cer_pcl = np.float_(cer_pcl0[:])
        cth = np.float_(cth0[:])

        logic = (lon >= lon_range[0]) & (lon <= lon_range[1]) & (
            lat >= lat_range[0]) & (lat <= lat_range[1])
        lon = lon[logic]
        lat = lat[logic]
        cot = cot[logic]
        cer = cer[logic]
        cot_pcl = cot_pcl[logic]
        cer_pcl = cer_pcl[logic]
        cth = cth[logic]

        if self.extent is not None:
            logic = (lon >= self.extent[0]) & (lon <= self.extent[1]) & (
                lat >= self.extent[2]) & (lat <= self.extent[3])
            lon = lon[logic]
            lat = lat[logic]
            cot = cot[logic]
            cer = cer[logic]
            cot_pcl = cot_pcl[logic]
            cer_pcl = cer_pcl[logic]
            cth = cth[logic]

        xy = (self.extent[1] - self.extent[0]) * (self.extent[3] -
                                                  self.extent[2])
        N0 = np.sqrt(lon.size / xy)

        Nx = int(N0 * (self.extent[1] - self.extent[0]))
        if Nx % 2 == 1:
            Nx += 1

        Ny = int(N0 * (self.extent[3] - self.extent[2]))
        if Ny % 2 == 1:
            Ny += 1

        lon_1d0 = np.linspace(self.extent[0], self.extent[1], Nx + 1)
        lat_1d0 = np.linspace(self.extent[2], self.extent[3], Ny + 1)

        lon_1d = (lon_1d0[1:] + lon_1d0[:-1]) / 2.0
        lat_1d = (lat_1d0[1:] + lat_1d0[:-1]) / 2.0

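        # convert the angular grid spacing (degrees) to km along the sphere:
        # dx = dlon * pi/180 * R_earth, and likewise for dy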
        dx = (lon_1d[1] - lon_1d[0]) / 180.0 * (np.pi * earth_radius)
        dy = (lat_1d[1] - lat_1d[0]) / 180.0 * (np.pi * earth_radius)

        x_1d = (lon_1d - lon_1d[0]) * dx
        y_1d = (lat_1d - lat_1d[0]) * dy

        # lon, lat
        lat_2d, lon_2d = np.meshgrid(lat_1d, lon_1d)
        # lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d)

        # cot
        cot_range = cot0.attributes()['valid_range']
        cer_range = cer0.attributes()['valid_range']
        cot_pcl_range = cot_pcl0.attributes()['valid_range']
        cer_pcl_range = cer_pcl0.attributes()['valid_range']
        cth_range = cth0.attributes()['valid_range']

        # +
        # create cot_all/cer_all that contains both cot/cer and cot_pcl/cer_pcl
        cot_all = np.zeros(cot.size, dtype=np.float64)
        cer_all = np.zeros(cer.size, dtype=np.float64)
        cth_all = np.zeros(cth.size, dtype=np.float64)
        cth_all[...] = np.nan

        logic = (cot >= cot_range[0]) & (cot <= cot_range[1]) & (
            cer >= cer_range[0]) & (cer <= cer_range[1]) & (
                cth >= cth_range[0]) & (cth <= cth_range[1])
        cot_all[logic] = cot[logic] * cot0.attributes(
        )['scale_factor'] + cot0.attributes()['add_offset']
        cer_all[logic] = cer[logic] * cer0.attributes(
        )['scale_factor'] + cer0.attributes()['add_offset']
        cth_all[logic] = cth[logic] * cth0.attributes(
        )['scale_factor'] + cth0.attributes()['add_offset']

        logic_pcl = np.logical_not(logic) & (cot_pcl >= cot_pcl_range[0]) & (
            cot_pcl <= cot_pcl_range[1]) & (cer_pcl >= cer_pcl_range[0]) & (
                cer_pcl <= cer_pcl_range[1]) & (cth >= cth_range[0]) & (
                    cth <= cth_range[1])
        cot_all[logic_pcl] = cot_pcl[logic_pcl] * cot_pcl0.attributes(
        )['scale_factor'] + cot_pcl0.attributes()['add_offset']
        cer_all[logic_pcl] = cer_pcl[logic_pcl] * cer_pcl0.attributes(
        )['scale_factor'] + cer_pcl0.attributes()['add_offset']
        cth_all[logic_pcl] = cth[logic_pcl] * cth0.attributes(
        )['scale_factor'] + cth0.attributes()['add_offset']
        cth_all /= 1000.0  # convert cloud top height from m to km

        logic_all = logic | logic_pcl
        # -

        cot = cot_all
        cer = cer_all
        cth = cth_all

        cot[np.logical_not(logic_all)] = 0.0
        cer[np.logical_not(logic_all)] = 1.0
        cth[np.logical_not(logic_all)] = np.nan

        points = np.transpose(np.vstack((lon, lat)))

        cot_2d = interpolate.griddata(points,
                                      cot, (lon_2d, lat_2d),
                                      method='nearest')
        cer_2d = interpolate.griddata(points,
                                      cer, (lon_2d, lat_2d),
                                      method='nearest')
        cth_2d = interpolate.griddata(points,
                                      cth, (lon_2d, lat_2d),
                                      method='nearest')

        f.end()

        # self.atm = atm_atmmod(np.arange(int(np.nanmax(cth_2d))+2))
        self.atm = atm_atmmod(
            levels=np.arange(cloud_bottom_height, cloud_top_height +
                             vertical_resolution, vertical_resolution))
        self.lay['x'] = {'data': x_1d, 'name': 'X', 'units': 'km'}
        self.lay['y'] = {'data': y_1d, 'name': 'Y', 'units': 'km'}
        self.lay['nx'] = {'data': Nx, 'name': 'Nx', 'units': 'N/A'}
        self.lay['ny'] = {'data': Ny, 'name': 'Ny', 'units': 'N/A'}
        self.lay['dx'] = {'data': dx, 'name': 'dx', 'units': 'km'}
        self.lay['dy'] = {'data': dy, 'name': 'dy', 'units': 'km'}
        self.lay['altitude'] = copy.deepcopy(self.atm.lay['altitude'])
        self.lay['cot'] = {
            'data': cot_2d,
            'name': 'Cloud optical thickness',
            'units': 'N/A'
        }
        self.lay['cer'] = {
            'data': cer_2d,
            'name': 'Cloud effective radius',
            'units': 'micron'
        }
        self.lay['cth'] = {
            'data': cth_2d,
            'name': 'Cloud top height',
            'units': 'km'
        }
        self.lay['lon'] = {
            'data': lon_2d,
            'name': 'Longitude',
            'units': 'degree'
        }
        self.lay['lat'] = {
            'data': lat_2d,
            'name': 'Latitude',
            'units': 'degree'
        }

        # temperature 3d
        t_1d = self.atm.lay['temperature']['data']
        Nz = t_1d.size
        t_3d = np.empty((Nx, Ny, Nz), dtype=t_1d.dtype)
        t_3d[...] = t_1d[None, None, :]

        self.lay['temperature'] = {
            'data': t_3d,
            'name': 'Temperature',
            'units': 'K'
        }

        # extinction 3d
        ext_3d = np.zeros((Nx, Ny, Nz), dtype=np.float64)

        # alt = self.atm.lay['altitude']['data']
        # for i in range(Nx):
        #     for j in range(Ny):
        #         cld_top  = cth_2d[i, j]
        #         if not np.isnan(cld_top):
        #             lwp  = 5.0/9.0 * 1.0 * cot_2d[i, j] * cer_2d[i, j] / 10.0
        #             ext0 = 0.75 * 2.0 * lwp / cer_2d[i, j] / 100.0
        #             index = np.argmin(np.abs(cld_top-alt))
        #             ext_3d[i, j, index] = ext0

        for i in range(Nx):
            for j in range(Ny):

                ext0 = cal_ext(cot_2d[i, j], cer_2d[i, j])
                ext_3d[i, j, :] = ext0 / (
                    self.atm.lay['thickness']['data'].sum() * 1000.0)

        self.lay['extinction'] = {
            'data': ext_3d,
            'name': 'Extinction coefficients'
        }

        self.Nx = Nx
        self.Ny = Ny
        self.Nz = Nz
Exemplo n.º 57
0
    def read_potential(self, fileobj):
        """Reads a LAMMPS EAM file in alloy or adp format
        and creates the interpolation functions from the data
        """

        if isinstance(fileobj, str):
            f = open(fileobj)
            if self.form is None:
                self.set_form(fileobj)
        else:
            f = fileobj

        def lines_to_list(lines):
            """Make the data one long line so as not to care how its formatted
            """
            data = []
            for line in lines:
                data.extend(line.split())
            return data

        lines = f.readlines()
        if self.form == 'eam':  # single element eam file (aka funcfl)
            self.header = lines[:1]

            data = lines_to_list(lines[1:])

            # eam form is just like an alloy form for one element

            self.Nelements = 1
            self.Z = np.array([data[0]], dtype=int)
            self.mass = np.array([data[1]])
            self.a = np.array([data[2]])
            self.lattice = [data[3]]

            self.nrho = int(data[4])
            self.drho = float(data[5])
            self.nr = int(data[6])
            self.dr = float(data[7])
            self.cutoff = float(data[8])

            n = 9 + self.nrho
            self.embedded_data = np.array([np.float_(data[9:n])])

            self.rphi_data = np.zeros(
                [self.Nelements, self.Nelements, self.nr])

            effective_charge = np.float_(data[n:n + self.nr])
            # convert effective charges to rphi according to
            # http://lammps.sandia.gov/doc/pair_eam.html
            self.rphi_data[0, 0] = Bohr * Hartree * (effective_charge**2)

            self.density_data = np.array(
                [np.float_(data[n + self.nr:n + 2 * self.nr])])

        elif self.form in ['alloy', 'adp']:
            self.header = lines[:3]
            i = 3

            data = lines_to_list(lines[i:])

            self.Nelements = int(data[0])
            d = 1
            self.elements = data[d:d + self.Nelements]
            d += self.Nelements

            self.nrho = int(data[d])
            self.drho = float(data[d + 1])
            self.nr = int(data[d + 2])
            self.dr = float(data[d + 3])
            self.cutoff = float(data[d + 4])

            self.embedded_data = np.zeros([self.Nelements, self.nrho])
            self.density_data = np.zeros([self.Nelements, self.nr])
            self.Z = np.zeros([self.Nelements], dtype=int)
            self.mass = np.zeros([self.Nelements])
            self.a = np.zeros([self.Nelements])
            self.lattice = []
            d += 5

            # reads in the part of the eam file for each element
            for elem in range(self.Nelements):
                self.Z[elem] = int(data[d])
                self.mass[elem] = float(data[d + 1])
                self.a[elem] = float(data[d + 2])
                self.lattice.append(data[d + 3])
                d += 4

                self.embedded_data[elem] = np.float_(data[d:(d + self.nrho)])
                d += self.nrho
                self.density_data[elem] = np.float_(data[d:(d + self.nr)])
                d += self.nr

            # reads in the r*phi data for each interaction between elements
            self.rphi_data = np.zeros(
                [self.Nelements, self.Nelements, self.nr])

            for i in range(self.Nelements):
                for j in range(i + 1):
                    self.rphi_data[j, i] = np.float_(data[d:(d + self.nr)])
                    d += self.nr

        elif self.form == 'fs':
            self.header = lines[:3]
            i = 3

            data = lines_to_list(lines[i:])

            self.Nelements = int(data[0])
            d = 1
            self.elements = data[d:d + self.Nelements]
            d += self.Nelements

            self.nrho = int(data[d])
            self.drho = float(data[d + 1])
            self.nr = int(data[d + 2])
            self.dr = float(data[d + 3])
            self.cutoff = float(data[d + 4])

            self.embedded_data = np.zeros([self.Nelements, self.nrho])
            self.density_data = np.zeros(
                [self.Nelements, self.Nelements, self.nr])
            self.Z = np.zeros([self.Nelements], dtype=int)
            self.mass = np.zeros([self.Nelements])
            self.a = np.zeros([self.Nelements])
            self.lattice = []
            d += 5

            # reads in the part of the eam file for each element
            for elem in range(self.Nelements):
                self.Z[elem] = int(data[d])
                self.mass[elem] = float(data[d + 1])
                self.a[elem] = float(data[d + 2])
                self.lattice.append(data[d + 3])
                d += 4

                self.embedded_data[elem] = np.float_(data[d:(d + self.nrho)])
                d += self.nrho
                self.density_data[elem, :, :] = np.float_(
                    data[d:(d + self.nr * self.Nelements)]).reshape(
                        [self.Nelements, self.nr])
                d += self.nr * self.Nelements

            # reads in the r*phi data for each interaction between elements
            self.rphi_data = np.zeros(
                [self.Nelements, self.Nelements, self.nr])

            for i in range(self.Nelements):
                for j in range(i + 1):
                    self.rphi_data[j, i] = np.float_(data[d:(d + self.nr)])
                    d += self.nr

        self.r = np.arange(0, self.nr) * self.dr
        self.rho = np.arange(0, self.nrho) * self.drho

        # choose the set_splines method according to the type
        if self.form == 'fs':
            self.set_fs_splines()
        else:
            self.set_splines()

        if (self.form == 'adp'):
            self.read_adp_data(data, d)
            self.set_adp_splines()
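
If this is the reader of ASE's EAM calculator (which it closely resembles), the usual entry point is constructing the calculator from a potential file, which parses the tabulated data through this method; the file name below is illustrative:

from ase.calculators.eam import EAM

calc = EAM(potential='Al99.eam.alloy')  # read_potential() is called internally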
Exemplo n.º 58
0
        h.write_map(dir_out + '/sin_r1_' + str(i + 1) + '_tmp.fits', sin_r1)
        h.write_map(dir_out + '/cos_r2_' + str(i + 1) + '_tmp.fits', cos_r2)
        h.write_map(dir_out + '/sin_r2_' + str(i + 1) + '_tmp.fits', sin_r2)
        h.write_map(dir_out + '/cos_r4_' + str(i + 1) + '_tmp.fits', cos_r4)
        h.write_map(dir_out + '/sin_r4_' + str(i + 1) + '_tmp.fits', sin_r4)

    if (i == ydays - 1):
        h.write_map(dir_out + '/nhits_' + str(i + 1) + '_tmp.fits', nhits)
        h.write_map(dir_out + '/cos_r1_' + str(i + 1) + '_tmp.fits', cos_r1)
        h.write_map(dir_out + '/sin_r1_' + str(i + 1) + '_tmp.fits', sin_r1)
        h.write_map(dir_out + '/cos_r2_' + str(i + 1) + '_tmp.fits', cos_r2)
        h.write_map(dir_out + '/sin_r2_' + str(i + 1) + '_tmp.fits', sin_r2)
        h.write_map(dir_out + '/cos_r4_' + str(i + 1) + '_tmp.fits', cos_r4)
        h.write_map(dir_out + '/sin_r4_' + str(i + 1) + '_tmp.fits', sin_r4)

r1 = np.sqrt((cos_r1 / np.float_(nhits))**2. + (sin_r1 / np.float_(nhits))**2.)
r2 = np.sqrt((cos_r2 / np.float_(nhits))**2. + (sin_r2 / np.float_(nhits))**2.)
r4 = np.sqrt((cos_r4 / np.float_(nhits))**2. + (sin_r4 / np.float_(nhits))**2.)

filename_out = dir_out + '/' + filename + '_nhits'
h.write_map(filename_out + '.fits', nhits)
h.mollview(nhits, title='Nobs, ' + filename_out, rot=[0., 0.])
h.graticule(dpar=10, dmer=10, coord='E')
py.savefig(filename_out + '.ps')

filename_out = dir_out + '/' + filename + '_cos_r1'
h.write_map(filename_out + '.fits', cos_r1)
h.mollview(cos_r1, title='cos_r1, ' + filename_out, rot=[0., 0.])
h.graticule(dpar=10, dmer=10, coord='E')
py.savefig(filename_out + '.ps')
Exemplo n.º 59
0
def Download_data(output_folder_Tot, Date, latlim, lonlim):

    polygon = [[[lonlim[0], latlim[0]], [lonlim[0], latlim[1]],
                [lonlim[1], latlim[1]], [lonlim[1], latlim[0]],
                [lonlim[0], latlim[0]]]]
    url = 'https://io.apps.fao.org/gismgr/api/v1/query/'

    header = {
        "Content-type": "application/json;charset=UTF-8",
        "Accept": "application/json"
    }

    # Create request
    payload = {
        "type": "CropRaster",
        "params": {
            "properties": {
                "outputFileName": "LC_WAPOR_%s.01.01.tif" % Date.year,
                "cutline": True,
                "tiled": True,
                "compressed": True,
                "overviews": True
            },
            "cube": {
                "code": "L1_LCC_A",
                "workspaceCode": "WAPOR",
                "language": "en"
            },
            "dimensions": [{
                "code":
                "YEAR",
                "values":
                ["[%s-01-01,%s-01-01)" % (int(Date.year), int(Date.year) + 1)]
            }],
            "measures": ["LCC"],
            "shape": {
                "type": "Polygon",
                "coordinates": polygon
            }
        }
    }

    # Download the data
    response = requests.post(url, data=json.dumps(payload), headers=header)
    response.raise_for_status()

    response_json = response.json()
    result = response_json['response']

    job_url = result['links'][0]['href']

    Array = geo = proj = None
    file_name_temp = os.path.join(output_folder_Tot,
                                  "LC_WAPOR_%s.01.01_temp.tif" % Date.year)

    for tries in range(20):

        if not os.path.exists(file_name_temp):
            time.sleep(5)
            job_response = requests.get(job_url, headers=header)
            if job_response.status_code == 200:
                try:
                    job_result = job_response.json(
                    )['response']['output']['downloadUrl']
                    urllib.request.urlretrieve(job_result, file_name_temp)
                    dest = gdal.Open(file_name_temp)
                    geo = dest.GetGeoTransform()
                    proj = dest.GetProjection()
                    Array = dest.GetRasterBand(1).ReadAsArray()
                    Array = np.float_(Array)
                    break  # stop polling once the raster has been read
                except Exception:
                    pass  # job not ready yet, retry
        else:
            break

    return (Array, geo, proj, file_name_temp)
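
A minimal usage sketch (the output folder, date, and bounding box below are illustrative):

import datetime

latlim = [7.0, 9.0]
lonlim = [36.0, 38.0]
Array, geo, proj, tmpfile = Download_data('/tmp/wapor', datetime.date(2015, 1, 1),
                                          latlim, lonlim)
print(Array.shape)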
Exemplo n.º 60
0
import sys
import os.path as ptt
from numpy import float_
# gdata and wfit are assumed to be pyfits' getdata/writeto (as in FIT3D scripts)
from pyfits import getdata as gdata
from pyfits import writeto as wfit
import my
from my import mycall


def sycall(comand):
    from subprocess import call
    line = comand.split(" ")
    fcomand = []
    fcomand.extend(line)
    call(fcomand)


def wfits(name, data, hdr):
    if not ptt.exists(name):
        wfit(name, data, hdr)
    else:
        sycall("rm " + name)
        wfit(name, data, hdr)


sys.argv = filter(None, sys.argv)
if len(sys.argv) < 2:
    print "USE: write_img_header.py FILE.FITS HEADER VALUE"
    sys.exit(0)
file = sys.argv[1]
header = sys.argv[2]
value = float_(sys.argv[3])
[pdl, h] = gdata(file, 0, header=True)
h[header] = value
wfits(file, pdl, h)