def test_zeros(sc):
    from numpy import zeros as npzeros
    x = npzeros((2, 3, 4))
    b = zeros((2, 3, 4), sc)
    assert allclose(x, b.toarray())
    x = npzeros(5)
    b = zeros(5, sc)
    assert allclose(x, b.toarray())
def test_06_fill_label(self):
    # Fill the enclosed region
    import numpy as np
    drawlabel = DrawLabel()
    drawlabel.img_origin = np.zeros((6, 6))
    drawlabel.img_edged = drawlabel.img_origin.copy()
    drawlabel.img_label = np.zeros((6, 6))
    edge_dict = {
        0: list(np.arange(5)),
        1: [0, 4],
        2: [0, 4],
        3: [0, 4],
        4: list(np.arange(5))
    }
    drawlabel.edge_dict = edge_dict
    drawlabel.flag_closed = True
    drawlabel.fill_label(255)
    self.assertEqual(drawlabel.img_label.sum(), 255 * 5 * 4)
def val2vecParams(N, hamParams):
    # Extract values; expand scalar rates into length-N site vectors
    if not isinstance(hamParams[0], (collections.Sequence, npndarray)):
        a = float(hamParams[0])
        aVec = npzeros(N, dtype=float_)
        aVec[0] = a
    else:
        aVec = hamParams[0]
    if not isinstance(hamParams[1], (collections.Sequence, npndarray)):
        g = float(hamParams[1])
        gVec = npzeros(N, dtype=float_)
        gVec[0] = g
    else:
        gVec = hamParams[1]
    if not isinstance(hamParams[2], (collections.Sequence, npndarray)):
        p = float(hamParams[2])
        pVec = p * npones(N, dtype=float_)
    else:
        pVec = hamParams[2]
    if not isinstance(hamParams[3], (collections.Sequence, npndarray)):
        q = float(hamParams[3])
        qVec = q * npones(N, dtype=float_)
    else:
        qVec = hamParams[3]
    if not isinstance(hamParams[4], (collections.Sequence, npndarray)):
        b = float(hamParams[4])
        bVec = npzeros(N, dtype=float_)
        bVec[-1] = b
    else:
        bVec = hamParams[4]
    if not isinstance(hamParams[5], (collections.Sequence, npndarray)):
        d = float(hamParams[5])
        dVec = npzeros(N, dtype=float_)
        dVec[-1] = d
    else:
        dVec = hamParams[5]
    if not isinstance(hamParams[6], (collections.Sequence, npndarray)):
        s = float(hamParams[6])
        sVec = s * npones(N, dtype=float_)
    else:
        sVec = hamParams[6]
    # Collect the vectors
    returnParams = (aVec, gVec, pVec, qVec, bVec, dVec, sVec)
    return returnParams
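# Hypothetical usage of val2vecParams, grounded in the assignments above:
# boundary rates (a, g) land on site 0 and (b, d) on site N-1, while the
# bulk rates (p, q, s) broadcast to every site. Assumes the numpy aliases
# used above (npzeros, npones, npndarray, float_) are in scope.
#
#   aVec, gVec, pVec, qVec, bVec, dVec, sVec = val2vecParams(
#       4, (0.5, 0.2, 1.0, 0.5, 0.3, 0.1, -1.0))
#   # aVec -> [0.5, 0, 0, 0]; bVec -> [0, 0, 0, 0.3]; pVec -> [1, 1, 1, 1]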
def get_vector(self, key=None):
    if key is None:
        raise exp.noneValueError('Vector search key cannot be "None".')
    try:
        vpos = self.vector_map[key]
    except KeyError:
        return npzeros(self.getDimension())
    self.file_pointer.seek(vpos)
    return nparray(self.file_pointer.readline().strip().split()[1:])
def __init__(self, mode, units, pzfilename=None, fileformat=None):
    """
    :param mode: response mode (must be a key of PAZ.MODE_ZEROS)
    :type mode: str
    :param units: response units (must be in PAZ.UNITS)
    :type units: str
    :param pzfilename: path to a poles-and-zeros file, if loading from disk
    :type pzfilename: str
    :param fileformat: format of the file (must be in PAZ.FILE_FORMATS)
    :type fileformat: str
    """
    self._filename = pzfilename
    self.fileformat = fileformat
    self._mode = mode
    self._units = units
    self._h0 = 1
    self._poles = npzeros(0, dtype=complex128)
    self._zeros = npzeros(0, dtype=complex128)
    if mode not in PAZ.MODE_ZEROS.keys():
        raise ValueError("Invalid MODE requested: '{}'. Valid values: {}".format(
            mode, PAZ.MODE_ZEROS.keys()))
    if units not in PAZ.UNITS:
        raise ValueError("Invalid units specified: '{}'. Valid values: {}".format(
            units, PAZ.UNITS))
    if pzfilename and fileformat:
        if not isinstance(self._filename, str):
            raise TypeError("Str type expected for filename: '{}'".format(self._filename))
        else:
            self._filename = os.path.abspath(os.path.expanduser(self._filename))
        if not os.path.exists(self._filename):
            raise Exception("File not found: '{}'".format(self._filename))
        if fileformat not in PAZ.FILE_FORMATS:
            raise ValueError("Unsupported fileformat requested: '{}'. Supported values: {}".format(
                fileformat, PAZ.FILE_FORMATS))
        self._load_paz_file()
def get_vector(self, key=None):
    if key is None:
        raise exp.noneValueError('Vector search key cannot be "None"')
    try:
        vpos = self.elements.index(key)
    except ValueError:
        raise KeyError(
            'Key does not exist in the vocabulary.\nFound: {}'.format(key))
    vector = npzeros(self.dimension)
    vector[vpos] = 1.0
    return vector
def test_05_save_edge2(self):
    # Unclosed edges are completed automatically
    drawlabel = DrawLabel()
    drawlabel.img_origin = npzeros((5, 5))
    drawlabel.edge_list = [[0, 1], [0, 3], [3, 3]]
    drawlabel.img_edged = drawlabel.img_origin.copy()
    drawlabel.save_edge()
    self.assertTrue(drawlabel.flag_closed)
    rows_list = sorted(drawlabel.edge_dict.keys())
    # Confirm that every row in between has been filled in
    assert len(rows_list) == (rows_list[-1] - rows_list[0] + 1)
def test_03_draw_event(self):
    # Test adding a point via draw_event
    import types
    event = cv2.EVENT_MOUSEMOVE
    x = 1
    y = 2
    flags = cv2.EVENT_FLAG_LBUTTON
    img = npzeros((10, 10))
    edge_list = [[0, 0]]
    # a bare object() rejects new attributes, so use a simple namespace
    param = types.SimpleNamespace(img_edged=img, edge_list=edge_list)
    draw_event(event, x, y, flags, param)
    self.assertEqual(len(edge_list), 2)
def getDistance2RegionCentroid(areaManager, area, areaList, indexData=[]):
    """
    The distance from area "i" to the attribute centroid of region "k"
    """
    sumAttributes = npzeros(len(area.data))
    if len(areaManager.areas[areaList[0]].data) - len(area.data) == 1:
        for aID in areaList:
            sumAttributes += nparray(areaManager.areas[aID].data[0:-1])
    else:
        for aID in areaList:
            sumAttributes += nparray(areaManager.areas[aID].data)
    centroidRegion = sumAttributes / len(areaList)
    regionDistance = sum((nparray(area.data) - centroidRegion) ** 2)
    return regionDistance
def get_relation(self) -> ndarray:
    """
    Returns
    -------
    Relation between leaves
    """
    size: int = self.number_of_leaves + self.number_of_ancestors
    relation: ndarray = npzeros((size, size), int32)
    for i in range(len(self.leaves)):
        for j in range(len(self.leaves[i])):
            if self.leaves[i][j] != -1:
                relation[i][self.leaves[i][j]] = 2
    return relation
def getPSDNumerator(freqs, bList, order):
    qVal = len(bList) - 1
    numFreqs = freqs.shape[0]
    PSDVals = npzeros(numFreqs)
    if ((order % 2 == 1) or (order <= -1) or (order > 2*qVal)):
        return PSDVals
    else:
        for freq in xrange(freqs.shape[0]):
            val = 0.0
            for i in xrange(qVal + 1):
                j = 2*qVal - i - order
                if ((j >= 0) and (j < qVal + 1)):
                    val += (bList[i]*bList[j] *
                            ((2.0*pi*1j*freqs[freq])**(2*qVal - (i + j))) *
                            pow(-1.0, qVal - j)).real
            PSDVals[freq] = val
        return PSDVals
def obsSystemMissing(m, X, F, Q, H, noise, y, mask, numObs, distSeed, noiseSeed):
    veryLarge = sqrt(float_info[0])  # veryLarge = sqrt(1.0e300)
    noiseRand = npzeros(numObs)
    seed(distSeed)
    distRand = multivariate_normal(array(m*[0.0]), Q, numObs)
    seed(noiseSeed)
    for i in range(numObs):
        noiseRand[i] = gauss(0.0, noise)
    for i in range(numObs):
        H[0, 0] = mask[i]
        X = F*X + transpose(matrix(distRand[i, :]))
        y[i, 0] = H*X + H[0, 0]*noiseRand[i]
        if (mask[i] == 1.0):
            y[i, 1] = noise
        else:
            y[i, 1] = veryLarge
    return (X, y)
def getPSDDenominator(freqs, aList, order):
    pVal = len(aList)
    numFreqs = freqs.shape[0]
    aList.insert(0, 1.0)
    PSDVals = npzeros(numFreqs)
    if ((order % 2 == 1) or (order <= -1) or (order > 2*pVal)):
        aList.pop(0)
        return PSDVals
    else:
        for freq in xrange(freqs.shape[0]):
            val = 0.0
            for i in xrange(pVal + 1):
                j = 2*pVal - i - order
                if ((j >= 0) and (j < pVal + 1)):
                    val += (aList[i]*aList[j] *
                            ((2.0*pi*1j*freqs[freq])**(2*pVal - (i + j))) *
                            pow(-1.0, pVal - j)).real
            PSDVals[freq] = val
        aList.pop(0)
        return PSDVals
def zeros(self, mode=None, units=None):
    if mode:
        zero_cnt_dif = PAZ.MODE_ZEROS[mode] - PAZ.MODE_ZEROS[self.mode]
        if zero_cnt_dif > 0:
            zeros = concatenate((npzeros(zero_cnt_dif, dtype=complex128),
                                 self._zeros))
        elif zero_cnt_dif < 0:
            if self._zeros.size >= abs(zero_cnt_dif):
                zeros = self._zeros[abs(zero_cnt_dif):self._zeros.size].copy()
            else:
                raise Exception("Can't convert PAZ with mode '{}' to mode '{}'".format(
                    self.mode, mode))
        else:
            zeros = self._zeros.copy()
    else:
        zeros = self._zeros.copy()
    if (units == 'hz') and (self.units == 'rad'):
        zeros /= 2 * pi
    elif (units == 'rad') and (self.units == 'hz'):
        zeros *= 2 * pi
    return zeros
def get_shortest_path_length(self):
    """
    Get shortest path lengths (fuel consumption and travel time) between
    the origin and destination point sets, for both empty and loaded runs.
    :return: (travel_time_path, fuel_path) dictionaries
    """
    sp_matrix = npzeros((len(self._points_from), len(self._points_to)))
    fuel_path = {
        'name': 'fuel consumption',
        'from_to': sp_matrix.copy(),
        'to_from': sp_matrix.copy()
    }
    travel_time_path = {
        'name': 'time',
        'from_to': sp_matrix.copy(),
        'to_from': sp_matrix.copy()
    }

    # Fuel and travel time shortest path (empty)
    for path, weight in zip(
            [fuel_path, travel_time_path],
            [self._weights["fuel_empty"], self._weights["time_empty"]]):
        self.network_model.network.build_graph(*weight)
        for n, point in enumerate(self._points_from.geometry):
            path["from_to"][n, :] = \
                self.network_model.network.get_shortest_path_lengths_from_source(
                    point, [tg for tg in self._points_to.geometry])

    # Fuel and travel time shortest path (loaded)
    for path, weight in zip(
            [fuel_path, travel_time_path],
            [self._weights["fuel_loaded"], self._weights["time_loaded"]]):
        self.network_model.network.build_graph(*weight)
        for n, point in enumerate(self._points_to.geometry):
            path["to_from"][:, n] = \
                self.network_model.network.get_shortest_path_lengths_from_source(
                    point, [tg for tg in self._points_from.geometry])

    return travel_time_path, fuel_path
def get_vector(self, key=None):
    if key is None:
        raise exp.noneValueError('Vector search key cannot be "None"')
    elif not isinstance(key, dict):
        raise TypeError(
            'Vector search key must be a dict object.\nFound: <{}>'.format(
                type(key)))
    for k in key.keys():
        if k not in self.classes:
            raise KeyError(
                'Class does not exist in the vocabulary.\nFound: {}'.format(k))
        elif key.get(k) not in self.elements.get(k):
            raise KeyError(
                'A value for the class::{} does not exist.\nFound: {}'.format(
                    k, key.get(k)))
    vectorList = []
    for c in self.classes:
        vectorPart = npzeros(self.dimension.get(c))
        if c in key.keys():
            vectorPart[self.elements.get(c).index(key.get(c))] = 1.0
        vectorList.append(vectorPart)
    return npcat(vectorList)
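# Hypothetical usage of the dict-keyed lookup above: with
# classes = ['color', 'size'] and elements = {'color': ['red', 'blue'],
# 'size': ['s']}, get_vector({'color': 'blue'}) returns the concatenation
# of a one-hot part [0., 1.] for 'color' and an all-zero part [0.] for
# 'size' (classes absent from the key stay zeroed).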
def __init__(self, genome_1: Genome, genome_2: Genome):
    """
    Constructor

    Parameters
    ----------
    genome_1
        First genome for distance calculation
    genome_2
        Genome to test distance from the first one
    """
    gene_count_1: int = 0
    gene_count_2: int = 0
    self.genome_1: Genome = Genome(list())  # Genome 1 as String
    self.genome_2: Genome = Genome(list())  # Genome 2 as String

    for chromosome in genome_1.chromosomes:
        if len(chromosome.genes) != 0:
            gene_count_1 += len(chromosome.genes)
            self.genome_1.add_chromosome(chromosome)

    for chromosome in genome_2.chromosomes:
        if len(chromosome.genes) != 0:
            gene_count_2 += len(chromosome.genes)
            self.genome_2.add_chromosome(chromosome)

    if gene_count_1 == gene_count_2:  # Algorithm requires genomes of equal length
        self.gene_count: int = gene_count_1  # Number of genes
    else:
        raise Exception("Different numbers of genes in both genomes.\n")

    self.node_ints: ndarray = npzeros(self.gene_count * 2)
    self.node_strings_1: List[Optional[str]] = list()
    self.node_strings_2: List[Optional[str]] = list()
    self.genome_paths_1: List[Optional[BPGPath]] = list()
    self.genome_paths_2: List[Optional[BPGPath]] = list()
    self.distance: int = int()
def set_plotting_data(self, xdata, ydata, zdata):
    self.xdata = xdata
    self.ydata = ydata
    self.zdata = zdata
    self.zmin = self.zdata.min()
    self.zmax = self.zdata.max()

    # Convert to color data
    from numpy import zeros as npzeros
    self.cdata = npzeros((self.xdata.size, self.ydata.size, 3))
    print "Color ary:", self.cdata.shape, self.xdata.size * self.ydata.size
    newtime = time()
    self.stepx = 40
    self.stepy = 5
    self.cdata = [[getColorJetRGBf(self.zdata[xi, yi], self.zmin, self.zmax)
                   for yi in range(0, self.ydata.size, self.stepy)]
                  for xi in range(0, self.xdata.size, self.stepx)]
    self.cxdata = [self.xdata[xi] for xi in range(0, self.xdata.size, self.stepx)]
    self.cydata = [self.ydata[yi] for yi in range(0, self.ydata.size, self.stepy)]
    from numpy import array as nparray
    self.cdata = nparray(self.cdata)
    self.cxdata = nparray(self.cxdata)
    self.cydata = nparray(self.cydata)
    dt = time() - newtime
    print ">set_plotting_data", dt, '[s]'
    print ">set cdata size,", self.cdata.shape, self.cdata.size
    print ">screensize", self.size()

    # Set up the axes
    self.auto_axis()
def gen_lsp_notau(times, mags, errs, omegas):
    '''
    This runs the loops for the LSP calculation.

    Requires cleaned times, mags, errs (no nans).
    '''
    ndet = times.size
    omegalen = omegas.size

    # the output array
    pvals = npzeros(omegalen, dtype=np.float64)

    for oind in range(omegalen):
        thisomega = omegas[oind]
        thispval = generalized_lsp_value_notau(times, mags, errs, thisomega)
        pvals[oind] = thispval

    return pvals
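# Hypothetical usage sketch (assumes numpy is imported as np and
# generalized_lsp_value_notau is in scope): build the angular-frequency
# grid from trial periods and evaluate the periodogram at each one.
#
#   periods = np.linspace(0.5, 100.0, 2000)   # same time units as `times`
#   omegas = 2.0 * np.pi / periods
#   pvals = gen_lsp_notau(times, mags, errs, omegas)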
def __getitem__(self, index):
    if index >= self.nbr_real:
        # get fake image
        path_img = self.fake_img_list[index - self.nbr_real]
        label = torch.tensor([0], dtype=torch.uint8)
    else:
        # get real image
        path_img = self.real_img_list[index]
        label = torch.tensor([1], dtype=torch.uint8)
    try:
        img = mpimg.imread(path_img)
        assert img.shape[0] == 256
        assert img.shape[1] == 256
        if len(img.shape) == 2:
            # grayscale input: replicate the single channel into RGB
            img_rgb = npzeros((256, 256, 3), dtype=npuint8)
            for i in range(3):
                img_rgb[:, :, i] = img
            img = self.transform(img_rgb)
        else:
            img = self.transform(img)
    except ValueError:
        message = 'index : {:}; img.shape : {:}; label : {:}; img.path : {:}'.format(
            index, img.shape, label, path_img)
        print(message)
    sample = {'image': img, 'label': label}
    return sample
def fixedIntervalSmoother(y, r, x, X, P, XMinus, PMinus, F, I, Q, H, R, K):
    m = F.shape[0]
    numPts = y.shape[0]
    XArr = npzeros((numPts, m, 1))
    PArr = npzeros((numPts, m, m))
    XMinusArr = npzeros((numPts, m, 1))
    PMinusArr = npzeros((numPts, m, m))
    smoothXArr = npzeros((numPts, m, 1))
    smoothPArr = npzeros((numPts, m, m))
    # Forward (filtering) pass
    for i in range(numPts):
        R[0, 0] = y[i, 1]*y[i, 1]
        XMinus = F*X
        XMinusArr[i, :, :] = XMinus
        PMinus = F*P*transpose(F) + Q
        PMinusArr[i, :, :] = PMinus
        v = y[i, 0] - H*XMinus
        S = H*PMinus*transpose(H) + R
        inverseS = inv(S)
        K = PMinus*transpose(H)*inverseS
        IMinusKH = I - K*H
        X = K*y[i, 0] + IMinusKH*XMinus
        XArr[i, :, :] = X
        P = IMinusKH*PMinus*transpose(IMinusKH) + K*R*transpose(K)
        PArr[i, :, :] = P
        r[i, 0] = v[0, 0]
        #try:
        r[i, 1] = sqrt(S[0, 0])
        #except ValueError:
        #    pdb.set_trace()
    # Backward (smoothing) pass
    smoothPArr[numPts-1, :, :] = PArr[numPts-1, :, :]
    smoothXArr[numPts-1, :, :] = XArr[numPts-1, :, :]
    for i in range(numPts-2, -1, -1):
        IMinus = inv(matrix(PMinusArr[i+1, :, :]))
        K = matrix(PArr[i, :, :])*transpose(F)*IMinus
        smoothPArr[i, :, :] = matrix(PArr[i, :, :]) - \
            K*(matrix(PMinusArr[i+1, :, :]) - matrix(smoothPArr[i+1, :, :]))*transpose(K)
        smoothXArr[i, :, :] = matrix(XArr[i, :, :]) + \
            K*(matrix(smoothXArr[i+1, :, :]) - matrix(XMinusArr[i+1, :, :]))
    for i in range(numPts):
        x[i, 0] = smoothXArr[i, 0, 0]
        try:
            x[i, 1] = sqrt(smoothPArr[i, 0, 0])
        except ValueError:
            pdb.set_trace()
    return (r, x)
def zeros(N, dtype=float64, bytes=32):
    return pyfftw.n_byte_align(npzeros(N, dtype=dtype), bytes)
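# Usage note: this wrapper hands back a zeroed array aligned for FFTW.
# A minimal check, assuming the older pyfftw alignment API
# (n_byte_align / is_n_byte_aligned; newer releases expose byte_align and
# is_byte_aligned instead):
#
#   a = zeros(1024, dtype=float64)
#   assert pyfftw.is_n_byte_aligned(a, 32)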
def getACF(times, A, Sigma, H):
    numtimes = times.shape[0]
    ACF = npzeros(numtimes)
    for time in xrange(numtimes):
        ACF[time] = (transpose(H)*expm(A*times[time])*Sigma*H)[0, 0]
    return ACF
def __init__(self,
             number_of_ancestors: int,
             number_of_leaves: int,
             gene_number: int,
             paths: Optional[List[PGMPathForAGenome]] = None,
             node_strings: Optional[List[str]] = None,
             node_ints: Optional[ndarray] = None,
             ancestor_genome_string: Optional[List[Genome]] = None):
    """
    Constructor

    Parameters
    ----------
    number_of_ancestors
        Number of ancestor nodes in the tree structure
    number_of_leaves
        Number of leaf nodes in the tree structure
    gene_number
        Number of genes in the structure
    paths
        All paths as PGMForAGenomes
    node_strings
        Current node as a list of strings
    node_ints
        Current node as a list of integers
    ancestor_genome_string
        Ancestor genomes as a list of GenomeInStrings
    """
    self.number_of_ancestors: int = number_of_ancestors
    self.number_of_leaves: int = number_of_leaves
    self.gene_number: int = gene_number

    self.leaves: List[List[int]] = [[-1, -1, -1] for _ in range(
        self.number_of_ancestors + self.number_of_leaves)]
    self.medians: List[Optional[MedianData]] = [
        None for _ in range(self.number_of_ancestors)]

    self.node_int: ndarray = npzeros(self.gene_number * 2, int32)
    self.node_string: List[str] = [str() for _ in range(self.gene_number * 2)]

    if ancestor_genome_string is None:
        self.node_int = node_ints
        self.node_string = node_strings
        self.all_paths: List[Optional[PGMPathForAGenome]] = [
            PGMPathForAGenome(p.paths) for p in paths]
        self.all_paths.append(None)
        self.all_paths[3] = PGMPathForAGenome(self.get_pgm_path(None, 3))
        self.set_tree_structure(3, 0, 1, 2)
    else:
        genome1: Genome = Genome(ancestor_genome_string[0].chromosomes)
        index1: int = 0

        for chromosome in genome1.chromosomes:
            for gene in chromosome.genes:
                first_character: str = gene.name[0]
                node1: str
                node2: str

                if first_character == '-':
                    node1 = str().join([gene.name[1:], "h"])
                    node2 = str().join([gene.name[1:], "t"])
                    self.node_int[index1] = index1 + 1
                    self.node_string[index1] = node2
                    index1 += 1
                    self.node_int[index1] = index1 + 1
                    self.node_string[index1] = node1
                else:
                    node1 = str().join([gene.name, "t"])
                    node2 = str().join([gene.name, "h"])
                    self.node_int[index1] = index1 + 1
                    self.node_string[index1] = node1
                    index1 += 1
                    self.node_int[index1] = index1 + 1
                    self.node_string[index1] = node2

                index1 += 1

        self.all_genomes: List[Optional[Genome]] = [
            ags for ags in ancestor_genome_string]

        # Fill with None
        for i in range(len(self.all_genomes),
                       self.number_of_leaves + self.number_of_ancestors):
            self.all_genomes.append(None)

        self.all_paths: List[Optional[PGMPathForAGenome]] = [
            PGMPathForAGenome(
                self.get_pgm_path(Genome(self.all_genomes[i].chromosomes), i))
            for i in range(len(ancestor_genome_string))]
        for i in range(len(ancestor_genome_string), len(self.all_genomes)):
            self.all_paths.append(
                PGMPathForAGenome(self.get_pgm_path(None, i)))
        for i in range(len(self.all_paths),
                       self.number_of_leaves + self.number_of_ancestors):
            self.all_paths.append(None)
def read_gif(self, fp):
    self.img_origin = mpimg.imread(fp)
    self.img_edged = self.img_origin.copy()
    height, width = self.img_origin.shape[:2]
    self.img_label = npzeros((height, width))
def zeros(shape, dtype=float, order='C'):
    return tensor(npzeros(shape, dtype=dtype, order=order))
def ReadTideBoundaryData( args ) :
    '''Read and interpolate tidal boundary condition data.
    Returns a tuple with the basin number and scipy interpolate.interp1d
    function, which can be called with a unix time (Epoch seconds) argument
    to get the demeaned tidal elevations.

    Kludge: Since multiprocess (and multiprocessing) can't handle objects
    with Tk instances, this function has been separated so that it can be
    called without a model object. It should be a Model class method...'''

    line       = args[0]
    start_time = args[1]
    end_time   = args[2]
    path       = args[3]

    words = line.split( ',' )

    basin     = int ( words[ 0 ] )
    data_type = words[ 1 ].strip()
    data_file = words[ 2 ].strip()

    #print( '-> ReadTideBoundaryData: ', basin, data_type, data_file )

    if data_type == 'None' :
        return( None )

    if data_type not in [ 'stage' ] :
        msg = 'ReadTideBoundaryData() Invalid data type: ' +\
              data_type + '\n'
        print( msg, flush = True )
        return( False )

    # The csv file has 2 columns: 1 = Date-time, 2 = data value
    # Time, WL.(m).demeaned
    # 1990-01-01 12:00 AM EST, -0.086
    # 1990-01-01 1:00 AM EST,   0.166
    try:
        fd = open( path + data_file, 'r' )
    except OSError as err :
        msg = "ReadTideBoundaryData() OS error: {0}\n".format( err )
        print( msg, flush = True )
        return( False )

    rows = fd.readlines()
    fd.close()

    # Find rows closest to the start_time & end_time
    datetimes = []
    times     = []
    data      = npzeros( ( len( rows ) - 1 ) )

    # Format the date and copy each row of data, skip the header
    for i in range( 1, len( rows ) ) :
        row   = rows[ i ]
        words = row.split(',')
        date_time = strptime( words[ 0 ], '%Y-%m-%d %I:%M %p %Z' )
        datetimes.append( date_time )
        times.append( (date_time - datetime(1970, 1, 1)).total_seconds() )
        data[ i - 1 ] = float( words[ 1 ] )

    # Now search for times in the data that match the simulation start/end
    start_i = 0
    end_i   = 0
    try:
        start_i = datetimes.index( start_time )
    except ValueError :
        msg = 'ReadTideBoundaryData() Model start time: ' + str( start_time ) +\
              ' is not in the tide boundary data: ' + data_file + '\n'
        print( msg, flush = True )
        return( False )
    try:
        end_i = datetimes.index( end_time )
    except ValueError :
        msg = 'ReadTideBoundaryData() Model end time: ' + str( end_time ) +\
              ' is not in the tide boundary data: ' + data_file + '\n'
        print( msg, flush = True )
        return( False )

    #print( '   Found start ', times[ start_i ], ' and end ', times[ end_i ] )

    tide_observations = data [ start_i : end_i ]
    tide_seconds      = times[ start_i : end_i ]

    #print( tide_seconds )
    #print( tide_observations )

    interpolation_function = interpolate.interp1d( tide_seconds,
                                                   tide_observations )

    return( tuple( ( basin, interpolation_function ) ) )
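# Hypothetical usage, following the docstring: the returned interp1d can be
# called with Unix epoch seconds inside [start_time, end_time].
#
#   result = ReadTideBoundaryData( ( line, start_time, end_time, path ) )
#   if result not in ( None, False ) :
#       basin, tide_func = result
#       elevation_m = tide_func( 678925800.0 )  # demeaned tidal elevation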
def get_null_vector(self):
    return npzeros(self.dimension)
# kss:   The stock of capital in the steady state
# gamma: The coefficient of the utility function
# rho:   The continuous-time discount rate
# alpha: Production elasticity of capital
# sigma: Variance of disturbance z

# n indicates the order of the Taylor polynomial. Be aware that Python starts
# counting at zero, so the polynomial has n+1 terms
n = 4

# Symbolic vectors and matrices
sa = MatrixSymbol('a', n+1, n+1)  # a: Vector of coefficients of the Taylor polynomial

# Values of a, rho, alpha, gamma, and kss:
va = npzeros([n+1, n+1])
powSeriesCoeff = npzeros([n+1, n+1])
vrho = 0.05
valpha = 0.25
vgamma = -10
vkss = 1
vsigma = 0

# Production function
sf = (vrho/valpha)*sk**valpha

# Utility function
su = (sc**(1+sgamma))/(1+sgamma)

# Construct Taylor polynomial for consumption function
tvalue = []
def get_null_vector(self):
    return npcat([npzeros(self.dimension.get(c)) for c in self.classes])
def dmrg(mpo, mps=None, mpsl=None, env=None, envl=None,
         mbd=10, tol=1.e-5, max_iter=10, min_iter=0, noise=0.,
         mps_subdir='mps', env_subdir='env',
         mpsl_subdir='mpsl', envl_subdir='envl',
         nStates=2, dtype=complex_, fixed_bd=False, alg='davidson',
         return_state=False, return_env=False,
         return_entanglement=False, return_wgt=False,
         orthonormalize=False, state_avg=True, left=False,
         start_gauge=0, end_gauge=0):
    """
    Run the one-site DMRG algorithm

    Args:
        mpo : 1D Array
            An array that contains a list of mpos, each of which is a list
            with an array for each site in the lattice representing the mpo
            at that site.

    Kwargs:
        mps : 1D Array of Matrix Product States
            The initial guess for the mps (stored as an mpsList as specified
            in cyclomps.tools.mps_tools)
            Default : None (mps0 will be random)
        mpsl : 1D Array of Matrix Product States
            The initial guess for the left mps (stored as an mpsList as
            specified in cyclomps.tools.mps_tools)
            Default : None (mps0 will be random)
        env : 1D Array of an environment
            The initial environment for mps (stored as an envList as
            specified in cyclomps.tools.env_tools)
            Default : None (env will be built in computation)
        envl : 1D Array of an environment
            The initial environment for the left mps (stored as an envList
            as specified in cyclomps.tools.env_tools)
            Default : None (env will be built in computation)
        mbd : int or 1D Array of ints
            The maximum bond dimension for the mps. If this is a single int,
            then the bond dimension will be held constant for all sweeps.
            Otherwise, the bond dimension will be incremented after max_iter
            sweeps or until the tolerance is reached. (Note that if max_iter
            and/or tol is a list, we require
            len(max_iter) == len(mbd) == len(tol), and the maximum number of
            iterations or convergence tolerance changes with the retained
            maximum bond dimension.)
            Default : 10
        tol : int or 1D Array of ints
            The relative convergence tolerance. This may be a list, meaning
            that as the mbd is increased, different tolerances are specified.
            Default : 1.e-5
        max_iter : int or 1D Array of ints
            The maximum number of iterations for each mbd
            Default : 10
        min_iter : int or 1D Array of ints
            The minimum number of iterations for each mbd
            Default : 0
        noise : float or 1D Array of floats
            The magnitude of the noise added to the mbd to prevent getting
            stuck in a local minimum
            !! NOT IMPLEMENTED !!
            Default : 0.
        mps_subdir : string
            The subdirectory under CALC_DIR (specified in
            cyclomps.tools.params) where the mps will be saved.
            Default : 'mps'
        mpsl_subdir : string
            The subdirectory under CALC_DIR (specified in
            cyclomps.tools.params) where the left mps will be saved.
            Default : 'mpsl'
        env_subdir : string
            The subdirectory under CALC_DIR (specified in
            cyclomps.tools.params) where the environment will be saved.
            Default : 'env'
        envl_subdir : string
            The subdirectory under CALC_DIR (specified in
            cyclomps.tools.params) where the environment for the left mps
            will be saved.
            Default : 'envl'
        nStates : int
            The number of retained states
            Default : 2
        dtype : dtype
            The data type for the mps and env
            Default : np.complex_
        fixed_bd : bool
            This ensures that all bond dimensions are constant throughout
            the MPS, i.e. mps[0].dim = (1 x d[0] x mbd) instead of
            mps[0].dim = (1 x d[0] x d[0]), and so forth.
            Default : False
        alg : string
            The algorithm that will be used. Available options are
            'arnoldi', 'exact', and 'davidson'.
            Default : 'davidson'
        return_state : bool
            Return the resulting mps list
            Default : False
        return_env : bool
            Return the resulting env list
            Default : False
        return_entanglement : bool
            Return the entanglement entropy and entanglement spectrum
            Default : False
        return_wgt : bool
            Return the discarded weights
            Default : False
        orthonormalize : bool
            Specify whether to orthonormalize eigenvectors after solution of
            the eigenproblem. This will cause problems for all systems unless
            the eigenstates being orthonormalized are degenerate.
            Default : False
        state_avg : bool
            Specify whether to use the state averaging procedure to target
            multiple states when doing the renormalization step.
            Default : True
            !! ONLY STATE AVG IS IMPLEMENTED !!
        left : bool
            If True, then we calculate the left and right eigenstate,
            otherwise only the right.
            Default : False
        start_gauge : int
            The site at which the gauge should (or is) located in the
            initial mps.
            Default : 0
        end_gauge : int
            The site at which the gauge should be located when the mps is
            returned.
            Default : 0

    Returns:
        E : 1D Array
            The energies for the number of states targeted
        EE : 1D Array
            The entanglement entropy for the states targeted
            Returned only if return_entanglement == True
        EEs : 1D Array of 1D Arrays
            The entanglement entropy spectrum for the states targeted
            Returned only if return_entanglement == True
        mps : 1D Array of Matrix Product States
            The resulting matrix product state list
            Returned only if return_state == True
        env : 1D Array of an environment
            The resulting environment list
            Returned only if return_env == True
    """
    t0 = time.time()
    mpiprint(0, '\n\nStarting DMRG one-site calculation')
    mpiprint(0, '#' * 50)

    # Check inputs for problems
    if not hasattr(mbd, '__len__'):
        mbd = nparray([mbd])
    if not hasattr(tol, '__len__'):
        tol = tol * npones(len(mbd))
    else:
        assert (len(mbd) == len(tol)), 'Lengths of mbd and tol do not agree'
    if not hasattr(max_iter, '__len__'):
        max_iter = max_iter * npones(len(mbd))
    else:
        assert (len(max_iter) == len(mbd)), 'Lengths of mbd and max_iter do not agree'
    if not hasattr(min_iter, '__len__'):
        min_iter = min_iter * npones(len(mbd))
    else:
        assert (len(min_iter) == len(mbd)), 'Lengths of mbd and min_iter do not agree'

    # ----------------------------------------------------------------------
    # Solve for Right Eigenvector
    # ----------------------------------------------------------------------
    # Determine local bond dimensions from mpo
    d = mpo_local_dim(mpo)

    # Create structures to save results
    return_E = npzeros((len(mbd), nStates))
    return_EE = npzeros((len(mbd), nStates))
    return_EEs = npzeros((len(mbd), nStates, max(mbd)))
    mps_res = []
    env_res = []

    # Loop through all maximum bond dimensions
    for mbd_ind, mbdi in enumerate(mbd):
        mpiprint(1, '\n' + '/' * 50)
        mpiprint(1, 'Starting Calculation for mbd = {}'.format(mbdi))

        # Set up initial mps
        if mbd_ind == 0:
            if mps is None:
                # There is no previous guess to build on
                mps = create_mps_list(d, mbdi, nStates,
                                      dtype=dtype,
                                      fixed_bd=fixed_bd,
                                      subdir=mps_subdir + '_mbd' + str(mbdi) + '_')
                # Make sure it is in correct canonical form
                mps = make_mps_list_right(mps)
                mps = move_gauge(mps, 0, start_gauge)
        else:
            # Increase the maximum bond dim of the previous system
            mps = increase_bond_dim(mps, mbdi, fixed_bd=fixed_bd)

        # Set up initial env
        if mbd_ind == 0:
            if env is None:
                env = calc_env(mps, mpo,
                               dtype=dtype,
                               subdir='env_mbd' + str(mbdi) + '_')
        else:
            env = calc_env(mps, mpo,
                           dtype=dtype,
                           gSite=end_gauge,
                           subdir='env_mbd' + str(mbdi) + '_')

        # Run the DMRG Sweeps
        outputr = sweeps(mps, mpo, env,
                         max_iter=max_iter[mbd_ind],
                         min_iter=min_iter[mbd_ind],
                         tol=tol[mbd_ind],
                         alg=alg,
                         noise=noise,
                         orthonormalize=orthonormalize,
                         state_avg=state_avg,
                         start_gauge=start_gauge,
                         end_gauge=end_gauge)

        # Collect results
        E = outputr[0]
        EE = outputr[1]
        EEs = outputr[2]
        wgt = outputr[3]

    # ----------------------------------------------------------------------
    # Solve for Left Eigenvector
    # ----------------------------------------------------------------------
    if left:
        mpiprint(0, '#' * 50)
        mpiprint(0, 'Left State')
        mpiprint(0, '#' * 50)
        # Create left mpo (conjugate transpose) and solve for its
        # right eigenstate with this same routine
        mpol = mpo_conj_trans(mpo)
        outputl = dmrg(mpol,
                       mps=mpsl, env=envl,
                       mbd=mbd, tol=tol,
                       max_iter=max_iter, min_iter=min_iter,
                       noise=noise,
                       mps_subdir=mpsl_subdir, env_subdir=envl_subdir,
                       nStates=nStates, dtype=dtype,
                       fixed_bd=fixed_bd, alg=alg,
                       return_state=True, return_env=True,
                       return_entanglement=True, return_wgt=True,
                       orthonormalize=orthonormalize, state_avg=state_avg,
                       left=False,
                       start_gauge=start_gauge, end_gauge=end_gauge)
        # Collect results
        El = outputl[0]
        EEl = outputl[1]
        EEls = outputl[2]
        wgtl = outputl[3]
        mpsl = outputl[4]
        envl = outputl[5]

    # ----------------------------------------------------------------------
    # Wrap Up Calculation
    # ----------------------------------------------------------------------
    # Print time for dmrg procedure
    timeprint(1, 'Total time: {} s'.format(time.time() - t0))

    # Return results
    ret_list = [E]
    if left:
        ret_list += [El]
    if return_entanglement:
        ret_list += [EE, EEs]
        if left:
            ret_list += [EEl, EEls]
    if return_wgt:
        ret_list += [wgt]
        if left:
            ret_list += [wgtl]
    if return_state:
        ret_list += [mps]
        if left:
            ret_list += [mpsl]
    if return_env:
        ret_list += [env]
        if left:
            ret_list += [envl]
    return ret_list
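# Hypothetical driver for the routine above, assuming `hamiltonian_mpo` is
# an MPO list in the format cyclomps expects; the bond dimension ramps
# 10 -> 50 with a matching tolerance schedule.
#
#   E = dmrg(hamiltonian_mpo,
#            mbd=[10, 25, 50],
#            tol=[1e-3, 1e-4, 1e-5],
#            max_iter=10,
#            nStates=2,
#            alg='davidson')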
def _get_acf_peakheights(lags, acf, npeaks=20, searchinterval=1):
    '''This calculates the relative peak heights for the first npeaks in the ACF.

    Usually, the first peak or the second peak (if its peak height > first
    peak) corresponds to the correct lag. When we know the correct lag, the
    period is then::

        bestperiod = time[lags == bestlag] - time[0]

    Parameters
    ----------

    lags : np.array
        An array of lags that the ACF is calculated at.

    acf : np.array
        The array containing the ACF values.

    npeaks : int
        The maximum number of peaks to consider when finding peak heights.

    searchinterval : int
        From `scipy.signal.argrelmax`: "How many points on each side to use
        for the comparison to consider comparator(n, n+x) to be True." This
        effectively sets how many points on each side of the current peak
        will be used to check if the current peak is the local maximum.

    Returns
    -------

    dict
        This returns a dict of the following form::

            {'maxinds':the indices of the lag array where maxes are,
             'maxacfs':the ACF values at each max,
             'maxlags':the lag values at each max,
             'mininds':the indices of the lag array where mins are,
             'minacfs':the ACF values at each min,
             'minlags':the lag values at each min,
             'relpeakheights':the relative peak heights of each rel. ACF peak,
             'relpeaklags':the lags at each rel. ACF peak found,
             'peakindices':the indices of arrays where each rel. ACF peak is,
             'bestlag':the lag value with the largest rel. ACF peak height,
             'bestpeakheight':the largest rel. ACF peak height,
             'bestpeakindex':the largest rel. ACF peak's number in all peaks}

    '''

    maxinds = argrelmax(acf, order=searchinterval)[0]
    maxacfs = acf[maxinds]
    maxlags = lags[maxinds]
    mininds = argrelmin(acf, order=searchinterval)[0]
    minacfs = acf[mininds]
    minlags = lags[mininds]

    relpeakheights = npzeros(npeaks)
    relpeaklags = npzeros(npeaks, dtype=npint64)
    peakindices = npzeros(npeaks, dtype=npint64)

    for peakind, mxi in enumerate(maxinds[:npeaks]):
        # if there are no mins to the left, throw away this peak
        # because it's probably spurious
        # (FIXME: is this OK?)
        if npall(mxi < mininds):
            continue

        leftminind = mininds[mininds < mxi][-1]   # the last index to the left
        rightminind = mininds[mininds > mxi][0]   # the first index to the right
        relpeakheights[peakind] = (
            acf[mxi] - (acf[leftminind] + acf[rightminind]) / 2.0
        )
        relpeaklags[peakind] = lags[mxi]
        peakindices[peakind] = peakind

    # figure out the bestperiod if possible
    if relpeakheights[0] > relpeakheights[1]:
        bestlag = relpeaklags[0]
        bestpeakheight = relpeakheights[0]
        bestpeakindex = peakindices[0]
    else:
        bestlag = relpeaklags[1]
        bestpeakheight = relpeakheights[1]
        bestpeakindex = peakindices[1]

    return {'maxinds': maxinds,
            'maxacfs': maxacfs,
            'maxlags': maxlags,
            'mininds': mininds,
            'minacfs': minacfs,
            'minlags': minlags,
            'relpeakheights': relpeakheights,
            'relpeaklags': relpeaklags,
            'peakindices': peakindices,
            'bestlag': bestlag,
            'bestpeakheight': bestpeakheight,
            'bestpeakindex': bestpeakindex}
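# Hypothetical follow-up, using the docstring's recipe to turn the best lag
# into a period (assumes `times` is the evenly sampled time array the ACF
# was computed from):
#
#   acfres = _get_acf_peakheights(lags, acf, npeaks=20)
#   bestperiod = times[lags == acfres['bestlag']] - times[0]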
particle.solution[i] = 0

#==========================================================================#
# Algorithm                                                                #
#==========================================================================#
global_best: BP
local_best: BP

# init
swarm = [BP(SACK_SIZE) for s in range(SWARM_SIZE)]
global_best = find_best()
local_best = swarm[rng.randint(0, SWARM_SIZE)]
best_score = fx(sack, local_best)
global_best_score = fx(sack, global_best)
results = npzeros((STEPS, EXPERIMENTS))  # results matrix

for iteracion in range(EXPERIMENTS):     # number of experiments
    for epoch in range(STEPS):           # how many steps the particles take
        # optimize
        for p in swarm:
            if p is global_best:
                continue
            if p is local_best:
                continue
            update_velocity(p, global_best, local_best)
            update_position(p)
            score = fx(sack, p)
            if score > best_score:
                best_score = score
                local_best = p
def L2(input_path_L1, input_path_Compensate, output_path, E0_const):
    try:
        input_fp_Compensate = open(input_path_Compensate, 'r')
    except IOError:
        print "IO error;Check the input File: ", input_path_Compensate
    except:
        print "Unexpected Open Error: ", input_path_Compensate
        input_fp_Compensate.close()

    try:
        input_fp_L1 = open(input_path_L1, 'r')
    except IOError:
        print "IO error;Check the input File: ", input_path_L1
    except:
        print "Unexpected Open Error: ", input_path_L1
        input_fp_L1.close()

    # output file path
    output_file_path = os.path.join(output_path, 'ResultL2.csv')
    try:
        output_fp = open(output_file_path, 'w+')
    except IOError:
        print "IO error;Check the output File: ", output_file_path
        return 'L2 failed'

    # output_plot_1 holds the Lloyd-Taylor equation regression results
    # (written per segment below); output_plot_2 holds the gap-filled series
    output_plot_2_file_path = os.path.join(output_path, 'Plot_L2_2.csv')
    try:
        output_plot_2_fp = open(output_plot_2_file_path, 'w+')
    except IOError:
        print "IO error;Check the output File: ", output_plot_2_file_path
        return 'L2 failed'

    try:
        Compensate_csv = csv.reader(input_fp_Compensate, delimiter=',')
    except csv.Error:
        print "Parse Error;Check the input File: ", input_path_Compensate
    except StandardError:
        print "Unexpected Read Error: ", input_path_Compensate
    try:
        L1_csv = csv.reader(input_fp_L1, delimiter=',')
    except csv.Error:
        print "Parse Error;Check the input File: ", input_path_L1
    except StandardError:
        print "Unexpected Read Error: ", input_path_L1

    n_Compensate = 0
    n_L1 = 0
    data_Compensate = []
    data_L1 = []
    for row in Compensate_csv:
        data_Compensate.append(row)
        n_Compensate = n_Compensate + 1
    for row in L1_csv:
        data_L1.append(row)
        n_L1 = n_L1 + 1

    # Data count check
    if(n_Compensate != n_L1):
        print 'Count Error;Process count mismatch between Compensate and L1'
        return 'L2 failed'

    # initialize
    date = []
    rsdn = npzeros(n_Compensate)
    Ta = npzeros(n_Compensate)
    h2o = npzeros(n_Compensate)
    #press = npzeros(n_Compensate)

    # Read Input Data
    i = 0
    for row in data_Compensate:
        rsdn[i] = float(row[0])
        Ta[i] = float(row[1])
        h2o[i] = float(row[2])
        i = i + 1
    press = 998.0

    # initialize
    Fs = npzeros(n_L1)
    Fc = npzeros(n_L1)
    Fsc = npzeros(n_L1)
    Hs = npzeros(n_L1)
    Hc = npzeros(n_L1)
    Hsc = npzeros(n_L1)
    LEs = npzeros(n_L1)
    LEc = npzeros(n_L1)
    LEsc = npzeros(n_L1)
    co2 = npzeros(n_L1)
    ustar = npzeros(n_L1)
    itime = npzeros(n_L1)
    iustar = npzeros(n_L1)
    date = []
    i = 0
    for row in data_L1:
        date.append(row[0])
        Fs[i] = float(row[1])
        Fc[i] = float(row[2])
        Fsc[i] = float(row[3])
        Hs[i] = float(row[4])
        Hc[i] = float(row[5])
        Hsc[i] = float(row[6])
        LEs[i] = float(row[7])
        LEc[i] = float(row[8])
        LEsc[i] = float(row[9])
        co2[i] = float(row[10])
        ustar[i] = float(row[14])
        itime[i] = float(row[15])
        iustar[i] = float(row[16])
        i = i + 1

    # Define constants and parameters for gap filling
    #--------------------------------------------------------------------------
    num_day = 28
    ni = 36
    nd = 10
    n1 = 2  # how many of the largest points are considered for respiration
    # DO NOT Modify!
    #num_point_per_day = 24  # number of data points per day (48 -> 30 min avg time)
    #avgtime = 30
    # determine num_point_per_day automatically using the datetime module
    date_1st = datetime.datetime.strptime(date[0], "%Y-%m-%d %H:%M")
    date_2nd = datetime.datetime.strptime(date[1], "%Y-%m-%d %H:%M")
    date_diff = date_2nd - date_1st
    avgtime = int(date_diff.seconds / 60)  # averaging time (minutes)
    num_point_per_day = 1440 / avgtime     # number of data points per day (1440 : minutes of a day)
    num_segment = num_point_per_day * num_day
    num_avg = int(n_L1 / num_segment)
    num_day_2 = 7
    # num_day_re = 20
    # noverlap = 5
    num_day_re = 20
    noverlap = 5
    ni = int(num_point_per_day * 3 / 4)  # the data point at which night starts
    nd = 300 / avgtime  # how many of the largest points are considered for respiration (300 : minutes of 5 hours)
    #--------------------------------------------------------------------------
    #E0_const = True  # Do you want to use a constant E0 for one year? Y/N
    beta0 = nparray([2, 200])
    Tref = 10.0
    T0 = -46.02
    gap_limit = 0.025
    ustar_limit = 0.5
    upper_Fc = 0.35      # upper limit of nighttime CO2 flux (mg/m2/s)
    Fc_limit = 0.005

    ## Information for MLT
    drsdn = 50.0         # W/m2
    dta = 2.5            # oC
    dvpd = 5.0           # 5 hPa
    rv = 461.51
    #--------------------------------------------------------------------------
    upper_co2 = 1000.0   # upper limit of CO2 concent. (mg/m3)
    upper_h2o = 60.0     # upper limit of H2O concent. (g/m3)
    upper_Ta = 60.0      # upper limit of air temperature (oC)
    lower_Fc = -3.0      # lower limit of daytime CO2 flux (mg/m2/s)
    lower_LE = -200      # lower limit of LE (W/m2)
    lower_H = -300       # lower limit of H (W/m2)
    upper_Fc = 3         # upper limit of nighttime CO2 flux (mg/m2/s)
    upper_LE = 800       # upper limit of LE (W/m2)
    upper_H = 800        # upper limit of H (W/m2)
    upper_agc = 95.0     # upper limit of AGC value
    ustar_limit = 0.03   # minimum ustar for filtering out nighttime fluxes
    Fc_limit = 0.005     # lower limit of Re (ecosystem respiration) (mg/m2/s)
    gap_limit = 0.025    # 0.025 --> 95% confidence interval

    Tak = npzeros(len(Ta))
    tr = npzeros(len(Ta))
    ea = npzeros(len(Ta))
    es = npzeros(len(Ta))
    vpd = npzeros(len(Ta))
    #--------------------------------------------------------------------------
    # calculation of vapor pressure deficit
    a = [13.3185, 1.9760, 0.6445, 0.1299]
    for i in range(n_Compensate):
        Tak[i] = Ta[i] + 273.15
        tr[i] = 1.0 - (373.15/Tak[i])
        es[i] = 1013.25*exp(a[0]*tr[i] - a[1]*(tr[i]**2) - (a[2]*(tr[i]**3)) - a[3]*(tr[i]**4))  # hPa
    for i in range(n_L1):
        ea[i] = h2o[i]
        vpd[i] = float(es[i]) - float(ea[i])  # unit is hPa

    Fc_filled = copy.deepcopy(Fsc)

    print 'Gap Filling Process'
    print 'Before running this program, '
    print ' please make sure that you correctly set all parameters'
    #print 'E0_const', E0_const
    #print 'nn', nn
    #print 'num_point_per_day', num_point_per_day
    #print 'num_day_2', num_day_2
    #print 'num_day_re', num_day_re
    #print 'noverlap', noverlap
    #print 'drsdn', drsdn
    #print 'dta', dta
    #print 'dvpd', dvpd
    #print '-------------------------------------------------------------------'

    index = []
    for main_j in range(num_avg):    # loop for gap-filling of CO2 fluxes
        seg_start_i = main_j * num_segment
        seg_fin_i = seg_start_i + num_segment
        if((seg_start_i + 2 * num_segment) > n_L1):
            seg_fin_i = n_L1
        x2 = []
        x3 = []
        #--------------------------------------------------------------------------
        if(main_j == 0):
            print 'Application of modified lookup table method'
        #--------------------------------------------------------------------------
        for i in range(seg_start_i, seg_fin_i):
            if(itime[i] == 1):
                ii = 0
                if(isnan(Fsc[i]) == True):
                    jj = 0
                    while ((ii < 1) and (jj <= 4)):
                        ta_f = Ta[i]
                        rsdn_f = rsdn[i]
                        vpd_f = vpd[i]
                        i0 = i - jj * num_day_2 * num_point_per_day
                        i1 = i + jj * num_day_2 * num_point_per_day + 1
                        if(i0 < 1):
                            i0 = 0
                            i1 = 2 * jj * num_day_2 * num_point_per_day + 1
                        if(i1 >= n_L1):
                            i0 = n_L1 - 2 * jj * num_day_2 * num_point_per_day
                            i1 = n_L1
                        if(i0 < 1):
                            i0 = 0
                        ks = 0
                        for j in range(i0, i1):
                            if((fabs(vpd_f - vpd[j]) < dvpd) and \
                               (fabs(rsdn_f - rsdn[j]) < drsdn) and \
                               (fabs(ta_f - Ta[j]) < dta) and \
                               (isnan(Fsc[j]) == False)):
                                ks = ks + 1
                                x3_temp = []
                                x2.append(Fsc[j])
                                x3_temp.append(j)
                                x3_temp.append(vpd[j])
                                x3_temp.append(rsdn[j])
                                x3_temp.append(Ta[j])
                                x3_temp.append(ks)
                                x3.append(x3_temp)
                        ii = ks
                        #index_temp = []
                        #index_temp.append(i)
                        #index_temp.append(i0)
                        #index_temp.append(i1)
                        #index_temp.append(ks)
                        #index_temp.append(main_j)
                        #index.append(index_temp)
                        if(ks >= 1):
                            Fc_filled[i] = npmedian(nparray(x2))
                        jj = jj + 1
                        x2 = []
                        x3 = []
                    if(ii < 1):
                        jj = 0
                        while(ii < 1):
                            rsdn_f = rsdn[i]
                            i0 = i - jj * num_day_2 * num_point_per_day
                            i1 = i + jj * num_day_2 * num_point_per_day + 1
                            if(i0 < 1):
                                i0 = 0
                                i1 = 2 * jj * num_day_2 * num_point_per_day + 1
                            if(i1 >= n_L1):
                                i0 = n_L1 - 2 * jj * num_day_2 * num_point_per_day
                                i1 = n_L1
                            if(i0 < 0):
                                i0 = 0
                            ks = 0
                            for j in range(i0, i1):
                                if((fabs(rsdn_f - rsdn[j]) < drsdn) and \
                                   (isnan(Fsc[j]) == False)):
                                    ks = ks + 1
                                    x3_temp = []
                                    x2.append(Fsc[j])
                                    x3_temp.append(j)
                                    x3_temp.append(vpd[j])
                                    x3_temp.append(rsdn[j])
                                    x3_temp.append(Ta[j])
                                    x3_temp.append(ks)
                                    x3.append(x3_temp)
                            ii = ks
                            #index_temp = []
                            #index_temp.append(i)
                            #index_temp.append(i0)
                            #index_temp.append(i1)
                            #index_temp.append(ks)
                            #index_temp.append(main_j)
                            #index.append(index_temp)
                            if(ks >= 1):
                                Fc_filled[i] = npmedian(nparray(x2))
                            jj = jj + 1
                            x2 = []
                            x3 = []
    x2 = []
    x3 = []
    ks = 0
    d = npzeros((n_L1, 5))
    d2 = npzeros((n_L1, 5))
    dd = npzeros((n_L1, 5))
    x4 = []

    # Regression to Lloyd-Taylor equation
    print 'Regression to Lloyd-Taylor equation'
    if(E0_const == True):
        for i in range(ni-1, n_Compensate, num_point_per_day):
            t1 = npzeros(nd)
            for j in range(nd):
                t1[j] = Fsc[i + j]
            # Sort in descending order
            t2, IX = Common.matlab_sort(t1)
            k2 = 0
            for k in range(nd-1):
                if((isnan(t2[k]) == False) and \
                   (t2[k] < upper_Fc) and \
                   (t2[k+1] > Fc_limit)):
                    k2 = k2 + 1
            if(k2 >= 2):
                for j in range(nd-1):
                    if((itime[i+1 + IX[j]] == 0) and \
                       (isnan(t2[j]) == False) and \
                       (isnan(Ta[i+1 + IX[j]]) == False) and \
                       (t2[j] < upper_Fc) and \
                       (t2[j + 1] > Fc_limit) and \
                       (iustar[i + IX[j]] == 0) and \
                       (iustar[i + IX[j+1]] == 0)):
                        x3.append(t2[j])
                        x3.append(t2[j+1])
                        x2.append(Ta[i + IX[j]])
                        x2.append(Ta[i + IX[j+1]])
                        x4.append(date[i + IX[j]])
                        x4.append(date[i + IX[j+1]])
                        ks = ks + n1
                        break
        TC = copy.deepcopy(nparray(x2))
        PV = copy.deepcopy(nparray(x3))
        betafit = spfmin(Common.Reco, beta0, args=(TC, PV), disp=False)
        A = betafit[0]
        B = betafit[1]
        yfit = npzeros(len(TC))
        for i in range(len(TC)):
            yfit[i] = A * exp(B * (1 / (10 + 46.02) - 1 / (TC[i] + 46.02)))
        E0 = betafit[1]
        E0l = copy.deepcopy(E0)
        # figure(1)
        # plot(TC, PV, 'ko', TC, yfit, 'or')
        # grid
        # xlabel('air temperature (^oC)')
        # ylabel('Ecosystem respiration(mgm^{-2}s^{-1})')
        # TC = x2'
        # PV = x3'
        #
        # [beta, resnorm] = lsqcurvefit(@myfun, beta0, TC, PV)
        #
        # A = beta(1)
        # B = beta(2)
        # yfit = A.*exp(B.*(1./(10.+46.02)-1./(TC+46.02)))
        # E0 = betafit(2)
        # E0l = E0
        #
        # figure(5)
        # plot(TC, PV, 'ko', TC, yfit, 'or')
        # grid
        # xlabel('air temperature (^oC)')
        # ylabel('Ecosystem respiration(mgm^{-2}s^{-1})')

    x2 = []
    x3 = []
    t1 = []
    t2 = []
    TC = []
    PV = []
    yfit = npzeros(len(TC))

    #num_day_re = 20
    #noverlap = 5
    #avgtime = 30
    delta = (60 / avgtime) * 24 * num_day_re
    dnoverlap = (60 / avgtime) * 24 * noverlap
    jj = 0
    sday = []
    Rref = []
    RE_limit = []
    stdev_E0 = []
    E0v = []
    REs = []
    Taylor_date = []
    yfit_array = []
    for i in range(0, n_L1, dnoverlap):
        i0 = int(i - delta / 2)
        i1 = int(i + delta / 2)
        if(i0 < 1):
            i0 = 0
            i1 = int(i0 + delta)
        if(i1 >= n_L1):
            i0 = int(n_L1 - delta) - 1
            i1 = n_L1
        ks = 1
        for j in range(i0+ni-1, i1, num_point_per_day):
            t1 = npzeros(nd)
            for k in range(nd):
                t1[k] = Fsc[j + k]
            # Sort in descending order
            t2, IX = Common.matlab_sort(t1)
            k2 = 1
            for k in range(nd-1):
                if((isnan(t2[k]) == False) and \
                   (t2[k] < upper_Fc) and \
                   (t2[k+1] > Fc_limit)):
                    k2 = k2 + 1
            if(k2 >= n1):
                for k in range(nd-1):
                    if((itime[j+1 + IX[k]] == 0) and \
                       (isnan(t2[k]) == False) and \
                       (isnan(Ta[j + IX[k]]) == False) and \
                       (t2[k] < upper_Fc) and \
                       (t2[k+1] > Fc_limit) and \
                       (iustar[j + IX[k]] == 0) and \
                       (iustar[j + IX[k+1]] == 0)):
                        x3.append(t2[k])
                        x3.append(t2[k+1])
                        x2.append(Ta[j + IX[k]])
                        x2.append(Ta[j + IX[k+1]])
                        Taylor_date.append(str(date[j + IX[k]]))
                        Taylor_date.append(str(date[j + IX[k+1]]))
                        ks = ks + n1
                        break
        ks = ks - 1
        if(ks < 6):
            if(E0_const == True):
                Rref.append(float('NaN'))
                RE_limit.append(float('NaN'))
                jj = jj + 1
            else:
                Rref.append(float('NaN'))
                E0v.append(float('NaN'))
                stdev_E0.append(float('NaN'))
                RE_limit.append(float('NaN'))
                jj = jj + 1
        else:
            TC = copy.deepcopy(nparray(x2))
            PV = copy.deepcopy(nparray(x3))
            if(E0_const == True):
                betafit = spfmin(Common.Reco2, beta0, args=(TC, PV, E0l), disp=False)
                A = betafit[0]
                Rref.append(A)
                for j in range(len(TC)):
                    yfit = A * exp(E0 * (1.0/(10.0+46.02) - 1.0/(TC[j] + 46.02)))
                    yfit_array.append(yfit)
                    REs.append(PV[j] - yfit)
                sz = nparray(REs).shape
                upper = fabs(Common.tq(gap_limit, sz[0]-1))
                RE_limit.append(upper*Common.stdn1(nparray(REs))/sqrt(sz[0]))
                jj = jj + 1
            else:
                betafit = spfmin(Common.Reco2, beta0, args=(TC, PV, E0))
                A = betafit[0]
                B = betafit[1]
                Rref.append(A)
                E0v.append(B)
                if((B < 0) or (B > 450)):
                    E0v.append(float('NaN'))
                for j in range(len(TC)):
                    yfit = A * exp(E0v[jj] * (1.0/(10.0+46.02) - 1.0/(TC[j] + 46.02)))
                    yfit_array.append(yfit)
                    REs.append(PV[j] - yfit)
                sz = nparray(REs).shape
                upper = abs(Common.tq(gap_limit, sz[0]-1))
                stdev_E0.append(Common.stdn1(REs) / sqrt(sz[0]))
                RE_limit.append(upper * Common.stdn1(nparray(REs)) / sqrt(sz[0]))
                jj = jj + 1

        # Regression to Lloyd-Taylor equation with 28-day segmentation
        date_extracted = re.search('^(\d{4}[-]\d{2}[-]\d{2})', str(Taylor_date[0]))
        #print date_extracted.group(0)
        if(date_extracted != None):
            fname = 'Plot_L2_1_' + str(date_extracted.group(0)) + '.csv'
            output_plot_1_file_path = os.path.join(output_path, fname)
            try:
                output_plot_1_fp = open(output_plot_1_file_path, 'w+')
            except IOError:
                print "IO error;Check the output File: ", output_plot_1_file_path
                return 'L2 failed'
            for i in range(len(TC)):
                file_plot_str = StringIO()
                file_plot_str.write(Taylor_date[i] + ',')          #1
                file_plot_str.write(str(A) + ',')                  #2
                file_plot_str.write(str(B) + ',')                  #3
                file_plot_str.write(str(TC[i]) + ',')              #4
                file_plot_str.write(str(PV[i]) + ',')              #5
                file_plot_str.write(str(yfit_array[i]) + '\n')     #6
                output_plot_string = file_plot_str.getvalue()
                output_plot_1_fp.write(output_plot_string)
            output_plot_1_fp.close()

        sday_temp = []
        sday_temp.append(i)
        sday_temp.append(i0)
        sday_temp.append(i1)
        sday.append(sday_temp)
        x2 = []
        x3 = []
        t1 = []
        t2 = []
        TC = []
        PV = []
        Taylor_date = []
        REs = []
        yfit = []

    sday = nparray(sday)
    if(E0_const == True):
        print 'Long-term E0 '
        E0s = copy.deepcopy(E0l)
    else:
        E0v_s = []
        stdev_E0_s = []
        for k in range(len(E0v)):
            E0v_s.append(E0v[k]/stdev_E0[k])
            stdev_E0_s.append(1/stdev_E0[k])
        print 'Short-term E0 '
        E0s = npnansum(E0v_s)/npnansum(stdev_E0_s)

    Rref = []
    #REs = []
    #RE_limit = []
    jj = 0
    for i in range(0, n_L1, dnoverlap):
        i0 = i - delta / 2
        i1 = i + delta / 2
        if(i0 < 1):
            i0 = 0
            i1 = i0 + delta
        if(i1 >= n_L1):
            i0 = n_L1 - delta - 1
            i1 = n_L1
        ks = 1
        for j in range(i0+ni-1, i1, num_point_per_day):
            t1 = npzeros(nd)
            for k in range(nd):
                t1[k] = Fsc[j + k]
            # Sort in descending order
            t2, IX = Common.matlab_sort(t1)
            k2 = 1
            for k in range(nd-1):
                if((isnan(t2[k]) == False) and \
                   (t2[k] < upper_Fc) and \
                   (t2[k+1] > Fc_limit)):
                    k2 = k2 + 1
            if(k2 >= n1):
                for k in range(nd-1):
                    if((itime[j+1 + IX[k]] == 0) and \
                       (isnan(t2[k]) == False) and \
                       (isnan(Ta[j + IX[k]]) == False) and \
                       (t2[k] < upper_Fc) and \
                       (t2[k+1] > Fc_limit) and \
                       (iustar[j + IX[k]] == 0) and \
                       (iustar[j + IX[k+1]] == 0)):
                        x3.append(t2[k])
                        x3.append(t2[k + 1])
                        x2.append(Ta[j + IX[k]])
                        x2.append(Ta[j + IX[k+1]])
                        ks = ks + n1
                        break
        ks = ks - 1
        if(ks < 6):
            # Rref.append(Rref[jj])
            # RE_limit.append(RE_limit[jj])
            Rref.append(Rref[-1])
            RE_limit.append(RE_limit[-1])
            if(E0_const != True):
                stdev_E0.append(stdev_E0[jj])
            jj = jj + 1
        else:
            TC = nparray(x2)
            PV = nparray(x3)
            betafit = spfmin(Common.Reco2, beta0, args=(TC, PV, E0s), disp=False)
            A = betafit[0]
            Rref.append(A)
            for j in range(len(TC)):
                yfit = Rref[jj] * exp(E0s * (1 / (10 + 46.02) - 1 / (TC[j] + 46.02)))
                REs.append(PV[j] - yfit)
            sz = nparray(REs).shape
            upper = abs(Common.tq(gap_limit, sz[0]-1))
            RE_limit.append(upper*Common.stdn1(REs)/sqrt(sz[0]))
            jj = jj + 1
        x2 = []
        x3 = []
        t1 = []
        t2 = []
        TC = []
        PV = []
        #for k in REs:
        #    print k
        REs = []
    #for k in Rref:
    #    print k

    ##
    ks = 0
    nsp2 = npzeros((n_L1 / num_point_per_day))
    RE = npzeros(n_L1)
    GPP = npzeros(n_L1)
    for i in range(n_L1):
        RE[i] = float('NaN')
        GPP[i] = float('NaN')
    for i in range(0, n_L1, num_point_per_day):
        i0 = i
        i1 = i + num_point_per_day
        if(i0 >= sday[ks][1]):
            ks = ks + 1
        if(i0 >= sday[len(sday)-1][1]):
            ks = len(sday)-1
        for j in range(i0, i1):
            if(E0_const == True):
                yfit = Rref[ks-1] * exp(E0l * (1.0 / (10 + 46.02) - 1.0 / (Ta[j] + 46.02)))
            else:
                yfit = Rref[ks-1] * exp(E0s * (1.0 / (10 + 46.02) - 1.0 / (Ta[j] + 46.02)))
            RE[j] = yfit
            if(itime[j] == 0):    # nighttime condition
                RE[j] = Fc_filled[j]
                if((isnan(Fsc[j]) == True) or \
                   ((Fsc[j]-yfit) < RE_limit[ks-1]) or \
                   ((Fsc[j]-yfit) > 1.0 * RE_limit[ks-1]) or \
                   (iustar[j] == 1)):
                    nsp2[ks-1] = nsp2[ks-1] + 1
                    Fc_filled[j] = yfit
                    RE[j] = Fc_filled[j]
            GPP[j] = RE[j] - Fc_filled[j]

    #figure(2)
    #plot(time, Fsc, time, Fc_filled[:], 'or')
    #set(gca,'XTick',[year0:1/12:year0+0.9999999])
    #set(gca,'xticklabel',xticks)
    #ylim = [-1.5, 1.5]
    #set(gca,'xLim',xlim(:))
    #ylabel('F_c (mgm^{-2}s^{-1})')
    #--------------------------------------------------------------------------
    print 'Gap-filling of LE'
    #--------------------------------------------------------------------------
    x2 = []
    x3 = []
    index = []
    LE_filled = copy.deepcopy(LEsc)
    for main_j in range(num_avg):    # loop for gap-filling of H2O fluxes
        seg_start_i = main_j * num_segment
        seg_fin_i = seg_start_i + num_segment
        if((seg_start_i + 2 * num_segment) > n_L1):
            seg_fin_i = n_L1
        x2 = []
        x3 = []
        for i in range(seg_start_i, seg_fin_i):
            ii = 0
            if(isnan(LEsc[i]) == True):
                jj = 0
                while((ii < 1) and (jj <= 4)):
                    ta_f = Ta[i]
                    rsdn_f = rsdn[i]
                    vpd_f = vpd[i]
                    i0 = i - jj * num_day_2 * num_point_per_day
                    i1 = i + jj * num_day_2 * num_point_per_day + 1
                    if(i0 < 1):
                        i0 = 0
                        i1 = 2 * jj * num_day_2 * num_point_per_day + 1
                    if(i1 >= n_L1):
                        i0 = n_L1 - 2 * jj * num_day_2 * num_point_per_day - 1
                        i1 = n_L1
                    if(i0 < 1):
                        i0 = 0
                    ks = 0
                    for j in range(i0, i1):
                        if((fabs(vpd_f - vpd[j]) < dvpd) and \
                           (fabs(rsdn_f - rsdn[j]) < drsdn) and \
                           (fabs(ta_f - Ta[j]) < dta) and \
                           (isnan(LEsc[j]) == False)):
                            x3_temp = []
                            x2.append(LEsc[j])
                            x3_temp.append(j)
                            x3_temp.append(vpd[j])
                            x3_temp.append(rsdn[j])
                            x3_temp.append(Ta[j])
                            x3_temp.append(ks)
                            x3.append(x3_temp)
                            ks = ks + 1
                    ii = ks
                    #index_temp = []
                    #index_temp.append(i)
                    #index_temp.append(i0)
                    #index_temp.append(i1)
                    #index_temp.append(ks)
                    #index_temp.append(main_j)
                    #index.append(index_temp)
                    if(ks >= 1):
                        LE_filled[i] = npmedian(nparray(x2))
                    jj = jj + 1
                    x2 = []
                    x3 = []
                if(ii < 1):
                    jj = 0
                    while(ii < 1):
                        rsdn_f = rsdn[i]
                        i0 = i - jj * num_day_2 * num_point_per_day
                        i1 = i + jj * num_day_2 * num_point_per_day + 1
                        if(i0 < 1):
                            i0 = 0
                            i1 = 2 * jj * num_day_2 * num_point_per_day + 1
                        if(i1 >= n_L1):
                            i0 = n_L1 - 2 * jj * num_day_2 * num_point_per_day - 1
                            i1 = n_L1
                        if(i0 < 1):
                            i0 = 0
                        ks = 0
                        for j in range(i0, i1):
                            if((fabs(rsdn_f - rsdn[j]) < drsdn) and \
                               (isnan(LEsc[j]) == False)):
                                x3_temp = []
                                x2.append(LEsc[j])
                                x3_temp.append(j)
                                x3_temp.append(vpd[j])
                                x3_temp.append(rsdn[j])
                                x3_temp.append(Ta[j])
                                x3_temp.append(ks)
                                x3.append(x3_temp)
                                ks = ks + 1
                        ii = ks
                        #index_temp = []
                        #index_temp.append(i)
                        #index_temp.append(i0)
                        #index_temp.append(i1)
                        #index_temp.append(ks)
                        #index_temp.append(main_j)
                        #index.append(index_temp)
                        if(ks >= 1):
                            LE_filled[i] = npmedian(nparray(x2))
                        jj = jj + 1
                        x2 = []
                        x3 = []
    x2 = []
    x3 = []
    ks = 0

    #figure(3)
    #plot(time, LEsc, time, LE_filled[:], 'or')
    #set(gca,'XTick',[year0:1/12:year0+0.9999999])
    #set(gca,'xticklabel',xticks)
    #ylim = [-100, 600]
    #set(gca,'xLim',xlim(:))
    #ylabel('LE (Wm^{-2})')
    ##

    #--------------------------------------------------------------------------
    print 'Gap-filling of H (sensible heat flux)'
    #--------------------------------------------------------------------------
    x2 = []
    x3 = []
    index = []
    H_filled = copy.deepcopy(Hsc)
    for main_j in range(num_avg):    # loop for gap-filling of sensible heat fluxes
        seg_start_i = main_j * num_segment
        seg_fin_i = seg_start_i + num_segment
        if((seg_start_i + 2 * num_segment) >= n_L1):
            seg_fin_i = n_L1
        x2 = []
        x3 = []
        for i in range(seg_start_i, seg_fin_i):
            ii = 0
            if(isnan(Hsc[i]) == True):
                jj = 0
                while ((ii < 1) and (jj <= 4)):
                    ta_f = Ta[i]
                    rsdn_f = rsdn[i]
                    vpd_f = vpd[i]
                    i0 = i - jj * num_day_2 * num_point_per_day
                    i1 = i + jj * num_day_2 * num_point_per_day + 1
                    if(i0 < 1):
                        i0 = 0
                        i1 = 2 * jj * num_day_2 * num_point_per_day + 1
                    if(i1 >= n_L1):
                        i0 = n_L1 - 2 * jj * num_day_2 * num_point_per_day - 1
                        i1 = n_L1
                    if(i0 < 1):
                        i0 = 0
                    ks = 0
                    for j in range(i0, i1):
                        if((fabs(vpd_f - vpd[j]) < dvpd) and \
                           (fabs(rsdn_f - rsdn[j]) < drsdn) and \
                           (fabs(ta_f - Ta[j]) < dta) and \
                           (isnan(Hsc[j]) == False)):
                            x3_temp = []
                            x2.append(Hsc[j])
                            x3_temp.append(j)
                            x3_temp.append(vpd[j])
                            x3_temp.append(rsdn[j])
                            x3_temp.append(Ta[j])
                            x3_temp.append(ks)
                            ks = ks + 1
                            x3.append(x3_temp)
                    ii = ks
                    #index_temp = []
                    #index_temp.append(i)
                    #index_temp.append(i0)
                    #index_temp.append(i1)
                    #index_temp.append(ks)
                    #index_temp.append(main_j)
                    #index.append(index_temp)
                    if(ks >= 1):
                        H_filled[i] = npmedian(nparray(x2))
                    jj = jj + 1
                    x2 = []
                    x3 = []
                if(ii < 1):
                    jj = 0
                    while(ii < 1):
                        rsdn_f = rsdn[i]
                        i0 = i - jj * num_day_2 * num_point_per_day
                        i1 = i + jj * num_day_2 * num_point_per_day + 1
                        if(i0 < 1):
                            i0 = 0
                            i1 = 2 * jj * num_day_2 * num_point_per_day + 1
                        if(i1 >= n_L1):
                            i0 = n_L1 - 2 * jj * num_day_2 * num_point_per_day - 1
                            i1 = n_L1
                        if(i0 < 1):
                            i0 = 0
                        ks = 0
                        for j in range(i0, i1):
                            if((fabs(rsdn_f - rsdn[j]) < drsdn) and \
                               (isnan(Hsc[j]) == False)):
                                ks = ks + 1
                                x3_temp = []
                                x2.append(Hsc[j])
                                x3_temp.append(j)
                                x3_temp.append(vpd[j])
                                x3_temp.append(rsdn[j])
                                x3_temp.append(Ta[j])
                                x3_temp.append(ks)
                                x3.append(x3_temp)
                        ii = ks
                        #index_temp = []
                        #index_temp.append(i)
                        #index_temp.append(i0)
                        #index_temp.append(i1)
                        #index_temp.append(ks)
                        #index_temp.append(main_j)
                        #index.append(index_temp)
                        if(ks >= 1):
                            H_filled[i] = npmedian(nparray(x2))
                        jj = jj + 1
                        x2 = []
                        x3 = []
    x2 = []
    x3 = []
    ks = 0

    #figure(4)
    #plot(time,Hsc,time,H_filled[:],'or')
    #set(gca,'XTick',[year0:1/12:year0+0.9999999])
    #set(gca,'xticklabel',xticks)
    #ylim = [-100,600]
    #set(gca,'xLim',xlim(:))
    #ylabel('H (Wm^{-2})')
    ##

    print '-------------------------------------------------------------------'
    print 'Calculating daily mean values'
    print '-------------------------------------------------------------------'
    # disp('Press any key to calculate daily mean values')
    # pause
    print '-------------------------------------------------------------------'
    print 'calculation of daily mean. Unit is [C g/m2/day].'
    print '-------------------------------------------------------------------'
    Fsc_daily = npzeros(n_L1/num_point_per_day)
    GPP_daily = npzeros(n_L1/num_point_per_day)
    RE_daily = npzeros(n_L1/num_point_per_day)
    ET_daily = npzeros(n_L1/num_point_per_day)
    LE_daily = npzeros(n_L1/num_point_per_day)
    H_daily = npzeros(n_L1/num_point_per_day)
    k = 0
    for i in range(0, n_L1, num_point_per_day):
        for j in range(i, i + num_point_per_day):
            Fsc_daily[k] = Fsc_daily[k] + Fc_filled[j]
            GPP_daily[k] = GPP_daily[k] + GPP[j]
            RE_daily[k] = RE_daily[k] + RE[j]
            ET_daily[k] = ET_daily[k] + LE_filled[j]
        Fsc_daily[k] = Fsc_daily[k] * (60*float(avgtime)/1000*12/44)
        GPP_daily[k] = GPP_daily[k] * (60*float(avgtime)/1000*12/44)
        RE_daily[k] = RE_daily[k] * (60*float(avgtime)/1000*12/44)
        ET_daily[k] = ET_daily[k] * (60*float(avgtime)/(2440)/1000)
        k = k + 1

    NEE_annual = npmean(Fc_filled)*float((1800*48*(n_L1/(60.0/avgtime*24))*12/44/1000.0))
    GPP_annual = npmean(GPP)*float((1800*48*(n_L1/(60.0/avgtime*24))*12/44/1000.0))
    RE_annual = npmean(RE)*float((1800*48*(n_L1/(60.0/avgtime*24))*12/44/1000.0))
    NEE_std_annual = Common.stdn1(Fsc_daily)/sqrt(n_L1/(60.0/avgtime*24))*(n_L1/(60.0/avgtime*24))
    GPP_std_annual = Common.stdn1(GPP_daily)/sqrt(n_L1/(60.0/avgtime*24))*(n_L1/(60.0/avgtime*24))
    RE_std_annual = Common.stdn1(RE_daily)/sqrt(n_L1/(60.0/avgtime*24))*(n_L1/(60.0/avgtime*24))

    print 'NEE_annual', NEE_annual
    print 'GPP_annual', GPP_annual
    print 'RE_annual', RE_annual
    print 'NEE_std_annual', NEE_std_annual
    print 'GPP_std_annual', GPP_std_annual
    print 'RE_std_annual', RE_std_annual

    print '-------------------------------------------------------------------'
    print 'Calculating daily mean ETs'
    print '-------------------------------------------------------------------'
    print 'calculation of daily mean. Unit is [C g/m2/day].'
    print '-------------------------------------------------------------------'
    k = 0
    for i in range(0, n_L1, num_point_per_day):
        for j in range(i, i + num_point_per_day):
            LE_daily[k] = LE_daily[k] \
                + LE_filled[j]*(60*float(avgtime)/(2440*1000))
        k = k + 1

    # npmean(Fc_filled)
    LE_annual = npmean(LE_filled)*(1800.0*48.0*float(n_L1/(60.0/avgtime*24))/2440.0/1000.0)
    LE_std_annual = Common.stdn1(LE_daily)/sqrt(n_L1/float(60.0/avgtime*24.0))*float(n_L1/(60.0/avgtime*24.0))
    print 'LE_annual', LE_annual
    print 'LE_std_annual', LE_std_annual

    print '-------------------------------------------------------------------'
    print 'Calculating daily mean heating rate'
    print '-------------------------------------------------------------------'
    print 'calculation of daily mean heating rate. Unit is [MJ/m2/day].'
    print '-------------------------------------------------------------------'
    k = 0
    for i in range(0, n_L1, num_point_per_day):
        for j in range(i, i + num_point_per_day):
            H_daily[k] = H_daily[k] \
                + H_filled[j]*(60*float(avgtime)/(1004*1.0))/(10E6)
        k = k + 1

    H_annual = sum(H_daily)
    H_std_annual = Common.stdn1(H_daily)
    print 'H_annual', H_annual
    print 'H_std_annual', H_std_annual

    for i in range(len(Fsc)):
        file_plot_str = StringIO()
        file_plot_str.write(str(date[i]) + ',')         #1
        file_plot_str.write(str(Fsc[i]) + ',')          #2
        file_plot_str.write(str(Fc_filled[i]) + ',')    #3
        file_plot_str.write(str(LEsc[i]) + ',')         #4
        file_plot_str.write(str(LE_filled[i]) + ',')    #5
        file_plot_str.write(str(Hsc[i]) + ',')          #6
        file_plot_str.write(str(H_filled[i]) + '\n')    #7
        output_plot_string = file_plot_str.getvalue()
        output_plot_2_fp.write(output_plot_string)
    output_plot_2_fp.close()

    # For output
    # Assume data start from 0:00
    output_Fsc_daily = npzeros(n_L1)
    output_GPP_daily = npzeros(n_L1)
    output_RE_daily = npzeros(n_L1)
    output_ET_daily = npzeros(n_L1)
    output_LE_daily = npzeros(n_L1)
    output_H_daily = npzeros(n_L1)
    j = 0
    for i in range(n_L1):
        output_Fsc_daily[i] = Fsc_daily[j]
        output_GPP_daily[i] = GPP_daily[j]
        output_RE_daily[i] = RE_daily[j]
        output_ET_daily[i] = ET_daily[j]
        output_H_daily[i] = H_daily[j]
        output_LE_daily[i] = LE_daily[j]
        if((i+1) % num_point_per_day == 0):
            j = j + 1

    for i in range(n_L1):
        file_str = StringIO()
        file_str.write(str(output_ET_daily[i]) + ',')    #1
        file_str.write(str(Fc_filled[i]) + ',')          #2
        file_str.write(str(output_Fsc_daily[i]) + ',')   #3
        file_str.write(str(GPP[i]) + ',')                #4
        file_str.write(str(output_GPP_daily[i]) + ',')   #5
        file_str.write(str(GPP_annual) + ',')            #6
        file_str.write(str(GPP_std_annual) + ',')        #7
        file_str.write(str(H_filled[i]) + ',')           #8
        file_str.write(str(output_H_daily[i]) + ',')     #9
        file_str.write(str(H_annual) + ',')              #10
        file_str.write(str(H_std_annual) + ',')          #11
        file_str.write(str(LE_filled[i]) + ',')          #12
        file_str.write(str(output_LE_daily[i]) + ',')    #13
        file_str.write(str(LE_annual) + ',')             #14
        file_str.write(str(LE_std_annual) + ',')         #15
        file_str.write(str(NEE_annual) + ',')            #16
        file_str.write(str(NEE_std_annual) + ',')        #17
        file_str.write(str(output_RE_daily[i]) + ',')    #18
        file_str.write(str(RE_annual) + ',')             #19
        file_str.write(str(RE_std_annual) + ',')         #20
        file_str.write(str(co2[i]) + ',')                #21
        file_str.write(str(rsdn[i]) + ',')               #22
        file_str.write(str(ea[i]) + ',')                 #23
        file_str.write(str(h2o[i]) + ',')                #24
        file_str.write(str(Ta[i]) + ',')                 #25
        file_str.write(str(vpd[i]) + '\n')               #26
        output_string = file_str.getvalue()
        output_fp.write(output_string)
    output_fp.close()

    return 'L2 Done'
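# A minimal sketch of the Lloyd-Taylor (1994) respiration model fitted in the
# gap-filling routine above, with Tref = 10 degC and T0 = -46.02 degC exactly
# as hard-coded in its regression terms 1/(10 + 46.02) - 1/(Ta + 46.02):
from numpy import exp as npexp

def lloyd_taylor_re(Ta, Rref, E0, Tref=10.0, T0=-46.02):
    """Ecosystem respiration at air temperature Ta (degC)."""
    return Rref * npexp(E0 * (1.0 / (Tref - T0) - 1.0 / (Ta - T0)))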