def set_param(self, keywords, value):
    """Set one parameter on this object from a parsed (keywords, value) pair.

    keywords[1] is either a plain key name, a ``comment[...]`` entry
    (detected by ``[`` at position 7), or a ``range[i][j]`` entry.
    Plain keys are dispatched by type table (``lh_flt_param`` -> float,
    ``lh_int_param`` -> int, ``"prob"`` -> numpy array of floats);
    range entries are appended into ``self.range[i-1]``.

    NOTE(review): assumes ``set_str`` strips quoting and that ``value``
    is a comma-separated string for array parameters — confirm upstream.
    """
    pointPos = keywords[1].find("[")
    if pointPos < 0:
        # Plain keyword: dispatch on the parameter-type tables.
        key = keywords[1]
        if key in lh_flt_param:
            setattr(self, key, float(value))
        elif key in lh_int_param:
            setattr(self, key, int(value))
        elif key == "prob":
            self.prob = np.array(
                [float(x) for x in commaSeparatedList.parseString(set_str(value))]
            )
    elif pointPos == 7:
        # "[" at offset 7 => a "comment[...]" entry; collect the text.
        self.comment.append(set_str(value))
    else:
        # Indexed entry: expect "range[i][j]".
        modele = "range[" + Word(nums) + "][" + Word(nums) + "]"
        try:
            rg = modele.parseString(keywords[1])
        except ParseException:
            # Not a range[...] key: silently ignore, as before.
            pass
        else:
            i = int(rg[1])
            # BUG FIX: the original used "if", which grows self.range by at
            # most ONE empty slot; if range indices skip ahead (e.g. range[3]
            # arrives when only range[1] exists), self.range[i-1] raised
            # IndexError. Grow until slot i exists.
            while len(self.range) < i:
                self.range.append([])
            self.range[i - 1].append(
                [float(x) for x in commaSeparatedList.parseString(set_str(value))]
            )
def load_csv(filename, verbose=False):
    """Load a Comma Separated File into a list of lists.

    Each non-comment line (lines starting with '#' are skipped) is parsed
    with pyparsing's commaSeparatedList; purely numeric fields are converted
    to int, everything else is kept as a string.

    input: input filename, verbose mode (optional)
    output: list of rows (each row a list of int/str values)
    """
    import sys

    data = []
    if verbose:
        print('loading instances from %s: 0' % filename, end=' ')
    i = 0
    # BUG FIX: the original used the Python-2-only file() builtin and never
    # closed the handle (resource leak); "with open" closes it deterministically.
    with open(filename) as f:
        for line in f:
            if not line.startswith('#'):
                datainst = commaSeparatedList.parseString(line)
                inst = []
                for value in datainst:
                    if value.isdigit():
                        inst.append(int(value))
                    else:
                        inst.append(value)
                data.append(inst)
                i += 1
                if verbose:
                    # Progress indicator: a count every 100 rows, a dot every 50.
                    if i % 100 == 0:
                        print('%d' % i, end=' ')
                        sys.stdout.flush()
                    elif i % 50 == 0:
                        print('.', end=' ')
                        sys.stdout.flush()
    if verbose:
        print(' %d instances loaded' % i)
    return data
def set_param(self, line):
    """Parse an assignment line of the form ``name[<n>].<key>=<value>`` and
    set the matching attribute on this object.

    Lines where "]" is immediately followed by "=" (no keyword between them)
    are ignored. Otherwise a pyparsing grammar extracts the object name,
    index, dotted key path and value; only single-level keys are handled:
    ``ref`` (string), ``start``/``resolution`` (float), ``bp``/``sigma``
    (numpy float array from a comma-separated value).
    """
    # define grammar
    a = line.find("]")
    b = line.find("=")
    if(a>0 and b>0 and (b-a)==1):
        # "]=": no keyword between index and value — nothing to set.
        return
    else:
        # Grammar: <name>[<digits>]<.key.path>=<value>
        # NOTE(review): objectPath / divers are character sets defined
        # elsewhere in this module — not visible here.
        modele = Word( alphas ) + "[" + Word(nums) + "]" + Word(objectPath) + "=" + Word( divers )
        try:
            pd = modele.parseString( line )
        except ParseException as pe:
            # Line does not match the grammar: silently ignore.
            pass
        else:
            # Token positions: pd = [name, "[", index, "]", keypath, "=", value]
            obj = pd[0]
            key = pd[4]
            value = pd[6][:len(pd[6])-1]  # drop trailing character of the value token
            nb = int(pd[2])
            if(key[0]=="."):
                key = key[1:] #expect ".keyword"
            if(key.find(".")<0): #a single keyword
                # NOTE(review): ("ref") is just the string "ref", so this is a
                # SUBSTRING test (key in "ref"), matching "r", "re", "ref" —
                # probably meant ("ref",); confirm intent before changing.
                if(key in ("ref")):
                    setattr(self,key,set_str(value))
                elif(key in ("start","resolution")):
                    setattr(self,key,float(value))
                elif(key in ("bp","sigma")):
                    setattr(self,key,np.array([float(x) for x in commaSeparatedList.parseString(set_str(value))]))
def parse_stations(raw_stations):
    """Extract the setMarker(...) argument lists from *raw_stations* and
    return them as a list of dicts keyed by FIELD_KEYS, with longitude/
    latitude cast to float and id/poles/available cast to int."""
    logging.debug("Extracting raw stations...")
    filtered = re.findall(r'setMarker\((.+?)\);(?=setMarker|\})', raw_stations)
    logging.debug("Parsing stations...")
    parsed = []
    for station in filtered:
        # Only the first seven comma-separated fields are meaningful.
        fields = commaSeparatedList.parseString(station)[:7]
        record = dict(zip(FIELD_KEYS, (field.strip("' ") for field in fields)))
        record['longitude'] = float(record['longitude'])
        record['latitude'] = float(record['latitude'])
        for key in ('id', 'poles', 'available'):
            record[key] = int(record[key])
        parsed.append(record)
    logging.debug("Parsed {} stations".format(len(parsed)))
    return parsed
def parseReview(reviewNumber):
    """Fetch review *reviewNumber* from the rw.idx Berkeley DB index and
    return it as a dict keyed by reviewsColumns.

    Conversions applied: 'date' (unix timestamp -> datetime), 'price'
    (float, left unchanged when not numeric, e.g. "unknown"), 'score'
    (float).
    """
    database = db.DB()
    database.open("rw.idx")
    # BUG FIX: the original left the database handle open if get() raised;
    # try/finally guarantees close() runs on every path.
    try:
        review = database.get(str(reviewNumber))
    finally:
        database.close()
    reviewItems = commaSeparatedList.parseString(review).asList()
    reviewDict = dict(zip(reviewsColumns, reviewItems))
    reviewDict['date'] = datetime.datetime.fromtimestamp(int(reviewDict['date']))
    try:
        reviewDict['price'] = float(reviewDict['price'])
    except ValueError:
        # Non-numeric price (e.g. "unknown"): keep the raw string.
        pass
    reviewDict['score'] = float(reviewDict['score'])
    return reviewDict
def parse_stations(raw_stations):
    """Pull every setMarker(...) argument list out of *raw_stations* and
    build one dict per station (keys from FIELD_KEYS), casting the
    coordinate fields to float and the counter fields to int."""
    logging.debug("Extracting raw stations...")
    matches = re.findall(r'setMarker\((.+?)\);(?=setMarker|\})', raw_stations)
    logging.debug("Parsing stations...")
    stations = []
    # Field casts, applied in the same order as before: floats, then ints.
    casts = (
        ('longitude', float),
        ('latitude', float),
        ('id', int),
        ('poles', int),
        ('available', int),
    )
    for raw in matches:
        values = [token.strip("' ") for token in commaSeparatedList.parseString(raw)[:7]]
        entry = dict(zip(FIELD_KEYS, values))
        for name, caster in casts:
            entry[name] = caster(entry[name])
        stations.append(entry)
    logging.debug("Parsed {} stations".format(len(stations)))
    return stations
# commasep.py
#
# comma-separated list example, to illustrate the advantages of using
# the pyparsing commaSeparatedList as opposed to string.split(","):
# - leading and trailing whitespace is implicitly trimmed from list elements
# - list elements can be quoted strings, which can safely contain commas
#   without breaking into separate elements
from pyparsing import commaSeparatedList

testData = [
    "a,b,c,100.2,,3",
    "d, e, j k , m ",
    "'Hello, World', f, g , , 5.1,x",
    "John Doe, 123 Main St., Cleveland, Ohio",
    "Jane Doe, 456 St. James St., Los Angeles , California ",
    "",
]

for line in testData:
    # FIX: the original used Python-2-only print statements, which are a
    # syntax error on Python 3 (the only Python pyparsing 3.x supports);
    # converted to the print() function with identical output.
    print(commaSeparatedList.parseString(line))
    print(line.split(","))
    print()
''' Pick a backend to use ''' libimaging = library_loader.load_library(parser_args['use_back_end'],parser_args['precision']) from bullseye_mo import base_types base_types.force_precision(parser_args['precision']) from helpers import data_set_loader from bullseye_mo import convolution_filter from bullseye_mo import gridding_parameters ''' initially the output grids must be set to NONE. Memory will only be allocated before the first MS is read. ''' gridded_vis = None sampling_funct = None ms_names = commaSeparatedList.parseString(parser_args['input_ms']) ''' General strategy for IO and processing: for all measurement sets: read ms header data (the headers better correspond between measurement sets otherwise the user is really doing something weird and wonderful - this is a primary assumption) parse and create a list of facet centres parse and create a list of enabled channel ids parse and create a list of whereto each of the enabled channels should be gridded (we might do continuim imaging or smear only certain bands together for spectral imaging) allocate some memory initialize backend infrastructure with data that doesn't change from stride to stride now carve up the current measurement set primary table into no_chunks and process each such stride in the following loop: for all chunks: load chunk from disk (buffer the io) wait for any previous gridding operations to finish up deep copy chunk