def _boundingbox(parms, options, option_errors):
    if 'image_extents' in parms and parms['image_extents'] == 'on':
        # set the image_extents status True in product options
        options['image_extents'] = True

        # make sure we got upper left x,y and lower right x,y values
        if 'minx' in parms and utils.is_number(parms['minx']):
            options['minx'] = float(parms['minx'])
        else:
            option_errors.append("Please provide a valid upper left x value")

        if 'maxx' in parms and utils.is_number(parms['maxx']):
            options['maxx'] = float(parms['maxx'])
        else:
            option_errors.append("Please provide a valid lower right x value")

        if 'miny' in parms and utils.is_number(parms['miny']):
            options['miny'] = float(parms['miny'])
        else:
            option_errors.append("Please provide a valid lower right y value")

        if 'maxy' in parms and utils.is_number(parms['maxy']):
            options['maxy'] = float(parms['maxy'])
        else:
            option_errors.append("Please provide a valid upper left y value")

        # make sure values make some sort of sense
        # (only compare when both values were actually set, so missing
        # values reported above do not raise a KeyError)
        if 'minx' in options and 'maxx' in options and \
                options['minx'] >= options['maxx']:
            option_errors.append(
                "Upper left x value must be less than lower right x value")
        if 'miny' in options and 'maxy' in options and \
                options['miny'] >= options['maxy']:
            option_errors.append(
                "Lower right y value must be less than upper left y value")
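
# NOTE: every snippet in this section leans on a small is_number helper from
# a local utils module that is never shown.  The sketch below is an
# assumption, not the actual implementation -- a minimal parse-as-float
# predicate consistent with how most of these call sites use it:
def is_number(s):
    """Return True if s can be parsed as a float."""
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False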
def get(self, song_id, rec_id):
    song = rec = None
    if utils.is_number(song_id):
        song = Song.get_by_id(int(song_id))
        if song and utils.is_number(rec_id):
            rec = Recording.get_by_id(int(rec_id), parent=song.key())
    if song and rec:
        self.render("recording.html", song=song,
                    audiolink=rec.audiolink, performer=rec.performer)
    else:
        self.error(404)
def post(self, song_id, rec_id):
    song = rec = None
    if utils.is_number(song_id):
        song = Song.get_by_id(int(song_id))
        if song and utils.is_number(rec_id):
            rec = Recording.get_by_id(int(rec_id), parent=song.key())
    if song and rec:
        rec.delete_self()
        self.redirect(song.get_url())
    else:
        self.error(404)
def post(self, song_id, rec_id):
    song = rec = None
    if utils.is_number(song_id):
        song = Song.get_by_id(int(song_id))
        if song and utils.is_number(rec_id):
            rec = Recording.get_by_id(int(rec_id), parent=song.key())
    if not (song and rec):
        self.error(404)
        return
    audiolink = self.request.get("audiolink")
    performer = self.request.get("performer")
    if audiolink:
        rec.update(audiolink, performer)
        self.redirect("/song/" + str(song.key().id()))
    else:
        error = u"The link cannot be empty"
        self.render("recording.html", song=song, audiolink=audiolink,
                    performer=performer, error=error)
def incoming_message_response(data):
    debug = True
    if debug:
        sender = utils.is_number(data.get('From', None))
        receiver = utils.is_number(data.get('To', None))
        message = utils.clean_message(data.get('Body', None))
        sender_info = None
    else:
        sender = utils.is_number(data.values.get('From', None))
        receiver = utils.is_number(data.values.get('To', None))
        message = utils.clean_message(data.values.get('Body', None))
        sender_info = None

    if not sender:
        no_sender(receiver, message)
        return
    if not message:
        no_message(sender, receiver)
        return
    if not receiver:
        # Cannot text company; try to locate company by sender and send email
        no_receiver(sender, message)
        return

    admin = database.companies.find_one({'bot.phone': receiver})
    if not admin:
        # Receiver bot is not registered with any company.
        # Log it. Notify sender the message cannot be processed.
        return

    # Admin is sender
    if admin['admin'].get('phone', None) == sender:
        admin_is_sender(admin, receiver, message)
        return

    sender_info = database.jobs.find_one({'phone': sender})
    if sender_info:
        customer_is_sender(sender_info, receiver, admin, message)
        return

    for employee in admin['employees']:
        if employee.get('phone', "") == sender:
            employee_is_sender(employee, admin, receiver, message)
            return
def get(self, song_id):
    song = Song.get_by_id(int(song_id)) if utils.is_number(song_id) else None
    if song:
        self.render("recording.html", song=song)
    else:
        self.error(404)
def get(self, song_id):
    song = Song.get_by_id(int(song_id)) if utils.is_number(song_id) else None
    if song:
        self.render("song.html", song=song,
                    recordings=song.get_recordings(), edit_lyrics=True)
    else:
        self.error(404)
def applyReplacementRules(expression, rules, symboltable):
    # A symbolic ZERO is added to the expression because some ODEs may be
    # equal to zero, and a plain number has no .subs(...) attribute.  Adding
    # the Symbol forces the expression to be symbolic, so it is guaranteed
    # to support .subs(symbol, value).
    ZERO = Symbol("ZERO")
    expr = expression + ZERO
    vlist = list(rules)
    for v in vlist:
        symbol = symboltable[v]
        value = rules[v]
        if utils.is_number(value):
            if round(value, 0) == value:
                value = int(round(value, 0))
        expr = expr.subs(symbol, value)
    expr = expr.subs(ZERO, 0)
    return expr
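
# A small usage sketch for applyReplacementRules, assuming sympy Symbols and
# the utils.is_number predicate sketched earlier (the variable names here
# are hypothetical):
from sympy import symbols

x, y = symbols('x y')
symboltable = {'x': x, 'y': y}
rules = {'x': 3.0, 'y': 1.5}
result = applyReplacementRules(2 * x + y, rules, symboltable)
# x is substituted as the integer 3 (3.0 rounds cleanly), y stays 1.5,
# and the ZERO placeholder is substituted away: result == 7.5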
def stat_values_parser(self, cols, season):
    """
    Loops through a list of columns and returns a list of values,
    converting or skipping the col strings based on their content.

    :param cols: List of column values in a single gamelog.
    :param season: Season of the gamelog. Either 'regular' or 'playoff'.
    :returns: Parsed stat values.
    """
    stat_values = self.initialize_stat_values(season)
    for i, col in enumerate(cols):
        text = str(col.getText())
        # Date
        if i == 2:
            stat_values.append(datetime.strptime(text, '%Y-%m-%d'))
        # Home
        elif i == 4:
            stat_values.append(text != '@')
        # Percentages
        # Skip them because they can be calculated manually.
        elif i in {11, 14, 17}:
            pass
        # Number
        elif is_number(text):
            stat_values.append(float(text))
        # Text
        else:
            stat_values.append(text)
    return stat_values
def load_config(self, path):
    self.path = path
    with open(self.path) as config_file:
        self.config = [line.rstrip('\n') for line in config_file]
    # finds the names and values from the config file, wraps them in a
    # ConfigVariable, and stores them in the configVariables list
    for line in self.config:
        count = 0
        for character in line:
            if character == ':':
                variableHeld = line[count + 1:len(line)]
                if is_int(variableHeld):
                    variableHeld = int(variableHeld)
                elif is_number(variableHeld):
                    variableHeld = float(variableHeld)
                elif is_bool(variableHeld):
                    variableHeld = make_bool(variableHeld)
                # don't create an object if it has no value
                if variableHeld != '':
                    newConfigVar = ConfigVariable(line[0:count], variableHeld)
                    self.configVariables.append(newConfigVar)
            count += 1
def post(self, song_id):
    song = Song.get_by_id(int(song_id)) if utils.is_number(song_id) else None
    if song:
        song.delete_self()
        self.redirect("/")
    else:
        self.error(404)
def post(self, song_id):
    song = Song.get_by_id(int(song_id)) if utils.is_number(song_id) else None
    if song:
        lyrics = self.request.get("lyrics")
        song.set_lyrics(lyrics)
        self.redirect(song.get_url())
    else:
        self.error(404)
def __find_best_series(book, config):
    '''
    Queries the database to find a best guess for a series matching the
    given ComicBook, based on its name, year, issue number, and other text
    attributes.  Returns a SeriesRef if a reasonable guess was found, or
    None if one wasn't.
    '''

    # 1. obtain SeriesRefs for this book, removing some as dictated by prefs
    series_refs = db.query_series_refs(
        book.series_s, config.ignored_searchterms_sl)
    series_refs = dbutils.filter_series_refs(
        series_refs,
        config.ignored_publishers_sl,
        config.ignored_before_year_n,
        config.ignored_after_year_n,
        config.never_ignore_threshold_n)

    # 2. obtain the first, second, and third best matching SeriesRefs for
    #    the given book, if there are any.
    primary = None
    secondary = None
    tertiary = None
    if len(series_refs) > 0:
        mscore = MatchScore()

        def find_best_score(refs):
            return reduce(
                lambda x, y: x if mscore.compute_n(book, x)
                >= mscore.compute_n(book, y) else y, refs) if refs else None

        primary = find_best_score(series_refs)
        if primary:
            series_refs.remove(primary)
            secondary = find_best_score(series_refs)
            if secondary:
                series_refs.remove(secondary)
                tertiary = find_best_score(series_refs)

    # 3. if our book is the first (or unknown) issue, figure out if the best
    #    matching series has a similar cover to the second or third best.
    #    if it does, we're probably dealing with a trade paperback and a
    #    regular issue, and we can't find the best series reliably, so we bail
    is_first_issue = (lambda i: not i or
                      (utils.is_number(i) and float(i) == 1.0))(book.issue_num_s)
    if is_first_issue and primary and secondary:
        too_similar = False
        SIMILARITY_THRESHOLD = __MATCH_THRESHOLD - 0.10
        hash1 = __get_remote_hash(primary)
        hash2 = __get_remote_hash(secondary)
        if imagehash.similarity(hash1, hash2) > SIMILARITY_THRESHOLD:
            too_similar = True
        elif tertiary:
            hash3 = __get_remote_hash(tertiary)
            if imagehash.similarity(hash1, hash3) > SIMILARITY_THRESHOLD:
                too_similar = True
        primary = None if too_similar else primary

    return primary
def printable_elem(s):
    # check if integer
    try:
        int(s)
        return str(s)
    except (TypeError, ValueError):
        # check if float
        if is_number(s):
            return "%.4f" % float(s)
        else:
            return s
def is_year(v):
    if len(v) > 30:
        return False
    if len(v) == 4 and utils.is_number(v):
        v = utils.flt(v)
        return v > 1900 and v < 2050
    else:
        matched = re.search("19[0-9]{2}[^0-9]+", v) \
            or re.search("20[0-9]{2}[^0-9]+", v) \
            or re.search("[0189][0-9]-[0189][0-9]", v)
        return matched is not None
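
# Illustrative checks for is_year (hedged: assumes utils.flt parses "1999"
# to 1999.0 and utils.is_number accepts digit strings):
assert is_year("1999")            # four digits inside the 1900-2050 window
assert not is_year("2051")        # four digits outside the window
assert is_year("1999-2000")       # "19xx" followed by a non-digit
assert not is_year("hello")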
def post(self, song_id):
    song = Song.get_by_id(int(song_id)) if utils.is_number(song_id) else None
    if not song:
        self.error(404)
        return
    audiolink = self.request.get("audiolink")
    performer = self.request.get("performer")
    if audiolink:
        song.add_recording(audiolink, performer)
        self.redirect("/song/%s" % str(song.key().id()))
    else:
        error = u"The link cannot be empty"
        self.render("recording.html", song=song, audiolink=audiolink,
                    performer=performer, error=error)
def get_post_id_from_text(s):
    if is_number(s):
        return int(s)
    rt = route_from(s, method="GET")
    if rt[0] == 'views.render_post':
        return rt[1]['post_id']
    elif rt[0] == 'views.post_page':
        try:
            return Post.query.filter(Post.url == rt[1]['post_url']) \
                .with_entities(Post.id).one()[0]
        except NoResultFound:
            return None
    elif rt[0] == 'views.index':
        return Post.root_post_id()
def printable_elem(s):
    ''' Return printable version of s. '''
    # check if integer
    try:
        int(s)
        return str(s)
    except (TypeError, ValueError):
        # check if float
        if is_number(s):
            return "%.4f" % float(s)
        else:
            return s
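
# Expected behavior of printable_elem (a quick sketch; the %.4f branch only
# fires for values that fail int() but still parse as floats):
assert printable_elem(3) == "3"
assert printable_elem("42") == "42"
assert printable_elem("2.5") == "2.5000"
assert printable_elem("abc") == "abc"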
def _append_token_by_type(self, value):
    if utils.is_string(value):
        self._append_string(value)
    elif utils.is_number(value):
        self._append_number(value)
    elif value is None:
        self._append_null()
    elif value is True:
        self._append_true()
    elif value is False:
        self._append_false()
    elif utils.is_json_array(value):
        self._read_json_array(value)
    elif utils.is_dict(value):
        self._read_dict(value)
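
# Hypothetical sketches of the utils predicates this dispatcher relies on;
# the real utils module is not shown, so these are assumptions.  Note that
# this is_number variant must reject bools, since the dispatcher checks
# numbers before the True/False branches:
def is_string(value):
    return isinstance(value, str)

def is_number(value):
    # bool is a subclass of int, so exclude it explicitly
    return isinstance(value, (int, float)) and not isinstance(value, bool)

def is_json_array(value):
    return isinstance(value, (list, tuple))

def is_dict(value):
    return isinstance(value, dict)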
def _check_magic_file(path_s):
    ''' ComicVine implementation of the identically named method in db.py '''
    series_key_s = None
    file_s = None
    try:
        # 1. get the directory to search for a cvinfo file in, or None
        dir_s = path_s if path_s and Directory.Exists(path_s) else \
            Path.GetDirectoryName(path_s) if path_s else None
        dir_s = dir_s if dir_s and Directory.Exists(dir_s) else None
        if dir_s:
            # 2. search in that directory for a properly named cvinfo file
            #    note that Windows filenames are not case sensitive.
            for f in [dir_s + "\\" + x for x in ["cvinfo.txt", "cvinfo"]]:
                if File.Exists(f):
                    file_s = f

        # 3. if we found a file, read its contents in, and parse the
        #    comicvine series id out of it, if possible.
        if file_s:
            with StreamReader(file_s, Encoding.UTF8, False) as sr:
                line = sr.ReadToEnd()
            line = line.strip() if line else line
            match = re.match(r"^.*?\b(49|4050)-(\d{2,})\b.*$", line)
            line = match.group(2) if match else line
            if utils.is_number(line):
                series_key_s = utils.sstr(int(line))
    except:
        log.debug_exc("bad cvinfo file: " + sstr(file_s))

    # 4. did we find a series key?  if so, query comicvine to build a proper
    #    SeriesRef object for that series key.
    series_ref = None
    if series_key_s:
        try:
            dom = cvconnection._query_series_details_dom(
                __api_key, utils.sstr(series_key_s))
            num_results_n = int(dom.number_of_total_results)
            series_ref = \
                __volume_to_seriesref(dom.results) if num_results_n == 1 else None
        except:
            log.debug_exc("error getting SeriesRef for: " + sstr(series_key_s))
    if file_s and not series_ref:
        log.debug("ignoring bad cvinfo file: ", sstr(file_s))
    return series_ref  # may be None!
def __extract_series_ref(self):
    '''
    This method attempts to rebuild the SeriesRef that the user chose the
    last time that they scraped this comic.  If it can do so, it will
    return that SeriesRef, otherwise it will return None.
    '''
    # in this method, it's easier to work with tags as a single string
    bd = self.__bookdata
    retval = None
    series_key = int(bd.series_key_s) if \
        utils.is_number(bd.series_key_s) else None
    if series_key is not None:
        # found a key tag!  convert to a sparse SeriesRef
        retval = SeriesRef(series_key, None, -1, '', -1, None)
    return retval
def set_data(headers, data):
    """
    Set data in the data table.  Set region, period properties if
    identified in series.
    """
    start_col = headers["row_series_count"]
    series = headers["series"]

    def set_key(d, s, key, idx):
        if s.get("type") == key:
            d[key] = s["values"][idx]
            return True
        return False

    def set_keys_or_head(d, s, row_idx, col_idx, heads):
        idx = row_idx if s["position"] == "row" else col_idx
        region = set_key(d, s, "region", idx)
        period = set_key(d, s, "period", idx)
        # add as head if neither region nor period
        if not (region or period):
            heads.append(s["values"][idx])

    for row_idx, row in enumerate(data[1:]):
        for col_idx, value in enumerate(row[start_col:]):
            if not utils.is_number(value):
                continue
            heads = []
            d = {"value": value, "dataset": headers["title"]}
            for s in series:
                set_keys_or_head(d, s, row_idx, col_idx, heads)
            db.insert("data", d)
            d_id = db.sql("last_insert_id()")[0][0]
            for h in heads:
                db.insert("data_head", {"data": d_id, "head": h})
def get_data(self, content=None):
    if content is None:
        d = pq(url=self.url, opener=self.opener)
    else:
        d = pq(content)

    # Product name
    product_name = d(".prod:first").find("h2").text()

    # Product image
    dirty_link = d(".contenedor_foto_producto>a").attr("href")
    clean_link = re.search(
        r"\('(?P<url>https?://[^\s]+)',", dirty_link).group("url")

    # Product categories
    categories = d(".btn_navegacion").find("td:nth-child(2)").find(".link_nav")[1:-1]
    categories = [d(a).text().strip() for a in categories]

    # Product price (arrives as "$ 33.00")
    price_with_currency = d('.precio_producto').find("span").text()
    price_str = price_with_currency.strip().split(" ")[1]
    assert is_number(price_str)
    price = to_decimal(price_str)

    return {'name': product_name, "price": price,
            "image_url": clean_link, "categories": categories}
def __parse_extra_details_from_path(self):
    '''
    Series name, issue number, and volume year are all critical bits of data
    for scraping purposes--yet fresh, unscraped files often do not have them.
    So when some or all of these values are missing, this method tries to
    fill them in by parsing them out of the comic's path.
    '''

    bd = self.__bookdata
    no_series = BookData.blank("series_s") == bd.series_s
    no_issuenum = BookData.blank("issue_num_s") == bd.issue_num_s
    no_year = BookData.blank("pub_year_n") == bd.pub_year_n
    if no_series or no_issuenum or no_year:
        if bd.path_s:
            # 1. at least one detail is missing, and we have a path name to
            #    work with, so let's try to extract some details that way.
            filename = Path.GetFileName(bd.path_s)
            config = self.__scraper.config
            regex = config.alt_search_regex_s
            extracted = None

            # 2. first, extract using the user specified regex, if there is one
            if regex:
                extracted = fnameparser.regex(filename, regex)
            if not extracted:
                extracted = fnameparser.extract(filename)  # never fails

            # 3. now that we have some extracted data, use it to fill in
            #    any gaps in our details.
            if no_series:
                bd.series_s = extracted[0]
            if no_issuenum:
                bd.issue_num_s = extracted[1]
            if no_year:
                bd.pub_year_n = int(extracted[2]) \
                    if is_number(extracted[2]) \
                    else BookData.blank("pub_year_n")
def get_data(self, content=None):
    if content is None:
        d = pq(url=self.url, opener=self.opener)
    else:
        d = pq(content)

    # Product name
    product_name = d("h1").text()

    # Product image
    incomplete_link = d("img[class=imgproducto]").attr("src")
    clean_link = "http://" + urlparse(self.url).hostname + incomplete_link

    # Product categories
    categories = d(".barnavega").text().split("/")[1:-1]
    categories = [a.strip() for a in categories]

    # Product price (arrives as "$U 33.00")
    price_with_currency = d('h1').parent().find("span").eq(1).text()
    price_str = price_with_currency.strip().split(" ")[1]
    assert is_number(price_str)
    price = to_decimal(price_str)

    return {'name': product_name, "price": price,
            "image_url": clean_link, "categories": categories}
def __extract_issue_ref(self):
    '''
    This method attempts to rebuild the IssueRef that the user chose the
    last time that they scraped this comic.  If it can do so, it will
    return that IssueRef.  If not, it will return None, or the string
    "skip" (see below).

    If the user has manually added the magic CVDBSKIP flag to the tags or
    notes for this book, then this method will return the string "skip",
    which should be interpreted as "never scrape this book".
    '''
    # in this method, it's easier to work with tags as a single string
    bd = self.__bookdata
    tagstring = ', '.join(bd.tags_sl)

    # check for the magic CVDBSKIP skip flag
    skip_found = re.search(r'(?i)' + ComicBook.CVDBSKIP, tagstring)
    if not skip_found:
        skip_found = re.search(r'(?i)' + ComicBook.CVDBSKIP, bd.notes_s)
    retval = "skip" if skip_found else None

    if retval is None:
        # if no skip tag, see if there's a key tag in the tags or notes
        issue_key = db.parse_key_tag(tagstring)
        if issue_key is None:
            issue_key = db.parse_key_tag(bd.notes_s)
        if issue_key is None:
            issue_key = int(bd.issue_key_s) if \
                utils.is_number(bd.issue_key_s) else None
        if issue_key is not None:
            # found a key tag!  convert to an IssueRef
            retval = IssueRef(
                self.issue_num_s, issue_key,
                self.__bookdata.title_s, self.__bookdata.cover_url_s)

    return retval
def _pixelsize(parms, options, option_errors):
    if 'resize' in parms and parms['resize'] == 'on':
        # set the resize status True in the product options
        options['resize'] = True

        # Handle pixel_size_units validation
        if 'pixel_size_units' not in parms or parms['pixel_size_units'] is None:
            option_errors.append("Target pixel size units not recognized")
        else:
            units = parms['pixel_size_units'].strip()
            if units == 'dd' or units == 'meters':
                options['pixel_size_units'] = units
            else:
                option_errors.append(
                    "Unknown pixel size units provided:%s" % units)

        # Now validate the supplied pixel_size.  Must be between 30 and 1000
        # meters, or .0002695 to .0089831 dd.
        if 'pixel_size' not in parms or parms['pixel_size'] is None:
            option_errors.append("Please enter desired pixel size")
        else:
            pixel_size = parms['pixel_size']
            if not utils.is_number(pixel_size):
                option_errors.append(
                    "Please enter a pixel size between 30 and 1000 meters "
                    "or .0002695 to .0089831 dd")
            elif options.get('pixel_size_units') is not None:
                # (.get() here so a bad/missing units value reported above
                # does not raise a KeyError)
                pixel_size = float(pixel_size)
                if options['pixel_size_units'] == 'meters':
                    if 30 <= pixel_size <= 1000:
                        options['pixel_size'] = pixel_size
                    else:
                        option_errors.append(
                            "Please enter a pixel size between "
                            "30 and 1000 meters")
                else:
                    if .0002695 <= pixel_size <= .0089831:
                        options['pixel_size'] = pixel_size
                    else:
                        option_errors.append(
                            "Please enter a pixel size between .0002695 "
                            "and .0089831 decimal degrees")
def get_data(self, content=None):
    if content is None:
        d = pq(url=self.url, opener=self.opener)
    else:
        d = pq(content)

    # Product name
    product_name = d("h1:first").text()
    product_name = re.sub(' +', ' ', product_name)
    # if d("#ctl00_ContentPlaceHolder1_lblUnitType").text().lower() == 'kg':
    #     product_name += " 1 Kg"

    # Product image
    incomplete_link = d("#ctl00_ContentPlaceHolder1_imgProductImage").attr("src")
    clean_link = urljoin(self.url, incomplete_link[1:])

    # Product categories
    categorias = d("#ctl00_ContentPlaceHolder1_lblMap").text().split(" -> ")[1:-1]

    # Product price (arrives as "$ 33.00")
    price_with_currency = d('#ctl00_ContentPlaceHolder1_lblPrecioMA').text()
    price_str = price_with_currency.strip().split(" ")[1]
    assert is_number(price_str)
    price = to_decimal(price_str)

    return {'name': product_name, "price": price,
            "image_url": clean_link, "categories": categorias}
def stat_values_parser(self, cols, season):
    """
    Loops through a list of columns and returns a list of values,
    converting or skipping the col strings based on their content.

    :param cols: List of column values in a single gamelog.
    :param season: Season of the gamelog. Either 'regular' or 'playoff'.
    :returns: Parsed stat values.
    """
    stat_values = self.initialize_stat_values(season)
    for i, col in enumerate(cols):
        text = str(col.getText())
        # Date
        if i == 2:
            stat_values.append(datetime.strptime(text, '%Y-%m-%d'))
        # Home
        elif i == 5:
            stat_values.append(text != '@')
        # WinLoss
        elif i == 7:
            plusminus = re.compile(r'.*?\((.*?)\)')
            stat_values.append(float(plusminus.match(text).group(1)))
        # Percentages
        # Skip them because they can be calculated manually.
        elif i in {12, 15, 18}:
            pass
        # PlusMinus
        elif i == 29:
            stat_values.append(0 if text == '' else float(text))
        # Number
        elif is_number(text):
            stat_values.append(float(text))
        # Text
        else:
            stat_values.append(text)
    return stat_values
def parse_limit(limit_text):
    limit = limit_text.split(", ")
    if len(limit) == 2:
        lower, upper = limit
        if is_number(lower) and is_number(upper):
            return float(lower), float(upper)
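
# Example inputs for parse_limit (it falls through to an implicit None when
# the text is not a "lower, upper" pair of numbers):
assert parse_limit("1.5, 9.0") == (1.5, 9.0)
assert parse_limit("1.5") is None
assert parse_limit("a, b") is None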
def main(ini_path=None, overwrite_flag=False, delay_time=0, gee_key_file=None,
         max_ready=-1, reverse_flag=False):
    """Compute monthly Tcorr images by WRS2 tile

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files (the default is False).
    delay_time : float, optional
        Delay time in seconds between starting export tasks (or checking the
        number of queued tasks, see "max_ready" parameter).  The default is 0.
    gee_key_file : str, None, optional
        Earth Engine service account JSON key file (the default is None).
    max_ready: int, optional
        Maximum number of queued "READY" tasks.  The default is -1, which
        implies no limit to the number of tasks that will be submitted.
    reverse_flag : bool, optional
        If True, process WRS2 tiles in reverse order.

    """
    logging.info('\nCompute monthly Tcorr images by WRS2 tile')

    # TODO: Read from INI
    study_area_extent = [-124, 35, -119, 42]
    # study_area_extent = [-121.7, 39, -121.7, 39]
    # study_area_extent = None

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    tmax_name = ini[model_name]['tmax_source']

    export_id_fmt = 'tcorr_scene_{product}_{wrs2}_month{month:02d}'
    asset_id_fmt = '{coll_id}/{wrs2}_month{month:02d}'

    tcorr_monthly_coll_id = '{}/{}_monthly'.format(
        ini['EXPORT']['export_coll'], tmax_name.lower())

    wrs2_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/wrs2_descending_custom'
    wrs2_tile_field = 'WRS2_TILE'
    # wrs2_path_field = 'PATH'
    # wrs2_row_field = 'ROW'

    try:
        wrs2_tiles = str(ini['INPUTS']['wrs2_tiles'])
        wrs2_tiles = [x.strip() for x in wrs2_tiles.split(',')]
        wrs2_tiles = sorted([x.lower() for x in wrs2_tiles if x])
    except KeyError:
        wrs2_tiles = []
        logging.debug('  wrs2_tiles: not set in INI, defaulting to []')
    except Exception as e:
        raise e

    try:
        study_area_extent = str(ini['INPUTS']['study_area_extent']) \
            .replace('[', '').replace(']', '').split(',')
        study_area_extent = [float(x.strip()) for x in study_area_extent]
    except KeyError:
        study_area_extent = None
        logging.debug('  study_area_extent: not set in INI')
    except Exception as e:
        raise e

    # TODO: Add try/except blocks and default values?
    collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]
    cloud_cover = float(ini['INPUTS']['cloud_cover'])
    min_pixel_count = float(ini['TCORR']['min_pixel_count'])
    min_scene_count = float(ini['TCORR']['min_scene_count'])

    if (tmax_name.upper() == 'CIMIS' and
            ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (tmax_name.upper() == 'DAYMET' and
            ini['INPUTS']['end_date'] > '2018-12-31'):
        logging.warning('\nDAYMET is not currently available past 2018-12-31, '
                        'using median Tmax values\n')
        # sys.exit()
    # elif (tmax_name.upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    # Extract the model keyword arguments from the INI
    # Set the property name to lower case and try to cast values to numbers
    model_args = {
        k.lower(): float(v) if utils.is_number(v) else v
        for k, v in dict(ini[model_name]).items()}
    # et_reference_args = {
    #     k: model_args.pop(k)
    #     for k in [k for k in model_args.keys()
    #               if k.startswith('et_reference_')]}

    logging.info('\nInitializing Earth Engine')
    if gee_key_file:
        logging.info(
            '  Using service account key file: {}'.format(gee_key_file))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file),
                      use_cloud_api=True)
    else:
        ee.Initialize(use_cloud_api=True)

    logging.debug('\nTmax properties')
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                   'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
    tmax_coll = ee.ImageCollection(tmax_coll_id)
    tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source: {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    logging.debug('\nExport properties')
    export_info = utils.get_info(ee.Image(tmax_mask))
    if 'daymet' in tmax_name.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
        # export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
        # export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]]
    export_geom = ee.Geometry.Rectangle(
        export_extent, proj=export_crs, geodesic=False)
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))

    if study_area_extent is None:
        if 'daymet' in tmax_name.lower():
            # CGM - For now force DAYMET to a slightly smaller "CONUS" extent
            study_area_extent = [-125, 25, -65, 49]
            # study_area_extent = [-125, 25, -65, 52]
        elif 'cimis' in tmax_name.lower():
            study_area_extent = [-124, 35, -119, 42]
        else:
            # TODO: Make sure output from bounds is in WGS84
            study_area_extent = tmax_mask.geometry().bounds().getInfo()
        logging.debug(f'\nStudy area extent not set in INI, '
                      f'default to {study_area_extent}')
    study_area_geom = ee.Geometry.Rectangle(
        study_area_extent, proj='EPSG:4326', geodesic=False)

    if not ee.data.getInfo(tcorr_monthly_coll_id):
        logging.info('\nExport collection does not exist and will be built'
                     '\n  {}'.format(tcorr_monthly_coll_id))
        input('Press ENTER to continue')
        ee.data.createAsset({'type': 'IMAGE_COLLECTION'},
                            tcorr_monthly_coll_id)

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = utils.get_ee_assets(tcorr_monthly_coll_id)
    # if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
    #     pprint.pprint(asset_list[:10])

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    # if cron_flag:
    #     # CGM - This seems like a silly way of getting the date as a datetime
    #     # Why am I doing this and not using the commented out line?
    #     end_dt = datetime.date.today().strftime('%Y-%m-%d')
    #     end_dt = datetime.datetime.strptime(end_dt, '%Y-%m-%d')
    #     end_dt = end_dt + datetime.timedelta(days=-4)
    #     # end_dt = datetime.datetime.today() + datetime.timedelta(days=-1)
    #     start_dt = end_dt + datetime.timedelta(days=-64)
    # else:
    start_dt = datetime.datetime.strptime(
        ini['INPUTS']['start_date'], '%Y-%m-%d')
    end_dt = datetime.datetime.strptime(
        ini['INPUTS']['end_date'], '%Y-%m-%d')
    start_date = start_dt.strftime('%Y-%m-%d')
    end_date = end_dt.strftime('%Y-%m-%d')
    next_date = (end_dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    logging.debug('Start Date: {}'.format(start_date))
    logging.debug('End Date:   {}\n'.format(end_date))

    # Limit by year and month
    try:
        month_list = sorted(list(utils.parse_int_set(ini['TCORR']['months'])))
    except:
        logging.info('\nTCORR "months" parameter not set in the INI,'
                     '\n  Defaulting to all months (1-12)\n')
        month_list = list(range(1, 13))
    try:
        year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
    except:
        logging.info('\nTCORR "years" parameter not set in the INI,'
                     '\n  Defaulting to all available years\n')
        year_list = []

    # Get the list of WRS2 tiles that intersect the data area and study area
    wrs2_coll = ee.FeatureCollection(wrs2_coll_id) \
        .filterBounds(export_geom) \
        .filterBounds(study_area_geom)
    if wrs2_tiles:
        wrs2_coll = wrs2_coll.filter(
            ee.Filter.inList(wrs2_tile_field, wrs2_tiles))
    wrs2_info = wrs2_coll.getInfo()['features']

    for wrs2_ftr in sorted(wrs2_info,
                           key=lambda k: k['properties']['WRS2_TILE'],
                           reverse=reverse_flag):
        wrs2_tile = wrs2_ftr['properties'][wrs2_tile_field]
        logging.info('{}'.format(wrs2_tile))

        wrs2_path = int(wrs2_tile[1:4])
        wrs2_row = int(wrs2_tile[5:8])
        # wrs2_path = wrs2_ftr['properties']['PATH']
        # wrs2_row = wrs2_ftr['properties']['ROW']

        for month in month_list:
            logging.info('Month: {}'.format(month))

            export_id = export_id_fmt.format(
                product=tmax_name.lower(), wrs2=wrs2_tile, month=month)
            logging.debug('  Export ID: {}'.format(export_id))

            asset_id = asset_id_fmt.format(
                coll_id=tcorr_monthly_coll_id, wrs2=wrs2_tile, month=month)
            logging.debug('  Asset ID: {}'.format(asset_id))

            if overwrite_flag:
                if export_id in tasks.keys():
                    logging.debug('  Task already submitted, cancelling')
                    ee.data.cancelTask(tasks[export_id]['id'])
                # This is intentionally not an "elif" so that a task can be
                # cancelled and an existing image/file/asset can be removed
                if asset_id in asset_list:
                    logging.debug('  Asset already exists, removing')
                    ee.data.deleteAsset(asset_id)
            else:
                if export_id in tasks.keys():
                    logging.debug('  Task already submitted, exiting')
                    continue
                elif asset_id in asset_list:
                    logging.debug('  Asset already exists, skipping')
                    continue

            # CGM: I couldn't find a way to build this from the Collection class
            # TODO: Will need to be changed/updated for SR collection
            # TODO: Add code to handle real time collections
            landsat_coll = ee.ImageCollection([])
            if 'LANDSAT/LC08/C01/T1_TOA' in collections:
                l8_coll = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
                    .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
                    .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
                    .filterMetadata('CLOUD_COVER_LAND', 'less_than',
                                    cloud_cover) \
                    .filterMetadata('DATA_TYPE', 'equals', 'L1TP') \
                    .filter(ee.Filter.gt('system:time_start',
                                         ee.Date('2013-03-24').millis())) \
                    .filter(ee.Filter.calendarRange(month, month, 'month'))
                #     .filterDate(start_date, next_date)
                landsat_coll = landsat_coll.merge(l8_coll)
            if 'LANDSAT/LE07/C01/T1_TOA' in collections:
                l7_coll = ee.ImageCollection('LANDSAT/LE07/C01/T1_TOA') \
                    .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
                    .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
                    .filterMetadata('CLOUD_COVER_LAND', 'less_than',
                                    cloud_cover) \
                    .filterMetadata('DATA_TYPE', 'equals', 'L1TP') \
                    .filter(ee.Filter.calendarRange(month, month, 'month'))
                #     .filterDate(start_date, next_date)
                landsat_coll = landsat_coll.merge(l7_coll)
            if 'LANDSAT/LT05/C01/T1_TOA' in collections:
                l5_coll = ee.ImageCollection('LANDSAT/LT05/C01/T1_TOA') \
                    .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
                    .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
                    .filterMetadata('CLOUD_COVER_LAND', 'less_than',
                                    cloud_cover) \
                    .filterMetadata('DATA_TYPE', 'equals', 'L1TP') \
                    .filter(ee.Filter.lt('system:time_start',
                                         ee.Date('2011-12-31').millis())) \
                    .filter(ee.Filter.calendarRange(month, month, 'month'))
                #     .filterDate(start_date, next_date)
                landsat_coll = landsat_coll.merge(l5_coll)
            # if 'LANDSAT/LT04/C01/T1_TOA' in collections:
            #     l4_coll = ee.ImageCollection('LANDSAT/LT04/C01/T1_TOA') \
            #         .filterMetadata('WRS_PATH', 'equals', wrs2_path) \
            #         .filterMetadata('WRS_ROW', 'equals', wrs2_row) \
            #         .filterMetadata('CLOUD_COVER_LAND', 'less_than',
            #                         cloud_cover) \
            #         .filterMetadata('DATA_TYPE', 'equals', 'L1TP') \
            #         .filter(ee.Filter.calendarRange(month, month, 'month'))
            #     #     .filterDate(start_date, next_date)
            #     landsat_coll = landsat_coll.merge(l4_coll)

            def tcorr_img_func(landsat_img):
                # TODO: Will need to be changed for SR
                t_obj = ssebop.Image.from_landsat_c1_toa(
                    landsat_img, **model_args)
                t_stats = ee.Dictionary(t_obj.tcorr_stats) \
                    .combine({'tcorr_p5': 0, 'tcorr_count': 0},
                             overwrite=False)
                tcorr = ee.Number(t_stats.get('tcorr_p5'))
                count = ee.Number(t_stats.get('tcorr_count'))
                return tmax_mask.add(tcorr) \
                    .rename(['tcorr']) \
                    .set({
                        'system:time_start': ee.Image(landsat_img).get(
                            'system:time_start'),
                        'tcorr_value': tcorr,
                        'tcorr_pixel_count': count,
                        'scene_id': t_obj._scene_id,
                    })

            # Filter the Tcorr image collection based on the pixel counts
            tcorr_coll = ee.ImageCollection(landsat_coll.map(tcorr_img_func)) \
                .filterMetadata('tcorr_pixel_count', 'not_less_than',
                                min_pixel_count)

            # Use a common reducer for the image and property stats
            reducer = ee.Reducer.median() \
                .combine(ee.Reducer.count(), sharedInputs=True)

            # Compute stats from the collection images
            # This might be used when Tcorr is spatial
            # tcorr_img = tcorr_coll.reduce(reducer).rename(['tcorr', 'count'])

            # Compute stats from the image properties
            tcorr_stats = ee.List(tcorr_coll.aggregate_array('tcorr_value')) \
                .reduce(reducer)
            tcorr_stats = ee.Dictionary(tcorr_stats) \
                .combine({'median': 0, 'count': 0}, overwrite=False)
            tcorr = ee.Number(tcorr_stats.get('median'))
            count = ee.Number(tcorr_stats.get('count'))
            index = count.lt(min_scene_count).multiply(8).add(1)
            # index = ee.Algorithms.If(count.gte(min_scene_count), 1, 9)

            # Clip the mask image to the Landsat footprint
            # Change mask values to 1 if count >= threshold
            # Mask values of 0 will be set to nodata
            mask_img = tmax_mask.add(count.gte(min_scene_count)) \
                .clip(ee.Geometry(wrs2_ftr['geometry']))
            output_img = ee.Image(
                    [mask_img.multiply(tcorr), mask_img.multiply(count)]) \
                .rename(['tcorr', 'count']) \
                .updateMask(mask_img.unmask(0))

            # # Write an empty image if the pixel count is too low
            # # CGM: Check/test if this can be combined into a single If()
            # tcorr_img = ee.Algorithms.If(
            #     count.gte(min_scene_count),
            #     tmax_mask.add(tcorr), tmax_mask.updateMask(0))
            # count_img = ee.Algorithms.If(
            #     count.gte(min_scene_count),
            #     tmax_mask.add(count), tmax_mask.updateMask(0))
            # # Clip to the Landsat image footprint
            # output_img = ee.Image([tcorr_img, count_img]) \
            #     .rename(['tcorr', 'count']) \
            #     .clip(ee.Geometry(wrs2_ftr['geometry']))
            # # Clear the transparency mask
            # output_img = output_img.updateMask(output_img.unmask(0))

            output_img = output_img.set({
                'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
                'model_name': model_name,
                'model_version': ssebop.__version__,
                'month': int(month),
                # 'system:time_start': utils.millis(start_dt),
                'tcorr_value': tcorr,
                'tcorr_index': index,
                'tcorr_scene_count': count,
                'tmax_source': tmax_source.upper(),
                'tmax_version': tmax_version.upper(),
                'wrs2_path': wrs2_path,
                'wrs2_row': wrs2_row,
                'wrs2_tile': wrs2_tile,
                'years': ','.join(map(str, year_list)),
                # 'year_start': year_list[0],
                # 'year_end': year_list[-1],
            })
            # pprint.pprint(output_img.getInfo())
            # input('ENTER')

            logging.debug('  Building export task')
            task = ee.batch.Export.image.toAsset(
                image=output_img,
                description=export_id,
                assetId=asset_id,
                crs=export_crs,
                crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
                dimensions='{0}x{1}'.format(*export_shape),
            )

            logging.info('  Starting export task')
            utils.ee_task_start(task)

            # Pause before starting the next export task
            utils.delay_task(delay_time, max_ready)
            logging.debug('')
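
# A minimal illustration of the INI-casting pattern used for model_args
# above (the keys and values here are hypothetical; is_number is assumed to
# be a float-parsing predicate like the sketch at the top of this section):
ini_section = {'Tdiff_Threshold': '15', 'Tmax_Source': 'DAYMET_MEDIAN_V2'}
model_args = {k.lower(): float(v) if is_number(v) else v
              for k, v in ini_section.items()}
assert model_args == {'tdiff_threshold': 15.0,
                      'tmax_source': 'DAYMET_MEDIAN_V2'}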
def _convert_to_mim_number(self, x):
    tmp = [i for i in x.split('|') if i.startswith('OMIM:')]
    tmp = [t.split(':')[1] for t in tmp]
    tmp = [int(t) for t in tmp if is_number(t)]
    return tmp
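
# Standalone check of the OMIM parsing above (hedged: assumes is_number
# returns True for digit strings).  Non-OMIM prefixes and non-numeric
# suffixes are dropped:
def convert_to_mim_number(x):
    tmp = [i for i in x.split('|') if i.startswith('OMIM:')]
    tmp = [t.split(':')[1] for t in tmp]
    return [int(t) for t in tmp if is_number(t)]

assert convert_to_mim_number('OMIM:154700|ORPHA:558|OMIM:PS154700') == [154700]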
'''
Created on Feb 6, 2018

@author: mmp
'''
import sys
import utils


def sum_all(valor1, valor2):
    soma = 0
    for i in range(valor1, valor2):
        soma += i
    if valor1 <= valor2:
        soma += valor2
    return soma


if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('usage: module.py <int1> <int2>')
        sys.exit(0)

    print(sys.argv)
    # NOTE: this utils.is_number variant is expected to return the parsed
    # number (or None), not a boolean like the other call sites above.
    valor1 = utils.is_number(sys.argv[1])
    valor2 = utils.is_number(sys.argv[2])
    if valor1 is None or valor2 is None:
        print('usage: module.py <int1> <int2>')
        sys.exit(0)
    print(sum_all(valor1, valor2))
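
# Quick sanity checks for sum_all (the sum is inclusive of both endpoints):
assert sum_all(1, 4) == 10   # 1 + 2 + 3 + 4
assert sum_all(5, 5) == 5    # single-value range
assert sum_all(4, 1) == 0    # empty range when valor1 > valor2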
# Show the current configuration
print("Current transport is: %s" % mystreamnetwork)

# Choose a new transport
print("Choose a new transport:")
print("1. Plain TCP")
print("2. HTTP masquerade")
print("3. WebSocket traffic")
print("4. Plain mKCP")
print("5. mKCP disguised as FaceTime calls")
print("6. mKCP disguised as BitTorrent traffic")
print("7. mKCP disguised as WeChat video traffic")
newstreamnetwork = raw_input()
if not is_number(newstreamnetwork):
    print("Please enter a number!")
    exit()
elif not (0 < int(newstreamnetwork) < 8):
    print("Please enter a number between 1 and 7!")
else:
    if newstreamnetwork == "1":
        writejson.WriteStreamNetwork("tcp", "none")
    elif newstreamnetwork == "2":
        print("Enter the domain to masquerade as (no http needed):")
        host = raw_input()
        writejson.WriteStreamNetwork("tcp", str(host))
    elif newstreamnetwork == "3":
        print("Enter the domain bound to your server (no http needed; if "
              "left empty, the server's local address is used, which makes "
              "it easy to have a web server such as Nginx/Caddy/Apache "
              "handle traffic splitting):")
def __encode(self, obj):
    s = ''
    tab = self.tab
    newline = self.newline
    tp = type(obj)
    if obj is None:
        s += 'nil'
    elif tp in [str, unicode]:
        # handle escape characters
        obj = obj.replace('\\', r'\\')
        obj = obj.replace('\"', r'\"')
        obj = obj.replace('\n', r'\n')
        obj = obj.replace('\t', r'\t')
        obj = obj.replace('\r', '')
        s += ("'" + obj + "'")
    elif tp in [int, float, long, complex]:
        s += str(obj)
    elif tp is bool:
        s += str(obj).lower()
    elif tp in [dict]:
        self.depth += 1
        if len(obj) == 0:
            newline = tab = ''
        dp = tab * self.depth
        s += "%s{%s" % (tab * (self.depth - 2), newline)
        ls = []
        for k, v in obj.iteritems():
            if utils.is_number(k):
                item = dp + '[%s] = %s' % (k, self.__encode(v))
            else:
                item = dp + '%s = %s' % (k, self.__encode(v))
            ls.append(item)
        s += (',%s' % newline).join(ls)
        self.depth -= 1
        s += "%s%s}" % (newline, tab * self.depth)
    elif tp in [list, tuple]:
        self.depth += 1
        if len(obj) == 0 or len(
                filter(
                    lambda x: type(x) in (int, float, long) or
                    (type(x) in [str, unicode] and len(x) < 10),
                    obj)) == len(obj):
            newline = tab = ''
        dp = tab * self.depth
        s += "%s{%s" % (tab * (self.depth - 2), newline)
        ls = []
        for v in obj:
            item = dp + self.__encode(v)
            ls.append(item)
        s += (',%s' % newline).join(ls)
        self.depth -= 1
        s += "%s%s}" % (newline, tab * self.depth)
    return s
# -*- coding: utf-8 -*-
import readjson
import writejson
from utils import is_number

# Main program
print("Current security method is: %s" % str(readjson.ConfSecurity))
print("Choose a new security method:")
print("1.aes-128-cfb")
print("2.aes-128-gcm")
print("3.chacha20-poly1305")
print("4.auto")
print("5.none")
newsecurity = raw_input()
if not is_number(newsecurity):
    print("Invalid input; please check that the input is a number")
    exit()
else:
    if newsecurity == "1":
        writejson.WriteSecurity("aes-128-cfb")
    elif newsecurity == "2":
        writejson.WriteSecurity("aes-128-gcm")
    elif newsecurity == "3":
        writejson.WriteSecurity("chacha20-poly1305")
    elif newsecurity == "4":
        writejson.WriteSecurity("auto")
    elif newsecurity == "5":
        writejson.WriteSecurity("none")
    else:
        print("Please enter a number between 1 and 5!")
def get_feature_index_and_count_from_history(self, history: History):
    if utils.is_number(history.cword):
        return 0, self.dict.get(self.dict_key, 0)
    else:
        return self.INVALID_IDX, self.INVALID_VAL