def find_inmates(self, exclude_list=None, number_to_fetch=MAX_INMATE_NUMBER, start_date=None):
    if exclude_list is None:
        exclude_list = []
    if start_date is None:
        start_date = yesterday()
    self._put(self._find_inmates, {'excluded_inmates': exclude_list,
                                   'number_to_fetch': number_to_fetch,
                                   'start_date': start_date})
def save(self):
    try:
        inmate_housing_location = self._inmate_details.housing_location()
        if inmate_housing_location != '':
            try:
                self._housing_location, created_location = \
                    HousingLocation.objects.get_or_create(housing_location=inmate_housing_location)
                if created_location:
                    self._process_housing_location()
                    self._housing_location.save()
                    self._debug('New housing location encountered: %s' % self._housing_location.housing_location)
            except DatabaseError as e:
                self._debug("Could not save housing location '%s'\nException is %s" %
                            (inmate_housing_location, str(e)))
            try:
                housing_history, new_history = \
                    self._inmate.housing_history.get_or_create(housing_location=self._housing_location)
                if new_history:
                    housing_history.housing_date_discovered = yesterday()
                    housing_history.save()
                self._inmate.in_jail = self._housing_location.in_jail
            except DatabaseError as e:
                self._debug("For inmate %s, could not save housing history '%s'.\nException is %s" %
                            (self._inmate.jail_id, inmate_housing_location, str(e)))
    except Exception as e:
        self._debug("Unknown exception for inmate '%s'\nException is %s" %
                    (self._inmate.jail_id, str(e)))
def read_pdf(self, infile, update_stock):
    if not self.parse_invoice(infile, update_stock):
        return None
    if self.check_if_present():
        return None
    if update_stock:
        yesterd = utils.yesterday(self.date)
        self.e_items = list(map(lambda item:
                                item.process_item(self.supplier, yesterd),
                                self.items))
        if None in self.e_items:
            # "Not all items were entered, so no purchase invoice can be created in ERPNext."
            easygui.msgbox("Nicht alle Artikel wurden eingetragen.\n Deshalb kann keine Einkaufsrechnung in ERPNext erstellt werden.")
            return None
    if not ask_if_to_continue(self.check_total(), "Fortsetzen?"):
        return None
    if not ask_if_to_continue(self.check_duplicates()):
        return None
    self.create_e_invoice(update_stock)
    #print(self.e_invoice)
    self.doc = gui_api_wrapper(Api.api.insert, self.e_invoice)
    #print(self.doc)
    upload = gui_api_wrapper(Api.api.read_and_attach_file,
                             "Purchase Invoice", self.doc['name'],
                             infile, True)
    self.doc['supplier_invoice'] = upload['file_url']
    self.doc = gui_api_wrapper(Api.api.update, self.doc)
    #doc = gui_api_wrapper(Api.api.get_doc,'Purchase Invoice',self.doc['name'])
    # "Purchase invoice {0} transferred to ERPNext as a draft. Book it right away, or later?"
    if easygui.buttonbox("Einkaufsrechnung {0} als Entwurf an ERPNext übertragen.\n\nSoll die Rechnung auch gleich gebucht werden oder nicht?".format(self.e_invoice['title']),
                         "Sofort buchen?",
                         ["Sofort buchen", "Später buchen"]) == "Sofort buchen":
        self.doc = gui_api_wrapper(Api.api.submit, self.doc)
    return self
def save(self): """ Stores the inmates charges if they are new or if they have been changes Charges: charges come on two lines. The first line is a citation and the # second is an optional description of the charges. """ try: charges = strip_the_lines(self._inmate_details.charges().splitlines()) if just_empty_lines(charges): return # Capture Charges and Citations if specified parsed_charges_citation = charges[0] parsed_charges = charges[1] if len(charges) > 1 else '' create_new_charge = True if len(self._inmate.charges_history.all()) != 0: inmate_latest_charge = self._inmate.charges_history.latest('date_seen') # last known charge # if the last known charge is different than the current info then create a new charge if inmate_latest_charge.charges == parsed_charges and \ inmate_latest_charge.charges_citation == parsed_charges_citation: create_new_charge = False if create_new_charge: new_charge = self._inmate.charges_history.create(charges=parsed_charges, charges_citation=parsed_charges_citation) new_charge.date_seen = yesterday() new_charge.save() except DatabaseError as e: self._debug("Could not save charges '%s' and citation '%s'\nException is %s" % (parsed_charges, parsed_charges_citation, str(e))) except Exception, e: self._debug("Unknown exception for inmate '%s'\nException is %s" % (self._inmate.jail_id, str(e)))
def _known_inmates_ids_starting_with(self, args):
    known_inmates_ids = []
    cur_date = args['start_date']
    the_yesterday = yesterday()
    while cur_date <= the_yesterday:
        known_inmates_ids.extend([inmate.jail_id for inmate in
                                  self._inmate_class.known_inmates_for_date(cur_date)])
        cur_date += ONE_DAY
    args['response_queue'].put(known_inmates_ids)
def _find_inmates(self, args):
    excluded_inmates = set(args['excluded_inmates'])
    cur_date = args['start_date']
    while cur_date <= yesterday():
        for inmate_id in _jail_ids(cur_date, args['number_to_fetch']):
            if inmate_id not in excluded_inmates:
                self._inmate_scraper.create_if_exists(inmate_id)
        cur_date += ONE_DAY
    self._notify(self.FINISHED_FIND_INMATES)
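# The date loops in find_inmates, _known_inmates_ids_starting_with and
# _find_inmates above rely on a yesterday() helper and an ONE_DAY constant that
# are not shown here. A minimal sketch of what they are assumed to look like
# (the actual project may define them differently):
import datetime

ONE_DAY = datetime.timedelta(days=1)

def yesterday():
    # the previous calendar day; the while-loops walk from start_date up to and
    # including this date
    return datetime.date.today() - ONE_DAY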
def run(module_id, group_id, sensor_id, action):
    try:
        # ensure the group and sensor exist
        sensor = utils.get_sensor(module_id, group_id, sensor_id)
        sensor = init_sensor(sensor)
        if sensor is None:
            log.error("[" + module_id + "][" + group_id + "][" + sensor_id + "] not found, skipping it")
            return
        # execute the action
        log.debug("[" + sensor["module_id"] + "][" + sensor["group_id"] + "][" + sensor["sensor_id"] + "] requested " + action)
        if action == "poll":
            # read the measure (will be stored into the cache)
            poll(sensor)
        elif action == "parse":
            # just parse the output
            log.info(parse(sensor))
        elif action == "save":
            # save the parsed output into the database
            save(sensor)
        elif action == "force_save":
            # save the parsed output into the database, forcing a poll of the measure
            save(sensor, force=True)
        elif action == "summarize_hour":
            # every hour calculate and save min, max, avg of the previous hour
            summarize(sensor, 'hour', utils.hour_start(utils.last_hour()), utils.hour_end(utils.last_hour()))
        elif action == "summarize_day":
            # every day calculate and save min, max, avg of the previous day (using hourly averages)
            summarize(sensor, 'day', utils.day_start(utils.yesterday()), utils.day_end(utils.yesterday()))
        elif action == "expire":
            # purge old data from the database
            expire(sensor)
        else:
            log.error("Unknown action " + action)
    except Exception as e:
        log.warning("[" + sensor["module_id"] + "][" + sensor["group_id"] + "][" + sensor["sensor_id"] + "] unable to run " + action + ": " + utils.get_exception(e))
def open_file(self, filename, force_date=None):
    self._file_list = list()
    ds = None
    if os.linesep in filename:
        # already a list of files (date range)
        return filename.split(os.linesep)
    mtype = mimetypes.guess_type(filename)[0]
    if mtype is None or 'xml' not in mtype:
        # assuming a text file containing a list of VFR files
        try:
            f = open(filename)
            i = 0
            lines = f.read().splitlines()
            for line in lines:
                if len(line) < 1 or line.startswith('#'):
                    continue  # skip empty or commented lines
                if not line.startswith('http://') and \
                        not line.startswith('20'):
                    # determine date if missing
                    if not force_date:
                        if line.startswith('ST_Z'):
                            date = yesterday()
                        else:
                            date = last_day_of_month()
                    else:
                        date = force_date
                    line = date + '_' + line
                if not line.endswith('.xml.gz'):
                    # add extension if missing
                    line += '.xml.gz'
                if not os.path.exists(line):
                    if not line.startswith('http://'):
                        line = 'http://vdp.cuzk.cz/vymenny_format/soucasna/' + line
                    line = download_vfr(line)
                self._file_list.append(line)
                i += 1
            VfrLogger.msg("%d VFR files will be processed..." % len(self._file_list))
        except IOError:
            raise VfrError("Unable to read '%s'" % filename)
        f.close()
    else:
        # single VFR file
        self._file_list.append(filename)
    return self._file_list
def _fetch_articles_in_24hours(self, url):
    print url
    yesterday = utils.yesterday()
    articles = []
    response = urllib2.urlopen(url).read()
    parser = ET.XMLParser(encoding='utf-8')
    xml = ET.fromstring(response, parser=parser)
    for child in xml.iter('item'):
        find = lambda x: child.findall(x)[0].text
        publish_date_txt = find('pubDate')
        publish_date = datetime.datetime.strptime(publish_date_txt,
                                                  "%a, %d %b %Y %H:%M:%S %Z")
        if publish_date > yesterday:
            title = find('title')
            title = self._jtof(title)
            article = {'link': find('link') + '?full=y',
                       'title': title,
                       'publish_date': publish_date}
            articles.append(article)
    return articles
def fetch_all_articles_in_24hours(self):
    '''
    Fetch all articles within 24 hours from the RSS feed of The New York Times.
    '''
    url = 'http://cn.nytimes.com/rss/zh-hant/'
    print('Fetching RSS feed from ' + url)
    try:
        response = urllib2.urlopen(url).read()
        parser = ET.XMLParser(encoding='utf-8')
        xml = ET.fromstring(response, parser=parser)
        print('...Done')
    except Exception as e:
        print('...Failed. ' + str(e))
        return []
    yesterday = utils.yesterday()
    articles = []

    def format_text(text):
        text = text.replace('<div id=story_main_mpu></div>', '')
        text = text.replace('</p>', '\n')
        text = re.sub('<[^<]+?>', '', text)
        return text

    for child in xml.iter('item'):
        find = lambda x: child.findall(x)[0].text
        publish_date_txt = find('pubDate').replace(' +0800', '')
        publish_date = datetime.datetime.strptime(publish_date_txt,
                                                  "%a, %d %b %Y %H:%M:%S")
        if publish_date > yesterday:
            text = find('description')
            text = format_text(text).split('\n')
            article = {'link': find('link'),
                       'title': find('title'),
                       'text': text,
                       'publish_date': publish_date,
                       'source': 'The New York Times Chinese'}
            articles.append(article)
    return articles
def _fetch_articles_in_24hours(self, url):
    yesterday = utils.yesterday()
    articles = []
    response = urllib2.urlopen(url).read()
    response = response.decode('big5').encode('utf-8')
    parser = ET.XMLParser(encoding='utf-8')
    xml = ET.fromstring(response, parser=parser)
    for child in xml.iter('{http://purl.org/rss/1.0/}item'):
        find = lambda x: child.findall('{http://purl.org/rss/1.0/}' + x)[0].text
        publish_date_txt = find('pubDate').replace(' +0800', '')
        publish_date = datetime.datetime.strptime(publish_date_txt,
                                                  "%Y/%m/%d %H:%M:%S")
        if publish_date > yesterday:
            article = {'link': find('link'),
                       'title': find('title'),
                       'publish_date': publish_date,
                       'source': 'The Wall Street Journal Chinese'}
            articles.append(article)
    return articles
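# The three RSS fetchers above share one pattern: parse each item's pubDate and
# keep the item only if it is newer than utils.yesterday(). A self-contained
# sketch of that filter, assuming an un-namespaced feed like the first two and
# pubDate values without a timezone suffix (the function name and fields here
# are illustrative, not from the source):
import datetime
import xml.etree.ElementTree as ET

def items_since_yesterday(rss_text, date_format="%a, %d %b %Y %H:%M:%S"):
    # keep only items published within the last 24 hours
    cutoff = datetime.datetime.now() - datetime.timedelta(days=1)
    recent = []
    for item in ET.fromstring(rss_text).iter('item'):
        pub_date = datetime.datetime.strptime(item.find('pubDate').text, date_format)
        if pub_date > cutoff:
            recent.append({'link': item.find('link').text,
                           'title': item.find('title').text,
                           'publish_date': pub_date})
    return recent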
def read_pdf(self, infile):
    self.infiles = [infile]
    if not self.parse_invoice(infile):
        return None
    # "Checking for a duplicate invoice"
    print("Prüfe auf doppelte Rechnung")
    if self.check_if_present():
        return None
    if self.update_stock:
        # "Fetching stock data"
        print("Hole Lagerdaten")
        yesterd = utils.yesterday(self.date)
        self.e_items = list(map(lambda item:
                                item.process_item(self.supplier, yesterd),
                                self.items))
        if None in self.e_items:
            # "Not all items were entered, so no purchase invoice can be created in ERPNext."
            print("Nicht alle Artikel wurden eingetragen.\n Deshalb kann keine Einkaufsrechnung in ERPNext erstellt werden.")
            return None
    if not ask_if_to_continue(self.check_total(), "Fortsetzen?"):
        return None
    if not ask_if_to_continue(self.check_duplicates()):
        return None
    self.create_taxes()
    return self
def parse_cmd(argv, flags, params, optdir):
    get_opt(argv, flags, params, optdir)

    if optdir['list']:
        if not optdir['dbname']:
            raise getopt.GetoptError("--dbname required")
        return 0

    filename = optdir.get('filename', None)
    date = optdir.get('date', None)
    ftype = optdir.get('ftype', None)

    # check required options
    if not filename and not ftype:
        raise getopt.GetoptError("--file or --type required")
    if filename and ftype:
        raise getopt.GetoptError("--file and --type are mutually exclusive")

    if ftype and not date:
        if ftype.startswith('ST_Z'):
            date_list = [yesterday()]
        else:
            date_list = [last_day_of_month()]
    elif ftype and date and ':' in date:
        if ftype.startswith('ST_Z'):
            date_list = get_date_interval(date)
        else:
            raise getopt.GetoptError("Date interval is valid only for '--type ST_ZXXX'")
    else:
        date_list = [date]

    if optdir['overwrite'] and optdir.get('append', False):
        raise getopt.GetoptError("--append and --overwrite are mutually exclusive")

    if optdir['layer']:
        optdir['layer'] = optdir['layer'].split(',')

    if filename:
        # is the file a valid VFR file
        filename = check_file(filename)
    else:
        # --date & --type
        flist = []
        base_url = "http://vdp.cuzk.cz/vymenny_format/"
        if ftype != 'ST_UVOH':
            base_url += "soucasna/"
        else:
            base_url += "specialni/"
        for d in date_list:
            fname = "%s_%s.xml.gz" % (d, ftype)
            url = base_url + fname
            if os.path.exists(fname):
                # use existing file
                flist.append(fname)
            else:
                # download file
                flist.append(download_vfr(url))
        if not flist:
            raise getopt.GetoptError("Empty date range")
        filename = os.linesep.join(flist)

    if not filename:
        raise getopt.GetoptError("Invalid input file")

    return filename
import logging

from models import OperateLog
from utils import today, yesterday, datetime2str, is_qualify, yesterday_str

FAIL = (-1, u'失败')
SUCCESS = (0, u'完成')

logger = logging.getLogger(__name__)

ANALYSIS = {
    'TOTAL_REQ': {
        'desc': u'',
        'include': ['*'],
        'exclude': [],
        'qs': OperateLog.objects.filter(request_time__gte=yesterday(),
                                        request_time__lt=today()),
        'display': '{LOGON}/{ANONYMOUS}',
    },
    'TOTAL_USER': {
        'desc': u'',
        'include': ['*'],
        'exclude': [],
        'qs': OperateLog.objects.filter(request_time__gte=yesterday(),
                                        request_time__lt=today()).values_list(
            "account_id", flat=True).distinct(),
        'display': '{LOGON}',
def data_get_data(module_id, group_id, sensor_id, timeframe, stat):
    data = []
    sensor = utils.get_sensor(module_id, group_id, sensor_id)
    if sensor is None:
        log.error("[" + module_id + "][" + group_id + "][" + sensor_id + "] sensor not found")
        return json.dumps(data)
    if "plugin" in sensor and "poll_on_demand" in sensor["plugin"] and sensor["plugin"]["poll_on_demand"] and timeframe == "realtime":
        # the sensor needs to be polled on demand
        run(module_id, group_id, sensor_id, "save")
    # get the parameters for the requested timeframe
    if timeframe == "realtime":
        # recent hourly measures up to now
        range = ""
        start = utils.realtime()
        end = utils.now()
        withscores = True
    elif timeframe == "recent":
        # recent hourly measures up to now
        range = ":hour"
        start = utils.recent()
        end = utils.now()
        withscores = True
    elif timeframe == "history":
        # historical daily measures up to now
        range = ":day"
        start = utils.history()
        end = utils.now()
        withscores = True
    elif timeframe == "short_history":
        # historical daily measures up to now
        range = ":day"
        start = utils.history(conf["general"]["timeframes"]["short_history_days"])
        end = utils.now()
        withscores = True
    elif timeframe == "today":
        # today's measure
        range = ":day"
        start = utils.day_start(utils.now())
        end = utils.day_end(utils.now())
        withscores = False
    elif timeframe == "yesterday":
        # yesterday's measure
        range = ":day"
        start = utils.day_start(utils.yesterday())
        end = utils.day_end(utils.yesterday())
        withscores = False
    elif timeframe == "forecast":
        # next days' measures
        range = ":day"
        start = utils.day_start(utils.now())
        end = utils.day_start(utils.now() + (conf["general"]["timeframes"]["forecast_days"] - 1) * conf["constants"]["1_day"])
        withscores = True
    else:
        return data
    # define the key to request
    key = conf["constants"]["db_schema"]["root"] + ":" + module_id + ":" + group_id + ":" + sensor_id + range
    requested_stat = ":" + stat
    # if a range is requested, start asking for the min
    if stat == "range":
        requested_stat = ":min"
    if timeframe == "realtime":
        requested_stat = ""
    # request the data
    data = db.rangebyscore(key + requested_stat, start, end,
                           withscores=withscores, milliseconds=True,
                           formatter=conf["constants"]["formats"][sensor["format"]]["formatter"])
    if stat == "range" and len(data) > 0:
        # if a range is requested, ask for the max and combine the results
        data_max = db.rangebyscore(key + ":max", start, end,
                                   withscores=False, milliseconds=True,
                                   formatter=conf["constants"]["formats"][sensor["format"]]["formatter"])
        for i, item in enumerate(data):
            # ensure data_max has a correspondent value
            if i < len(data_max):
                if isinstance(item, list):
                    data[i].append(data_max[i])
                else:
                    data.append(data_max[i])
    return json.dumps(data)