def ProcessDailyFX():
    global counter
    dailyfx_start_url = 'http://www.dailyfx.com/forex_market_news/'
    dailyfx_base_url = 'http://www.dailyfx.com'
    dailyfx_write_path = '/home/ubuntu/news_scrape/dailyfx_data/'
    html = GetURLText(dailyfx_start_url)
    soup = BeautifulSoup(html, 'html5lib')
    divs = soup.findAll('div', attrs={'class': 'secondary-box-content'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            new_html = GetURLText(dailyfx_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(dailyfx_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1
            break  # only the first link of each box
    divs = soup.findAll('div', attrs={'class': 'main-article-non-home'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            new_html = GetURLText(dailyfx_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(dailyfx_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1
            break  # only the first link of each article block
def getUpdate(i):
    global bugidlist
    number = i
    cj = cookielib.MozillaCookieJar()
    cj.load(os.path.join(os.path.expanduser(""), "cookies.txt"))
    url1 = "https://bugs.launchpad.net/bugs/+bugs?field.searchtext=&search=Search&field.status%3Alist=NEW&field.status%3Alist=OPINION&field.status%3Alist=INVALID&field.status%3Alist=WONTFIX&field.status%3Alist=EXPIRED&field.status%3Alist=CONFIRMED&field.status%3Alist=TRIAGED&field.status%3Alist=INPROGRESS&field.status%3Alist=INCOMPLETE_WITH_RESPONSE&field.status%3Alist=INCOMPLETE_WITHOUT_RESPONSE&assignee_option=any&field.assignee=&field.bug_reporter=&field.bug_commenter=&field.subscriber=&field.tag=&field.tags_combinator=ANY&field.status_upstream-empty-marker=1&field.has_cve.used=&field.omit_dupes.used=&field.omit_dupes=on&field.affects_me.used=&field.has_patch.used=&field.has_branches.used=&field.has_branches=on&field.has_no_branches.used=&field.has_no_branches=on&field.has_blueprints.used=&field.has_blueprints=on&field.has_no_blueprints.used=&field.has_no_blueprints=on&orderby=-date_last_updated&start="
    url2 = "%d" % (number)
    url = url1 + url2
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    r = opener.open(url).read()
    data = r.decode("utf-8")
    soup = BeautifulSoup(data)
    try:
        tab = []
        tab = soup.findAll("div", {"class": "buglisting-row"})
    except:
        pass
    try:
        for x in range(0, len(tab)):
            table = tab[x]
            bugid = table.find("span", {"class": "bugnumber"}).renderContents().decode("utf-8").strip()
            time = table.find("span", {"class": "sprite milestone field"}).renderContents().decode("utf-8").strip()
            bugid = bugid.replace("#", "")
            bugidlist.append(bugid)
            time = time.replace("Last updated", "")
            time = time.replace("ago", "")
            time = time.replace("on", "")
            time = time.replace("-", "")
            out_file = open("bugidlist.txt", "a")
            term = bugid + " "
            out_file.write(term)
            out_file.close()
            lastedtime[0] = time
    except:
        pass
def link_parse(self, html, base):
    if not html or not base:
        return False  # nothing to parse (the original returned an undefined name here)
    soup = BeautifulSoup(html)
    for li in soup.findAll('li'):
        try:
            li.contents[0].contents[0]
        except:
            continue
        title = li.contents[0].contents[0]
        #title = self.get_safe_utf8(title)
        href = li.contents[0]["href"]
        time = li.contents[1].strip()
        time = time.replace(u')', "")
        time = time.replace(u'(', "")
        #title = self.cleanHtmlTag(self.get_safe_utf8(title))
        if not href:
            continue
        if href in self.suburl.keys():
            continue
        href = self.normal_url(self.get_safe_utf8(urljoin(base, self.get_safe_utf8(href))))
        #self.suburl[href] = (title, time)
        if time == self.today:
            self.suburl[href] = (title, time)
            #print title
            #print href
            #print time
    return True
def ProcessCNBC():
    global counter
    cnbc_start_url = 'http://www.cnbc.com/id/15839121/'
    cnbc_base_url = 'http://www.cnbc.com'
    cnbc_write_path = '/home/ubuntu/news_scrape/cnbc_data/'
    html = GetURLText(cnbc_start_url)
    soup = BeautifulSoup(html, 'html5lib')
    divs = soup.findAll('div', attrs={'class': 'asset cnbcnewsstory big'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            new_html = GetURLText(cnbc_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(cnbc_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1
    divs = soup.findAll('div', attrs={'class': 'asset cnbcnewsstory'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            new_html = GetURLText(cnbc_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(cnbc_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1
def init(self, thread=None, log_name=None):
    self.log_name = log_name
    if not thread == None:
        self.name = self.name + "_thread:" + str(thread)
    if self.log == None:
        if thread == None:
            # Creating a log file
            self.log_name = caller = self.name
            self.file_path = settings.log_file_path
            time = datetime.now()
            time = str(time)
            time = time.replace(' ', '')
            time = time.replace(':', '-')
            self.file_name = "%s_%s_%s_%s.log" % (caller, caller, caller, time)
            self.file = self.file_path + self.file_name
            f = open(self.file, "w")
            f.close()
            logging.basicConfig(filename=self.file, level=logging.DEBUG,
                                format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        log = logging.getLogger(self.log_name)
        log.setLevel(logging.INFO)
        # create console handler and set level to debug
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        # create formatter
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # add formatter to ch
        ch.setFormatter(formatter)
        # add ch to logger
        log.addHandler(ch)
        self.log = log
    self.log.info("%s initiated" % (self.name))
def currentTime():
    time = str(datetime.utcnow())
    time = time.replace(' ', 'T')
    time = time.replace('-', '')
    time = time.replace(':', '')
    time = time.split('.')[0]
    time = time + 'Z'
    return time
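# A minimal usage sketch for currentTime() above (assuming, as the function
# itself does, that `datetime` is imported from the datetime module): it emits
# a compact UTC stamp such as '20240101T120000Z'. The same result in one call:
#
#     from datetime import datetime
#     stamp = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')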
def get_date(time):
    """ Get tweeting date of the tweet. """
    threshold_time = (parse(setting['start_time']) - datetime.timedelta(minutes=30)).time()
    if time.time() < threshold_time:
        return time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None) - datetime.timedelta(days=1)
    else:
        return time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)
def reminder_time(input):
    global next_func
    next_func = None
    time = datetime.datetime.today()
    words, has_nums = parse_to_nums(input.lower().split())
    day_identifier = [i for i in words if i in weekdays]
    if "after tomorrow" in input.lower():
        time = time + datetime.timedelta(days=2)
    elif 'tomorrow' in words:
        time = time + datetime.timedelta(days=1)
    elif day_identifier:
        day = weekdays.index(day_identifier[0])
        print day
        time = time + datetime.timedelta(days=(day - time.weekday()) % 7)
    hour = [i for i in words if i in times]
    if hour:
        time = time.replace(hour=times[hour[0]], minute=0, second=0, microsecond=0)
    hour = False
    minute = 0
    pmam = False
    bell = False
    for i in words:
        if hasattr(i, '__int__'):
            if type(hour) == bool and not hour:
                hour = i
            elif not minute:
                minute = i
        if i in bells:
            bell = True
            minute = bells[i]
        elif bell:
            if i in ['as', 'past', 'after']:
                pass
            elif i in ['to', 'told', 'til', 'till', 'of']:
                minute = -minute
            bell = False
        elif pmam:
            if i == 'm.':
                if pmam == 'pm':
                    hour = hour % 12 + 12
                elif pmam == 'am':
                    hour = hour % 12
                pmam = False
        elif i in ['p.', 'a.']:
            pmam = {'p.': 'pm', 'a.': 'am'}[i]
    if minute < 0:
        hour = (hour - 1) % 24
        minute = 60 + minute
    if type(hour) == bool:
        hour = time.hour
    tf = tempfile.NamedTemporaryFile()
    tf.write('espeak "' + input + '"')
    time = time.replace(hour=hour, minute=minute, second=0, microsecond=0)
    os.system('at -t ' + time.strftime('%Y%m%d%H%M') + ' -f ' + tf.name)
    return str(time), None
def getTime(staElem):
    try:
        timeNode = staElem.find_element_by_css_selector('a.S_link2.WB_time')
        time = timeNode.get_attribute("title")
        time = time.replace('-', '')
        time = time.replace(':', '')
        time = time.replace(' ', '')
    except:
        time = "void"
    return time
def extractTime(self, log_text):
    totals_line = self.extractTotalsLine(log_text)
    if not totals_line:
        return None
    # Total: X tests, X failures, X errors in [X minutes] X.Y seconds.
    time = totals_line.split(' in ')[-1]
    time = time.replace(' minutes ', 'm')
    time = time.replace(' seconds.', 's')
    time = re.sub('[.][0-9]+s', 's', time)
    return time
def get_datetime(cls, f_timestep, meteocenter, s_timestep, hashtag):
    date = datetime.strptime(f_timestep.attrib.get('value'), '%Y%m%d')
    time = datetime.now()
    if s_timestep.attrib.get('value') == '24:00':
        time = time.replace(hour=0)
        time = time.replace(minute=0)
    else:
        time = datetime.strptime(s_timestep.attrib.get('value'), '%H:%M')
    datestr = '%s-%s-%s %s:%s' % (date.year, date.month, date.day, time.hour, time.minute)
    return datetime.strptime(datestr, meteocenter.date_reg)
def __getIntFromTime(self, time):
    pm = False
    if "PM" in time.upper():
        pm = True
    if "." in time or ":" in time:
        time = time.replace(":", ".")
        time = time[:time.find(".")]
    time = int(time.replace("AM", "").replace("PM", "").replace("am", "").replace("pm", ""))
    if pm:
        time = int(time) + 12
    return str(time)
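# Worked examples for __getIntFromTime above (hypothetical inputs, traced by
# hand): "7:30 PM" -> ":" becomes ".", the string is truncated at the "." to
# "7", and PM adds 12 -> "19"; "11 AM" -> "11". Edge case: "12 PM" returns
# "24", because 12 is added unconditionally whenever "PM" is present.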
def init(self, url=None, nbr=None, logger=None):
    # Creating a log file
    if not nbr == None:
        self.name = self.name + "_thread_" + str(nbr)
    if logger == None:
        caller = self.name
        project_name = 'prox'
        build = '0001'
        self.file_path = settings.log_file_path
        time = datetime.now()
        time = str(time)
        time = time.replace(' ', '')
        time = time.replace(':', '-')
        self.file_name = "%s_%s_%s_%s.log" % (caller, project_name, build, time)
        self.file = self.file_path + self.file_name
        f = open(self.file, "w")
        f.close()
        logging.basicConfig(filename=self.file, level=logging.DEBUG,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    else:
        log = logger
    log = logging.getLogger(self.name)
    log.setLevel(logging.INFO)
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # add formatter to ch
    ch.setFormatter(formatter)
    # add ch to logger
    log.addHandler(ch)
    #logg.setFormatter(formatter)
    # 'application' code
    #log.debug('debug message')
    #log.info('info message')
    #log.warn('warn message')
    #log.error('error message')
    #log.critical('critical message')
    #log.info("simple_load_test initiating....")
    self.prox = prox_front.ProX()
    self.prox.init(settings.prox_url)
    log.info("%s initiated" % (self.name))
    self.log = log
    logg = log
def ProcessBloomberg():
    global counter
    bloomberg_start_url = 'http://www.bloomberg.com/news/'
    bloomberg_base_url = 'http://www.bloomberg.com'
    bloomberg_write_path = '/home/ubuntu/news_scrape/bloomberg_data/'
    html = GetURLText(bloomberg_start_url)
    soup = BeautifulSoup(html, 'html5lib')
    divs = soup.findAll('div', attrs={'id': 'markets_news'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            if a_url != '/news/markets/':
                new_html = GetURLText(bloomberg_base_url + a_url)
                time = strftime("%Y-%m-%d %H:%M:%S")
                time = time.replace(' ', '_').replace(':', '-')
                f = open(bloomberg_write_path + str(counter) + '_' + time + '.html', 'w')
                f.write(new_html.encode('utf-8'))
                f.close()
                counter += 1
    divs = soup.findAll('div', attrs={'id': 'industries_news'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            if a_url != '/news/industries/':
                new_html = GetURLText(bloomberg_base_url + a_url)
                time = strftime("%Y-%m-%d %H:%M:%S")
                time = time.replace(' ', '_').replace(':', '-')
                f = open(bloomberg_write_path + str(counter) + '_' + time + '.html', 'w')
                f.write(new_html.encode('utf-8'))
                f.close()
                counter += 1
    divs = soup.findAll('div', attrs={'id': 'economy_news'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            if a_url != '/news/economy/':
                new_html = GetURLText(bloomberg_base_url + a_url)
                time = strftime("%Y-%m-%d %H:%M:%S")
                time = time.replace(' ', '_').replace(':', '-')
                f = open(bloomberg_write_path + str(counter) + '_' + time + '.html', 'w')
                f.write(new_html.encode('utf-8'))
                f.close()
                counter += 1
def ProcessSeekingAlpha():
    global counter
    seeking_alpha_start_url = 'http://www.seekingalpha.com'
    seeking_alpha_alt_start_url = 'http://seekingalpha.com/analysis/macro-view/all'
    seeking_alpha_base_url = 'http://www.seekingalpha.com'
    seeking_alpha_write_path = '/home/ubuntu/news_scrape/seekingalpha_data/'
    html = GetURLTextAuth(seeking_alpha_start_url)
    soup = BeautifulSoup(html, 'html5lib')
    divs = soup.findAll('div', attrs={'id': 'hp_news_unit'})
    for a_div in divs:
        for a_link in a_div.findAll('a'):
            a_url = a_link['href']
            new_html = GetURLTextAuth(seeking_alpha_base_url + a_url)
            print (seeking_alpha_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(seeking_alpha_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1
    divs = soup.findAll('div', attrs={'class': 'articles'})
    for a_div in divs:
        for a_list in a_div.findAll('li'):
            for a_link in a_list.findAll('a'):
                a_url = a_link['href']
                new_html = GetURLTextAuth(seeking_alpha_base_url + a_url)
                print (seeking_alpha_base_url + a_url)
                time = strftime("%Y-%m-%d %H:%M:%S")
                time = time.replace(' ', '_').replace(':', '-')
                f = open(seeking_alpha_write_path + str(counter) + '_' + time + '.html', 'w')
                f.write(new_html.encode('utf-8'))
                f.close()
                counter += 1
                break  # only the first link of each list item
    html = GetURLTextAuth(seeking_alpha_alt_start_url)
    soup = BeautifulSoup(html, 'html5lib')
    for a_link in soup.findAll('a', attrs={'class': 'article_title'}):
        a_url = a_link['href']
        new_html = GetURLTextAuth(seeking_alpha_base_url + a_url)
        print (seeking_alpha_base_url + a_url)
        time = strftime("%Y-%m-%d %H:%M:%S")
        time = time.replace(' ', '_').replace(':', '-')
        f = open(seeking_alpha_write_path + str(counter) + '_' + time + '.html', 'w')
        f.write(new_html.encode('utf-8'))
        f.close()
        counter += 1
def fillup_with_zeros(datapoints, start_time, time_window, period, statistic='Sum'):
    # first, make sure datapoints are sorted
    datapoints.sort(key=lambda item: item['Timestamp'])
    dates_as_string = False
    if len(datapoints) > 0:
        dates_as_string = isinstance(datapoints[0]['Timestamp'], basestring)
    expected_length = int(time_window / period)
    for i in range(0, expected_length):
        unit = 'n/a'
        time = start_time + timedelta(seconds=i * period)
        add_zero = False
        if len(datapoints) <= i:
            add_zero = True
        else:
            unit = datapoints[i]['Unit']
            next_time = datapoints[i]['Timestamp']
            if dates_as_string:
                next_time = aws_common.parse_cloudwatch_timestamp(datapoints[i]['Timestamp'])
            # make sure all timestamps are in UTC timezone, to avoid uncomparable datetimes
            next_time = next_time.replace(tzinfo=pytz.UTC)
            time = time.replace(tzinfo=pytz.UTC)
            if (time + timedelta(seconds=period)) <= next_time:
                add_zero = True
        if add_zero:
            if dates_as_string:
                time = aws_common.format_cloudwatch_timestamp(time)
            datapoints.insert(i, {
                'Timestamp': time,
                statistic: 0,
                'Unit': unit
            })
def convert_time(self, time):
    """
    Helper function to convert the display-time strings to an actual
    integer minute representation.

    Examples:
        'Nu'                            => 0
        '8 min.'                        => 8
        '12:22' (at the time of 12:18)  => 4
        '9'                             => 9
    """
    if 'min' in time:
        time = time.replace('min', '').replace('.', '').strip()
    elif 'Nu' in time:
        time = 0
    elif ':' in time:
        now = self.get_now()
        # floor below minute
        now = datetime.datetime(year=now.year, month=now.month, day=now.day,
                                hour=now.hour, minute=now.minute, second=0, microsecond=0)
        hour, minute = time.split(':')
        dtime = datetime.datetime(year=now.year, month=now.month, day=now.day,
                                  hour=int(hour), minute=int(minute), second=0, microsecond=0)
        # 00:00 wraparound?
        if dtime < now:
            dtime = dtime + datetime.timedelta(days=1)
        time = round((dtime - now).total_seconds() / 60.0)
    return int(time)
def time_command(inp, bot=None):
    """.time <area> - Gets the time in <area>."""
    if inp.lower() == "butts":
        return "It's always time for butts."
    query = "current time in {}".format(inp)
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
    if not api_key:
        return "error: no wolfram alpha api key set"
    request = http.get_xml(api_url, input=query, appid=api_key)
    time = " ".join(request.xpath("//pod[@title='Result']/subpod/plaintext/text()"))
    time = time.replace(" | ", ", ")
    if time:
        # nice place name for UNIX time
        if inp.lower() == "unix":
            place = "Unix Epoch"
        else:
            place = cap_first(" ".join(request.xpath(
                "//pod[@title='Input interpretation']/subpod/plaintext/text()")).split('|')[0])
            place = place.replace("Current Time In", "").strip()
        return "\x02{}\x02 - {}".format(place, time)
    else:
        return "Could not get the time for '{}'.".format(inp)
def onMatch(self, d):
    """Handle a Coinbase match message and push the resulting trade."""
    trade = VtTradeData()
    trade.gatewayName = self.gatewayName
    trade.symbol = d['product_id']
    trade.exchange = EXCHANGE_COINBASE
    trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
    if d['maker_order_id'] in orderDict:
        order = orderDict[d['maker_order_id']]
    else:
        order = orderDict[d['taker_order_id']]
    trade.orderID = order.orderID
    trade.vtOrderID = order.vtOrderID
    trade.tradeID = str(d['trade_id'])
    trade.vtTradeID = '.'.join([trade.gatewayName, trade.tradeID])
    trade.direction = order.direction
    trade.price = float(d['price'])
    trade.volume = float(d['size'])
    date, time = d['time'].split('T')
    time = time.replace('Z', '')
    trade.tradeTime = time
    self.gateway.onTrade(trade)
def ipdetails(ip, cgiloc):
    import socket
    try:
        addr = socket.gethostbyaddr(ip)[0]
    except:
        addr = 'unknown host'
    if addr != 'unknown host':
        # add successful lookups to the DNS cache
        try:
            db = dbm.open(database, "c")
            db[ip] = addr
            db.close()
        except:
            pass  # fail silently - lots of things could have gone wrong...
    print """<p class = "section">Visit details for <b>%s</b><br /><b>hostname:</b> %s<br />""" % (ip, addr)
    counter = 1
    pagecounter = 1
    for line in loglines:
        address = line.split(' ')[0]
        if address == ip:
            time = line.split(' ')[3].replace('[', '')
            if time.startswith(apachetoday):
                time = time.replace(apachetoday + ':', 'today, ')
            resource = line.split(' ')[6]
            if counter == 1:
                referrer = line.split('"')[-4]
                user_agent = line.split('"')[-2]
                if len(user_agent) > 50:
                    user_agent = user_agent[0:50].strip() + "..."
                if len(referrer) > 1:
                    print """<b>referrer:</b> <a href = "%(referrer)s">%(referrer)s</a><br />""" % vars()
                print """<b>browser:</b> %(user_agent)s<br /><br />""" % vars()
            if ispage(resource):
                quotedresource = urllib.quote(resource)
                print """%(pagecounter)s: <b>%(time)s</b>: %(resource)s [<a href = "%(cgiloc)s?url=%(quotedresource)s">details</a>] <br />""" % vars()
                pagecounter += 1
            counter += 1
    print "</p>"
def buildLineup(self, name=None):
    log('buildLineup, name = ' + str(name))
    soup = BeautifulSoup(self.openURL(GUIDE_URL), "html.parser")
    results = soup('div', {'class': 'row'})
    for channel in results:
        chname = cleanString(channel.find_all('img')[0].attrs['alt'])
        link = cleanString(channel.find_all('a')[0].attrs['href'])
        thumb = LOGO % chname
        if name is None:
            infoLabels = {"mediatype": "episode", "label": chname, "title": chname}
            infoArt = {"thumb": thumb, "poster": thumb, "fanart": FANART, "icon": thumb, "logo": thumb}
            self.addDir(chname, chname, 2, infoLabels, infoArt)
        elif name.lower() == chname.lower():
            try:
                date = soup('a', {'class': 'last'})[0].attrs['href']
                aired = re.findall('/tv-guide/(.+?)/00', date, flags=re.DOTALL)[0]
            except:
                aired = self.getTVCtime().strftime('%Y-%m-%d')
            items = channel('div', {'class': 'hide'})
            for item in items:
                try:
                    time = trimString(item.find_all('span')[0].get_text())
                    dur = int((abs(eval(time.replace(':', '.'))) * 60) * 60)
                    start = datetime.datetime.strptime(time.split('-')[0], '%H:%M').strftime('%I:%M %p')
                except:
                    continue
                label = '%s: %s - %s' % (start, chname, cleanString(item.get_text()).split('\n')[0])
                try:
                    desc = trimString(item.find_all('br')[0].get_text())
                except:
                    desc = ''
                infoLabels = {"mediatype": "episode", "label": label, "title": label,
                              "plot": desc, "duration": dur, "aired": aired}
                infoArt = {"thumb": thumb, "poster": thumb, "fanart": FANART, "icon": thumb, "logo": thumb}
                self.addLink(label, link, 9, infoLabels, infoArt, len(items))
            break
def can_reconnect(w, wu, user, time):
    print "dans can_reconnect"
    ti_zone = tz.tzlocal()
    time = time.replace(tzinfo=ti_zone)
    time_start = wu.connection_start
    time_end = wu.connection_end
    total_connection_day = timedelta()
    other_connection_today = False
    #wuall = WorkstationUser.objects.filter(workstation_type_id=wu.workstation_type_id).filter(username=user.username).filter(connection_start__startswith=time).exclude(logged=True)
    #print "avant calcul total connection"
    #for wua in wuall:
    #    other_connection_today = True
    #    diff = wua.connection_end - wua.connection_start
    #    total_connection += diff
    if time_end == None:
        return None
    diff_time = time - time_end
    #print "avant other_connection_today"
    #if other_connection_today:
    #    max_hours = timedelta(hours=w.max_hours_connection)
    #    if total_connection < max_hours:
    #        return True
    timedelta_interval = timedelta(minutes=w.interval_time_not_disconnection)
    if diff_time < timedelta_interval:
        return None
    timedelta_interval = timedelta(hours=w.waiting_time_before_reconnect)
    if diff_time >= timedelta_interval:
        return None
    return diff_time
async def send_reminder_start_msg(self, user, channel, client, time):
    '''
    Gives an acknowledgement that the reminder has been set.
    '''
    time = time.replace(microsecond=0)
    msg = ":+1: %s I'll remind you at %s UTC." % (user, str(time))
    await client.send_message(channel, msg)
def format(time):
    class TZ(tzinfo):
        def utcoffset(self, dt):
            return timedelta(minutes=+330)  # For India
    time = time.replace(tzinfo=TZ(), microsecond=0)
    return time.isoformat('T')
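# A quick check of format() above (assuming tzinfo and timedelta are imported
# from the datetime module, as the function requires):
#
#     from datetime import datetime
#     format(datetime(2015, 1, 15, 10, 20, 0))
#     # -> '2015-01-15T10:20:00+05:30'  (the fixed +330-minute IST offset)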
def get_date_pripara_prpr(time):
    # first threshold - on wednesday
    if parse('wed').weekday() == time.weekday():
        if time.time() < parse('21:00').time():
            delta = 4
        else:
            delta = 0
    # second threshold - on saturday
    if parse('sat').weekday() == time.weekday():
        if time.time() < parse('21:00').time():
            delta = 3
        else:
            delta = 0
    # first span - from thu to fri
    if parse('wed').weekday() < time.weekday() < parse('sat').weekday():
        delta = time.weekday() - parse('wed').weekday()
    # second span - from sun to tue
    if parse('sat').weekday() < time.weekday():
        delta = time.weekday() - parse('sat').weekday()
    elif time.weekday() < parse('wed').weekday():
        delta = time.weekday() + 2
    date = time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)
    date -= datetime.timedelta(days=delta)
    return date
def getGpxTrackSet(docroot, ns):
    trackCollection = []
    for trk in docroot.findall("ns:trk", ns):
        trackName = trk.find("ns:name", ns).text
        trackSegPointList = trk.find("ns:trkseg", ns)
        trackSegment = {"name": trackName}
        trackSegPoints = []
        foundTimeInTrackData = True
        for point in trackSegPointList:
            if point.find("ns:time", ns) is not None:
                time = dateparser(point.find("ns:time", ns).text)
                time = time.replace(tzinfo=None)
            else:
                foundTimeInTrackData = False
                time = "<no time>"
            trackPoint = {"lat": float(point.attrib["lat"]),
                          "lon": float(point.attrib["lon"]),
                          "ele": float(point.find("ns:ele", ns).text),
                          "time": time}
            trackSegPoints.append(trackPoint)
        trackSegment["foundTimeInTrackData"] = foundTimeInTrackData
        trackSegment["trackPoints"] = trackSegPoints
        trackCollection.append(trackSegment)
    return trackCollection
def __parse(value):
    """
    Parse the string date.

    Supports the subset of ISO8601 used by xsd:time, but is lenient with
    what is accepted, handling most reasonable syntax. Subsecond
    information is rounded to microseconds due to a restriction in the
    python datetime.time implementation.

    @param value: A time string.
    @type value: str
    @return: A time object.
    @rtype: B{datetime}.I{time}
    """
    match_result = _RE_TIME.match(value)
    if match_result is None:
        raise ValueError("date data has invalid format '%s'" % (value,))
    time, round_up = _time_from_match(match_result)
    tzinfo = _tzinfo_from_match(match_result)
    if round_up:
        time = _bump_up_time_by_microsecond(time)
    return time.replace(tzinfo=tzinfo)
def ScrawOnePage(url):
    # crawl the 60 news items on one page
    global RecordFlag, LastRecord, num
    html = requests.get(url, headers=hea)
    html.encoding = 'utf-8'
    selector = etree.HTML(html.text)
    f = open('huanqiunews.txt', 'a+')
    for i in range(60):
        # on some pages the div index 4 must be changed to 3
        title = selector.xpath('/html/body/div[' + str(num) + ']/div/div[3]/ul/li[' + str(i + 1) + ']/h3/a/text()')[0]
        newsurl = selector.xpath('/html/body/div[' + str(num) + ']/div/div[3]/ul/li[' + str(i + 1) + ']/h3/a/@href')[0]
        try:
            abstract = selector.xpath('/html/body/div[' + str(num) + ']/div/div[3]/ul/li[' + str(i + 1) + ']/h5/text()')[0]
        except:
            abstract = ''
        try:
            img = selector.xpath('/html/body/div[' + str(num) + ']/div/div[3]/ul/li[' + str(i + 1) + ']/a/img/@src')[0]
        except:
            img = ''
        time = selector.xpath('/html/body/div[' + str(num) + ']/div/div[3]/ul/li[' + str(i + 1) + ']/h6/text()')[0]
        if time == LastRecord:
            print "scraw complete !"
            os._exit(0)
        if RecordFlag == False:
            # on the first crawl, record the newest timestamp
            SetRecord(time)
            RecordFlag = True
        media, author, text, newsurl2 = ScrawOneNews(newsurl)
        if media == '环球网' or media == "环球时报":
            realurl = newsurl
        else:
            realurl = newsurl2
        content = (time.replace('\n', '') + ' ' + title.replace('\n', '') + ' '
                   + media.replace('\n', '') + ' ' + author.replace('\n', '') + ' '
                   + abstract.replace('\n', '') + ' ' + realurl.replace('\n', '') + ' '
                   + img.replace('\n', '') + '\n' + text + '\n')
        print content
        f.write(content)
    f.close()
def getCheckinTime(self, offset):
    time = datetime.now()
    time += offset
    time = time.replace(hour=randint(self.category.in_start, self.category.in_stop),
                        minute=randint(0, 59),
                        second=randint(0, 59))
    return time
def _get_stop_times_by_vehicle(vehicle, stop_start, stop_end, route, direction,
                               time=arrow.now(), window=45,
                               index='route_direction_vehicle_time'):
    # note: plural units in replace() are legacy arrow behavior; newer arrow
    # releases express the same shift as time.shift(minutes=-window)
    lower_key = [route, direction, vehicle, r.epoch_time(time.replace(minutes=-window).timestamp)]
    upper_key = [route, direction, vehicle, r.epoch_time(time.timestamp)]
    query = r.table('vehicle_stop_times') \
        .between(lower_key, upper_key, index=index) \
        .order_by(index=r.asc(index)) \
        .filter((r.row['stop_id'] == stop_start) | (r.row['stop_id'] == stop_end))
    return list(query.run())
def __init__(self, time):
    time = datetime.strptime(time, "%d %b %Y %H:%M:%S")
    time = time.replace(tzinfo=pytz.timezone("EST"))
    self.time = time
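# Aside on the EST snippets in this collection: pytz.timezone("EST") is a
# fixed UTC-5 zone with no DST, so attaching it via replace() happens to work.
# For region zones with DST, pytz documents localize() instead, e.g.
# pytz.timezone("America/New_York").localize(naive_dt), because replace()
# would attach the zone's default historical offset without DST handling.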
print("{0:02d}.{1:02d}.{2:02d}".format(t.tm_mday, t.tm_mon, t.tm_year)) # Datetime # Aktuelles Datum datum = datetime.date.today() print(str(datum.day) + '.' + str(datum.month) + '.' + str(datum.year)) # Datum manuell erfassen datum2 = datetime.date(2015, 1, 15) print(str(datum2.day) + '.' + str(datum2.month) + '.' + str(datum2.year)) # Monat überschreiben datum3 = datum2.replace(month=5) print(str(datum3.day) + '.' + str(datum3.month) + '.' + str(datum3.year)) time = datetime.time(10, 20, 0) print(time) time2 = time.replace(hour=15) print(time2) zeitstempel1 = datetime.datetime(2015, 1, 15, 10, 20, 0) print(zeitstempel1) zeitstempel2 = datetime.datetime.now() print(zeitstempel2) # Differenz print(zeitstempel2 - zeitstempel1) input('END')
def articleURL(url, site):
    # extract the domain and query parameters
    parsed = urlparse.urlparse(url)
    querys = urlparse.parse_qs(parsed.query)
    querys = {k: v[0] for k, v in querys.items()}
    aa = 0
    for value in querys.values():
        aa = aa + 1
        if aa > 1:
            break
    # request the URL and fetch the page source
    rsp = requests.get(url, headers=headers)
    bs = BeautifulSoup(rsp.content, 'html.parser', from_encoding='utf-8')
    content = str(bs)
    content_html = re.compile(
        '<tr class="\w*" id="form:reFreshData_row_\d">[\s\S]*?.</tr>').findall(content)
    # extract the title
    title = bs.select('title')
    title = title[0]
    title = title.text
    ss = 0
    push_state = 0
    siteid = 1038898
    for i in content_html:
        # extract the publication time
        time = re.compile('发表于:<label>\s*(.*?)\</label>').findall(str(content_html))
        time = time[ss]
        time = time.replace("\\n", "")
        # time = datetime.datetime.strptime(time, "%Y-%m-%d %H:%M:%S")
        # extract the body text
        content = re.compile('<span style="word-break:break-all;">(.*?)</span>').findall(str(content_html))
        content = content[ss]
        md = re.compile('openAuthor\((.*?)\)').findall(str(content_html))
        md = md[ss]
        wy = str(md) + str(time)
        wy = md5(wy)
        ss = ss + 1
        data = []
        data.append(InsertOne({
            "url": url,
            "title": title,
            "aid": value,
            "content": content,
            "site": site,
            "pub_time": time,
            "only_id": wy,
            "push_state": push_state,
            "site_id": siteid
        }))
        try:
            collection.bulk_write(data)
        except:
            import traceback
            continue
    # close the connection
    client.close()
def time_to_nummeric(time):
    return time.replace("-", "").replace(":", "").replace("T", "").replace("Z", "")
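# Usage sketch: time_to_nummeric strips the ISO-8601 punctuation from a
# timestamp string, e.g.
# time_to_nummeric('2018-04-12T00:00:00Z') -> '20180412000000'.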
import time

import Adafruit_ADXL345

# Configure ADXL345
accel = Adafruit_ADXL345.ADXL345()
accel.set_data_rate(Adafruit_ADXL345.ADXL345_DATARATE_100_HZ)

# Build a filesystem-safe timestamp for the output file name
# (note: rebinding `time` shadows the time module from here on)
time = time.ctime()
time = time.replace(" ", "_")
time = time.replace(":", "-")
file_name = "/home/pi/Documents/minion_data/%s_ACC.txt" % time
file = open(file_name, "w+")
file.write("%s\r\n" % time)
file.write("X,Y,Z = +/- 2g\r\n")
print time

while True:
    # Read the X, Y, Z axis acceleration values and log them.
    x, y, z = accel.read()
    file.write('{0},{1},{2}\n'.format(x, y, z))
def year(time):
    return time.replace(year=time.year + 1)
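# Caveat for year() above: datetime.replace(year=...) raises ValueError when
# the source date is Feb 29 and the target year is not a leap year. A tolerant
# variant (a hypothetical helper, sketched here rather than part of the
# original):
def year_safe(time):
    try:
        return time.replace(year=time.year + 1)
    except ValueError:
        # Feb 29 has no counterpart in a non-leap year; fall back to Feb 28
        return time.replace(year=time.year + 1, day=28)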
    dict_conv = []
    for msg in class_conv:
        dict_conv.append(msg.msg)
    return dict_conv


if __name__ == '__main__':
    conversation = []
    print("creating list...")
    for d in range(1, 8):
        # add class times
        if d % 3 == 0:
            add_lesson_time(2019, 6, d, 19, 0)
        for i in range(10):
            time = datetime.datetime.now() + datetime.timedelta(minutes=i)
            # replace() returns a new datetime, so the result must be kept
            time = time.replace(day=d, hour=19)
            conversation.append({
                "meta_data": "bob",
                "date_time": time,
                "message": "haha"
            })
    print("analyzing...")
    for m in conversation:
        print(m)
    my_conv = dict2class_array(conversation)
def _seconds_since(self, time):
    return (datetime.utcnow() - time.replace(tzinfo=None)).total_seconds()
def _parse_timestamp(timestamp):
    """Parse timestamp into local time."""
    time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
    utc_time = time.replace(tzinfo=utc_tz)
    local_time = utc_time.astimezone(local_tz)
    return local_time
data = pd.read_csv('../' + str(sys.argv[1]) + '-data-clean.txt', sep='\t')

# Set times for the recorded games
if str(sys.argv[1]) == 'Heat':
    starttimes = ['2018-03-21 23:30:00', '2018-03-24 00:00:00', '2018-04-03 23:30:00',
                  '2018-04-04 23:30:00', '2018-04-09 23:30:00', '2018-04-12 00:00:00']
elif str(sys.argv[1]) == 'Bulls':
    starttimes = ['2018-03-22 00:00:00', '2018-03-24 00:00:00', '2018-03-24 23:00:00',
                  '2018-03-28 00:00:00', '2018-04-04 00:00:00', '2018-04-08 00:00:00',
                  '2018-04-09 23:30:00', '2018-04-12 00:00:00']
else:
    starttimes = ['2018-03-21 02:30:00', '2018-03-23 00:00:00', '2018-03-25 00:00:00',
                  '2018-03-28 00:00:00', '2018-04-04 00:00:00', '2018-04-06 00:00:00',
                  '2018-04-08 00:30:00', '2018-04-11 02:30:00', '2018-04-12 02:30:00']

# Examine each game in the given team's data
for time in starttimes:
    # Record start time and end time metrics
    a = time.replace('-', ' ').replace(':', ' ').split()
    a = [int(x) for x in a]
    end_time = datetime.datetime(a[0], a[1], a[2], a[3], a[4], a[5])
    if a[3] < 6:
        start_time = datetime.datetime(a[0], a[1], a[2] - 1, (a[3] - 6) + 24, a[4], a[5])
    else:
        start_time = datetime.datetime(a[0], a[1], a[2], a[3] - 6, a[4], a[5])
    minutes = start_time.minute

    # Initialize loop variables
    interval = 5
    times = []
    labels = []
    tot_time = 0
    tot_tweets = []
def __init__(self, time):
    # convert the string to datetime
    time = datetime.strptime(time, "%d %b %Y %H:%M:%S")
    # set the time in EST
    self.time = time.replace(tzinfo=pytz.timezone("EST"))
def getTime(self):
    time = datetime.datetime.utcnow().strftime('(%w), %d [%m] %Y %H:%M:%S +0000')
    time = time.replace('(1)', 'Mon')
    time = time.replace('(2)', 'Tue')
    time = time.replace('(3)', 'Wed')
    time = time.replace('(4)', 'Thu')
    time = time.replace('(5)', 'Fri')
    time = time.replace('(6)', 'Sat')
    time = time.replace('(0)', 'Sun')  # [sic!]
    time = time.replace('[01]', 'Jan')
    time = time.replace('[02]', 'Feb')
    time = time.replace('[03]', 'Mar')
    time = time.replace('[04]', 'Apr')
    time = time.replace('[05]', 'May')
    time = time.replace('[06]', 'Jun')
    time = time.replace('[07]', 'Jul')
    time = time.replace('[08]', 'Aug')
    time = time.replace('[09]', 'Sep')
    time = time.replace('[10]', 'Oct')
    time = time.replace('[11]', 'Nov')
    time = time.replace('[12]', 'Dec')
    return time
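# Example of getTime() above: for 2024-01-01 12:00:00 UTC (a Monday) it yields
# 'Mon, 01 Jan 2024 12:00:00 +0000', i.e. an RFC 2822 date built from the
# locale-independent %w/%m placeholders instead of the locale-dependent %a/%b.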
def aware(self, time):
    time = datetime.strptime(time, "%d %b %Y %H:%M:%S")
    time = time.replace(tzinfo=pytz.timezone("EST"))
    return time
#!/usr/bin/env python3
import shutil
import psutil
import time
import sys
import os

time = time.ctime()
# sansSpaces = time.replace(' ', '_', 4)
# refinedTime = time.replace(':', '', 2)
# user = os.getlogin()
# filename = "C:\\Users\\" + user + "\\Desktop\\" + "BackupTranscript" + "_" + refinedTime + ".txt"
# transcript = open(filename, 'w')


def selectDisk():
    # diskList = []
    # disks = psutil.disk_partitions()
    # options = []
    # print("""
    Disk \t Mount Point \t File System Type \t Percent Used
def parseTime(time):
    return addTimeZone(time.replace("-", "").replace(":", "").replace("T", ""))
ser.write("REL1 = Pin('REL1', Pin.OUT_PP)\r\n") ser.write("REL2 = Pin('REL2', Pin.OUT_PP)\r\n") while 1: data = client_socket.recv(512) if (data == 'Command1\r\n'): #ON_REL ser.write("REL1.high()\r\n") time.sleep(2) ser.write("REL1.low()\r\n") elif (data == 'Command2\r\n'): print "REL_OFF" ser.write("REL1.low()\r\n") elif ('time=' in data): time, frec, grs = data.split(";") time = time.replace('time=', '') if (frec == 'frec=0'): job = cron.new( command='python ~/aquarium_files/Python_Mcmahon/feed.py', comment='E') job.every(24).hours() job.hour.also.on(time) cron.write() elif (frec == 'frec=1'): job = cron.new( command='python ~/aquarium_files/Python_Mcmahon/feed.py', comment='W') job.every(24).hours() job.hour.also.on(time) job.dow.on('FRI', 'SAT', 'SUN') cron.write()
        return float(x)
    except ValueError:
        return -404.0


url_A = 'https://api.thingspeak.com/channels/241665/feeds/last.xml?timezone=Asia/Taipei'
# url_B = 'https://api.thingspeak.com/channels/256131/feeds/last.xml?timezone=Asia/Taipei'
# url_C = 'https://api.thingspeak.com/channels/306218/feeds/last.xml?timezone=Asia/Taipei'
# url_D = 'https://api.thingspeak.com/channels/330089/feeds/last.xml?timezone=Asia/Taipei'
html = requests.get(url_A)
html.encoding = 'utf-8'
sp = BeautifulSoup(html.text, 'html.parser')
tab_66 = sp.find("feed")
time = tab_66.find("created-at").string
# note: strip("+08:00") removes any of the characters '+', '0', '8', ':'
# from both ends of the string, not the literal "+08:00" suffix
time = time.strip("+08:00")
time = time.replace("T", " ")
date = time.replace("-", "/")
nid = tab_66.find("entry-id").string
hum = tab_66.find("field1").string
temp = tab_66.find("field2").string
PM25 = tab_66.find("field3").string
PM10 = tab_66.find("field4").string
cursor.execute("SET NAMES UTF8")
select_sql = "select * from LoRa where `timestamp`= %s and `sensor`='A'"
cursor.execute(select_sql, date)
result = cursor.fetchall()
conn.commit()
a = 'A'
if result == ():
def _incorrectly_formatted_time(self, time):
    return not (re.search("^\d+h([0-5]\d|\d)min$", time.replace(".", ""))
                or time.replace(".", "").isdecimal())
def month(time):
    date = time.replace(day=1)
    date += timedelta(32)
    return date.replace(day=1)
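# Why month() above adds 32 days: from the 1st of any month, +32 days always
# lands inside the following month (no month exceeds 31 days), so the final
# replace(day=1) yields the first day of the next month. For example:
#
#     from datetime import datetime, timedelta
#     month(datetime(2018, 1, 15))  # -> datetime(2018, 2, 1, 0, 0)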
def _from_iso8601_string(self, value):
    """
    Parses an ISO8601:2004 date time string.
    """
    # remove trailing 'Z'
    value = value.replace('Z', '')
    # split between date and time
    try:
        (date, time) = value.split("T")
    except Exception:
        date = value
        time = ""
    # remove all hyphens in date
    date = date.replace('-', '')
    # remove colons in time
    time = time.replace(':', '')
    # guess date pattern
    length_date = len(date)
    if date.count('W') == 1 and length_date == 8:
        # we got a week date: YYYYWwwD
        # remove week indicator 'W'
        date = date.replace('W', '')
        date_pattern = "%Y%W%w"
        year = int(date[0:4])
        # [Www] is the week number prefixed by the letter 'W', from W01
        # through W53.
        # strftime %W == Week number of the year (Monday as the first day
        # of the week) as a decimal number [00,53]. All days in a new year
        # preceding the first Monday are considered to be in week 0.
        week = int(date[4:6]) - 1
        # [D] is the weekday number, from 1 through 7, beginning with
        # Monday and ending with Sunday.
        # strftime %w == Weekday as a decimal number [0(Sunday),6]
        day = int(date[6])
        if day == 7:
            day = 0
        date = "%04d%02d%1d" % (year, week, day)
    elif length_date == 7 and date.isdigit() and value.count('-') != 2:
        # we got an ordinal date: YYYYDDD
        date_pattern = "%Y%j"
    elif length_date == 8 and date.isdigit():
        # we got a calendar date: YYYYMMDD
        date_pattern = "%Y%m%d"
    else:
        raise ValueError("Wrong or incomplete ISO8601:2004 date format")
    # check for time zone information
    # note that the zone designator is the actual offset from UTC and
    # does not include any information on daylight saving time
    if time.count('+') == 1 and '+' in time[-6:]:
        (time, tz) = time.rsplit('+')
        delta = -1
    elif time.count('-') == 1 and '-' in time[-6:]:
        (time, tz) = time.rsplit('-')
        delta = 1
    else:
        delta = 0
    if delta:
        while len(tz) < 3:
            tz += '0'
        delta = delta * (int(tz[0:2]) * 60 * 60 + int(tz[2:]) * 60)
    # split microseconds
    ms = 0
    if '.' in time:
        (time, ms) = time.split(".")
        ms = float('0.' + ms.strip())
    # guess time pattern
    length_time = len(time)
    if length_time == 6 and time.isdigit():
        time_pattern = "%H%M%S"
    elif length_time == 4 and time.isdigit():
        time_pattern = "%H%M"
    elif length_time == 2 and time.isdigit():
        time_pattern = "%H"
    elif length_time == 0:
        time_pattern = ""
    else:
        raise ValueError("Wrong or incomplete ISO8601:2004 time format")
    # parse patterns
    dt = datetime.datetime.strptime(date + 'T' + time,
                                    date_pattern + 'T' + time_pattern)
    # add microseconds and eventually correct time zone
    dt += datetime.timedelta(seconds=float(delta) + ms)
    self._from_datetime(dt)
def format_time(self, time):
    if not time:
        return '··:··'
    time = time.replace('h', ':')
    return time
def get_timestamp_for_filename(time):
    time = time.replace("-", "")
    time = time.replace(":", "")
    return time
rec1.append("Received") rec1.append(timee.replace("]", "")) for lin in dataTransmisor2: if len(lin) > 2: elem = lin.split(" ") fecha = elem[0] time = elem[2] dat = elem[3:5] ## if dat != "\n": for l in dat: if l == "Success\n": c_success_t2 += 1 time_suc_t2.append("Success") time_suc_t2.append(time.replace("]", "")) if l == "01\n": c_error1_t2 += 1 time_suc_t2.append("Error 01") time_suc_t2.append(time.replace("]", "")) if l == "02\n": c_error2_t2 += 1 time_suc_t2.append("Error 02") time_suc_t2.append(time.replace("]", "")) if l == "03\n": c_error3_t2 += 1 time_suc_t2.append("Error 03") time_suc_t2.append(time.replace("]", "")) if l == "Received!\n": rec2.append("Received") rec2.append(time.replace("]", ""))
def __time_to_seconds(self, time):
    """Convert timestamp string of the form 'hh:mm:ss' to seconds."""
    return sum(
        abs(int(x)) * 60**i
        for i, x in enumerate(reversed(time.replace(',', '').split(':')))
    ) * (-1 if time[0] == '-' else 1)
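# Worked example for __time_to_seconds above: '01:02:03' reverses to
# ['03', '02', '01'], giving 3*60**0 + 2*60**1 + 1*60**2 = 3723 seconds;
# a leading '-' flips the sign, so '-00:00:05' -> -5.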
def catch_data(self, page_id):
    """catch one page's data"""
    headers = {
        'User-Agent': self.user_agents[random.randrange(0, len(self.user_agents))]
    }
    resp = requests.get(url=self._url_template.format(self._query_val, self._query_val, page_id),
                        headers=headers)
    card_group = json.loads(resp.text)['data']['cards'][0]['card_group']
    results = 0
    effect_rows = 0
    for card in card_group:
        mblog = card['mblog']
        time = mblog['created_at']
        sendtime = dt.datetime.now()
        delta = dt.timedelta(seconds=0)
        if '小时' in time:  # "N hours ago"
            delta = dt.timedelta(hours=int(time.replace('小时前', '')))
        elif '分钟' in time:  # "N minutes ago"
            delta = dt.timedelta(minutes=int(time.replace('分钟前', '')))
        elif '刚刚' in time:  # "just now"
            pass
        else:
            if len(time) == 5:  # "MM-DD": assume the current year
                sendtime = dt.datetime.strptime("{:%Y-}".format(sendtime) + time, '%Y-%m-%d')
            else:
                sendtime = dt.datetime.strptime(time, '%Y-%m-%d')
        time = "{:%Y-%m-%d %H:%M}:00".format(sendtime - delta)
        result = [
            mblog['id'],                                                # weibo id
            "weibo",                                                    # type
            self.clean_text(mblog['text']).strip('\n').encode("utf8"),  # text
            time,                                                       # time of post
            str(mblog['user']['id']),                                   # userid
            mblog['user']['screen_name'],                               # username
            mblog['reposts_count'],                                     # reposts num
            mblog['comments_count'],                                    # comments num
            mblog['attitudes_count'],                                   # attitudes num
        ]
        results += 1
        # save to mysql
        try:
            sql = ('insert ignore into ' + self._table +
                   ' (mid, type, text, time, userid, username, reposts_count,'
                   ' comments_count, attitudes_count) values (%s,%s,%s,%s,%s,%s,%s,%s,%s);')
            effect_rows += self._db.insert(sql, result)
        except Exception as e:
            print("\nMySQL ERROR: \t", e)
    try:
        self._log.write(
            "{:%Y-%m-%d %H:%M:%S} Success: Catch {:2} data, update {:2} date at page {}.\tURL: {}\n"
            .format(dt.datetime.now(), results, effect_rows, page_id,
                    self._url_template.format(self._query_val, self._query_val, page_id)))
    except Exception as e:
        print("\nLog ERROR: \t", e)
    return effect_rows
def populate_file_info(self, file, reload=False):
    """Replace contents of text widgets with any pre-existing metadata
    in the target BWF file.

    Args:
        file (str): The name of the target file.
        reload (bool): If True, prevent the date warning messagebox and
            block signals, which is what is desired if the file is being
            reloaded after a metadata save.
    """
    import re
    import os.path
    from datetime import datetime

    #
    # Generate and pre-fill defaults
    #
    date_time_created = datetime \
        .fromtimestamp(os.path.getctime(file)) \
        .replace(microsecond=0) \
        .isoformat()
    [date_created, time] = date_time_created.split("T")

    m = re.compile(self.config["filenameRegex"]).match(file)
    if m:
        matches = m.groups()
        identifier = matches[0]
        identifier = identifier.replace("-", ".")
        identifier = identifier.replace("_", "")
        file_use = matches[1]
        date_from_filename = matches[2]
        date_from_filename = (date_from_filename[0:4] + "-" +
                              date_from_filename[4:6] + "-" +
                              date_from_filename[6:])
        if date_created != date_from_filename and not reload:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)
            msg.setText("Filename and timestamp dates disagree")
            msg.setInformativeText("filename: " + date_from_filename +
                                   "\ntimestamp: " + date_created)
            msg.setWindowTitle("Choose date")
            msg.addButton('Use ' + date_from_filename, QMessageBox.NoRole)
            msg.addButton('Use ' + date_created, QMessageBox.YesRole)
            retval = msg.exec_()
            if retval == 1:
                date_from_filename = date_created
        self.set_gui_text("OriginationDate", date_from_filename)
        self.set_gui_text("OriginationTime", time)
        self.set_gui_text(
            "OriginatorReference",
            self.config["repocode"] + " " +
            date_from_filename.replace("-", "") + " " +
            time.replace(":", ""))
        try:
            file_use = self.config["fileuse"][file_use]
        except KeyError:
            QMessageBox.warning(
                self, 'Warning',
                file_use + " does not have a standard translation")
            file_use = "Unknown"
        description = ("File content: " + identifier +
                       "; File use: " + file_use +
                       "; Original filename: " + os.path.basename(file))
        self.set_gui_text("Description", description)
    else:
        QMessageBox.warning(
            self, 'Warning',
            file + " does not follow the file naming convention")
        self.set_gui_text("OriginationDate", date_created)
        self.set_gui_text("OriginationTime", time)
        self.set_gui_text(
            "OriginatorReference",
            self.config["repocode"] + " " +
            date_created.replace("-", "") + " " +
            time.replace(":", ""))
        description = ""

    self.update_coding_history(block=reload)

    #
    # insert existing values
    #
    for field in self.gui_text_widgets.keys():
        if self.original_md[field] != "":
            self.set_text_to_original(field)
        widget = self.gui_text_widgets[field]
        widget_type = type(widget)
        if widget_type is QtWidgets.QComboBox:
            widget.currentTextChanged.connect(
                lambda value, element=field: self.text_changed(element))
        elif widget_type is QtWidgets.QPlainTextEdit:
            widget.textChanged.connect(
                lambda element=field: self.text_changed(element))
        else:
            widget.textEdited.connect(
                lambda value, element=field: self.text_changed(element))

    # check sanity of existing BWF description field
    if self.original_md["Description"] != "" and \
            description != self.original_md["Description"]:
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Warning)
        msg.setText("BWF Description is inconsistent with filename")
        msg.setInformativeText(
            "Field will be regenerated. "
            "The original can be restored using the drop-down menu "
            "next to the text box.")
        msg.exec_()
        self.set_gui_text("Description", description, block=False)

    if self.original_md["MD5Stored"] != "":
        self.md5Check.setEnabled(False)
def generate_time(time):
    minutes = int(numpy.random.uniform(0, 60, 1)[0])
    seconds = int(numpy.random.uniform(0, 60, 1)[0])
    time = time.replace(minute=minutes, second=seconds)
    return time
def pretty_dt_timestamp(time=False, format=None):
    # note: the original imported only datetime but also references `date`
    # below, so `date` is added to the import
    from datetime import datetime, date
    import pytz

    def get_format():
        if format != None:
            return format
        formats = [
            '%Y-%m-%d %H:%M:%S.%f %z',
            '%Y-%m-%d %H:%M:%S.%f',
            '%Y-%m-%d %H:%M:%S',
            '%Y-%m-%d',
            '%Y/%m/%d'
        ]
        for i in formats:
            try:
                t = time if type(time) == str else str(time.strftime(i))
                d = datetime.strptime(t, i)
                return i
            except Exception as e:
                continue
        return None

    now = datetime.utcnow().replace(tzinfo=pytz.utc)  # datetime.now()
    if type(time) is int or type(time) is float or type(time) is str:
        if type(time) is int or type(time) is float:
            time = datetime.fromtimestamp(time)
        else:
            format = get_format()
            if format == None:
                return None
            time = datetime.strptime(time, format)
        time = time.replace(tzinfo=pytz.utc)
        diff = now - time
    elif isinstance(time, datetime) or isinstance(time, date):
        format = get_format()
        if format == None:
            return None
        time = str(time.strftime(format))
        time = datetime.strptime(time, format)
        time = time.replace(tzinfo=pytz.utc)
        diff = now - time
    elif not time:
        diff = now - now
    else:
        return None

    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0:
        return ''

    def str_t(diff, t, name):
        def sing_plu(val, t_n):
            if int(val) == 1:
                return '{0} {1} ago'.format(val, t_n)
            else:
                return '{0} {1}s ago'.format(val, t_n)
        return sing_plu(round(diff / t), name)

    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str_t(diff=second_diff, t=1, name='second')
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            return str_t(diff=second_diff, t=60, name='minute')
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str_t(diff=second_diff, t=3600, name='hour')
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str_t(diff=day_diff, t=1, name='day')
    if day_diff < 31:
        return str_t(diff=day_diff, t=7, name='week')
    if day_diff < 365:
        return str_t(diff=day_diff, t=30, name='month')
    else:
        return str_t(diff=day_diff, t=365, name='year')
def tzconv(userID, tstr, dstr):
    '''
    Grabs a timestring in HH:MM format, and converts it into
    next Sunday + timezones
    '''
    # check if time is within bounds
    if int(tstr.split(':')[0]) < 0 or int(tstr.split(':')[0]) > 23 \
            or int(tstr.split(':')[1]) < 0 or int(tstr.split(':')[1]) > 59:
        estr = "Please provide a valid time"
        sendMessage(estr)
        return
    # check if day is within bounds
    if (dstr != '1'):
        if int(dstr.split('-')[0]) < 0 or int(dstr.split('-')[0]) > 9999 \
                or int(dstr.split('-')[1]) < 1 or int(dstr.split('-')[1]) > 12 \
                or int(dstr.split('-')[2]) < 1 or int(dstr.split('-')[2]) > 31:
            estr = "Please provide a valid date"
            sendMessage(estr)
            return
        if int(dstr.split('-')[1]) == 2:
            if calendar.isleap(int(dstr.split('-')[0])):
                if int(dstr.split('-')[2]) > 29:
                    estr = "Please provide a valid date"
                    sendMessage(estr)
                    return
            else:
                if int(dstr.split('-')[2]) > 28:
                    estr = "Please provide a valid date"
                    sendMessage(estr)
                    return
    db = sqlite3.connect("data/db.db")
    cursor = db.cursor()
    cursor.execute('''SELECT userID from user where userID=?''', [userID])
    d = cursor.fetchone()
    if d == None:
        estr = "You must register first!"
        sendMessage(estr)
        return
    # convert time into all other registered timezones
    time = pytz.datetime.datetime.strptime(tstr, "%H:%M")
    nowtime = pytz.datetime.datetime.now()
    time = time.replace(year=nowtime.year, month=nowtime.month, day=nowtime.day)
    if (dstr == '1'):
        if (time.weekday() != 6):
            # send to the next Sunday if not already a Sunday
            time = time.replace(day=time.day + (6 - time.weekday()))
    else:
        nowtime = pytz.datetime.datetime.strptime(dstr, "%Y-%m-%d")
    cursor.execute('''SELECT tz from user WHERE userID=? ''', [userID])
    tzdata = cursor.fetchone()[0]
    cursor.execute('''SELECT DISTINCT tz from user''')
    d = cursor.fetchall()
    t1 = pytz.timezone(pytz.country_timezones(tzdata)[0])
    nowtime = t1.normalize(t1.localize(nowtime))
    time = t1.normalize(t1.localize(time))
    sendstr = ''
    for x in range(0, len(d)):
        curtobj = time.astimezone(pytz.timezone((pytz.country_timezones(d[x][0]))[0]))
        sendstr = sendstr + str(curtobj.year) + '-' + str(curtobj.month) + '-' + str(curtobj.day) + ' ' \
            + ('0' + str(curtobj.hour) if curtobj.hour < 10 else str(curtobj.hour)) + ':' \
            + ('0' + str(curtobj.minute) if curtobj.minute < 10 else str(curtobj.minute)) \
            + ' in ' + str(curtobj.tzinfo)
        sendstr = sendstr + '\n'
    print(sendstr)
    sendMessage(sendstr)
def cleanTimeData(time):
    newTime = time.replace('T', ' ')
    newTime = newTime.replace('Z', '')
    return newTime