def quiz(raw_data):
    quiz_questions = []
    for quest in raw_data.input_data:
        quest.pop(-1)
        number = quest[0]
        q = quest[1]
        options = quest[2:6]
        correct = quest[6]
        marks = tuple(quest[7:9])
        comp = quest[-1]
        question = Question(number, q, options, correct, marks, comp)
        quiz_questions.append(question)
    time = raw_data.headers[-1]
    time = time.split("=")[-1]
    time = time.strip()
    unit = time[-1]
    time = time.strip(unit)
    time = int(time)
    if unit == 'h':
        multiplier = 3600
    elif unit == 'm':
        multiplier = 60
    elif unit == 's':
        multiplier = 1
    return quiz_questions, time, multiplier
def get_music_info(link):
    html = requests.get(link, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    names = soup.select('#wrapper > h1 > span')
    authors = soup.select('#info > span > span > a')
    styles = re.findall('<span class="pl">流派:</span> (.*?)<br />', html.text, re.S)
    times = re.findall('<span class="pl">发行时间:</span> (.*?)<br />', html.text, re.S)
    publishers = re.findall('<span class="pl">出版者:</span> (.*?)<br />', html.text, re.S)
    scores = soup.select('#interest_sectl > div > div.rating_self.clearfix > strong')
    for (name, author, style, time, publisher, score) in zip(names, authors, styles, times, publishers, scores):
        if len(style) == 0:
            style = '未知'
        else:
            style = style.strip()
        print(name.get_text(), author.get_text(), style, time.strip(),
              publisher.strip(), score.get_text())
        info = {
            'name': name.get_text(),
            'author': author.get_text(),
            'style': style,
            'time': time.strip(),
            'publisher': publisher.strip(),
            'score': score.get_text()
        }
        musictop.insert_one(info)
def get_ceilometer_data_rate(request, metric, resource_id, time):
    d = datetime.datetime.utcnow()
    threehours = datetime.timedelta(hours=3)
    oneday = datetime.timedelta(days=1)  # + datetime.timedelta(hours=8)
    oneweek = 7 * oneday  # + datetime.timedelta(hours=8)
    onemonth = 30 * oneday  # + datetime.timedelta(hours=8)
    if time.strip() == '3h':
        timestamp = d - threehours
    elif time.strip() == '1d':
        timestamp = d - oneday
    elif time.strip() == '7d':
        timestamp = d - oneweek
    elif time.strip() == '30d':
        timestamp = d - onemonth
    #print 'XXXXXXXXXXXX %s %s %s' % (metric,resource_id,time)
    #print 'HHHHHHHHHHHHHHHHH %s' % resource_id
    query = [dict(field='resource_id', op='eq', value=resource_id),
             dict(field='timestamp', op='gt', value=timestamp)]
    results = api.ceilometer.sample_list(request, meter_name=metric, query=query)
    data0 = ''
    data1 = ''
    for result in results:
        data0 = (data0 + '%s' + ',') % result.timestamp
        data1 = (data1 + '%s' + ',') % result.counter_volume
    data = data0 + ';' + data1
    return HttpResponse(data, mimetype='application/javascript')
def get_data(html):
    html = BeautifulSoup(html, 'html.parser')
    data = []
    for item in html.findAll('div', class_='el-card__body'):
        try:
            movie = []
            name = item.select('a.router-link-exact-active.router-link-active > h2')[0].text
            chinese_name = name.split(' - ')[0]
            english_name = name.split(' - ')[1]
            type = [i.text.strip() for i in item.select('div.categories > button > span')]
            country = item.select('div.el-col-md-16 > div.m-v-sm.info:nth-child(3) > span')[0].text
            time = item.select('div.el-col-md-16 > div.m-v-sm.info:nth-child(3) > span')[2].text
            published = item.select('div.el-col-md-16 > div.m-v-sm.info:nth-child(4) > span')[0]
            published = published.text if len(published) == 1 else None
            score = item.select('p.score.m-t-md.m-b-n-sm')[0].text
            movie.append(chinese_name)
            movie.append(english_name)
            movie.append('%s' % ','.join(type))
            movie.append(country.strip())
            movie.append(time.strip())
            # guard against published being None, which would raise AttributeError on strip()
            movie.append(published.strip() if published else '')
            movie.append(score.strip())
            data.append(movie)
        except IndexError:
            continue
    return data
async def poll(self, ctx, *, msg):
    """Create a poll using reactions. [p]help poll for more information.

    [p]poll <question> | <answer> | <answer> - Create a poll. You may use as many
    answers as you want, placing a pipe | symbol in between them.
    Example:
    [p]poll What is your favorite anime? | Steins;Gate | Naruto | Attack on Titan | Shrek

    You can also use the "time" flag to set the amount of time in seconds the poll will last for.
    Example:
    [p]poll What time is it? | HAMMER TIME! | SHOWTIME! | time=10
    """
    await ctx.message.delete()
    options = msg.split(" | ")
    time = [x for x in options if x.startswith("time=")]
    if time:
        time = time[0]
    if time:
        options.remove(time)
    if len(options) <= 1:
        return await ctx.send(self.bot.bot_prefix + "You must have 2 options or more.")
    if len(options) >= 11:
        return await ctx.send(self.bot.bot_prefix + "You must have 9 options or less.")
    if time:
        time = int(time.strip("time="))
    else:
        time = 30
    emoji = ['1⃣', '2⃣', '3⃣', '4⃣', '5⃣', '6⃣', '7⃣', '8⃣', '9⃣']
    to_react = []
    confirmation_msg = "**{}?**:\n\n".format(options[0].rstrip("?"))
    for idx, option in enumerate(options[1:]):
        confirmation_msg += "{} - {}\n".format(emoji[idx], option)
        to_react.append(emoji[idx])
    confirmation_msg += "\n\nYou have {} seconds to vote!".format(time)
    poll_msg = await ctx.send(confirmation_msg)
    for emote in to_react:
        await poll_msg.add_reaction(emote)
    await asyncio.sleep(time)
    async for message in ctx.message.channel.history():
        if message.id == poll_msg.id:
            poll_msg = message
    results = {}
    for reaction in poll_msg.reactions:
        if reaction.emoji in to_react:
            results[reaction.emoji] = reaction.count - 1
    end_msg = "The poll is over. The results:\n\n"
    for result in results:
        end_msg += "{} {} - {} votes\n".format(
            result, options[emoji.index(result) + 1], results[result])
    top_result = max(results, key=lambda key: results[key])
    if len([x for x in results if results[x] == results[top_result]]) > 1:
        top_results = []
        for key, value in results.items():
            if value == results[top_result]:
                top_results.append(options[emoji.index(key) + 1])
        end_msg += "\nThe victory is tied between: {}".format(", ".join(top_results))
    else:
        top_result = options[emoji.index(top_result) + 1]
        end_msg += "\n{} is the winner!".format(top_result)
    await ctx.send(end_msg)
def query_ARA(self, slack_data):
    ''' Query ARA database for information to report '''
    if slack_data[0]["text"].split()[2] == "status":
        conn = sqlite3.connect(self.sqlite_db)
        c = conn.cursor()
        response = '''```Playbook: | Start Time(UTC): | Status:
'''
        # TODO: allow this to be configurable but protect against injection
        for row in c.execute(
                "select path, time_start, complete from playbooks order by time_start desc LIMIT 5"):
            print(row[0].split('/')[-1], type(row[0].split('/')[-1]))
            print(row[1], type(row[1]))
            print(row[2], type(row[2]))
            print()
            playbook = row[0].split('/')[-1]
            time = row[1]
            if row[2]:
                completed = "Complete"
            else:
                completed = "Incomplete"
            response += "{} | {} | {} \n".format(playbook.strip(), time.strip(), str(completed).strip())
        response += "```"
        if self.verbose:
            self.debug_log.debug(response)
        self.respond(response, slack_data[0]["channel"])
async def timeto(self, ctx, tz: str, *, time: str):
    """Compute the time remaining until the [timezone] [time]"""
    try:
        tz_obj = tzStrToObj(tz)
    except Exception as e:
        await ctx.send("Failed to parse tz: " + tz)
        return
    try:
        time_obj = timeStrToObj(time)
    except Exception as e:
        print(e)
        await ctx.send("Failed to parse time: " + time)
        return
    now = datetime.datetime.now(tz_obj)
    req_time = now.replace(hour=time_obj.tm_hour, minute=time_obj.tm_min)
    if req_time < now:
        req_time = req_time + timedelta(days=1)
    delta = req_time - now
    msg = "There are " + fmtHrsMins(delta.seconds).strip() + \
          " until " + time.strip() + " in " + now.strftime('%Z')
    await ctx.send(inline(msg))
def store2DBForMain(self, imgList, baseUrl):
    # save to sqlite
    self.conn = sqlite3.connect(Constants.DB_PATH)
    self.cursor = self.conn.cursor()
    self.cursor.execute('create table if not exists meitiMain (id integer primary key autoincrement, '
                        'title text, url text, time datetime, num text, status int)')
    title = ''
    for soupItem in imgList:
        # grab the title first
        try:
            # <img src="http://img.mmjpg.com/small/2017/1086.jpg" width="220" height="330" alt="磨人的小妖精温心怡美臀让人垂涎欲滴" />
            spans = soupItem.findAll('span')
            url = spans[0].find('a')["href"]
            title = spans[0].find('a').string
            time = spans[1].string
            numStr = spans[2].string
            print(url + ',' + title + ',' + time + ',' + numStr)
            self.cursor.execute(
                "insert into meitiMain (title, url, time, num, status) values (\'%s\',\'%s\', \'%s\',\'%s\', 0);"
                % (title.strip(), url.strip(), time.strip(), numStr.strip()))
        except Exception as err:
            self.saveDbFailure(url=baseUrl, title=title)
            print(err)
        finally:
            pass
    # close the cursor:
    self.cursor.close()
    # commit the transaction:
    self.conn.commit()
    # close the connection:
    self.conn.close()
def getHr(time):
    time = str(time)
    time = time.strip()
    time = time.split(':')
    # time_sec = time[1]
    time = time[0]
    return time
def store2DBForMain(self, imgList):
    # save to sqlite
    # connect to the SQLite database;
    # if the file does not exist it is created automatically in the current directory:
    self.conn = sqlite3.connect('res/douban/meizitu.db')
    # create a Cursor:
    self.cursor = self.conn.cursor()
    # execute a SQL statement to create the table: 91电影
    self.cursor.execute(
        'create table if not exists meituUrl (id integer primary key autoincrement, '
        'title text, url text, time datetime, num text)')
    for soupItem in imgList:
        # grab the title first
        try:
            # <img src="http://img.mmjpg.com/small/2017/1086.jpg" width="220" height="330" alt="磨人的小妖精温心怡美臀让人垂涎欲滴" />
            spans = soupItem.findAll('span')
            url = spans[0].find('a')["href"]
            title = spans[0].find('a').string
            time = spans[1].string
            numStr = spans[2].string
            print(url + ',' + title + ',' + time + ',' + numStr)
            self.cursor.execute(
                "insert into meituUrl (title, url, time, num) values (\'%s\',\'%s\', \'%s\',\'%s\');"
                % (title.strip(), url.strip(), time.strip(), numStr.strip()))
        except Exception as err:
            print(err)
        finally:
            pass
    # close the cursor:
    self.cursor.close()
    # commit the transaction:
    self.conn.commit()
    # close the connection:
    self.conn.close()
def parseTime(time):
    ret = 0
    if isinstance(time, str):
        aux = time.strip()
        if aux.find('d') >= 0:
            index = aux.find('d')
            ret += 24 * 60 * 60 * int(aux[0:index])
            if index + 1 < len(aux):
                ret += parseTime(aux[index + 1:])
        elif aux.find('h') >= 0:
            index = aux.find('h')
            ret += 60 * 60 * int(aux[0:index])
            if index + 1 < len(aux):
                ret += parseTime(aux[index + 1:])
        elif aux.find('m') >= 0:
            index = aux.find('m')
            ret += 60 * int(aux[0:index])
            if index + 1 < len(aux):
                ret += parseTime(aux[index + 1:])
        elif aux.find('s') >= 0:
            index = aux.find('s')
            ret += int(aux[0:index])
            if index + 1 < len(aux):
                ret += parseTime(aux[index + 1:])
        else:
            ret += int(aux)
    else:
        ret = time
    return ret
def time_check():
    break_line()
    print "OK! What time do you want to ride a Citi Bike?"
    print "We will search over a given hour, in military time."
    print "e.g. Type '0' for Midnight - 1AM, '10' for 10AM - 11AM, or '14' for 2PM - 3PM"
    status = 0
    while status == 0:
        time = raw_input("Time:\n")
        time = time.strip()
        try:
            time = int(time)
            if time in short_times:
                status = 1
                time = short_times.index(time)
                return time
            else:
                break_line()
                print "\nOops! Didn't understand that."
                print "Please type the hour you're interested in, in military time."
                break_line()
                status = 0
        except:
            print "\nOops! Didn't understand that."
            print "Please type the hour you're interested in, in military time."
            break_line()
            status = 0
def getTime_w_o_sec(time):
    time = str(time)
    time = time.strip()
    time = time.split(':')
    time_sec = time[1]
    time = time[0]
    return time + ':' + time_sec
def formatTimeStr(time):
    time = time.strip()
    try:
        # time example: 2017-08-23 06:30
        if time[4] == '-':
            time = time.replace(' ', '-') \
                       .replace(':', '-') + '-00'
        # e.g. 2011年...
        if time[4] == '年':
            # e.g. 2019年06月21日 17:38
            if time[11] == ' ':
                time = time.replace('年', '-') \
                           .replace('月', '-') \
                           .replace('日 ', '-') \
                           .replace(':', '-') + '-00'
            # e.g. 2011年07月12日10:33
            else:
                time = time.replace('年', '-') \
                           .replace('月', '-') \
                           .replace('日', '-') \
                           .replace(':', '-') + '-00'
        return time
    except IndexError:
        return None
def read_file():
    state = STATE_NAME
    list = []
    dict = {}
    for line in open('timediff.txt', 'r'):
        line = line.replace("\n", "")
        if (line == "\n" or line == ""):
            continue  # skip empty line
        elif (line.startswith(":")):
            continue  # skip reactions
        elif (line == "1"):
            continue  # skip Add reaction
        if (line.startswith(ANSWER_PREFIX)):
            list.append({"name": ANSWER_KEY, "time": parse_answer(line)})
        elif (state == STATE_NAME):
            name = line.split(" ")[0]
            dict["name"] = name
            state = STATE_TIME
        elif (state == STATE_TIME):
            time = line.replace("(編集済み)", "")  # drop the "(edited)" marker
            time = time.strip()
            dict["time"] = time
            state = STATE_NAME
            list.append(dict)
            dict = {}
    return list
def parseTime(time):
    try:
        period = time.strip().split("-")
        timeFrom = period[0].strip()
        timeTo = period[1].strip()
        return timeFrom, timeTo
    except:
        return "", ""
def record_file_to_dict(file_name):
    ret_dict = {}
    # open for reading; 'w+' would truncate the file before it could be read
    f = open(file_name, 'r')
    for line in f.readlines():
        app, time = line.split(';')
        ret_dict[app] = time.strip()
    f.close()
    return ret_dict
def notify(self, type, name, value, status, reason=" ", time=" "):
    if not self.ready:
        return
    if reason == "":
        reason = " "
    notificationTime = str(datetime.now())[:-3]
    csp = self._getCSP()
    # Multiple notifications
    if ITEM_SEP in status:
        statusList = status.split(ITEM_SEP)
        sCount = len(filter(lambda x: x == NOTIF_STATUS_OK, statusList))
        # Ensure the list of comments is correct
        if reason.strip(ITEM_SEP).strip() == "":
            rList = [" "] * len(statusList)
            reason = ITEM_SEP.join(rList)
        # Ensure the list of times is correct
        if time.strip(ITEM_SEP).strip() == "":
            tList = [notificationTime] * len(statusList)
            time = ITEM_SEP.join(tList)
    else:
        if status == NOTIF_STATUS_OK:
            sCount = 1
        else:
            sCount = 0
    if self.__itemNotifyMsg is None:
        self.__itemNotifyMsg = MessageClass()
        self.__itemNotifyMsg.setType(MSG_TYPE_NOTIFY)
        self.__itemNotifyMsg.setId(MSG_TYPE_NOTIFY)
        self.__itemNotifyMsg[ExcMessages.FIELD_PROC_ID] = self._getProcId()
        self.__itemNotifyMsg[FIELD_DATA_TYPE] = ExcMessages.DATA_TYPE_ITEM
    self.__itemNotifyMsg[NOTIF_ITEM_TYPE] = type
    self.__itemNotifyMsg[NOTIF_ITEM_NAME] = name
    self.__itemNotifyMsg[NOTIF_ITEM_VALUE] = value
    self.__itemNotifyMsg[NOTIF_ITEM_STATUS] = status
    self.__itemNotifyMsg[NOTIF_ITEM_REASON] = reason
    self.__itemNotifyMsg[NOTIF_ITEM_TIME] = time
    self.__itemNotifyMsg[FIELD_SCOUNT] = sCount
    self.__itemNotifyMsg[ExcMessages.FIELD_CSP] = csp
    self.__itemNotifyMsg[FIELD_TIME] = notificationTime
    self._toAsRun(notificationTime, "ITEM", csp, type, name, value, status, reason, time)
    if self.__manualMode:
        self.__itemNotifyMsg[ExcMessages.FIELD_EXECUTION_MODE] = ExcMessages.DATA_EXEC_MODE_MANUAL
    else:
        self.__itemNotifyMsg[ExcMessages.FIELD_EXECUTION_MODE] = ExcMessages.DATA_EXEC_MODE_PROCEDURE
    LOG("Notify " + type, level=LOG_COMM)
    self._sendNotificationMessage(self.__itemNotifyMsg)
def print_info(index_rows):
    '''Print index information given a list of named tuples containing:
    (0:network, 1:station, 2:location, 3:channel, 4:quality, 5:starttime, 6:endtime,
     7:samplerate, 8:filename, 9:byteoffset, 10:bytes, 11:hash, 12:timeindex,
     13:timespans, 14:timerates, 15:format, 16:filemodtime, 17:updated, 18:scanned)
    '''
    for NRow in index_rows:
        print("{0}:".format(NRow.filename))
        print("  {0}.{1}.{2}.{3}.{4}, samplerate: {5}, timerange: {6} - {7}".
              format(NRow.network, NRow.station, NRow.location, NRow.channel,
                     NRow.quality, NRow.samplerate, NRow.starttime, NRow.endtime))
        print("  byteoffset: {0}, bytes: {1}, endoffset: {2}, hash: {3}".format(
            NRow.byteoffset, NRow.bytes, NRow.byteoffset + NRow.bytes, NRow.hash))
        print("  filemodtime: {0}, updated: {1}, scanned: {2}".format(
            NRow.filemodtime, NRow.updated, NRow.scanned))
        print("Time index: (time => byteoffset)")
        for index in NRow.timeindex.split(','):
            (time, offset) = index.split('=>')
            # Convert epoch times to nicer format
            if re.match(r"^[+-]?\d+(?:\.\d+)?$", time.strip()):
                time = datetime.datetime.utcfromtimestamp(float(time)).isoformat(' ')
            print("  {0} => {1}".format(time, offset))
        if NRow.timespans:
            # If per-span sample rates are present create a list of them
            rates = None
            if NRow.timerates:
                rates = NRow.timerates.split(',')
            # Print time spans either with or without per-span rates
            print("Time spans:")
            for idx, span in enumerate(NRow.timespans.split(',')):
                (start, end) = span.lstrip('[').rstrip(']').split(':')
                if rates:
                    print("  {0} - {1} ({2})".format(
                        datetime.datetime.utcfromtimestamp(float(start)).isoformat(' '),
                        datetime.datetime.utcfromtimestamp(float(end)).isoformat(' '),
                        rates[idx]))
                else:
                    print("  {0} - {1}".format(
                        datetime.datetime.utcfromtimestamp(float(start)).isoformat(' '),
                        datetime.datetime.utcfromtimestamp(float(end)).isoformat(' ')))
    return
def get_feed_time(file_path):
    # extract the last-updated time from the time log file
    exist_log = os.path.exists(file_path)
    if not exist_log:
        return None
    with open(file_path) as log_file:
        for line in log_file:
            time = line
        return time.strip()
def get_date_and_time(table):
    # Pull the combined date/time string out of the first table row.
    date_time = table.xpath('tr')[0].text_content()
    date_time = date_time.strip()
    date_time = date_time.split('\t')[-1]
    # Split it into the date and the time.
    date, time = date_time.split(',')
    time = time.strip()
    return (date, time)
def peak_hour():
    """Decide which part of the day we are in."""
    now = datetime.datetime.now()
    time = now.strftime('%Y-%m-%d %H:%M:%S')
    hour = time.strip().split(' ')[1].split(':')[0]
    # print(int(hour))
    if int(hour) < 8:
        return False
    else:
        return True
def GetDataFromSina(self):
    url = "http://hq.sinajs.cn/list=" + self.code
    s = urllib.urlopen(url).read()
    print GetNowTime() + ' ' + self.code + '[OK] read data from sina......'
    s = s.split(',')
    len_s = len(s)
    date = s[len_s - 3]
    date = date.strip()  # strip() returns a new string, so assign the result
    time = s[len_s - 2]
    time = time.strip()
    price = float(s[3])  # close
    money = float(s[9]) / 10000
    len_LD = len(self.LD)
    if money == 0:
        return
    if len_LD == 0:  # no data at all yet
        self.LD.append([date, time, price, money, 0.0, 0.0, 0.0])
        self.Append2DataFile()
    else:
        self.Price_now = price
        if (time == self.LD[len_LD - 1][1]):
            # if the same time, then drop the data
            print GetNowTime() + ' ' + self.code + '[warning] get the same time data......'
            return
        elif (date == self.LD[len_LD - 1][0]):  # same day
            kmoney = money - self.LD[len_LD - 1][3]
            if kmoney == 0:
                return
            self.Price_pre = self.LD[len_LD - 1][2]
            self.LD.append([date, time, price, money, 0.0, 0.0, kmoney])
            self.Append2DataFile()
        else:  # new day
            self.Price_pre = self.LD[len_LD - 1][2]
            self.LD.append([date, time, price, money, 0.0, 0.0, money])
            self.Append2DataFile()
        if price / self.Price_pre > 1.12 or price / self.Price_pre < 0.88:
            self.is_dataValid = False
def timezone():
    conf = open("default.conf")
    for time in conf.readlines():
        if (time.find('zonename') != -1):
            if (time[0] == '#'):
                continue
            cmd = "./config_image.sh --timezone %s" % (time.strip())
            os.system(cmd)
            break
    conf.close()
def get_data_from_a_site(link):
    data = []
    highest_number = []
    running = True
    while running:
        try:
            page = requests.get(link)
            running = False
        except requests.exceptions.ConnectionError:
            time.sleep(3)
    if page.status_code == 200:
        soup = BeautifulSoup(page.content, "html.parser")
        soup = soup.find("div", class_="EventsToday")
        soup = soup.find("div", class_="Gut")
        date = ""
        time = ""
        for i in soup:
            if i.name == "h5":  # day header
                date = i.get_text()
            if i.name == "dl":  # container for events in a given day
                for j in i:  # iterates over its children
                    if j.name == "dt":  # hour
                        time = j.get_text()
                    if j.name == "dd":  # container for events in a given hour
                        links = j.find_all("a")
                        for link in links:
                            my_link = "http://www.kulturalna.warszawa.pl/%s" % link["href"]
                            picture_link = get_image_from_a_site(my_link)
                            if not (picture_link is None):
                                thumbnail_link = picture_link.split(".")
                                thumbnail_link[-2] += "_w180"
                                thumbnail_link = ".".join(thumbnail_link)
                            else:
                                thumbnail_link = "none"
                            data += [{"event": link.get_text().strip(),
                                      "link": my_link,
                                      "date": date.strip(),
                                      "time": time.strip(),
                                      "picture": picture_link,
                                      "thumbnail": thumbnail_link}]
        # this code finds out the number of pages:
        pager = soup.find("div", class_="Pager").find_all("a")
        for i in pager:
            try:
                highest_number += [int(i.get_text())]
            except ValueError:
                pass
        highest_number = sorted(highest_number)
        highest_number = highest_number[-1]
    pprint(data)
    return (data, highest_number)
def abbr_to_normal(time):
    time = time.strip()
    """
    I don't know this issue.
    12:24:35AM -> 0:24:35
    12:24:35PM -> 12:24:35
    """
    if time.find("AM") >= 0:
        time = time.strip("AM")
        if time == "12:00":
            time = "00:00"
        time = time.split(':')
        hh = int(time[0])
        mm = int(time[1])
        ss = int(0)
        if hh == 12 and (mm > 0 or ss > 0):
            hh = 0
        today = date.today()
        timestr = ("%04d-%02d-%02d %02d:%02d:%02d" %
                   (today.year, today.month, today.day, hh, mm, ss))
        dt = datetime.strptime(timestr, "%Y-%m-%d %H:%M:%S")
    else:
        time = time.strip("PM")
        time = time.split(':')
        hh = int(time[0])
        mm = int(time[1])
        ss = int(0)
        if hh != 12:
            hh = hh + 12
        today = date.today()
        timestr = ("%04d-%02d-%02d %02d:%02d:%02d" %
                   (today.year, today.month, today.day, hh, mm, ss))
        dt = datetime.strptime(timestr, "%Y-%m-%d %H:%M:%S")
    return dt
def getbilibili_vedioinf(keyword_list):
    try:
        for mid in keyword_list:
            page = 1
            while True:
                url_vlist = "https://search.bilibili.com/all?keyword=%s&page=%s" % (mid, page)
                # print url_vlist
                response = requests.get(url_vlist)
                html_doc = response.text
                soup = BeautifulSoup(html_doc, 'lxml')
                # print(html_doc)
                divs = soup.find_all('li', attrs={"class": "video matrix"})
                if divs:
                    for div in divs:
                        pic = div.find("img").get('src')
                        length = div.find("span", attrs={"class": "so-imgTag_rb"}).get_text()
                        avid = div.find(class_="type avid").get_text()
                        type = div.find(class_="type hide").get_text()
                        info = div.find("a", attrs={"class": "title"})
                        title = info.get('title')
                        url = info.get('href')
                        des = div.find("div", attrs={"class": "des hide"}).get_text()
                        tags = div.find("div", attrs={"class": "tags"})
                        time = tags.find("span", attrs={"class": "so-icon time"}).get_text()
                        author = tags.find("a", attrs={"class": "up-name"}).get_text()
                        data = dict(aid=avid[2:],
                                    url=url,
                                    title=title,
                                    description=des.strip(),
                                    type=type.strip(),
                                    author=author,
                                    created=time.strip(),
                                    length=length.strip(),
                                    pic=pic.strip())
                        insert(conn, data)
                    page = page + 1
                else:
                    break
    except:
        print('not found')
def get_epg_info(self, html):
    programList = []
    # one day epg
    soup = BeautifulSoup(html, "lxml")
    date = self.get_date(soup)
    if not date:
        return []
    print date
    for li in soup.find("ul", id="pgrow").children:
        if isinstance(li, NavigableString):
            continue
        program = {}
        time = ""
        programName = ""
        desc = ""
        s = 0  # only the first link as program name
        if li.a:
            url = self.baseurl + li.a["href"]
            if li.div:
                try:
                    for d in li.div.p.children:
                        if d.name != "a":
                            desc += d.string
                except:
                    desc = ""
            else:
                desc = self.get_description(url)
        for info in li.children:
            if isinstance(info, NavigableString):
                programName = programName + info.string
            elif info.name == "span":
                time = info.string
                time = date + " " + time.strip() + ":00"
            elif info.name == "a" and s == 0:
                programName = programName + info.string
                s = 1
            elif info.name == None:
                programName = programName + info.string
        if time == "":
            continue
        program["name"] = programName.strip()
        program["starttime"] = time
        program["desc"] = desc
        programList.append(program)
    if len(programList) == 0:
        return []
    for i in range(len(programList) - 1):
        programList[i]["endtime"] = programList[i + 1]["starttime"]
    programList[len(programList) - 1]["endtime"] = ""
    return programList
def parse_time(time):
    time_str = time.replace("(編集済み)", "")  # drop the "(edited)" marker
    time_str = time_str.strip()
    # convert the hours/minutes/seconds kanji into a colon-separated time
    time_str = time_str.replace("時間", ":").replace("分", ":").replace("秒", "")
    time_str = time_str.replace(" ", "")
    if (time_str.count(":") == 1):
        time_str = "00:" + time_str
    # dt = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')
    time = parse(time_str)
    return time
def parse_one_page(self):
    # p = re.compile('<a href.*?title="(.*?)".*?<p class="star">(.*?)</p>.*?"releasetime">(.*?)</p>$',re.S)
    p = re.compile(
        '<div class="movie-item-info">.*?title="(.*?)".*?<p class="star">(.*?)</p>.*?releasetime">(.*?)</p>',
        re.S)
    html = self.get_one_page()
    results = p.findall(html)
    for name, actors, time in results:
        yield {
            'name': name,
            'actors': actors.strip()[3:],
            '上映时间': time.strip()[5:]  # key means "release date"
        }
def __capture_times(self, time, results):
    if time:
        time = time.strip()
        # the named group must be spelled (?P<test_time>...) for re to recognize it
        test_time = re.match('(?:(?P<test_time>[0-9]+)(?P<units>ms|mks))', time)
        if test_time:
            multiplier = test_time.group('units') == 'ms' and 1000000 or 1000
            subseconds = int(test_time.group('test_time'))
            total_nanosecs = subseconds * multiplier
            results['cpu_time'] = total_nanosecs
        cpu_times = re.match(
            r'(?P<wall_time>[0-9.]+)s wall, '
            '(?P<user_time>[0-9.]+)s user [+] '
            '(?P<sys_time>[0-9.]+)s system [=] '
            '(?P<cpu_time>[0-9.]+)s CPU [(](?P<wall_cpu_percent>[nN/aA0-9.]+)%?[)]',
            time)
        if cpu_times:
            results['wall_time'] = nanosecs_from_time(cpu_times.group('wall_time'))
            results['user_time'] = nanosecs_from_time(cpu_times.group('user_time'))
            results['sys_time'] = nanosecs_from_time(cpu_times.group('sys_time'))
            results['cpu_time'] = nanosecs_from_time(cpu_times.group('cpu_time'))
            self.test_suites[results['suite']]['wall_time'] += results['wall_time']
            self.test_suites[results['suite']]['user_time'] += results['user_time']
            self.test_suites[results['suite']]['sys_time'] += results['sys_time']
            results['wall_cpu_percent'] = cpu_times.group('wall_cpu_percent')
        self.test_suites[results['suite']]['cpu_time'] += results['cpu_time']
        store_durations(results)
    else:
        results['cpu_duration'] = duration_from_elapsed(0)
    ## For backward compatibility - remove later
    results['elapsed'] = results['cpu_time']
def parse_datetime(date=None, time=None, mode='str'):
    month = {
        'Janeiro': '01', 'Fevereiro': '02', 'Março': '03', 'Abril': '04',
        'Maio': '05', 'Junho': '06', 'Julho': '07', 'Agosto': '08',
        'Setembro': '09', 'Outubro': '10', 'Novembro': '11', 'Dezembro': '12'
    }
    date = '-'.join(
        reversed([
            date.strip() if index != 1 else month[date.strip()]
            for index, date in enumerate(date.split(',')[1].split('de'))
        ]))
    return datetime.fromisoformat(
        date + ' ' + time.strip()) if mode == 'date' else date + ' ' + time.strip()
def _parse_entry(self, row):
    tag, time = row
    time = int(time.strip())
    if tag.endswith("_clock_in"):
        place_tag = tag[:-len("_clock_in")]
        event = "in"
    elif tag.endswith("_clock_out"):
        place_tag = tag[:-len("_clock_out")]
        event = "out"
    else:
        logging.error('worktimes: invalid tag {}'.format(tag))
        sys.exit(1)
    return {'place': place_tag, 'event': event, 'time': time}
def writeReason(fout, termR, time, strat, flag=False):
    global _totalRef
    global _timeRef
    if "Theorem" in termR:
        fout.write("TH:" + time)
        if flag == True:
            _timeRef[strat] = _timeRef[strat] + float(time)
            _totalRef[strat] = _totalRef[strat] + 1
        return
    if "Unsatisfiable" in termR:
        fout.write("US:" + time)
        if flag == True:
            if "%" in time:
                return
            if not (time.isalpha()):
                _timeRef[strat] = _timeRef[strat] + float(time)
                _totalRef[strat] = _totalRef[strat] + 1
        return
    if "Refutation" in termR and not ("Refutation not found" in termR):
        fout.write("R:" + time)
        if flag == True:
            _timeRef[strat] = _timeRef[strat] + float(time)
            _totalRef[strat] = _totalRef[strat] + 1
        return
    if "Refutation" in termR and "not found" in termR:
        fout.write("RNF ")
        return
    if termR == "":
        fout.write("ERROR")
        return
    if "Time limit" in termR:
        fout.write("TO")
        return
    if "Timeout" in termR:
        fout.write("TO")
        return
    if "Unknown" in termR:
        fout.write("UKN")
        return
    if "Memory limit" in termR:
        fout.write("MEM")
        return
    if "GaveUp" in termR:
        fout.write("GU")
        return
    fout.write(termR.strip("%") + time.strip("%"))
    return
def create_chunks(self):
    """Create the utterance wav chunks out of wavFile from the mtFile."""
    # create wav directory
    try:
        os.mkdir(self.wavDir)
    except OSError as e:
        ## log.error - file exists
        pass
    wavFile = self.wavFile
    (nchannels, sampwidth, framerate, nframes, comptype, compname) = wavFile.getparams()
    mtFile = self.mtFile
    _ = mtFile.readline()  # TODO: should I change the mt file format?
    # check filename == self.filename
    transcriptionFile = self.transcriptionFile
    fileidsFile = self.fileidsFile
    ## get 3 same iterators on f
    for [id, time, text] in itertools.izip(*([iter(mtFile)] * 3)):
        # write .fileids and .transcription files
        outFilename = self.filename + "_" + id.strip()
        fileidsFile.write(outFilename + "\n")
        text = "<s> " + text.strip().upper() + "</s>" + " (" + outFilename + ")"
        transcriptionFile.write(text + "\n")
        [st, ed] = time.strip().split("-")
        msSt = float(st.split(":")[0]) * 60000 + float(st.split(":")[1]) * 1000
        msEd = float(ed.split(":")[0]) * 60000 + float(ed.split(":")[1]) * 1000
        frameSt = int(msSt * framerate / 1000)
        frameEd = int(msEd * framerate / 1000)
        wavFile.setpos(frameSt)
        chunk = wavFile.readframes(frameEd - frameSt)
        out = wave.open(self.wavDir + "/" + outFilename + ".wav", "w")
        # writeframes will take care of overwriting nframes
        out.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname))
        out.writeframes(chunk)
        out.close()
def dz_prayer():
    raw = next_prayer()
    if not raw:
        return i("clock", "")
    # else
    if len(raw) != 3:
        return i("clock", "")
    # else again
    (prayer, time, delta) = raw
    if delta < 15:
        icolor = "green"
        color = "green"
    elif delta < 30:
        icolor = "white"
        color = "white"
    elif delta < 60:
        icolor = "white"
        color = ""
    else:
        icolor = ""
        color = ""
    return i("clock", icolor) + "^fg({0}) {1} - {2} ({3}m)".format(
        color, prayer.strip(), time.strip(), delta)
def appendTitle(self, title, artists, time, dateTime, timeMask, splitArtist):
    givenTime = datetime.strptime(time.strip().lstrip(), timeMask)
    givenTime = givenTime.replace(year=dateTime.year)
    givenTime = givenTime.replace(day=dateTime.day)
    givenTime = givenTime.replace(month=dateTime.month)
    artists = artists.lower()
    # Splitting the artists if needed
    if (splitArtist != ""):
        artists = re.split(splitArtist, artists)
        for i in range(len(artists)):
            artists[i] = artists[i].lstrip().strip()
    else:
        artists = [artists.lstrip().strip()]
    appendedTitle = Title(title.lower().lstrip().strip(), artists, givenTime)
    self.titles.append(appendedTitle)
def create_chunks(self):
    """Create the utterance wav chunks out of wavFile from the mtFile."""
    # create wav directory
    try:
        os.mkdir(self.wavDir)
    except OSError as e:
        ## log.error - file exists
        pass
    wavFile = self.wavFile
    (nchannels, sampwidth, framerate, nframes, comptype, compname) = wavFile.getparams()
    asFile = self.asFile
    asFile.readline()  # dump the filename as the first line
    ## get 2 same iterators on f
    for (id, time) in itertools.izip(*([iter(asFile)] * 2)):
        # write .fileids and .transcription files
        outFilename = self.filename + "_" + '{:04d}'.format(int(id.strip()))
        [msSt, msEd] = time.strip().split("-")
        frameSt = int((float(msSt) - self.window) * framerate / 1000)
        frameEd = int((float(msEd) + self.window) * framerate / 1000)
        if frameSt < 0:
            frameSt = 0
        if frameEd > nframes:
            frameEd = nframes
        start = str(datetime.timedelta(seconds=int(msSt) / 1000)).replace(':', '-')
        stop = str(datetime.timedelta(seconds=int(msEd) / 1000)).replace(':', '-')
        outFilename += '_' + start + '_' + stop
        wavFile.setpos(frameSt)
        chunk = wavFile.readframes(frameEd - frameSt)
        out = wave.open(self.wavDir + "/" + outFilename + ".wav", "w")
        # writeframes will take care of overwriting nframes
        out.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname))
        out.writeframes(chunk)
        out.close()
def get_wavelet_score():
    gsr = json.load(open("/home/jf/Documents/EMBERS/GPS_tag/wavelet/correct_gsr_city_lat_lon.json"))
    time_list = ["2013-12-02 19:30:00", "2013-12-02 19:35:00", "2013-12-02 19:40:00",
                 "2013-12-02 19:45:00", "2013-12-02 19:50:00", "2013-12-02 19:55:00",
                 "2013-12-02 20:00:00", "2013-12-02 20:05:00", "2013-12-02 20:10:00",
                 "2013-12-02 20:15:00", "2013-12-02 20:20:00", "2013-12-02 20:25:00",
                 "2013-12-02 20:30:00", "2013-12-02 20:35:00", "2013-12-02 20:40:00",
                 "2013-12-02 20:45:00"]
    for loc in os.listdir("./Venezuela_zscore"):
        one = json.load(open("./Venezuela_zscore/" + loc))
        loc1 = encode(loc, "utf-8")
        if loc1 in gsr:
            lat = gsr[loc1]["lat"]
            lon = gsr[loc1]['log']
            for time in time_list:
                if time in one:
                    later = time.strip().split()[1]
                    score = one[time]
                    record = str(lat) + "\t" + str(lon) + "\t" + str(score)
                    location_file = "./Venezuela-map-%s" % later
                    with open(location_file, 'a') as out:
                        out.write(record + '\n')
def train(logall, easy_time, hard_time, eeg_log=False):
    rfClassifier = RandomForestClassifier(n_estimators=100)
    times = [easy_time, hard_time]
    times_seconds = []
    for time in times:
        time = time.strip().split(" ")
        times_seconds.append(time_to_seconds(time[1]))
    log_array = numpy.vstack(logall)
    indices = [0, 0]
    time_array = log_array[:, -1]
    for i in range(2):
        for index, time in enumerate(time_array):
            if math.fabs(time - times_seconds[i]) < 0.01:
                indices[i] = index
                break
    easy_array = log_array[0:indices[0]]
    hard_array = log_array[indices[0]:indices[1]]
    labels0 = [0] * easy_array.shape[0]
    labels1 = [1] * hard_array.shape[0]
    print easy_array.shape, hard_array.shape
    label_array = numpy.array(labels0 + labels1, dtype=numpy.int32)
    training_array = numpy.vstack([easy_array, hard_array])
    training_array = training_array[:, 1:-2]
    training_array, label_array = create_sample(training_array, label_array)
    rfClassifier.fit(training_array, label_array)
    joblib.dump(rfClassifier, os.path.join(logpath, "rfClassifier.clf"))
    return rfClassifier
def getChat(self, chatTag):
    time = chatTag.findAll("font", attrs={"size": "2"})[0].contents[0]
    time = time.strip("()")
    speaker = chatTag.findAll("b")[0].contents[0].strip().strip(":")
    chat = None
    i = 0
    while len(chatTag.contents) > 3:
        if SHOWERROR:
            print >> sys.stderr, "Notice: Stripping Links from %s\n" % repr(chatTag)
        self.stripLinks(chatTag)
        i += 1
        if i >= 10:
            self.warning("Cannot reduce content length to 3.", chatTag, severity=3)
            break
    length = len(chatTag.contents)
    if length == 2:
        chat = chatTag.contents[1]
        if chat.__class__.__name__ == "NavigableString":
            chat = unicode(chat)
        elif chat.__class__.__name__ == "Tag":
            chat = self.recursiveFindString(chat)
        else:
            self.warning("Mismatch Message Type", chat, Type=chat.__class__.__name__)
    elif length == 3:
        chat = chatTag.contents[2]
        chat = self.recursiveFindString(chat)
    else:
        self.warning("Message Length Mismatch", chatTag, Length=length)
    return time, speaker, self.cleanupMessage(chat)
    ret = dict(time=timestamp)
    print timestamp
    for (header, info) in response:
        label, time = None, None
        try:
            label, _ = header.split(' - ')
        except ValueError, e:
            print "\theader:%s\n\t\t%s\n\t\t%s" % (e, header, info)
        try:
            _, time = info.split(': ')
        except ValueError, e:
            print "\tinfo:%s\n\t\t%s\n\t\t%s" % (e, header, info)
        if label and time:
            time = time.strip()
            slug = 'home' if label.endswith(home) else 'work'
            ret[slug] = time
            print '\t%s: %s' % (slug, time)
    return ret


def main():
    while True:
        d = get_time()
        with open('data.txt', 'a') as f:
            f.write('%s\n' % json.dumps(d, sort_keys=True))
    struct_time = time.strptime(time, '%Y-%m-%d %X')
    hour = struct_time[3]
    minute = struct_time[4]
    second = struct_time[5]
    time_spice = hour * 6 + minute / 10 + 1
    return time_spice


# process the order data and generate the complete data.
# schema structure:
# order_id, driver_id, passenger_id, start_district_id, start_tj_level, start_poi_class,
# dest_district_id, dest_tj_level, dest_poi_class, price, time, weather, temperature, pm2.5
output = open("complete_data.csv", "wb")
for line in open("./training_data/order_data/order_data"):
    order_id, driver_id, passenger_id, start_district_hash, dest_district_hash, price, time \
        = line.split("\t")
    time = time.strip('\n')
    time_spice = getTimeSpice(time)
    if "NULL" == driver_id:
        flag = 0
    else:
        flag = 1
    if start_district_hash in cluster_dict.keys():
        start_district_id = cluster_dict[start_district_hash]
    else:
        start_district_id = ""
    if dest_district_hash in cluster_dict.keys():
        dest_district_id = cluster_dict[dest_district_hash]
    else:
        dest_district_id = ""
def deal_time(time):
    time = time.replace(' ', '')  # remove whitespace
    time = time.strip()           # remove '\n'
    time = datetime.strptime(time, "%H:%M")
    return time
def __createTimeDelimiter(self, time, day):
    return datetime.datetime.strptime("2015 2 " + day.strip() + " " + time.strip(),
                                      '%Y %W %A %H:%M')