def correctTimeStr(time, correction):
    """Shift the hour of a "HH:MM..." string by a GMT offset like "+0530".

    A '+' offset is subtracted and a '-' offset added, i.e. the time is
    normalised back toward GMT. Any correction not starting with '+'/'-'
    means the time is already GMT and is returned unchanged. Only the hour
    part of the offset is applied (minutes are dropped).
    """
    sign = correction[0]
    if sign not in ('+', '-'):
        return time  # already GMT / unrecognised offset
    hourCorrectionVal = int(int(correction[1:]) / 100)  # e.g. "+0530" -> 5
    colon = time.index(':')
    currHour = int(time[:colon])
    if sign == '-':
        currHour += hourCorrectionVal
    else:
        currHour -= hourCorrectionVal
    # Wrap into 0-23. The original only handled underflow, so applying
    # "-0500" to "22:30" produced the invalid "27:30".
    currHour %= 24
    return str(currHour) + time[colon:]
def change_time(self, time):
    """Normalise a comment timestamp to '%Y-%m-%d %H:%M' (very old comments
    carry a date only).

    Handles '昨天HH:MM', 'N分钟前', bare 'HH:MM', 'M月D日' and 'Y年M月D日'
    inputs; returns None for anything unrecognised instead of raising.

    :param time: raw timestamp string scraped from a comment
    :return: normalised string, or None for unknown formats
    """
    now = datetime.datetime.now()
    day = now.strftime('%Y-%m-%d')
    year = now.strftime('%Y')
    # Yesterday's date, used for the '昨天' prefix.
    yesterday = now + datetime.timedelta(days=-1)
    yesterday = yesterday.strftime('%Y-%m-%d')
    if '昨天' in time:
        time = time.replace('昨天', yesterday + ' ')
    elif '前' in time:
        # 'N分钟前': subtract N minutes from now.
        minut = int(time[:time.index('分')])
        time = (
            now + datetime.timedelta(minutes=-minut)).strftime('%Y-%m-%d %H:%M')
    elif len(time) == 5:
        # Bare 'HH:MM' means today.
        time = day + ' ' + time
    elif '月' in time and time.index('月') == 1:
        # Guarded with "in": the bare index() call raised ValueError for
        # strings without '月', so the '年' branch below (and the None
        # fallback) were unreachable for them.
        time = time.replace('月', '-').replace('日', '')
        time = year + '-' + time
    elif '年' in time:
        time = time.replace('年', '-').replace('月', '-').replace('日', '')
    else:
        # print('不明时间格式')
        return None
    return time
def bitstamp_net_v1():
    """Scrape 43 pages of Bitstamp news with headless Chrome and dump the
    collected (title, time, content) rows to ./datasets/bitstamp_net.csv.
    Page-level failures are logged and skipped.
    """
    save = []
    for page in range(43):
        try:
            opt = webdriver.ChromeOptions()
            opt.set_headless()
            driver = webdriver.Chrome(options=opt)
            driver.get(
                'https://www.bitstamp.net/ajax/news/?start={0}&limit=10'.
                format(page))
            driver.refresh()
            time = driver.find_elements_by_css_selector(
                'body > article > a > hgroup > h3 > time')
            title = driver.find_elements_by_css_selector(
                'body > article > a > section > h1 ')
            content = driver.find_elements_by_css_selector(
                'body > article > a > section > div')
            # Pair elements by position: the original time.index(a) lookup
            # was O(n) per item and returned the wrong slot whenever two
            # articles shared the same timestamp element text.
            for idx, a in enumerate(time):
                try:
                    rows = OrderedDict()
                    rows['title'] = title[idx].text.replace(
                        '\r', '').replace('\n', '')
                    rows['time'] = a.text
                    rows['content'] = stringpro(content[idx].text)
                    save.append(rows)
                except Exception as e:
                    continue
            # Release the browser; the original leaked one process per page.
            driver.quit()
        except Exception as e:
            logging.warning("{},{}".format(page, e))
            continue
    df = pd.DataFrame(save)
    df.to_csv("./datasets/bitstamp_net.csv", index=None)
def writing_speed_windows(fi, first, second, num):
    """Append a "writing_speed" column (seconds elapsed since the previous
    message divided by message length) to one chat's training CSV, using the
    timestamps in time.txt; gaps longer than 2 hours are recorded as -1.

    NOTE(review): f, time_txt and answer_time are opened without being
    closed, and answer_time is re-opened on every row — confirm before
    refactoring.
    """
    with open(
            './chats_process/' + str(first) + '_' + str(second) + '/' +
            'training_set_' + str(num) + '_final' + '.csv', 'r') as csvinput:
        with open(
                './chats_process/' + str(first) + '_' + str(second) + '/' +
                'training_' + str(num) + '_final' + '.csv', 'w') as csvoutput:
            writer = csv.writer(csvoutput)
            a = 0
            # Copy only the header row, adding the new column name.
            for row in csv.reader(csvinput):
                writer.writerow(row + ["writing_speed"])
                break
            f = open(fi, 'r')
            time_txt = open(
                './chats_process/' + str(first) + '_' + str(second) + '/' +
                'time.txt', 'r')
            # Seed `prev` with the first timestamp in time.txt.
            for time in time_txt:
                a = time.index(":::")
                p = time[:a]
                time = time[a:]
                date1 = datetime.strptime(time, "::: %j-%m-%Y %H:%M:%S:\n")
                prev = date1
                break
            # Re-open so the zip below starts from the first line again.
            time_txt = open(
                './chats_process/' + str(first) + '_' + str(second) + '/' +
                'time.txt', 'r')
            for row, line, time in zip(csv.reader(csvinput), f, time_txt):
                a = 0
                a = time.index(":::")
                p = time[:a]
                time = time[a:]
                answer_time = open(
                    './chats_process/' + str(first) + '_' + str(second) + '/' +
                    'writing_speed' + str(num) + '.txt', 'a')
                date1 = datetime.strptime(time, "::: %j-%m-%Y %H:%M:%S:\n")
                # More than two hours since the previous message: no
                # meaningful typing speed, store the sentinel -1.
                if (date1 - prev > timedelta(hours=2)):
                    answer_time.write(str(a) + " " + "-1")
                    writer.writerow(row + ["-1"])
                else:
                    answer_time.write(
                        str(a) + " " +
                        str((date1 - prev).total_seconds() / len(line)))
                    writer.writerow(
                        row + [str((date1 - prev).total_seconds() / len(line))])
                prev = date1
    # Replace the original training_set file with the augmented copy.
    os.rename(
        './chats_process/' + str(first) + '_' + str(second) + '/' +
        'training_' + str(num) + '_final' + '.csv',
        './chats_process/' + str(first) + '_' + str(second) + '/' +
        'training_set_' + str(num) + '_final' + '.csv')
def time_conv(time):
    """Split a "HH:MM" string into the module globals hh (hours) and mm
    (minutes).

    Results are published through module globals (c, mm, hh) because callers
    read them from module scope; that interface is kept for compatibility.
    """
    global c
    global mm
    global hh
    c = 0
    mm = ""
    hh = ""
    # Hoisted loop invariant: the original recomputed time.index(':')
    # twice for every character.
    colon = time.index(':')
    # Characters before the colon build hh, characters after build mm;
    # the colon itself is skipped.
    for c in range(0, len(time)):
        if c > colon:
            mm = mm + time[c]
        elif c < colon:
            hh = hh + time[c]
def logFilter():
    """Match each line of log1.txt against the regex pairs in log.txt, tag
    matches with the sample line's last field, then print whether the tag
    sequence (from the first sorted tag onward) is contiguous.
    """
    frt = open('log.txt')
    sample = frt.readlines()
    frs = open('log1.txt')
    test = frs.readlines()
    processQuence = set()
    timeQuence = []
    filterResult = []
    for tLine in test:
        tLinelist = tLine.strip().split(' ')
        for sLine in sample:
            sLinelist = sLine.strip().split(' ')
            # Fields 0/1 of the sample line are regexes matched against
            # fields 2/3 of the test line.
            if re.match(sLinelist[0], tLinelist[2]) and re.match(sLinelist[1], tLinelist[3]):
                tLinelist.append(sLinelist[-1])
                processQuence.add(sLinelist[-1])
                timeQuence.append(sLinelist[-1])
                filterResult.append(tLinelist)
    print("end:")
    processQuence = sorted(processQuence)
    print(processQuence)
    print(timeQuence)
    # for line in filterResult:
    #     print(line)
    index = []
    index0 = timeQuence.index(processQuence[0])
    # NOTE(review): index starts with the string '0' while later entries
    # are ints; the int() casts below paper over the mix — confirm intended.
    index.append('0')
    time = timeQuence[index0:].copy()
    print(time)
    for i in processQuence[1:]:
        index.append(time.index(i))
    print(index)
    # Print False for every non-consecutive pair of positions.
    for i in range(len(index) - 1):
        if int(index[i + 1]) - int(index[i]) != 1:
            print(False)
def time_now():
    """Return the current wall-clock time as a list of digit characters,
    with the colons and the fractional-seconds part stripped."""
    stamp = str(datetime.datetime.time(datetime.datetime.now()))
    digits = [ch for ch in stamp if ch != ':']
    return digits[:digits.index('.')]
def parse_detail(self, url):
    """Scrape one movie detail page: extract name, update time, status,
    download links and content, then persist everything via self.save().
    Skips the page entirely when the name already exists in the database.

    NOTE(review): `features` passed to BeautifulSoup is not defined in this
    block — presumably a module-level parser name; verify.
    """
    h = HttpUtility(url)
    html = h.get()
    soup = BeautifulSoup(html, features)
    detail = soup.find(id="center").find(class_="col").find(class_="post")
    # Movie name: the text between the 《 》 title marks.
    name = detail.h2.string
    name = name[name.index("《") + 1:name.index("》")]
    # If the movie name already exists, stop here.
    if DatabaseAccess.exist_name(name):
        return
    # Update time: substring between ':' and '|' of the post-meta line.
    time = detail.find(class_="postmeat").text
    time = time[time.index(":") + 1:time.index("|")]
    # Status
    status = detail.find(class_="postmeat").span.text
    # Cover image extraction (disabled)
    # imgs = detail.find_all("img")
    # img = ""
    # if len(imgs) > 0:
    #     # print(imgs[0])
    #     img = imgs[0]["src"]
    div = detail.find("div", class_="entry")
    # Download links
    down_links = self.get_download_url(div)
    # Douban details (disabled)
    # result = DoubanContentParser(name).start()
    # Body content
    content, about = self.get_content(div)
    print(name, time, status, content, about, down_links, end="\n \n \n")
    self.save(name=name,
              time=time,
              tag=status,
              content=content,
              about=about,
              down_links=down_links)
    pass
def marketHistory(symbol):
    """Fetch recent <symbol>_btc trades from YoBit and return the prices of
    the trades stamped 1 and 2 minutes before the newest trade.

    NOTE(review): the comparison uses the minute-of-hour string only, so it
    can match a trade from a different hour; the hmac 'apisign' header is
    unusual for this public v3 endpoint — confirm both.
    """
    url = 'https://www.yobit.net/api/3/trades/' + symbol + '_btc'
    r = requests.get(url, headers={
        'apisign': hmac.new(secret, url.encode(), hashlib.sha512).hexdigest()
    })
    try:
        resp = json.loads(r.text)
    except:
        # NOTE(review): resp stays undefined on parse failure and the code
        # below will raise NameError — confirm intended best-effort.
        print(r.text)
    # Leftover no-op expression — its result is unused.
    datetime.datetime.fromtimestamp(1499275110).strftime('%H:%M')
    # Persist the raw response for debugging.
    with open('mh.json', 'w') as outfile:
        json.dump(resp, outfile)
    price = []
    time = []
    for i in resp[symbol + '_btc']:
        price.append(i['price'])
        time.append(i['timestamp'])
    # Newest trade defines the reference minute.
    firstItem = time[0]
    currentTime = datetime.datetime.fromtimestamp(firstItem).strftime('%M')
    index1 = 0
    price1 = 0
    # Price of the first trade stamped one minute before the newest one.
    for i in time:
        t = datetime.datetime.fromtimestamp(i).strftime('%M')
        if float(t) == float(currentTime) - 1:
            index1 = time.index(i)
            price1 = price[index1]
            break
    index2 = 0
    price2 = 0
    # Same, two minutes before.
    for i in time:
        t = datetime.datetime.fromtimestamp(i).strftime('%M')
        if float(t) == float(currentTime) - 2:
            index2 = time.index(i)
            price2 = price[index2]
            break
    return (price1, price2)
def trimSampleIndex(self):
    """Return the positions (indices, not values) of the raw time values
    that fall inside the current frame boundaries, inclusive on both ends.
    """
    time = self.getRawTimeValues()
    boundaries = self.getFrameBoundaries(self.frames_per_syllable)
    origin_sample = boundaries[0]
    end_sample = boundaries[-1]
    # enumerate() fixes the duplicate-value bug: list.index(item) always
    # returns the FIRST occurrence, so repeated time values previously
    # produced repeated (wrong) indices.
    return [
        idx for idx, item in enumerate(time)
        if origin_sample <= item <= end_sample
    ]
def save(self):
    """Save parameters and data.

    Writes any parameters not yet persisted as '# key : value' header lines,
    appends the (time, data, slope, event) rows recorded since the last
    call, then flushes leftover tasks as comment rows. (Python 2 code.)
    """
    if self.stream is None:
        print 'warning: no stream defined in channel'
        return
    # First call: reset bookkeeping for saved params / data position.
    if self._last_data_index is None:
        self._saved_params = []
        self._last_data_index = 0
    # update protocol parameters
    for p in self.get_parameters():
        self.params[self.protocol + '.' + p.name] = p.get_value()
    # update configuration parameters
    for p in self.model.get_configuration():
        self.params['Configuration.' + p.name] = p.get_value()
    # Persist any parameters added/changed since the last save.
    if len(self._saved_params) != len(self.params):
        for key in sorted(self.params):
            item = key, self.params[key]
            if item not in self._saved_params:
                self.stream.write('# %s : %s\n' % item)
                self._saved_params.append(item)
    # Write the column header once, before the first data row.
    if self._last_data_index == 0:
        label0 = self.model.get_axis_label(0)
        label1 = self.model.get_axis_label(1)
        label2 = self.model.get_axis_label(2)
        self.stream.write('# %16s %18s %18s %s\n' %
                          (label0, label1, label2, '<event[<comment>]>'))
    start, end = self._last_data_index, len(self.time_data)
    print 'Saving channel %s data[%s:%s]' % (self.index, start, end)
    time = self.get_time()
    data = self.get_data()
    slope = self.get_data_slope()
    for i in range(start, end):
        t = time[i]
        v = data[i]
        r = slope[i]
        # Event/comment registered for this timestamp, if any.
        l = self.tasks.pop(t, '')
        self.stream.write('%18s %18s %18s %s\n' % (t, v, r, l))
    # Remaining tasks refer to timestamps outside [start, end); emit them
    # as comment rows so they are not lost.
    while self.tasks:
        t, l = self.tasks.popitem()
        i = time.index(t)
        v = data[i]
        r = slope[i]
        self.stream.write('# %18s %18s %18s %s\n' % (t, v, r, l))
    self.stream.flush()
    self._last_data_index = len(time)
def writing_speed_windows(fi, first, second, num):
    """Append a "writing_speed" column (seconds since the previous message
    divided by message length) to one chat's training CSV, using the
    timestamps in time.txt; gaps longer than 2 hours are recorded as -1.

    NOTE(review): this duplicates another writing_speed_windows definition
    in this file; f, time_txt and answer_time are never closed and
    answer_time is re-opened per row — confirm before refactoring.
    """
    with open('./chats_process/' + str(first) + '_' + str(second) + '/' +
              'training_set_' + str(num) + '_final' + '.csv', 'r') as csvinput:
        with open('./chats_process/' + str(first) + '_' + str(second) + '/' +
                  'training_' + str(num) + '_final' + '.csv', 'w') as csvoutput:
            writer = csv.writer(csvoutput)
            a = 0
            # Copy only the header row, adding the new column name.
            for row in csv.reader(csvinput):
                writer.writerow(row + ["writing_speed"])
                break
            f = open(fi, 'r')
            time_txt = open('./chats_process/' + str(first) + '_' +
                            str(second) + '/' + 'time.txt', 'r')
            # Seed `prev` with the first timestamp in time.txt.
            for time in time_txt:
                a = time.index(":::")
                p = time[:a]
                time = time[a:]
                date1 = datetime.strptime(time, "::: %j-%m-%Y %H:%M:%S:\n")
                prev = date1
                break
            # Re-open so the zip below starts from the first line again.
            time_txt = open('./chats_process/' + str(first) + '_' +
                            str(second) + '/' + 'time.txt', 'r')
            for row, line, time in zip(csv.reader(csvinput), f, time_txt):
                a = 0
                a = time.index(":::")
                p = time[:a]
                time = time[a:]
                answer_time = open('./chats_process/' + str(first) + '_' +
                                   str(second) + '/' + 'writing_speed' +
                                   str(num) + '.txt', 'a')
                date1 = datetime.strptime(time, "::: %j-%m-%Y %H:%M:%S:\n")
                # Gap of more than two hours: no meaningful typing speed,
                # store the sentinel -1.
                if (date1 - prev > timedelta(hours=2)):
                    answer_time.write(str(a) + " " + "-1")
                    writer.writerow(row + ["-1"])
                else:
                    answer_time.write(str(a) + " " +
                                      str((date1 - prev).total_seconds() / len(line)))
                    writer.writerow(row + [str((date1 - prev).total_seconds() / len(line))])
                prev = date1
    # Replace the original training_set file with the augmented copy.
    os.rename('./chats_process/' + str(first) + '_' + str(second) + '/' +
              'training_' + str(num) + '_final' + '.csv',
              './chats_process/' + str(first) + '_' + str(second) + '/' +
              'training_set_' + str(num) + '_final' + '.csv')
def save(self):
    """Save parameters and data.

    Writes unsaved parameters as '# key : value' header lines, appends the
    (time, data, slope, event) rows recorded since the last call, then
    flushes leftover tasks as comment rows. (Python 2 code; a near-identical
    definition also exists elsewhere in this file.)
    """
    if self.stream is None:
        print 'warning: no stream defined in channel'
        return
    # First call: reset bookkeeping for saved params / data position.
    if self._last_data_index is None:
        self._saved_params = []
        self._last_data_index = 0
    # update protocol parameters
    for p in self.get_parameters():
        self.params[self.protocol+'.'+p.name] = p.get_value()
    # update configuration parameters
    for p in self.model.get_configuration():
        self.params['Configuration.'+p.name] = p.get_value()
    # Persist any parameters added/changed since the last save.
    if len(self._saved_params) != len(self.params):
        for key in sorted(self.params):
            item = key, self.params[key]
            if item not in self._saved_params:
                self.stream.write('# %s : %s\n' % item)
                self._saved_params.append(item)
    # Write the column header once, before the first data row.
    if self._last_data_index==0:
        label0 = self.model.get_axis_label(0)
        label1 = self.model.get_axis_label(1)
        label2 = self.model.get_axis_label(2)
        self.stream.write('# %16s %18s %18s %s\n' %
                          (label0, label1, label2, '<event[<comment>]>'))
    start, end = self._last_data_index, len(self.time_data)
    print 'Saving channel %s data[%s:%s]' % (self.index, start, end)
    time = self.get_time()
    data = self.get_data()
    slope = self.get_data_slope()
    for i in range(start, end):
        t = time[i]
        v = data[i]
        r = slope[i]
        # Event/comment registered for this timestamp, if any.
        l = self.tasks.pop(t, '')
        self.stream.write('%18s %18s %18s %s\n' % (t, v, r,l))
    # Remaining tasks refer to timestamps outside [start, end); emit them
    # as comment rows so they are not lost.
    while self.tasks:
        t, l = self.tasks.popitem()
        i = time.index (t)
        v = data[i]
        r = slope[i]
        self.stream.write('# %18s %18s %18s %s\n' % (t, v,r, l))
    self.stream.flush()
    self._last_data_index = len(time)
def get_defi_tvl_covered(cache=False):
    """Return the percentage of DeFi TVL covered for each cover timestamp.

    With cache=True the precomputed result is loaded straight from redis.
    Timestamps whose date has no TVL entry fall back to the latest
    (lexicographically greatest) TVL date.
    """
    if cache:
        return json.loads(r.get('defi_tvl_covered'))
    cover_amount = get_active_cover_amount(cache=True)
    defi_tvl = json.loads(r.get('defi_tvl'))
    defi_tvl_covered = {}
    for time in cover_amount['USD']:
        # Hoisted: the original computed the date prefix twice per iteration.
        day = time[:time.index(' ')]
        date = day if day in defi_tvl else max(defi_tvl)
        defi_tvl_covered[
            time] = cover_amount['USD'][time] / defi_tvl[date] * 100
    return defi_tvl_covered
def crawer(id):
    """Fetch one page of Bitstamp news (10 items starting at *id*) and
    return a list of {time, title, content} dicts."""
    datas = []
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Cookie':
        'visid_incap_99025=babKYtmwTLaf+ImTYwcD1WM8R1sAAAAAQUIPAAAAAABeLjBe1NguLhA+zPaQ7hWw; incap_ses_1044_99025=IbbLddndSW4IEE3IdQl9DmM8R1sAAAAAFrUpwD5NP3PvFtzXaIlRZw==; nlbi_99025=pgStDZphyAILIXx88F1n9AAAAAAcvACDG4TEwMKFMQbx94BR; __utma=209907974.1954295422.1531395177.1531395177.1531395177.1; __utmc=209907974; __utmz=209907974.1531395177.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); _ga=GA1.2.1954295422.1531395177; _gid=GA1.2.195663399.1531395178; cookie_consent=1531395186830; csrftoken=GeKi1AClNifdWOUbbiJToZIIgrSqoKaY; stmpkola=s60xttw8nqc68d2hmnulu8s2jb7dy6bh; incap_ses_460_99025=ipZIJ28JaX7bGS8cukBiBiFJR1sAAAAAhxatE28utZTTwt7o6p7rHw==; incap_ses_572_99025=q/qaYPpUqkcWxluw1yfwB/9LR1sAAAAAKo5K25NHPHoaWEvRkN+TGw==',
        'Host': 'www.bitstamp.net',
        'Referer': 'https://www.bitstamp.net/news/',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest'
    }
    data = requests.get(
        'https://www.bitstamp.net/ajax/news/?start={0}&limit=10'.format(id),
        headers=headers)
    soup = BeautifulSoup(data.text, 'lxml')
    time = soup.select('body > article > a > hgroup > h3 > time')
    title = soup.select('body > article > a > section > h1 ')
    content = soup.select('body > article > a > section > div')
    # print(time)
    # Pair by position: the original time.index(a) lookup was O(n) per
    # item and mis-paired articles sharing the same timestamp element.
    for idx, a in enumerate(time):
        data = {
            'time': a.text,
            'title': title[idx].text.replace('\r', '').replace('\n', ''),
            'content':
            content[idx].text.replace('\r', '').replace('\n', '').replace(
                '*\t', ''),
        }
        datas.append(data)
    return datas
def change_time(self, time):
    """Normalise a timestamp to '%Y-%m-%d %H:%M' (very old comments show a
    date only); returns None for unrecognised formats.
    """
    now = datetime.datetime.now()
    day = now.strftime('%Y-%m-%d')
    year = now.strftime('%Y')
    # Convert the assorted input formats to one canonical form.
    if '昨天' in time:
        # BUGFIX: '昨天' (yesterday) used to be replaced with *today's*
        # date; compute yesterday as the sibling change_time does.
        yesterday = (now + datetime.timedelta(days=-1)).strftime('%Y-%m-%d')
        time = time.replace('昨天', yesterday + ' ')
    elif '前' in time:
        # 'N分钟前': subtract N minutes from now.
        minut = int(time[:time.index('分')])
        time = (
            now + datetime.timedelta(minutes=-minut)).strftime('%Y-%m-%d %H:%M')
    elif len(time) == 5:
        # Bare 'HH:MM' means today.
        time = day + ' ' + time
    elif '月' in time and time.index('月') == 1:
        # Guarded with "in": the bare index() call raised ValueError for
        # strings without '月'.
        time = time.replace('月', '-').replace('日', '')
        time = year + '-' + time
    elif '年' in time:
        time = time.replace('年', '-').replace('月', '-').replace('日', '')
    else:
        print('不明时间格式')
        return None
    return time
def getTimeThroughURL(id, url):
    """Fetch the event page with PhantomJS, extract the schedule text from
    the event-details header, normalise it with formatDate() and store it
    for *id* via alter_times(). Skips work when a time is already recorded
    for this id.
    """
    global count
    if check_time_present(id)[0] == False:
        time_driver = webdriver.PhantomJS(
            service_args=['--ignore-ssl-errors=true'])
        print("url in gettimethroughurl: " + url)
        time_driver.get(url)
        # Debug screenshot, numbered by the global counter.
        time_driver.get_screenshot_as_file("hello" + str(count) + ".png")
        count += 1
        doc = BeautifulSoup(time_driver.page_source, "html.parser")
        time = doc.select(
            "span[id=eventDetailsHeader] > nobr > span")[0].text.strip()
        print("time in gettimethrough url for id " + id + ": " + time)
        # Keep from the first '-' up to (but excluding) the last two chars.
        time = time[time.index('-'):len(time) - 2]
        time = formatDate(time)
        alter_times(id, time)
def shaw(proxies=None):
    """Scrape the Shaw cinemas booking page and write one CSV line per
    showtime (film, hall, date, time, booking link) via fileWrite().
    Returns 0 when the initial page scrape fails. (Python 2 code.)
    """
    print "<<<<< shaw cinema process started >>>>>"
    url = "http://www.shaw.sg/sw_buytickets.aspx"
    baseUrl = "http://www.shaw.sg/"
    proxies = validate_proxies(proxies, url)
    soup = scrape(url, proxies=proxies)
    if soup == 0:
        return 0
    # ASP.NET postback token needed to request each date's schedule.
    viewState = soup.select('input#__VIEWSTATE')[0]['value']
    optionList = soup.select('select#ctl00_Content_ddlShowDate > option')
    for k in optionList:
        date = k['value']
        print date
        try:
            soup = scrapeUrlshaw(viewState, date, proxies=proxies)
            # Reorder d/m/y -> m/d/y.
            date = date.split('/')
            date = date[1] + '/' + date[0] + '/' + date[2]
            schedules = soup.select('table.panelSchedule')
            hall = ''
            film = ''
            for i in schedules:
                filmDiv = i.select('td.txtScheduleHeaderCineplex')
                if (len(filmDiv) > 0):
                    # Header row: cinema/hall name before the '('.
                    hall = filmDiv[0].text.split('(')[0].encode(
                        'ascii', 'ignore')
                else:
                    timeDiv = i.select('a.txtSchedule')
                    if (len(timeDiv) > 0):
                        # First anchor is the film title, the rest are times.
                        film = timeDiv[0].text
                        for j in timeDiv[1:]:
                            time = j.text
                            # Trim the showtime to the trailing "AM"/"PM".
                            time = time[:time.index('M') + 1]
                            link = ("http://www.shaw.sg/" + j['href'])
                            # Rewrite special-hall links to their real
                            # seat-selection endpoints.
                            link = link.replace(
                                '/imax/index.htm?page=seatselect&',
                                '/imax_ticketing/sw_seatselect.aspx?')
                            link = link.replace(
                                '/premiere/movies.html?page=seatselect&',
                                '/premiere_ticketing/sw_seatselect.aspx?')
                            link = link.replace(' ', '%20')
                            line = '"' + film + '","' + hall + '","' + hall + '","' + date + '","' + time + '","' + link + '"'
                            #print line
                            fileWrite(line)
        except Exception as e:
            warnings.append('(Gv) Error extraction %s ' % (str(date)))
            print(e)
    print "<<<<< shaw cinema process ended >>>>>"
def utc_offset(self, time, offset):
    """Replace the "+HH:MM" UTC suffix of *time* with the given whole-hour
    offset, zero-padded and signed (e.g. -3 -> "-03:00")."""
    def pad(number):
        text = str(abs(number)).rjust(2, "0")
        sign = "+" if number >= 0 else "-"
        return sign + text

    head = time[:time.index("+")]
    return head + f"{pad(offset)}:00"
#integrate some information from df_history to df_ for index, row in df_history.iterrows(): t=np.count_nonzero(~np.isnan(row['promotionid_used'])) df_['promotion_used_number'][index] = t # the number of promotion each user used df_['order_number'][index]=len(row['promotionid_used']) # total number of orders for each user df_['last_order_day'][index]=(ttoday-max(row['order_time']))/86400 # last order until now unit days df_['last_order'][index]=max(row['order_time']) # last order exact time df_['day_counts'][index]=(max(row['order_time'])-min(row['order_time']))/86400 # day counts of each user df_['shop_diversity'][index]=len(np.unique(row['shopid'])) # shop diversity per user df_['repeat_shop'][index]= row['shopid'].count(max(row['shopid'],key=row['shopid'].count)) # max frequency to some shop per user time = row['order_time'] s = row['total_price'] time, s = zip(*sorted(zip(time, s))) tm=min(time, key=lambda x:abs(x-round((max(time)-min(time))/2+min(time)))) ind=time.index(tm) #delt_s=np.mean(s[ind:])-np.mean(s[:ind]) if (max(row['order_time'])-min(row['order_time']))/86400 > 30 and len(s[ind:]) != 0 and len(s[:ind]) != 0: df_.loc[index,'spent_trend']=round(np.mean(s[ind:])-np.mean(s[:ind])) # latter averge spent - early average spent by each user df_['spent_trend']=df_['spent_trend'].fillna(-10) # not well distributed orders #------------------------------------------------------------ # I tried to convert consecutive values to bins in this part, but it takes time to formulate based on distribution or logically # Since I didnt formulate them beautifully, I didnt use df_1 later # convert the consective numbers in total spent, order number and promotion used number to catogeries using bins # here I generated 10 bins, this parameter can be changed later bins = np.zeros((3,21)) df_1=pd.DataFrame(index=indexh, columns=columnsh) df_1 = df_1.fillna(0) # with 0s rather than NaNs df_1['userid']=df_history['userid'] df_1['last_order']=df_['last_order'] for a, k in 
zip(df_[df_.columns[1:4]], range(len(df_.columns)-1)):
def bar_base() -> Bar:
    """Build a pyecharts HeatMap of per-minute monitor status over the last
    720 minutes (x = minute label, y = monitor name, value = status code).

    NOTE(review): the annotated return type is Bar but a HeatMap chain is
    returned — confirm which is intended.
    """
    path = "Video_Monitor/video_list.json"
    ztstatus = Readjson(path)
    now = datetime.datetime.now()
    # Snap "now" down to the previous 10-minute boundary.
    zsf = now.minute % 10
    time = []
    if zsf > 0:
        dqzsj = now - datetime.timedelta(minutes=zsf)
    else:
        dqzsj = now
    # Build 720 one-minute labels, oldest first.
    for j in range(720):
        tssj1 = dqzsj - datetime.timedelta(minutes=1 * (j - 1))
        tssj = tssj1.strftime('%Y-%m-%d %H:%M')
        time.insert(0, tssj)
    # app.logger.info(time)
    data = select(time[0])
    # app.logger.info(data)
    name = ztstatus[1][0]
    newData = []
    # Translate each record into (x index, y index, status) triples.
    for value in range(len(data)):
        x = time.index(data[value].get('testtime'))
        y = name.index(data[value].get('ztname'))
        z = data[value].get('ztstatus')
        newData.append([x, y, z])
    # Falsy status values are rendered as "-".
    data = [[d[0], d[1], d[2] or "-"] for d in newData]
    c = (
        HeatMap().add_xaxis(xaxis_data=time).add_yaxis(
            series_name="详情",
            yaxis_data=name,
            value=data,
            label_opts=opts.LabelOpts(is_show=True,
                                      color="#fff",
                                      position="bottom",
                                      horizontal_align="100%"),
        ).set_series_opts(
            label_opts=opts.LabelOpts(is_show=True),
            minortick_opts=opts.MinorTickOpts(is_show=True, split_number=8),
            # NOTE(review): "minorsplitkine_opts" looks like a typo for
            # "minorsplitline_opts" — confirm the kwarg is actually honoured.
            minorsplitkine_opts=opts.MinorSplitLineOpts(is_show=True)).
        set_global_opts(
            # tooltip_opts=opts.TooltipOpts(formatter='{a},{@time}:\n,{@[0]}'),
            # axistick_opts=opts.AxisTickOpts(is_show=False),
            visualmap_opts=opts.VisualMapOpts(is_show=True,
                                              is_piecewise=True,
                                              pos_left='center',
                                              orient='horizontal',
                                              pieces=[{
                                                  "value": 200,
                                                  "label": '正常',
                                                  "color": 'green'
                                              }, {
                                                  "value": 404,
                                                  "label": '异常',
                                                  "color": 'Orange'
                                              }, {
                                                  "value": 408,
                                                  "label": '超时',
                                                  "color": 'grey'
                                              }]),
            legend_opts=opts.LegendOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(
                type_="category",
                boundary_gap=True,
                axistick_opts=opts.AxisTickOpts(is_align_with_label=True),
                splitarea_opts=opts.SplitAreaOpts(
                    is_show=True,
                    areastyle_opts=opts.AreaStyleOpts(opacity=1)),
                splitline_opts=opts.SplitLineOpts(
                    is_show=True,
                    linestyle_opts=opts.LineStyleOpts(opacity=1)),
                minor_split_line_opts=opts.MinorSplitLineOpts(is_show=True),
            ),
            yaxis_opts=opts.AxisOpts(
                type_="category",
                boundary_gap=True,
                axistick_opts=opts.AxisTickOpts(is_align_with_label=True),
                splitarea_opts=opts.SplitAreaOpts(
                    is_show=True,
                    areastyle_opts=opts.AreaStyleOpts(opacity=1)),
                splitline_opts=opts.SplitLineOpts(
                    is_show=True,
                    linestyle_opts=opts.LineStyleOpts(opacity=1)),
            ),
        ))
    return c
def advanced_weather(location = "cincinnati"):
    """Scrape weather.com's hour-by-hour table, print the next hour's
    conditions, and return them as a dict.

    NOTE(review): only Cincinnati is supported — any other location falls
    back to the same hard-coded URL; the slice offsets below are tied to
    the exact markup str() of each tag produces and will break if the page
    changes.
    """
    """"""""""""""" Soup stuff """""""""""""""
    import requests
    if "cincinnati" in location.lower():
        url = ("https://weather.com/weather/hourbyhour/l/229496fea2153559a056b812ded2a50721c9ae6c4a60ff800179cca93ec56caa")
    else:
        # Resort to Cincinnati if there is no other location.
        url = ("https://weather.com/weather/hourbyhour/l/229496fea2153559a056b812ded2a50721c9ae6c4a60ff800179cca93ec56caa")
    r = requests.get(url)
    data = r.text
    soup = BeautifulSoup(data)
    #print(soup)
    """"""""""""""" Get table """""""""""""""
    table = soup.find_all('table')[0]
    info = table.find_all('td')
    """"""""""""""" Get individual data """""""""""""""
    # The magic slice offsets strip the leading tag text from str(tag).
    time = str(info[1].find_all("span")[0])[23:]  # time of the estimate
    description = str(info[2].find_all("span")[0])[6:]  # weather outside
    rain_chance = str(info[5].find_all("span")[2])[6:]  # % chance of rain
    wind = str(info[7].find_all("span")[0])[15:]  # wind (direction speed mph)
    temp_F = str(info[3].find_all("span"))[16:]  # temperature in F
    temp_F_feals = str(info[4].find_all("span"))[16:]  # feels-like in F
    humidity = str(info[6].find_all("span")[0])[21:]  # % humidity
    """"""""""""""" Cleanup """""""""""""""
    # Drop everything from the first '<' (the closing tag) onward.
    time = time[:time.index('<')]
    description = description[:description.index('<')]
    rain_chance = rain_chance[:rain_chance.index('<')]
    wind = wind[:wind.index('<')]
    temp_F = temp_F[:temp_F.index('<')]
    temp_F_feals = temp_F_feals[:temp_F_feals.index('<')]
    humidity = humidity[:humidity.index('<')]
    """"""""""""""" Show """""""""""""""
    print("Time for estamate:",time)
    print("Weather outside:",description," Rain chance:",rain_chance," Wind: ",wind)
    print("Tempeture:",temp_F," Feels like:",temp_F_feals," Humidity:",humidity)
    data = {"Time":time,"Description": description, "Rain": rain_chance, "Wind": wind, "Temp": temp_F, "Feels": temp_F_feals, "Humidity": humidity}
    return(data)
def time_def(text, cut, gcs_url):
    """Split *text* at *cut* into a time expression and the remaining
    sentence, normalise the (Chinese) time expression, run sentiment and
    syntax analysis on the remainder, and write everything to Excel via
    wirte_excel().

    NOTE(review): `elif '下午' or '晚上' in time:` is always true (non-empty
    string literal), and `if digit:` treats a digit at index 0 like "no
    digit" — both look like latent bugs; confirm before changing, since the
    downstream branches partially compensate.
    """
    time = text.split(cut, 1)[0]
    part = ''
    other = cut + text.split(cut, 1)[1]
    # print(time)
    # No time expression at all: analyse the sentence and write it out.
    if time == '':
        name, other = cut_name(other, gcs_url)
        final_time = ''
        score, magnitude = sentiment_text(other, gcs_url)
        verb, noun, adj = syntax_text(other, gcs_url)
        return wirte_excel(part, final_time, name, other, verb, noun, adj,
                           score, magnitude, gcs_url)
    # Position of the first digit in the time expression.
    for word in time:
        if word.isdigit():
            digit = time.index(word)
            break
        else:
            digit = None
    if digit:
        part = time[0:digit]
        time = time[digit:]
    # print(part)
    # Keep only digits and recognised date/time particles.
    have = [
        '月', '號', '日', '早上', '早', '下午', '下', '午', '晚上', '點', '分', '半', '晚', '上'
    ]
    for word in time:
        if word not in have and word.isdigit() != True:
            time = time.replace(word, '')
            # print(word)
    # print(time)
    # Everything was filtered away: treat as "no time" and write out.
    if time == '':
        name, other = cut_name(other, gcs_url)
        final_time = ''
        score, magnitude = sentiment_text(other, gcs_url)
        verb, noun, adj = syntax_text(other, gcs_url)
        return wirte_excel(part, final_time, name, other, verb, noun, adj,
                           score, magnitude, gcs_url)
    # Prefix the current year and normalise 日 -> 號.
    today = datetime.datetime.now()
    time = str(today.year) + '年' + time
    if '日' in time:
        time = time.replace('日', '號')
    if '點' not in time:
        # Date only, no hour component.
        final_time = cut_time_day(time, gcs_url)
    else:
        # NOTE(review): `'月' and '號' in time` only tests '號' — '月' is a
        # truthy literal; confirm the intended condition.
        if '月' and '號' in time:
            if '早上' in time:
                time = time.replace('早上', '')
            elif '下午' or '晚上' in time:
                # Convert a 12-hour afternoon/evening hour to 24-hour form.
                if '下午' in time:
                    new = time[:(time.index('下午') + 2)] + str(
                        int(time[(time.index('下午') + 2):time.index('點')]) +
                        12) + time[time.index('點'):]
                    time = new.replace('下午', '')
                    # print(time)
                else:
                    new = time[:(time.index('晚上') + 2)] + str(
                        int(time[(time.index('晚上') + 2):time.index('點')]) +
                        12) + time[time.index('點'):]
                    time = new.replace('晚上', '')
            else:
                time = time
        else:
            # No day marker: insert '號' before the time-of-day particle,
            # then do the same 12->24 hour conversion.
            if '早上' in time:
                temp = list(time)
                station = temp.index('早')
                time = "".join(temp[0:(station)]) + '號' + "".join(
                    temp[station:])
                time = time.replace('早上', '')
            elif '下午' or '晚上' in time:
                if '下午' in time:
                    temp = list(time)
                    station = temp.index('下')
                    time = "".join(temp[0:(station)]) + '號' + "".join(
                        temp[station:])
                    new = time[:(time.index('下午') + 2)] + str(
                        int(time[(time.index('下午') + 2):time.index('點')]) +
                        12) + time[time.index('點'):]
                    time = new.replace('下午', '')
                    # print(time)
                else:
                    temp = list(time)
                    station = temp.index('晚')
                    time = "".join(temp[0:(station)]) + '號' + "".join(
                        temp[station:])
                    new = time[:(time.index('晚上') + 2)] + str(
                        int(time[(time.index('晚上') + 2):time.index('點')]) +
                        12) + time[time.index('點'):]
                    time = new.replace('晚上', '')
                    # print(time)
        final_time = cut_time_hour(time, gcs_url)
    name, other = cut_name(other, gcs_url)
    score, magnitude = sentiment_text(other, gcs_url)
    verb, noun, adj = syntax_text(other, gcs_url)
    wirte_excel(part, final_time, name, other, verb, noun, adj, score,
                magnitude, gcs_url)
def minToSecs(time):
    """Convert a "M:SS"-style value to total seconds.

    The input is stringified first, so numeric-like inputs are accepted as
    long as they contain a colon.
    """
    minutes_part, _, seconds_part = str(time).partition(":")
    return int(float(minutes_part)) * 60 + int(float(seconds_part))
def recommendationAlgoFunc(DesiredArrivalTime, clientID):
    """Recommend departure times for a client's route so they arrive near
    DesiredArrivalTime, based on the 12-hour travel-time predictions stored
    in MongoDB; results (or error messages) are published back to the
    client via publish_handler().

    NOTE(review): reconstructed from a whitespace-mangled source — the
    "oops!! Internal problem" messages were split across lines and have
    been re-joined with a single space; verify against the original.
    """
    try:
        global newttobackground
        proceed = False
        dateproceed = False
        route_time = datetime.datetime.now(pytz.timezone(client_data[clientID]['timeZone']))
        theorytimeinsecs = client_data[clientID]['theoryTime']
        # TheoriticalTravelDuration(TTD)
        TheoriticalTravelDuration = theorytimeinsecs/60.0  # Theory time in minutes
        # ILLUSTRATION STEP 1 --> Round Off TTD to nearest 10min time interval by introducing offset
        if (TheoriticalTravelDuration % 10 >= 5):
            # OFFSET VALUE
            rem = 10.0 - TheoriticalTravelDuration % 10
        else:
            rem = TheoriticalTravelDuration % 10
            # OFFSET VALUE
            rem = -rem
        TimeDurationOffset = rem  # (TDO)
        Limit = 1
        # All the mongolab operations are inside the try exception block
        try:
            # This is to get the latest date in the mongodb
            dateCursor = newttobackground.ttobgcoll.find({"route": client_data[clientID]['routeName']}).sort('recorddate', pymongo.DESCENDING).limit(Limit)
            for datedoc in dateCursor:
                endDate = datedoc['recorddate']
                dateproceed = True
        except Exception as e:
            dateproceed = False
            result = {"responseType": 4, "message": "oops!! Internal problem"}
            publish_handler(clientID, result)
            logging.error("The dateCursor error is %s,%s\n" % (e, type(e)))
        if dateproceed == True:
            # Hours between now and the desired arrival.
            diff = DesiredArrivalTime - route_time
            day = diff.days
            hour = (day*24 + diff.seconds/3600)
            realtimeinminutes = []
            time = []
            # ILLUSTRATION STEP 2 --> Fetch 12hr advance prediction for the Selected Route
            try:
                # This is to get the predictions from the result collection
                cursor = newttobackground.ttoresultcoll.find({"route": client_data[clientID]['routeName']}).sort('time', pymongo.ASCENDING).limit(150)
                if (0 <= hour <= 12 and 0 <= day <= 1):
                    proceed = True
                    for datadoc in cursor:
                        time.append(datadoc['time'].replace(second=0))
                        realtimeinminutes.append(datadoc['predictioninmins'])
                else:
                    if day < 0:
                        result = {"responseType": 4, "message": "Desired Arrival Time is below 12 hours range"}
                    else:
                        result = {"responseType": 4, "message": "Desired Arrival Time is more than 12 hours away"}
                    publish_handler(clientID, result)
                    proceed = False
                    del client_data[clientID]
            except Exception as e:
                logging.error("The testdatacursor error is %s,%s\n" % (e, type(e)))
                proceed = False
                result = {"responseType": 4, "message": "oops!! Internal problem"}
                publish_handler(clientID, result)
            timediffinminutes = []
            # ILLUSTRATION STEP 3 --> Update all predicted time durations with the TDO value
            if (proceed == True):
                for j in realtimeinminutes:
                    timediffinminutes.append((((float(j)-(TheoriticalTravelDuration)))+TimeDurationOffset))
                # Find the prediction slot matching the desired arrival
                # hour:minute (-1 when absent).
                DesiredArrivalTimeIndexInList = -1
                for i in range(len(time)):
                    if (int(DesiredArrivalTime.strftime("%H")) == int(time[i].strftime("%H")) and int(DesiredArrivalTime.strftime("%M")) == int(time[i].strftime("%M"))):
                        DesiredArrivalTimeIndexInList = time.index(time[i])
                try:
                    if DesiredArrivalTimeIndexInList != -1:
                        pred_minutes = []
                        for i in range(len(timediffinminutes)):
                            pred_minutes.append(float(timediffinminutes[i]+TheoriticalTravelDuration))
                        # ILLUSTRATION STEP 4 --> Handled as part of the overall flow of STEP 6
                        # ILLUSTRATION STEP 5 --> Calculate Recommendation Reference Start Time(RRST) from the DAT and TTD
                        RecommendationRefferenceStartTime = int(DesiredArrivalTimeIndexInList-((TheoriticalTravelDuration+TimeDurationOffset)/10))
                        i = RecommendationRefferenceStartTime
                        recommendationFlag = True
                        checkedOnce = []
                        recommendationResult = {}
                        listlen = len(time)
                        j = 0
                        # Walk the RRST index until an on-time slot or both
                        # an early and a late candidate are found.
                        while (recommendationFlag == True):
                            predictedArrivalTime = time[i]+datetime.timedelta(minutes=pred_minutes[i])
                            replaceapproach = predictedArrivalTime.replace(tzinfo=pytz.timezone(client_data[clientID]['timeZone']))
                            zone = pytz.timezone(client_data[clientID]["timeZone"])
                            predictedArrivalTime = zone.localize(predictedArrivalTime)
                            diff = DesiredArrivalTime - predictedArrivalTime
                            diff_minutes = (diff.days*24*60)+(diff.seconds/60)
                            # ILLUSTRATION STEP 6.3 --> Checking for the onTime
                            if (diff_minutes == 0):
                                # This condition is the top priority
                                pred_minutesReal = pred_minutes[i]-TimeDurationOffset
                                # ILLUSTRATION STEP 6.3.1 --> Return the latest ontime recommendations to the user
                                recommendationResult.update({"onTime": {"predictedDepartureTime": str(time[i].replace(second=0, tzinfo=None)), "predictedArrivalTime": str(predictedArrivalTime.replace(tzinfo=None)), "dep_note": "You will reach ontime", "pred_minutesReal": pred_minutesReal}})
                                recommendationFlag = False
                            # ILLUSTRATION STEP 6.6(Condition)--> Checking for the early
                            elif (0 <= diff_minutes <= 10):
                                pred_minutesReal = pred_minutes[i]-TimeDurationOffset
                                if (time[i] not in checkedOnce):
                                    checkedOnce.append(time[i])
                                    # ILLUSTRATION STEP 6.1,6.2 --> Derive the latest Recommendation
                                    recommendationResult.update({"Early": {"predictedDepartureTime": str(time[i].replace(second=0, tzinfo=None)), "predictedArrivalTime": str(predictedArrivalTime.replace(tzinfo=None)), "dep_note": "You will reach %s min(s) early" % (abs(diff_minutes)), "pred_minutesReal": pred_minutesReal}})
                                    # ILLUSTRATION STEP 6.6(Operation) --> Move RRST 10min Forward
                                    i += 1
                                else:
                                    # ILLUSTRATION 6.4.1(Scenario 1) --> Return the latest late and early recommendations to the user
                                    recommendationResult.update({"Early": {"predictedDepartureTime": str(time[i].replace(second=0, tzinfo=None)), "predictedArrivalTime": str(predictedArrivalTime.replace(tzinfo=None)), "dep_note": "You will reach %s min(s) early" % (abs(diff_minutes)), "pred_minutesReal": pred_minutesReal}})
                                    recommendationFlag = False
                            # ILLUSTRATION STEP 6.7(Condition) --> Checking for the late
                            else:
                                if diff_minutes < 0:
                                    pred_minutesReal = pred_minutes[i]-TimeDurationOffset
                                    if (time[i] not in checkedOnce):
                                        checkedOnce.append(time[i])
                                        # ILLUSTRATION STEP 6.1,6.2 --> Derive the latest Recommendation
                                        recommendationResult.update({"Late": {"predictedDepartureTime": str(time[i].replace(second=0, tzinfo=None)), "predictedArrivalTime": str(predictedArrivalTime.replace(tzinfo=None)), "dep_note": "You will reach %s min(s) late" % (abs(diff_minutes)), "pred_minutesReal": pred_minutesReal}})
                                        # ILLUSTRATION STEP 6.7(Operation) --> Move RRST 10min Backward
                                        i -= 1
                                    else:
                                        # ILLUSTRATION 6.4.1(Scenario 2) --> Return the latest early and late recommendations to the user
                                        recommendationResult.update({"Late": {"predictedDepartureTime": str(time[i].replace(second=0, tzinfo=None)), "predictedArrivalTime": str(predictedArrivalTime.replace(tzinfo=None)), "dep_note": "You will reach %s min(s) late" % (abs(diff_minutes)), "pred_minutesReal": pred_minutesReal}})
                                        recommendationFlag = False
                                else:
                                    i += 1
                        # Publish the collected recommendations.
                        recommresult = []
                        for val in recommendationResult.keys():
                            recommresult.append(recommendationResult[val])
                        pub_dict = {"responseType": 1, "route_name": client_data[clientID]['routeName'], "arrival_time": str(DesiredArrivalTime.replace(tzinfo=None)), "recommendation": recommresult}
                        publish_handler(client_data[clientID]["clientID"], pub_dict)
                        logging.info("The sent message for the recommendationmessage%s\n" % (str(pub_dict)))
                        client_data[clientID].update({"recommndsentproceed": False})
                except Exception as e:
                    logging.error("The error occured in recommalgoinnerError is %s,%s\n" % (e, type(e)))
                    result = {"responseType": 4, "message": "oops!! Internal problem"}
                    publish_handler(clientID, result)
    except Exception as recommalgoError:
        result = {"responseType": 4, "message": "oops!! Internal problem"}
        publish_handler(clientID, result)
        logging.error("The error occured in recommalgoError is %s,%s\n" % (recommalgoError, type(recommalgoError)))
def __init__(self, time: str):
    """Split an "H:M:S.micro" timestamp string into numeric fields.

    NOTE(review): assumes exactly two ':' separators followed by a '.'
    before the microseconds digits; any other shape raises ValueError
    from index()/rindex()/int(). Confirm against the producer's format.
    """
    first_colon = time.index(':')
    last_colon = time.rindex(':')
    dot = time.index('.')
    self.hours = int(time[:first_colon])
    self.minutes = int(time[first_colon + 1:last_colon])
    self.seconds = int(time[last_colon + 1:dot])
    self.microseconds = int(time[dot + 1:])
def fraction_time(time):
    """Return the minutes fraction (":MM") of a clock string such as "4:15pm".

    The slice stops one character before the 'm', excluding the 'a'/'p'
    of the am/pm suffix. Strings without a colon yield ':00'.
    """
    if ':' not in time:
        return ':00'
    start = time.index(':')
    stop = time.index('m') - 1
    return time[start:stop]
def getintervalratings(begin, end, time, rate):
    """Return the slice of *rate* aligned with the span [begin, end) in *time*.

    Positions come from the first occurrences of *begin* and *end* in the
    parallel *time* sequence; the rating aligned with *end* is excluded.
    """
    lo = time.index(begin)
    hi = time.index(end)
    return rate[lo:hi]
def writing_speed_android(fi, first, second, num):
    # Append a "writing_speed" column (elapsed seconds per character between
    # consecutive messages) to the Android-export training CSV.
    #   fi            path to the raw chat text file (one message per line)
    #   first/second  participant names used to build the
    #                 ./chats_process/<first>_<second>/ directory
    #   num           index of the training-set file to process
    with open(
            './chats_process/' + str(first) + '_' + str(second) + '/' +
            'training_set_' + str(num) + '_final' + '.csv', 'r') as csvinput:
        with open(
                './chats_process/' + str(first) + '_' + str(second) + '/' +
                'training_' + str(num) + '_final' + '.csv', 'w') as csvoutput:
            writer = csv.writer(csvoutput)
            a = 0
            # Copy only the header row, extended with the new column name.
            for row in csv.reader(csvinput):
                writer.writerow(row + ["writing_speed"])
                break
            f = open(fi, 'r')
            time_txt = open(
                './chats_process/' + str(first) + '_' + str(second) + '/' +
                'time.txt', 'r')
            # Seed `prev` with the first timestamp. Each time.txt line is
            # "<prefix>::: <date>"; timestamps may be d/m/Y 24h or m/d/Y 12h,
            # so the 24h format is tried first and the 12h one on failure.
            for time in time_txt:
                a = time.index(":::")
                p = time[:a]
                time = time[a:]
                correctDate = False
                try:
                    date1 = datetime.strptime(time, "::: %d/%m/%Y, %H:%M \n")
                    correctDate = True
                except ValueError:
                    correctDate = False
                if correctDate == True:
                    # NOTE(review): strftime result is discarded — this call
                    # has no effect; presumably meant for debugging.
                    date1.strftime("::: %m/%d/%Y, %I:%M %p\n")
                else:
                    date1 = datetime.strptime(time, "::: %m/%d/%Y, %I:%M %p\n")
                prev = date1
                break
            # Re-open time.txt so the main loop restarts at the first line
            # (the csv reader, by contrast, continues past the header).
            time_txt = open(
                './chats_process/' + str(first) + '_' + str(second) + '/' +
                'time.txt', 'r')
            # Walk the remaining CSV rows, raw chat lines, and timestamps in
            # lockstep.
            for row, line, time in zip(csv.reader(csvinput), f, time_txt):
                a = time.index(":::")
                p = time[:a]
                time = time[a:]
                answer_time = open(
                    './chats_process/' + str(first) + '_' + str(second) + '/' +
                    'writing_speed' + str(num) + '.txt', 'a')
                correctDate = False
                try:
                    date1 = datetime.strptime(time, "::: %d/%m/%Y, %H:%M \n")
                    correctDate = True
                except ValueError:
                    correctDate = False
                if correctDate == True:
                    # NOTE(review): discarded result, same as above.
                    date1.strftime("::: %m/%d/%Y, %I:%M %p\n")
                else:
                    date1 = datetime.strptime(time, "::: %m/%d/%Y, %I:%M %p\n")
                # Gaps longer than two hours mark a new conversation (-1);
                # otherwise record elapsed seconds per character of the line.
                if (date1 - prev > timedelta(hours=2)):
                    answer_time.write(str(a) + " " + "-1")
                    writer.writerow(row + ["-1"])
                else:
                    answer_time.write(
                        str(a) + " " +
                        str((date1 - prev).total_seconds() / len(line)))
                    writer.writerow(
                        row + [str((date1 - prev).total_seconds() / len(line))])
                prev = date1
    # Replace the original training_set CSV with the augmented copy.
    os.rename(
        './chats_process/' + str(first) + '_' + str(second) + '/' +
        'training_' + str(num) + '_final' + '.csv',
        './chats_process/' + str(first) + '_' + str(second) + '/' +
        'training_set_' + str(num) + '_final' + '.csv')
def read(self, infile):
    """Parse a FEMAXI '<infile>.out' file and accumulate fuel-temperature data.

    Reads the steady-state burn time, the transient time points, pin
    geometry, and per-timepoint radial temperature profiles, then appends
    the results onto the instance attributes (fuelT, time, suffix,
    n_axial, n_radial, rmax, rmax_gas, rmax_clad).
    """
    # change infile to the right format
    infile = infile+'.out'
    # time array and per-timepoint filename suffixes
    time = []
    suffix = []
    # indices of timepoints to pop out (negative = pre-transient burn phase)
    popidx = []
    # read steady state operation time from input
    with open(infile,'r') as fin:
        for line in fin:
            # NOTE(review): `'1 ' and ...` is always truthy-'1 '; only the
            # 'HISTORY DATA (1)' membership test actually filters here.
            if '1 ' and 'HISTORY DATA (1)' in line:
                # skip the 8-line table header
                for i in range(8):
                    line = next(fin)
                # read steady state time: scan until the record flagged '2'
                while True:
                    line = fin.readline()
                    if not line.strip():
                        pass
                    elif line.split()[-1] == '0':
                        # keep the latest H:M:S:ms record flagged '0'
                        newline = line.replace(':',' ')
                        timeburn_str = newline.split()
                    elif line.split()[-1] == '2':
                        break
                # H:M:S:ms -> seconds
                timeburn = float(timeburn_str[1])*3600+float(timeburn_str[2])*60+float(timeburn_str[3])+float(timeburn_str[4])/1000
                break
    print ('operational time (s): ', timeburn)
    # read time stamps of every output record
    for line in open(infile,'r'):
        if 'TIME (H:M:S:MS)' in line:
            time_str = FEMAXI_reader.find_between_(line,')','|')
            time_str = time_str.split(':')
            # convert total time to transient time in seconds
            time_curr = float(time_str[0])*3600+float(time_str[1])*60+float(time_str[2])+float(time_str[3])/1000 - timeburn
            # suffix uses 'd' in place of '.' so it is filename-safe
            suffix_curr = str('{0:.2f}'.format(time_curr)).replace('.','d')
            time.append('%.2f'%float(time_curr))
            suffix.append(suffix_curr)
    # mark timepoints that precede the transient (negative times)
    for timepoint in time:
        if float(timepoint) < 0:
            popidx.append(time.index(timepoint))
    # read pin geometry data (pellet, gas gap, and cladding radii)
    with open(infile,'r') as fin:
        for line in fin:
            if 'PELLET SPECIFICATIONS ' in line:
                line = next(fin)
                line = next(fin)
                line = next(fin)
                dPellet = float(line.split()[2])
                rmax = dPellet/2
            elif 'CLAD. INSIDE DIAMETER (CM)' in line:
                dGas = float(line.split()[-1])
                dClad = float(next(fin).split()[-1])
                rmax_gas = dGas/2
                rmax_clad = dClad/2
    fin.close()
    # read fuel temperature node counts from the first temperature table
    with open(infile,'r') as fin:
        for line in fin:
            if 'TEMPERATURE DISTRIBUTION IN THE FUEL (DEG.C) ' in line:
                line = next(fin)
                line = next(fin)
                line = next(fin)
                data = line.split()
                n_axial = int(data[0])
                n_radial = len(data) - 1
                break
        # add gas and cladding node to radial array
        n_radial = n_radial + 2 # 1 node for gas and 1 node for cladding
    fin.close()
    # number of time points before any are popped
    n_old = len(time)
    # dictionary for fuel temperature, keyed 'fuelT<n>' per timepoint
    fuelT = {}
    with open(infile,'r') as fin:
        n = 0
        # read fuel temperature into dictionary
        for line in fin:
            if 'TEMPERATURE DISTRIBUTION IN THE FUEL (DEG.C) ' in line:
                data = []
                T_idx = 'fuelT'+str(n)
                line = next(fin)
                line = next(fin)
                for i in range(n_axial):
                    data.append((next(fin).split()))
                data = np.array(data, dtype = float)
                data = data + 273.15 # convert C to K
                # drop the leading axial-index column
                fuelT[T_idx] = data[:,1:n_radial+1]
                n = n+1
        # read cladding and gas temperature into dictionary
        fin.seek(0)
        n = 0
        for line in fin:
            if 'PC PS CI CO ' in line:
                Tgas = []
                Tclad = []
                T_idx = 'fuelT'+str(n)
                for i in range(n_axial):
                    data = next(fin)
                    # '*' marks overflowed columns; blank them before split
                    data = data.replace('*',' ')
                    Tgas.append((float(data.split()[8])+float(data.split()[9]))/2)
                    Tclad.append((float(data.split()[9])+float(data.split()[10]))/2)
                Tgas = np.array(Tgas, dtype = float)
                Tclad = np.array(Tclad, dtype = float)
                Tgas = Tgas + 273.15 # convert C to K
                Tclad = Tclad + 273.15 # convert C to K
                # append gas and clad temperature to the fuel temperature array
                fuelT[T_idx] = np.c_[fuelT[T_idx],Tgas]
                fuelT[T_idx] = np.c_[fuelT[T_idx],Tclad]
                n = n+1
    fin.close()
    # remove elements from the burnup (pre-transient) phase
    time = [i for j, i in enumerate(time) if j not in popidx]
    # convert time array to float
    time = [float(i) for i in time]
    suffix = [i for j, i in enumerate(suffix) if j not in popidx]
    for i in popidx:
        del_idx = 'fuelT'+str(i)
        del fuelT[del_idx]
    # replace the old keys by renamed new keys so they start at fuelT0 again
    n_rm = len(popidx)
    # NOTE(review): this assignment is immediately overwritten by the loop
    # variable below and has no effect.
    Tidx_new = range(n_old - n_rm -1)
    for Tidx_new in range(n_old - n_rm):
        key_new = 'fuelT'+str(Tidx_new)
        key_old = 'fuelT'+str(Tidx_new+n_rm)
        fuelT[key_new] = fuelT.pop(key_old)
    # accumulate results onto the instance
    self.fuelT = fuelT
    self.n_axial = self.n_axial + n_axial
    self.n_radial = self.n_radial + n_radial
    self.time = self.time + time
    self.suffix = self.suffix + suffix
    self.rmax = self.rmax + rmax
    self.rmax_gas = self.rmax_gas + rmax_gas
    self.rmax_clad = self.rmax_clad + rmax_clad
def data_for_picture(hours, mins, secs):
    # Finds the data for the nearest time to the picture,
    # i.e. a picture taken at 18:40:32 but data taken at 18:40:34.
    # Reads the module-level parallel arrays (time, altitude, internal_temp,
    # temp_bme, pres, hum, uva, uvb, sound, gps_lat, gps_long) and returns
    # the row nearest to hours:mins:secs. Python 2 syntax (print statements).
    open_image(hours, mins, secs)
    for i in np.arange(len(time)):
        # time[i] is an "HH:MM:SS" string; match the hour field first
        if time[i][0:2] == str(hours):
            if len(str(mins)) == 1:
                mins = '0' + str(mins)
            if time[i][3:5] == str(mins):
                # collect the seconds of up to the next 11 samples
                list_of_secs = []
                if (len(time) - i) <= 6:
                    for j in np.arange(0, len(time) - i):
                        list_of_secs.append(int(time[j + i][6:8]))
                else:
                    for j in np.arange(0, 11):
                        list_of_secs.append(int(time[j + i][6:8]))
                # insert the target seconds and locate its sorted position
                list_of_secs.append(secs)
                list_of_secs.sort()
                i = list_of_secs.index(secs)
                # pick whichever neighbouring sample is closer in seconds
                # (ties go to the later sample)
                if i != len(list_of_secs) - 1:
                    above = int(list_of_secs[i + 1]) - int(secs)
                    below = int(secs) - int(list_of_secs[i - 1])
                    if above > below:
                        secs = int(list_of_secs[i - 1])
                    if above < below:
                        secs = int(list_of_secs[i + 1])
                    if above == below:
                        secs = int(list_of_secs[i + 1])
                if len(str(secs)) == 1:
                    secs = '0' + str(secs)
                # rebuild the timestamp and find its row index
                string = str(hours) + ':' + str(mins) + ':' + str(secs)
                i = time.index(string)
                print 'Nearest Time', time[i]
                print 'Altitude', altitude[i]
                print 'Internal Temp', internal_temp[i]
                print 'External Temp', temp_bme[i]
                print 'Pressure', pres[i]
                print 'Humidity', hum[i]
                print 'UVA', uva[i]
                print 'UVB', uvb[i]
                print 'Sound', sound[i]
                print 'Co-ordinates', gps_lat[i], gps_long[i]
                # mark the matched sample on the lat/long track plot
                plot_variable(gps_lat, gps_long, 'Lat', 'Long')
                plt.scatter(gps_lat[i], gps_long[i], s=200, color='r',
                            marker='+', linewidth=2)
                return time[i], altitude[i], internal_temp[i], temp_bme[
                    i], pres[i], hum[i], uva[i], uvb[i], sound[i], gps_lat[
                    i], gps_long[i]
def time(self, time): index = time.index(':') fen = time[1:index] miao = time[index + 1:] self._time = float(fen) * 60 + float(miao)
def convertToSeconds(self, time):
    """Convert a "M:SS" duration string (or a bare seconds string) to seconds.

    Returns a float in both cases; strings without ':' are parsed directly.
    """
    if ":" not in time:
        return float(time)
    colon = time.index(":")
    minutes = float(time[:colon])
    return minutes * 60 + float(time[colon + 1:])
def __timeStrip(self, time):
    """Return *time* truncated at the first '.' (drops fractional seconds)."""
    dot = time.index('.')
    return time[:dot]
def _sort(file_queue):
    """Order entries by a combined rank: position when sorted by file size
    (largest first) plus position when sorted by creation time (oldest first).
    Entries that are both large and old sort toward the front.
    """
    by_size = sorted(file_queue, key=lambda item: item.size, reverse=True)
    by_time = sorted(file_queue, key=lambda item: item.time)
    combined_rank = lambda item: by_size.index(item) + by_time.index(item)
    return sorted(file_queue, key=combined_rank)