def convert_to_utc(time, user):
    """Convert *time* to UTC using the user's configured timezone.

    The wall-clock value of *time* is reinterpreted in the user's zone
    (any existing tzinfo is discarded first), normalized across DST
    edges, and converted to UTC. If the user has no usable timezone
    (missing profile attribute or unknown zone name) *time* is returned
    unchanged as a best-effort fallback.
    """
    try:
        tz = pytz.timezone(user.sciriususer.timezone)
    except Exception:
        # Was a bare ``except:`` — keep the deliberate best-effort
        # fallback, but stop swallowing KeyboardInterrupt/SystemExit.
        return time
    # localize() attaches the zone to the naive wall time; normalize()
    # fixes up nonexistent/ambiguous times around DST transitions.
    return tz.normalize(tz.localize(time.replace(tzinfo=None))).astimezone(
        pytz.utc)
def read_csv(fname):
    """Parse an SCE (Southern California Edison) CSV export into readings.

    Layout assumed from the code: the "Service Account" id sits in a
    two-line header block at the top, and hourly interval rows start at
    line 4. Returns a list of dicts with device/sensor/type/time/
    duration/value keys, one per "kWh Delivered" interval.
    """
    from csv import DictReader, excel
    results = []
    dialect = excel()
    dialect.skipinitialspace = True
    # Read everything once and close the handle deterministically
    # (the original leaked the file object).
    with open(fname) as fh:
        lines = fh.readlines()
    # Was ``DictReader(...).next()`` — Python-2-only method; the builtin
    # next() works on Python 2.6+ and 3.x alike.
    header = next(DictReader(lines[0:2], dialect=dialect))
    device = header["Service Account"]
    data = DictReader(lines[3:], dialect=dialect)
    for datum in data:
        time = datetime.strptime(datum["Interval Date & Time"],
                                 "%m/%d/%Y %I:%M %p ")
        # NOTE(review): attaching a pytz zone via replace() bypasses DST
        # normalization — presumably tz.localize() was intended; confirm
        # against how to_epoch() consumes this.
        time = time.replace(tzinfo=timezone("America/Los_Angeles"))
        results.append({
            "device": "sce-" + device,
            "sensor": "meter",
            "type": "mains",
            "time": to_epoch(time),
            "duration": 3600,
            "value": float(datum["kWh Delivered"])
        })
    return results
def read_xml(fname, device):
    """Parse a Green-Button-style Atom XML file of interval readings.

    Each IntervalReading element yields one dict tagged with the given
    *device* id; start/duration/value come from the reading's
    timePeriod and value children.
    """
    from xml.etree import ElementTree
    ATOM = "{http://www.w3.org/2005/Atom}"
    readings = []
    root = ElementTree.parse(fname).getroot()
    for reading in root.findall(".//" + ATOM + "IntervalReading"):
        period = reading.find(ATOM + "timePeriod")
        start_epoch = int(period.find(ATOM + "start").text)
        when = datetime.utcfromtimestamp(start_epoch)
        when = when.replace(tzinfo=timezone("America/Los_Angeles"))
        readings.append({
            "device": "sce-" + device,
            "sensor": "meter",
            "type": "mains",
            "time": to_epoch(when),
            "duration": int(period.find(ATOM + "duration").text),
            "value": float(reading.find(ATOM + "value").text)
        })
    return readings
def test_standup_start():
    """standup_start returns the rounded epoch time the standup finishes."""
    clear()
    owner = auth_register("*****@*****.**", "RFVtgb45678", "M", "Johnson")
    channels_create(owner['token'], 'first', True)
    finish = datetime.utcnow() + timedelta(seconds=6)
    expected = round(finish.replace(tzinfo=timezone.utc).timestamp(), 0)
    result = standup_start(owner['token'], 0, 6)
    assert result == {'time_finish': expected}
def request_newsUrl(self, url, type_cn, date):
    """Scrape a news-list page and return article entries.

    Each anchor under ``div.newslist-container`` yields a dict with
    'date', 'url' and 'title'. Entries are kept when *date* is 'all'
    or when the extracted YYYY-M-D string occurs in *date*.
    (*type_cn* is accepted for interface compatibility but unused.)
    """
    articles = []
    page = requests.get(url)
    soup = bs(page.content, 'html.parser')
    for anchor in soup.select('div.newslist-container > a'):
        stamp = anchor.select('span.newstime')[0].get_text()
        stamp = stamp.replace('/', '-')
        day = re.search('[0-9]+-[0-9]+-[0-9]+', stamp).group(0)
        if date == 'all' or day in date:
            articles.append({
                'date': day,
                'url': anchor.attrs['href'],
                'title': anchor.attrs['title'],
            })
    return articles
def read_html(fname):
    """Parse a SolarCity XHTML report table into generation readings.

    Reads the table with id ``gvRawDataByDevice``: the first row's
    <th> cells become the column headings, every later row with <td>
    cells becomes one reading keyed by those headings.
    """
    from xml.etree import ElementTree
    XHTML = "{http://www.w3.org/1999/xhtml}"
    results = []
    root = ElementTree.parse(fname).getroot()
    table = root.find(".//" + XHTML + "table[@id='gvRawDataByDevice']")
    first_row = table.find(XHTML + "tr")
    headings = [th.text for th in first_row.findall(XHTML + "th")]
    for row in table.findall(XHTML + "tr"):
        cells = [td.text for td in row.findall(XHTML + "td")]
        if not cells:
            # header row (th only) or empty row — skip
            continue
        datum = dict(zip(headings, cells))
        when = datetime.strptime(datum["Measured Date (30 min)"],
                                 "%m/%d/%Y %I:%M:%S %p")
        # truncate to the whole minute
        when = when - timedelta(seconds=when.second)
        when = when.replace(tzinfo=timezone("America/Los_Angeles"))
        results.append({
            "device": "solarcity-" + datum["InverterID"],
            "sensor": "generated",
            "type": "solar",
            "time": to_epoch(when),
            "duration": 15 * 60,
            "value": float(datum["Energy (kWh AC)"])
        })
    return results
def getEPG(self, chid, dt=None):
    """Fetch 24 hours of EPG entries for channel *chid*.

    Queries the ``get_epg`` endpoint starting at *dt* (a from_uts
    value); when *dt* is omitted a default start is derived from the
    module-level ``time`` object. Per-channel ``time_shift`` (minutes)
    overrides the account-wide ``self.timeshift`` (hours) when present.
    Returns a list of dicts with title/time/uts/info/is_video keys.
    """
    self.initTimeFix()
    # Was ``dt == None`` — identity comparison is the correct idiom.
    if dt is None:
        # NOTE(review): `time` must be a module-level datetime-like
        # object; `.replace(0, 0, 0)` presumably zeroes H/M/S — confirm.
        dt = time.replace(0, 0, 0)
    response = self._request(
        'get_epg',
        'cid=%s&from_uts=%s&hours=24&time_shift=%s' % (chid, dt, self.timeshift))
    res = []
    for channel in response['channels']:
        if 'time_shift' in channel:
            ts_fix = int(channel["time_shift"]) * 60
        else:
            ts_fix = self.timeshift * 3600
        for epg in channel['epg']:
            res.append({
                'title': epg['title'],
                'time': self.fixTime(int(epg['begin']), ts_fix),
                'uts': int(epg['begin']),
                'info': epg['info'],
                'is_video': 0
            })
    return res
def net_plot(title, AAT, theta, Z, r, lambda_A, lambda_R, layout='fruchterman', plotting=True, graphScale=1.0, color_threshold=0.7, *args, **kwargs):
    """Provide the eigenvector covariances AAT from RESCAL_ALS output and Z
    the sampled network from one of the netCreate sampling algorithms.

    Draws a HAC heatmap of AAT, then a networkx graph of Z (nodes colored
    by heatmap cluster, sized by degree, edges weighted by theta) and a
    log-log degree distribution. Figures are saved as
    ``<title>_heatmap_<time>`` and ``<title>_graph_<time>``.
    Returns a dict with the heatmap result, the graph, and the heatmap's
    linkage/group arrays.
    """
    # get system time to name figures (strip ':' and '.' so it is filename-safe)
    time = str(dt.datetime.now().time())
    time = time.replace(':', '')
    time = time.replace('.', '')
    # heatmap: hierarchical clustering of the AAT covariances
    hm = ncFunctions.heatmap(AAT, plotting=plotting, color_threshold=color_threshold)
    if plotting:
        plt.suptitle(r'A(A^T) HAC for Induced Rank = %s, $\lambda_{A}$ = %s, $\lambda_{R}$ = %s ' % (r, lambda_A, lambda_R), fontweight='bold', fontsize=14)
        plt.savefig(title + '_heatmap_' + time, figsize=(6, 6))
    # NETWORK
    # Create networkx graph from Z
    g = nx.Graph()
    # add nodes with colors of group
    # NOTE(review): range stops at corder length - 2, so the last node is
    # never added explicitly — confirm whether that off-by-one is intended.
    for n in np.arange(np.shape(hm['corder'])[0] - 1):
        g.add_node(hm['corder'][n], color=hm['group'][n])
    nodeColorList = list(nx.get_node_attributes(g, 'color').values())
    # add edges with weight of theta (probability the link exists)
    cardE = len(np.where(Z == 1)[1])
    edgeList = [(np.where(Z == 1)[0][i], np.where(Z == 1)[1][i]) for i in np.arange(cardE)]
    edgeWeightList = theta[np.where(Z == 1)] * (2 / max(theta[np.where(Z == 1)]))  # scaled link prob Pr(Z[i,j]=1) * weight
    # NOTE(review): same off-by-one pattern — the final edge in edgeList is
    # skipped by the len-1 bound; confirm before changing.
    for e in np.arange(len(edgeList) - 1):
        g.add_edge(edgeList[e][0], edgeList[e][1], weight=edgeWeightList[e])
    # NODE SIZES
    # 1. cluster linkage importance
    #nodesizelist = cluster['linkage'] * (400 / max(cluster['linkage']))
    # 2. betweenness centrality (wide range of sizes; very small on periphery)
    #nodesizelist = np.asarray(list(nx.betweenness_centrality(G,normalized=False).values())) * (400 / max(list(nx.betweenness_centrality(G,normalized=False).values())))
    # 3. degree (smaller range of sizes; easier to see on the periphery)
    nodeSizeList = np.asarray(list(g.degree().values())) * (300 / max(list(g.degree().values())))  # scaled so the largest is size 350
    # reproducibility of the force-directed layout
    np.random.seed(1)
    #bc = nx.betweenness_centrality(g)
    E = len(nx.edges(g))
    V = len(g)
    k = round(E / V, 3)  # mean degree, for the plot title
    #size = np.array(list(bc.values())) * 1000 # here replacing the hierarchical magnitude hm['corder']
    fignx = plt.figure(figsize=(6, 6))
    ## use heatmap color groupings to color nodes and heatmap magnitudes to size nodes
    if layout == 'spring':
        nx.draw(g, pos=nx.spring_layout(g, scale=graphScale), node_color=nodeColorList, node_size=nodeSizeList, width=edgeWeightList)
    elif layout == 'fruchterman':
        nx.draw(g, pos=nx.fruchterman_reingold_layout(g, scale=graphScale), node_color=nodeColorList, node_size=nodeSizeList, width=edgeWeightList)
    else:
        print('Please indicate at a valid layout.')
    #else: #nx.graphviz_layout(g, prog=graphProg)
    plt.title('Network Created from Induced Rank = %s \n V = %s, E = %s, <k> = %s' % (r, V, E, k), fontweight='bold', fontsize=14)
    plt.savefig(title + '_graph_' + time, figsize=(6, 6))
    # plot log degree sequence
    degree_sequence = sorted(nx.degree(g).values(), reverse=True)
    fig3 = plt.figure(figsize=(5, 3))
    plt.loglog(degree_sequence)
    plt.title('Log Degree Distribution', fontweight='bold', fontsize=14)
    return {'cluster': hm, 'graph': g, 'linkage': hm['linkage'], 'group': hm['group']}
def timeStartEnd(time, instrument, interval):
    """Map a tick timestamp to its K-line bar (timestart, timeend) and trade day.

    *instrument* selects night-session handling ('rb', 'j', 'i', 'jm' trade
    the 21:00–~02:30 night session); *interval* selects the bar width
    (INTERVAL_1M/5M/15M/30M/60M/1D). 1-minute bars use a closed-open
    interval; wider bars use inclusive second-59 ends with exchange-specific
    boundary fix-ups.
    """
    # Night-session instruments: resolve the trade day first
    if instrument in ['rb', 'j', 'i', 'jm']:
        if time.hour >= 21 and time.hour <= 23:
            # Friday night rolls to Monday's trade day
            if time.weekday() == 4:
                tradeday = (time + timedelta(days=2, hours=5)).replace(hour=0)
            else:
                tradeday = (time + timedelta(hours=5)).replace(hour=0)
        elif time.hour < 3:
            # Early Saturday morning also belongs to Monday's trade day
            if time.weekday() == 5:
                tradeday = (time + timedelta(days=2, hours=5)).replace(hour=0)
            else:
                tradeday = (time + timedelta(hours=5)).replace(hour=0)
        elif time.hour >= 9 and time.hour <= 15:
            tradeday = time.replace(hour=0)
        else:
            # input time is outside trading hours — please check
            print('输入的时间不在交易范围之内,请检查')
    # Day-session instruments
    else:
        tradeday = time.replace(hour=9)
    # Resolve start and end.
    # 1-minute bars are defined on a closed-open interval.
    tradeday = tradeday.replace(second=0, microsecond=0)
    if interval == INTERVAL_1M:
        timestart = time.replace(second=0, microsecond=0)
        timeend = timestart + timedelta(minutes=1)
        return timestart, timeend, tradeday
    elif interval == INTERVAL_5M:
        minstart = math.floor(time.minute / 5) * 5
        timestart = time.replace(minute=minstart)
        timeend = timestart + timedelta(minutes=4, seconds=59)
        return timestart, timeend, tradeday
    elif interval == INTERVAL_15M:
        minstart = math.floor(time.minute / 15) * 15
        timestart = time.replace(minute=minstart)
        timeend = timestart + timedelta(minutes=14, seconds=59)
        return timestart, timeend, tradeday
    elif interval == INTERVAL_30M:
        minstart = math.floor(time.minute / 30) * 30
        timestart = time.replace(minute=minstart)
        timeend = timestart + timedelta(minutes=29, seconds=59)
        # 30-minute bars: session-boundary end times need substitution
        # (10:29 actually ends at the 10:15 morning break)
        if timeend.hour == 10 and timeend.minute == 29:
            timeend = timeend.replace(minute=14)
        return timestart, timeend, tradeday
    # 60-minute bars
    elif interval == INTERVAL_60M:
        timestart = time.replace(minute=0)
        timeend = timestart + timedelta(minutes=59, seconds=59)
        # Session-boundary end times need substitution
        if timeend.hour == 11 and timeend.minute == 59:
            timeend = timeend.replace(minute=29)
        elif timeend.hour == 2 and timeend.minute == 59:
            timeend = timeend.replace(minute=29)
        elif timeend.hour == 23 and timeend.minute == 59:
            # The 23:00 bar is messy: DCE used to close at 01:00 and later
            # moved the close to 23:30
            if instrument == 'j':
                if time.strftime('%Y-%m-%d') >= '2015-05-08':
                    timeend = timeend.replace(minute=29)
        return timestart, timeend, tradeday
    elif interval == INTERVAL_1D:
        # Daily bar always ends at 14:59:59 of the trade day
        timeend = tradeday.replace(hour=14, minute=59, second=59)
        # Night-session instruments start the daily bar at 21:00 the prior evening
        if instrument in ['rb', 'j', 'i', 'jm']:
            if time.hour >= 21 and time.hour <= 23:
                timestart = time.replace(hour=21)
            elif time.hour < 3:
                timestart = (time - timedelta(hours=5)).replace(hour=21)
            # For a night-session instrument with no prior-evening data,
            # default to the holiday pattern
            elif time.hour >= 9 and time.hour <= 15:
                if time.weekday() == 0:
                    timestart = (time - timedelta(days=3)).replace(hour=21)
                else:
                    timestart = (time - timedelta(days=1)).replace(hour=21)
            else:
                # input time is outside trading hours — please check
                print('输入的时间不在交易范围之内,请检查')
        # Day-session instruments
        else:
            timestart = time.replace(hour=9)
    else:
        # invalid interval — please re-enter
        print('输入的时间周期有误,请重新输入')
    return timestart, timeend, tradeday
class Chifoumi:
    """Rock-paper-scissors game with persistent per-player high scores.

    Scores are loaded from ``chifoumi_scores.txt`` at class-definition
    time into ``scores`` as {player: [best_score, time_in_seconds]}.
    """
    scores = {}
    options = ['Rock', 'Paper', 'Scissors']
    # prepare file to save sessions & scores
    # NOTE(review): opened at import time and kept open for the process
    # lifetime — save_score() relies on this handle.
    f = open('chifoumi_scores.txt', 'a+')
    with open('chifoumi_scores.txt') as my_file:
        my_file.seek(0)
        first_char = my_file.read(1)
        if not first_char:
            # the file is empty: write the header and a default entry
            f.write('<Player> <Score> <Time in seconds>\n')
            f.write('Standard 0 0\n')
        else:
            my_file.seek(0)
            # skip the header line, then parse "name score time" rows
            next(my_file)
            for line in my_file:
                (name, score, time) = line.split(" ")
                time = time.replace('\n', '')
                scores[name] = list((int(score), float(time)))

    def __init__(self, player_name, num_rounds):
        """Register *player_name* (with a zeroed score if new) for *num_rounds* rounds."""
        self.name = player_name
        self.n = num_rounds
        if self.name not in self.scores:
            self.scores[self.name] = [0, 0]

    def start_game(self):
        ''' start the game: play n rounds interactively, counting wins '''
        start = time()
        conteur = 0  # number of rounds won this session
        i = 0
        while i < self.n:
            # re-prompt until the input parses as an int
            while True:
                try:
                    choice = int(
                        input("\nRock: 0, Papre: 1, Scissors: 2 ==> "))
                    break
                except ValueError:
                    print("\nPlease Choose 0, 1 or 2... Play Again!!")
            if choice in [0, 1, 2]:
                player_1 = self.options[choice]
                computer = self.options[randint(0, 2)]
                if player_1 == computer:
                    print('\n*** Tie!! ***')
                elif player_1 == "Rock":
                    if computer == "Paper":
                        print("\n*** You lose!", computer, "covers", player_1, "***")
                    else:
                        print("\n*** You win!", player_1, "smashes", computer, "***")
                        conteur += 1
                elif player_1 == "Paper":
                    if computer == "Scissors":
                        print("\n*** You lose!", computer, "cut", player_1, "***")
                    else:
                        print("\n*** You win!", player_1, "covers", computer, "***")
                        conteur += 1
                elif player_1 == "Scissors":
                    if computer == "Rock":
                        print("\n*** You lose...", computer, "smashes", player_1, "***")
                    else:
                        print("\n*** You win!", player_1, "cut", computer, "***")
                        conteur += 1
                else:
                    print("\nThat's not a valid play. Check your spelling! ")
                i += 1
            else:
                print("\nPlease Choose 0, 1 or 2... Play Again!!")
        self.duree = float(time() - start)
        self.score = conteur
        # to save the highest score
        if conteur > self.scores[self.name][0]:
            self.scores[self.name][0] = conteur
            self.scores[self.name][1] = self.duree

    @classmethod
    def save_score(cls):
        ''' saving score to the file: rewrite it from the in-memory dict '''
        cls.f.seek(0)
        cls.f.truncate()
        cls.f.write('<Player> <Score> <Time in seconds>\n')
        for i in cls.scores.keys():
            cls.f.write("{} {} {}\n".format(i, cls.scores[i][0],
                                            cls.scores[i][1]))

    @classmethod
    def rankOfPlayers(cls):
        ''' Ranking players by score (descending) and printing the board '''
        listofTuples = sorted(cls.scores.items(),
                              key=lambda x: x[1],
                              reverse=True)
        print("\n*************")
        print("\nRanking:\n")
        for elem in listofTuples:
            print(elem[0], " ==> ", elem[1][0])
def net_plot(title, AAT, theta, Z, r, lambda_A, lambda_R, layout='fruchterman', plotting=True, graphScale=1.0, color_threshold=0.7, *args, **kwargs):
    """Provide the eigenvector covariances AAT from RESCAL_ALS output and Z
    the sampled network from one of the netCreate sampling algorithms.

    Plots a HAC heatmap of AAT, a networkx rendering of Z (nodes colored
    by heatmap cluster, sized by degree, edges weighted by theta), and a
    log-log degree distribution; saves the first two as
    ``<title>_heatmap_<time>`` / ``<title>_graph_<time>``. Returns a dict
    of the heatmap result, the graph, and the linkage/group arrays.
    """
    # get system time to name figures (strip ':' and '.' for filename safety)
    time = str(dt.datetime.now().time())
    time = time.replace(':', '')
    time = time.replace('.', '')
    # heatmap: hierarchical clustering of the AAT covariances
    hm = ncFunctions.heatmap(AAT,
                             plotting=plotting,
                             color_threshold=color_threshold)
    if plotting:
        plt.suptitle(
            r'A(A^T) HAC for Induced Rank = %s, $\lambda_{A}$ = %s, $\lambda_{R}$ = %s '
            % (r, lambda_A, lambda_R),
            fontweight='bold',
            fontsize=14)
        plt.savefig(title + '_heatmap_' + time, figsize=(6, 6))
    # NETWORK
    # Create networkx graph from Z
    g = nx.Graph()
    # add nodes with colors of group
    # NOTE(review): range stops one short of corder's length, so the last
    # node is never added explicitly — confirm whether this is intended.
    for n in np.arange(np.shape(hm['corder'])[0] - 1):
        g.add_node(hm['corder'][n], color=hm['group'][n])
    nodeColorList = list(nx.get_node_attributes(g, 'color').values())
    # add edges with weight of theta (probability the link exists)
    cardE = len(np.where(Z == 1)[1])
    edgeList = [(np.where(Z == 1)[0][i], np.where(Z == 1)[1][i])
                for i in np.arange(cardE)]
    edgeWeightList = theta[np.where(Z == 1)] * (2 / max(
        theta[np.where(Z == 1)]))  # scaled link prob Pr(Z[i,j]=1) * weight
    # NOTE(review): same len-1 bound skips the final edge — confirm.
    for e in np.arange(len(edgeList) - 1):
        g.add_edge(edgeList[e][0], edgeList[e][1], weight=edgeWeightList[e])
    # NODE SIZES
    # 1. cluster linkage importance
    #nodesizelist = cluster['linkage'] * (400 / max(cluster['linkage']))
    # 2. betweenness centrality (wide range of sizes; very small on periphery)
    #nodesizelist = np.asarray(list(nx.betweenness_centrality(G,normalized=False).values())) * (400 / max(list(nx.betweenness_centrality(G,normalized=False).values())))
    # 3. degree (smaller range of sizes; easier to see on the periphery)
    nodeSizeList = np.asarray(list(g.degree().values())) * (300 / max(
        list(g.degree().values())))  # scaled so the largest is size 350
    # reproducibility of the force-directed layout
    np.random.seed(1)
    #bc = nx.betweenness_centrality(g)
    E = len(nx.edges(g))
    V = len(g)
    k = round(E / V, 3)  # mean degree, for the plot title
    #size = np.array(list(bc.values())) * 1000 # here replacing the hierarchical magnitude hm['corder']
    fignx = plt.figure(figsize=(6, 6))
    ## use heatmap color groupings to color nodes and heatmap magnitudes to size nodes
    if layout == 'spring':
        nx.draw(g,
                pos=nx.spring_layout(g, scale=graphScale),
                node_color=nodeColorList,
                node_size=nodeSizeList,
                width=edgeWeightList)
    elif layout == 'fruchterman':
        nx.draw(g,
                pos=nx.fruchterman_reingold_layout(g, scale=graphScale),
                node_color=nodeColorList,
                node_size=nodeSizeList,
                width=edgeWeightList)
    else:
        print('Please indicate at a valid layout.')
    #else: #nx.graphviz_layout(g, prog=graphProg)
    plt.title(
        'Network Created from Induced Rank = %s \n V = %s, E = %s, <k> = %s'
        % (r, V, E, k),
        fontweight='bold',
        fontsize=14)
    plt.savefig(title + '_graph_' + time, figsize=(6, 6))
    # plot log degree sequence
    degree_sequence = sorted(nx.degree(g).values(), reverse=True)
    fig3 = plt.figure(figsize=(5, 3))
    plt.loglog(degree_sequence)
    plt.title('Log Degree Distribution', fontweight='bold', fontsize=14)
    return {
        'cluster': hm,
        'graph': g,
        'linkage': hm['linkage'],
        'group': hm['group']
    }
def get_writer(command, time, result_dir):
    """Return a csv.writer over a freshly created results file.

    The filename is ``<command>_<time>.csv`` with ':' stripped from
    *time* so clock strings are filesystem-safe; it is placed inside
    *result_dir* when one is given, otherwise in the current directory.

    NOTE: the file handle is intentionally left open for the writer's
    lifetime; it is released when the writer is garbage-collected.
    """
    filename = "%s_%s.csv" % (command, time.replace(":", ""))
    if result_dir:
        filename = join(result_dir, filename)
    # newline="" is required by the csv module; without it rows get
    # doubled line endings on Windows.
    return csv.writer(open(filename, "w", newline=""))