def friendpage_login(opener, urls, code):
    # Extract data from every URL in the given list and write it to a file.
    error_target = []  # prepare the error list
    for each in urls:  # iterate over every URL in the given list
        req = urllib2.Request(each)  # build the request for the target profile page
        try:
            response = opener.open(req, timeout=10)  # open the profile page; fail after a 10 s timeout
            response_read = response.read()  # read the profile page source
            response.close()  # close the page
            name = name_search(response_read, each)  # extract the name
            time_str = time_search(response_read, each)  # extract the timestamp
            if not time_str:  # timestamp extraction failed
                time_str = u"未知时间"  # mark the time as unknown
                error_target.append(name + "\t" + each)  # record the error
            content = name + '\t' + time_str + '\n'  # build the line to write
            fr_fpath = r"fr_info" + time.strftime("%y%m%d", time.localtime()) + ".txt"  # output file path
            time_write(fr_fpath, content)  # write the content
        except Exception:
            # Profile page timed out (a browser shows "system busy";
            # the target user probably does not exist or has been deactivated).
            error_target.append("用户不存在或其他错误\t" + each)  # "user does not exist or other error"
    error_fpath = r"fr_info_error" + time.strftime("%y%m%d", time.localtime()) + ".txt"  # error-list file path
    error_content = "\n".join(error_target) + "\n"  # build the error output
    if error_content != "\n":  # the error list is not empty
        time_write(error_fpath, error_content)  # write the errors
    return 0  # extraction finished
def saveVerbrauchsData(v_wp, v_sz, zs_wp, zs_sz, interval):
    y = time.strftime('%Y', time.localtime())
    m = time.strftime('%m', time.localtime())
    d = time.strftime('%d', time.localtime())
    f = open("/var/lib/heatpumpMonitor/verbrauch.%s-%s-%s" % (y, m, d), 'a')
    f.write("%s %04d %04d %d %d %d\n" % (time.strftime('%Y %m %d %a %H %H:%M:%S', time.localtime()),
                                         v_wp, v_sz, zs_wp, zs_sz, interval))
    f.close()  # the original wrote `f.close` without parentheses, which never closes the file
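# Because the bare `f.close` silently leaked the file handle, a hedged sketch of a
# context-manager variant avoids that whole class of bug. The function name
# `saveVerbrauchsData_ctx` is hypothetical; the format string is kept verbatim from
# the original, including its doubled %H, which may itself be unintentional.
import time

def saveVerbrauchsData_ctx(v_wp, v_sz, zs_wp, zs_sz, interval):
    stamp = time.localtime()
    path = "/var/lib/heatpumpMonitor/verbrauch.%s" % time.strftime('%Y-%m-%d', stamp)
    with open(path, 'a') as f:  # the file is closed even if write() raises
        f.write("%s %04d %04d %d %d %d\n" % (
            time.strftime('%Y %m %d %a %H %H:%M:%S', stamp),
            v_wp, v_sz, zs_wp, zs_sz, interval))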
def wav_file_gen(encoding_type, ir_code, frequency, signal_strength, btn_name, brand):
    # Name
    today = datetime.date.today()
    today_name = (str2md5(str2md5(str(today))))[0:10]
    wav_src = btn_name + time.strftime('%Y%m%d', time.localtime(time.time()))
    wav_name = (str2md5(wav_src))[0:10]
    brand_src = brand + time.strftime('%m%d', time.localtime(time.time()))
    brand_name = (str2md5(brand_src))[0:10]
    # Path
    path_brand = brand_name + "/"
    path_header = "/var/www/weixin/wechat/static/media/"
    path_today = path_header + today_name + "/"
    # File
    raw_data = path_today + path_brand + wav_name
    pcm_file = raw_data + ".pcm"
    wav_file = raw_data + ".wav"
    relative_wav_file = "media/" + today_name + "/" + path_brand + wav_name + ".wav"
    # Delete Older Path
    for day in range(1, 6):
        date_src = str(today - datetime.timedelta(days=day))
def format_date(timestamp):
    FORY = '%Y-%m-%d @ %H:%M'
    FORM = '%m-%d @ %H:%M'
    FORH = '%H:%M'
    os.environ["TZ"] = config.default_timezone
    time.tzset()
    r_time = time.strftime(FORM, time.localtime(timestamp))
    h_time = time.strftime(FORH, time.localtime(timestamp))
    now = int(time.time())
    t = now - timestamp
    if t < 60:
        format_str = '刚刚'  # "just now"
    elif t < 60 * 60:
        min = t / 60
        format_str = '%d 分钟前' % min  # "N minutes ago"
    elif t < 60 * 60 * 24:
        h = t / (60 * 60)
        format_str = '%d 小时前 %s' % (h, h_time)  # "N hours ago"
    elif t < 60 * 60 * 24 * 3:
        d = t / (60 * 60 * 24)
        if d == 1:
            format_str = '昨天 ' + r_time  # "yesterday"
        else:
            format_str = '前天 ' + r_time  # "the day before yesterday"
    else:
        format_str = time.strftime(FORY, time.localtime(timestamp))
    return format_str
def countQuery(dbSpace):
    dbSpace.queryCount += 1
    if time.localtime()[4] > dbSpace.queryMinCur:  # the minute has rolled over
        dbSpace.queryMinCur = time.localtime()[4]
        dbSpace.lastQueryPerMin = dbSpace.queryMinCount
    else:
        dbSpace.queryMinCount += 1
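# Two quirks in countQuery, assuming the intent is a queries-per-minute gauge:
# `queryMinCount` is never reset when the minute rolls over, and the `>` comparison
# never fires when the minute wraps from 59 back to 0 at the top of each hour.
# A hedged sketch of a variant addressing both (attribute names kept from above;
# the name `countQuery_fixed` is hypothetical):
import time

def countQuery_fixed(dbSpace):
    dbSpace.queryCount += 1
    minute_now = time.localtime().tm_min
    if minute_now != dbSpace.queryMinCur:  # `!=` also catches the 59 -> 0 wrap
        dbSpace.queryMinCur = minute_now
        dbSpace.lastQueryPerMin = dbSpace.queryMinCount
        dbSpace.queryMinCount = 0  # start the new minute's count from zero
    dbSpace.queryMinCount += 1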
def main(argv):
    try:
        postTitle = argv[1]
        postCategory = argv[2]
    except IndexError:  # fall back to defaults when arguments are missing
        postTitle = "DEFAULT TITLE"
        postCategory = "DEFAULT CATEGORY"
    todayDate = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    currentTime = time.strftime('%H:%M', time.localtime(time.time()))
    fileNameWithoutDate = postTitle.lower().replace(' ', '-')
    fileName = todayDate + "-" + fileNameWithoutDate + ".markdown"
    # fileFullName = os.path.join(POST_PATH, fileName)
    with open(fileName, 'w+') as fin:
        fin.write("---\n")
        fin.write("layout: post\n")
        fin.write('title: "%s"\n' % postTitle)
        fin.write('date: %s %s\n' % (todayDate, currentTime))
        fin.write("comments: true\n")
        fin.write('categories: %s\n' % postCategory.capitalize())
        fin.write("---\n\n\n\n")
        fin.write("<!--more-->\n\n\n")
        # the original called fin.close() here; it is unnecessary inside a `with` block
    print('"%s" was created successfully.' % fileName)
def delete(self, thema, id, beitragID=None):
    discussionpath = "./data/themen/" + thema + "/" + id + ".json"
    with open(discussionpath, "r") as discussionfile:
        discussion = json.load(discussionfile)
    if beitragID is None:
        if discussion["Status"] == "deleted":
            discussion["Status"] = " "
        else:
            discussion["Status"] = "deleted"
        discussion["Bearbeiter"] = cherrypy.session["Benutzername"]
        discussion["Bearbeitet"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    else:
        for post in discussion["Beitraege"]:
            if post["ID"] == beitragID:
                if post["Status"] == "deleted":
                    post["Status"] = " "
                else:
                    post["Status"] = "deleted"
                post["Bearbeiter"] = cherrypy.session["Benutzername"]
                post["Bearbeitet"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    with open(discussionpath, "w") as discussionfile:
        json.dump(discussion, discussionfile, indent=4)
def add_automatic_comment(self):
    if self.fixed is True:
        text = (
            "This %s has been scheduled for fixed downtime from %s to %s. "
            "Notifications for the %s will not be sent out during that time period." % (
                self.ref.my_type,
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
                self.ref.my_type)
        )
    else:
        hours, remainder = divmod(self.duration, 3600)
        minutes, seconds = divmod(remainder, 60)
        text = ("This %s has been scheduled for flexible downtime starting between %s and %s "
                "and lasting for a period of %d hours and %d minutes. "
                "Notifications for the %s will not be sent out during that time period." % (
                    self.ref.my_type,
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
                    hours, minutes, self.ref.my_type)
                )
    if self.ref.my_type == 'host':
        comment_type = 1
    else:
        comment_type = 2
    c = Comment(self.ref, False, "(Nagios Process)", text, comment_type, 2, 0, False, 0)
    self.comment_id = c.id
    self.extra_comment = c
    self.ref.add_comment(c)
def generateur(presence):  # "a" if the file already exists, "w" otherwise
    global cible
    global nom
    global type
    pasdefichier = open(cible + ".txt", presence)  # open the file
    fdebug("Ouvert / cree le fichier")
    continueraecrire = "c"
    while continueraecrire != "q":  # the original used `is not`, which tests identity, not equality
        if type == "a":  # the original used `is`, same identity pitfall
            action = raw_input(VERT + "Entre l'action a inscrire dans le registre >>>" + NORMAL)
            while action == "":
                print (ROUGE + "Entre quelque chose" + NORMAL)
                action = raw_input(VERT + "Entre l'action a inscrire dans le registre >>>" + NORMAL)
            pasdefichier.write(time.strftime('%d/%m/%y %H:%M', time.localtime()) + " *** " + action + "(" + nom + ")" + "\n")
        else:
            commantaire = raw_input(VERT + "Entre le commentaire a inscrire dans le casier >>>" + NORMAL)
            while commantaire == "":
                print (ROUGE + "Entre quelque chose" + NORMAL)
                commantaire = raw_input(VERT + "Entre le commentaire a inscrire dans le casier >>>" + NORMAL)
            pasdefichier.write(time.strftime('%d/%m/%y %H:%M', time.localtime()) + " >>> " + commantaire + " (" + nom + ")" + "\n")
        fdebug("Enregistrement dans le fichier")  # record written to the file
        warn = raw_input(VERT + "Avez vous prevenu " + cible + " pour la faute ? (o/n) " + NORMAL)
        if "o" in warn:
            pasdefichier.write(time.strftime('%d/%m/%y %H:%M', time.localtime()) + " *** " + "warn " + "(" + nom + ")" + "\n")
        continueraecrire = raw_input(VERT + "Continuer a écrire sur la meme personne ?(q pour quitter , c pour continuer)" + NORMAL)
    pasdefichier.close()  # close the file behind us
def processRepeated(self, findRunningEvent=True):
    if self.repeated != 0:
        now = int(time()) + 1
        # to avoid problems with daylight saving, we need to calculate with localtime,
        # in struct_time representation
        localrepeatedbegindate = localtime(self.repeatedbegindate)
        localbegin = localtime(self.begin)
        localend = localtime(self.end)
        localnow = localtime(now)
        day = []
        flags = self.repeated
        for x in (0, 1, 2, 3, 4, 5, 6):
            if flags & 1 == 1:
                day.append(0)
            else:
                day.append(1)
            flags >>= 1
        # if day is NOT in the list of repeated days
        # OR if the day IS in the list of the repeated days, check if the event is
        # currently running... then if findRunningEvent is false, go to the next event
        while ((day[localbegin.tm_wday] != 0) or
               (mktime(localrepeatedbegindate) > mktime(localbegin)) or
               (day[localbegin.tm_wday] == 0 and
                (findRunningEvent and localend < localnow) or
                ((not findRunningEvent) and localbegin < localnow))):
            localbegin = self.addOneDay(localbegin)
            localend = self.addOneDay(localend)
        # we now have a struct_time representation of begin and end in localtime,
        # but we have to calculate back to (gmt) seconds since epoch
        self.begin = int(mktime(localbegin))
        self.end = int(mktime(localend))
        if self.begin == self.end:
            self.end += 1
        self.timeChanged()
def cmd_list(self, args):
    """
    @G%(name)s@w - @B%(cmdname)s@w
      list timers and the plugins they are defined in
      @CUsage@w: list
    """
    tmsg = []
    match = args['match']
    tmsg.append('Local time is: %s' % time.strftime('%a %b %d %Y %H:%M:%S', time.localtime()))
    tmsg.append('%-20s : %-13s %-9s %-8s %s' % ('Name', 'Defined in', 'Enabled', 'Fired', 'Next Fire'))
    for i in self.timerlookup:
        if not match or match in i:
            timerc = self.timerlookup[i]
            tmsg.append('%-20s : %-13s %-9s %-8s %s' % (
                timerc.name, timerc.plugin.sname, timerc.enabled, timerc.timesfired,
                time.strftime('%a %b %d %Y %H:%M:%S', time.localtime(timerc.nextcall))))
    return True, tmsg
def on_data(self, data):
    if time.time() >= self.started + self.duration:
        stats = open('{0}-sample.stats'.format(int(self.started)), 'w+')
        stats.write("================= STATISTICS =================" + "\n")
        stats.write("Start time: " + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.started)) + "\n")
        stats.write("End time: " + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + "\n")
        stats.write("First Tweet ID: " + self.first_tweet_id + "\n")
        stats.write("Last Tweet ID: " + self.last_tweet_id + "\n")
        stats.write("Language: " + self.lang + "\n")
        stats.write("Language classification threshold: " + str(self.lang_threshold) + "\n")
        stats.write("Above threshold: " + str(self.counter[self.lang + '-above']) + "\n")
        stats.write("Below threshold: " + str(self.counter[self.lang + '-below']) + "\n")
        stats.write("Excluded: " + str(self.counter['excluded']) + "\n")
        return False
    elif 'in_reply_to_status_id' in data:
        status = Status.parse(self.api, json.loads(data))
        langclass = langid.classify(status.text)
        if (self.counter == {self.lang + '-above': 0, self.lang + '-below': 0, 'excluded': 0}):
            self.first_tweet_id = str(status.id)
        self.last_tweet_id = str(status.id)
        if (langclass[0] == self.lang):
            if langclass[1] >= self.lang_threshold:
                self.above_output.write(data)
                self.counter[self.lang + '-above'] += 1
            else:
                self.below_output.write(data)
                self.counter[self.lang + '-below'] += 1
        else:
            self.excl_output.write(data)
            self.counter['excluded'] += 1
        return True
def _fix_review_dates(self, item):
    ''' Convert dates so ES detects them '''
    for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
        if date_field in item.keys():
            date_ts = item[date_field]
            item[date_field] = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(date_ts))
    if 'patchSets' in item.keys():
        for patch in item['patchSets']:
            pdate_ts = patch['createdOn']
            patch['createdOn'] = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(pdate_ts))
            if 'approvals' in patch:
                for approval in patch['approvals']:
                    adate_ts = approval['grantedOn']
                    approval['grantedOn'] = \
                        time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(adate_ts))
    if 'comments' in item.keys():
        for comment in item['comments']:
            cdate_ts = comment['timestamp']
            comment['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(cdate_ts))
def newCalib(self, calibName=None, width=None, distance=None, gamma=None,
             notes=None, useBits=False, verbose=True):
    """create a new (empty) calibration for this monitor and
    makes this the current calibration"""
    if calibName is None:
        calibName = strFromDate(time.localtime())
    # add to the list of calibrations
    self.calibNames.append(calibName)
    self.calibs[calibName] = {}
    self.setCurrent(calibName)
    # populate with some default values:
    self.setCalibDate(time.localtime())
    self.setGamma(gamma)
    self.setWidth(width)
    self.setDistance(distance)
    self.setNotes(notes)
    self.setPsychopyVersion(__version__)
    self.setUseBits(useBits)
    newGrid = numpy.ones((4, 3), 'd')
    newGrid[:, 0] *= 0
    self.setGammaGrid(newGrid)
    self.setLineariseMethod(1)
def add_separator(self, timestamp):
    '''Add whitespace and timestamp between chat sessions.'''
    time_with_current_year = \
        (time.localtime(time.time())[0], ) + \
        time.strptime(timestamp, '%b %d %H:%M:%S')[1:]
    timestamp_seconds = time.mktime(time_with_current_year)
    if timestamp_seconds > time.time():
        time_with_previous_year = \
            (time.localtime(time.time())[0] - 1, ) + \
            time.strptime(timestamp, '%b %d %H:%M:%S')[1:]
        timestamp_seconds = time.mktime(time_with_previous_year)
    message = TextBox(self,
                      style.COLOR_BUTTON_GREY, style.COLOR_BUTTON_GREY,
                      style.COLOR_WHITE, style.COLOR_BUTTON_GREY, False, None,
                      timestamp_to_elapsed_string(timestamp_seconds))
    self._message_list.append(message)
    box = Gtk.HBox()
    align = Gtk.Alignment.new(xalign=0.5, yalign=0.0, xscale=0.0, yscale=0.0)
    box.pack_start(align, True, True, 0)
    align.show()
    align.add(message)
    message.show()
    self._conversation.attach(box, 0, self._row_counter, 1, 1)
    box.show()
    self._row_counter += 1
    self.add_log_timestamp(timestamp)
    self._last_msg_sender = None
def _createSearchRequest(self, search=None, tags=None, notebooks=None,
                         date=None, exact_entry=None, content_search=None):
    request = ""
    if notebooks:
        for notebook in tools.strip(notebooks.split(',')):
            if notebook.startswith('-'):
                request += '-notebook:"%s" ' % tools.strip(notebook[1:])
            else:
                request += 'notebook:"%s" ' % tools.strip(notebook)
    if tags:
        for tag in tools.strip(tags.split(',')):
            if tag.startswith('-'):
                request += '-tag:"%s" ' % tag[1:]
            else:
                request += 'tag:"%s" ' % tag
    if date:
        date = tools.strip(date.split('-'))
        try:
            dateStruct = time.strptime(date[0] + " 00:00:00", "%d.%m.%Y %H:%M:%S")
            request += 'created:%s ' % time.strftime("%Y%m%d", time.localtime(time.mktime(dateStruct)))
            if len(date) == 2:
                dateStruct = time.strptime(date[1] + " 00:00:00", "%d.%m.%Y %H:%M:%S")
                request += '-created:%s ' % time.strftime("%Y%m%d", time.localtime(time.mktime(dateStruct) + 60 * 60 * 24))
        except ValueError, e:
            out.failureMessage('Incorrect date format in --date attribute. '
                               'Format: %s' % time.strftime("%d.%m.%Y", time.strptime('19991231', "%Y%m%d")))
            return tools.exitErr()
def setup(self):
    self.record = ACROSCORE + strftime('%Y-%m-%d-%H%M') + '.game'
    open(self.record, "w")
    self.active = True
    self.cumulative = {}
    self.start = mktime(localtime())
    self.mark = mktime(localtime())
    self.round = 1
    self.stage = "waiting"
    self.matchlast = False
    self.killgame = False  # the original set this twice; the duplicate is dropped
    self.warned = False
    self.bypass = False
    self.displayed = False
    self.voters = []
    self.players = []
    self.gimps = {}
    self.selfsubbed = False
    self.paused = False
def do_export(_):
    left_idx = g_pool.seek_control.trim_left
    right_idx = g_pool.seek_control.trim_right
    export_range = left_idx, right_idx + 1  # exclusive range.stop
    export_ts_window = pm.exact_window(g_pool.timestamps, (left_idx, right_idx))

    export_dir = os.path.join(g_pool.rec_dir, "exports")
    export_dir = next_export_sub_dir(export_dir)

    os.makedirs(export_dir)
    logger.info('Created export dir at "{}"'.format(export_dir))

    export_info = {
        "Player Software Version": str(g_pool.version),
        "Data Format Version": meta_info["Data Format Version"],
        "Export Date": strftime("%d.%m.%Y", localtime()),
        "Export Time": strftime("%H:%M:%S", localtime()),
        "Frame Index Range:": g_pool.seek_control.get_frame_index_trim_range_string(),
        "Relative Time Range": g_pool.seek_control.get_rel_time_trim_range_string(),
        "Absolute Time Range": g_pool.seek_control.get_abs_time_trim_range_string(),
    }
    with open(os.path.join(export_dir, "export_info.csv"), "w") as csv:
        write_key_value_file(csv, export_info)

    notification = {
        "subject": "should_export",
        "range": export_range,
        "ts_window": export_ts_window,
        "export_dir": export_dir,
    }
    g_pool.ipc_pub.notify(notification)
def assignmentsHTML():
    html = ""
    bytes = 0
    for i in assignments:
        fdata = os.stat(i[1])
        # Get last modified date, and format to DOS format
        mdate = time.strftime("%m-%d-%y", time.localtime(fdata.st_mtime))
        # Get last modified time, and format to DOS format
        mtime = time.strftime("%I", time.localtime(fdata.st_mtime)).strip("0") + \
                time.strftime(":%M", time.localtime(fdata.st_mtime)) + \
                time.strftime("%p", time.localtime(fdata.st_mtime)).lower()[0]
        # Get file size, and format to DOS format
        fsize = '{:,}'.format(fdata.st_size)
        elem = '{}{:>13}{:>9}{:>8}'.format(a('{:<21}'.format(i[0]), i[1]), fsize, mdate, mtime)
        html = html + elem + "\n"
        bytes = bytes + os.path.getsize(i[1])
    files = len(assignments)
    free = 8589869056 - bytes
    html = html + '{:>18} file(s){:>14,} bytes\n'.format(files, bytes)
    html = html + '{:>40} bytes free\n'.format('{:,}'.format(free))
    return html
def lastlogExit(self):
    starttime = time.strftime("%a %b %d %H:%M", time.localtime(self.logintime))
    endtime = time.strftime("%H:%M", time.localtime(time.time()))
    duration = utils.durationHuman(time.time() - self.logintime)
    f = file("%s/lastlog.txt" % self.env.cfg.get("honeypot", "data_path"), "a")
    f.write("root\tpts/0\t%s\t%s - %s (%s)\n" %
            (self.clientIP, starttime, endtime, duration))
    f.close()
def next_reset(self, t=None):
    """ Determine next reset time """
    t = t or time.time()
    tm = time.localtime(t)
    if self.q_period == 'd':
        nx = (tm[0], tm[1], tm[2], self.q_hour, self.q_minute, 0, 0, 0, tm[8])
        if (tm.tm_hour * 60 + tm.tm_min) >= (self.q_hour * 60 + self.q_minute):
            # If today's moment has passed, it will happen tomorrow
            t = time.mktime(nx) + 24 * 3600
            tm = time.localtime(t)
    elif self.q_period == 'w':
        if self.q_day < tm.tm_wday + 1 or \
           (self.q_day == tm.tm_wday + 1 and
                (tm.tm_hour * 60 + tm.tm_min) >= (self.q_hour * 60 + self.q_minute)):
            tm = time.localtime(next_week(t))
            dif = abs(self.q_day - tm.tm_wday - 1)
            t = time.mktime(tm) + dif * 24 * 3600
            tm = time.localtime(t)
    elif self.q_period == 'm':
        if self.q_day < tm.tm_mday or \
           (self.q_day == tm.tm_mday and
                (tm.tm_hour * 60 + tm.tm_min) >= (self.q_hour * 60 + self.q_minute)):
            tm = time.localtime(next_month(t))
            day = min(last_month_day(tm), self.q_day)
            tm = (tm[0], tm[1], day, self.q_hour, self.q_minute, 0, 0, 0, tm[8])
    else:
        return
    tm = (tm[0], tm[1], tm[2], self.q_hour, self.q_minute, 0, 0, 0, tm[8])
    self.q_time = time.mktime(tm)
    logging.debug('Will reset quota at %s', tm)
def recommender(recom_count=25, test_times=100, hotNode_degree=60, year_sta=2011):
    '''
    Run the recommendation experiment; results are stored in txt files.
    @edge_del        number of randomly deleted edges
    @recom_count     size of the recommendation list
    @test_times      number of experiment runs
    @hotNode_degree  minimum neighbour count that defines a hot node
    '''
    file_input = open('/home/zhenchentl/out.txt', 'w+')
    file_input_re = open('/home/zhenchentl/out_re.txt', 'w+')
    file_input.write('recom_count:' + str(recom_count) + '\n')
    file_input.write('test_times:' + str(test_times) + '\n')
    file_input.write('hotNode_degree:' + str(hotNode_degree) + '\n')
    file_input.write('before get graph time:' +
                     time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) + '\n')
    print 'before get graph time:' + time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    '''get the graph based on the coauthor relationship'''
    mD = DigraphByYear()
    mDigraph = mD.getDigraph()
    getGraphAttr(mDigraph, file_input)
    file_input.write('after get graph time:' +
                     time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) + '\n')
    print 'after get graph time:' + time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    recom_count = 5
    while recom_count <= 100:
        exp_recom(mDigraph, file_input, file_input_re, recom_count)
        recom_count += 5
    file_input.close()
    file_input_re.close()
def mylocaltime(sec=None, mode=None):
    from scal3.cal_types import convert
    if mode is None:  # defaults to DATE_GREG
        return list(localtime(sec))
    t = list(localtime(sec))
    t[:3] = convert(t[0], t[1], t[2], DATE_GREG, mode)
    return t
def print_(self, dic, cnt):
    if self.subtype == 'bot_message':
        print(str(cnt) + ' BOT名前: {0} : {1} ID:{2}\n'.format(
            dic[self.user],
            time.strftime("%Y/%m/%d %a %H:%M:%S", time.localtime(float(self.ts))),
            self.user))
        print('\t' + self.getTextAs2CH(dic) + '\n')
    elif self.subtype == 'me_message':
        print(str(cnt) + ' 名前: {0} : {1} ID:{2}\n'.format(
            dic[self.user],
            time.strftime("%Y/%m/%d %a %H:%M:%S", time.localtime(float(self.ts))),
            self.user) + 'Type: /me message')
        print('\t' + self.getTextAs2CH(dic) + '\n')
    elif self.subtype == 'message_changed':
        print(str(cnt) + ' 名前: {0} : {1} ID:{2}\n'.format(
            dic[self.user],
            time.strftime("%Y/%m/%d %a %H:%M:%S", time.localtime(float(self.ts))),
            self.user) + 'Type: /me message')
        print('\t' + self.getTextAs2CH(dic) + '\n')
        print('Edited by {0} at {1}\n'.format(
            dic[self.edit_user],
            time.strftime("%Y/%m/%d %a %H:%M:%S", time.localtime(float(self.edit_ts)))))
    else:
        print(str(cnt) + ' 名前: {0} : {1} ID:{2}\n'.format(
            dic[self.user],
            time.strftime("%Y/%m/%d %a %H:%M:%S", time.localtime(float(self.ts))),
            self.user))
        print('\t' + self.getTextAs2CH(dic) + '\n')
def png(self, start_timestamp, end_timestamp):
    self.load(start_timestamp, end_timestamp)
    plt.figure(figsize=(10, 7.52))
    plt.rc("axes", labelsize=12, titlesize=14)
    plt.rc("font", size=10)
    plt.rc("legend", fontsize=7)
    plt.rc("xtick", labelsize=8)
    plt.rc("ytick", labelsize=8)
    plt.axes([0.08, 0.08, 1 - 0.27, 1 - 0.15])
    for plot in self.plots:
        plt.plot(self.timestamps, self.plots[plot],
                 self.series_fmt(plot), label=self.series_label(plot))
    plt.axis("tight")
    plt.gca().xaxis.set_major_formatter(
        matplotlib.ticker.FuncFormatter(
            lambda x, pos=None: time.strftime("%H:%M\n%b %d", time.localtime(x)))
    )
    plt.gca().yaxis.set_major_formatter(
        matplotlib.ticker.FuncFormatter(
            lambda x, pos=None: locale.format("%.*f", (0, x), True))
    )
    plt.grid(True)
    plt.legend(loc=(1.003, 0))
    plt.xlabel("Time/Date")
    plt.title(
        self.description() + "\n%s to %s" % (
            time.strftime("%H:%M %d-%b-%Y", time.localtime(start_timestamp)),
            time.strftime("%H:%M %d-%b-%Y", time.localtime(end_timestamp)),
        )
    )
    output_buffer = StringIO.StringIO()
    plt.savefig(output_buffer, format="png")
    return output_buffer.getvalue()
def date_format(epoch):
    """given an epoch, return a unix-ls like formatted string"""
    time_tuple = time.localtime(epoch)
    if time.localtime().tm_year != time_tuple.tm_year:
        return time.strftime('%b %d %Y ', time_tuple)
    return time.strftime('%b %d %H:%M ', time_tuple)
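# Hypothetical usage of date_format, exercising both branches; the printed
# values depend on the current date and local timezone:
import time

print(date_format(time.time()))                    # same year  -> e.g. 'Jun 05 14:23 '
print(date_format(time.time() - 2 * 365 * 86400))  # older year -> e.g. 'Jun 05 2023 '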
def getSearchSimilarEpg(ref, eventid):
    ref = unquote(ref)
    ret = []
    ev = {}
    epgcache = eEPGCache.getInstance()
    events = epgcache.search(('IBDTSENR', 128, eEPGCache.SIMILAR_BROADCASTINGS_SEARCH, ref, eventid))
    if events is not None:
        for event in events:
            ev = {}
            ev['id'] = event[0]
            ev['date'] = "%s %s" % (tstrings[("day_" + strftime("%w", (localtime(event[1]))))],
                                    strftime("%d.%m.%Y", (localtime(event[1]))))
            ev['begin_timestamp'] = event[1]
            ev['begin'] = strftime("%H:%M", (localtime(event[1])))
            ev['duration_sec'] = event[2]
            ev['duration'] = int(event[2] / 60)
            ev['end'] = strftime("%H:%M", (localtime(event[1] + event[2])))
            ev['title'] = event[3]
            ev['shortdesc'] = convertDesc(event[4])
            ev['longdesc'] = convertDesc(event[5])
            ev['sref'] = event[7]
            ev['sname'] = filterName(event[6])
            ev['picon'] = getPicon(event[7])
            ev['now_timestamp'] = None
            ret.append(ev)
    return {"events": ret, "result": True}
def recomByBasewalker(graph, targetNode, newCoAuthorList, recom_count,
                      file_input, file_input_re, max_iterations, damping_factor):
    recom_list = []
    file_input.write('before BaseWalker time:' +
                     time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) + '\n')
    pagerank = PageRank(0, graph, targetNode, damping_factor, max_iterations)
    file_input.write('after BaseWalker time:' +
                     time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) + '\n')
    index = 0
    for k, v in pagerank:
        # if not graph.has_edge((targetNode, k)):
        recom_list.append(k)
        file_input.write('recom:' + '(' + targetNode + ':' + k + ')' + str(v) + '\n')
        index += 1
        if index >= recom_count - 1:
            break
    pagerank = []
    file_input.write(str(newCoAuthorList) + '\n')
    node_count_right = len(list(set(newCoAuthorList) & set(recom_list)))
    path_dis = find_shortest_path(graph, targetNode, recom_list)
    file_input_re.write('2' + str(len(newCoAuthorList)) + ' ' + str(node_count_right) +
                        ' ' + str(recom_count) + ' ' + str((1.0 * path_dis) / recom_count) + '\n')
    recom_list = []
    '''return the precision, recall and average shortest path length'''
    return (1.0 * node_count_right) / recom_count, \
           (1.0 * node_count_right) / len(newCoAuthorList), \
           (1.0 * path_dis) / recom_count
def newFunc(*args, **args2):
    t0 = time.time()
    print "@%s, {%s} start" % (time.strftime("%X", time.localtime()), func.__name__)
    back = func(*args, **args2)
    print "@%s, {%s} end" % (time.strftime("%X", time.localtime()), func.__name__)
    print "@%.3fs taken for {%s}" % (time.time() - t0, func.__name__)
    return back
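# newFunc reads like the inner wrapper of a timing decorator; the enclosing
# function that supplies `func` is not shown. A minimal sketch of how such a
# decorator is usually assembled (the name `timeit_log` is hypothetical,
# Python 2 print syntax kept to match the snippet):
import time

def timeit_log(func):
    def newFunc(*args, **args2):
        t0 = time.time()
        print "@%s, {%s} start" % (time.strftime("%X", time.localtime()), func.__name__)
        back = func(*args, **args2)
        print "@%s, {%s} end" % (time.strftime("%X", time.localtime()), func.__name__)
        print "@%.3fs taken for {%s}" % (time.time() - t0, func.__name__)
        return back
    return newFunc

@timeit_log
def work():
    time.sleep(0.1)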
def exec_cmd(self):
    global texto1
    print "Soy el hilo"  # "I am the thread"
    self.miSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.miSocket.bind(("192.168.1.2", 4545))
    self.miSocket.settimeout(1)
    self.miSocket.listen(2)
    print("Esperando conexion")  # waiting for a connection
    while hilo:
        try:
            self.channel, details = self.miSocket.accept()
        except:
            pass
        else:
            self.channel.settimeout(1)
            self.channel.send("OK\n")
            print "OK enviado"  # OK sent
            print "Esperando recibir, hilo = ", hilo  # waiting to receive
            while hilo:
                try:
                    texto1 = self.channel.recv(1024)
                except:
                    pass
                else:
                    self.entry_lat.set_text("Leyendo")
                    self.entry_long.set_text("posicion")
                    self.new_thread(self.guardar)
                    self.buffer.insert_at_cursor("Recibido :" + texto1 + "\n")
                    print time.localtime(time.time())
                    print "recibido", texto1
                    if texto1 == "quit":
                        break
    print "Cerrando el socket"  # closing the socket
    self.miSocket.shutdown(socket.SHUT_RDWR)  # the original passed the socket itself, which raises TypeError
    self.miSocket.close()
print "step24.hit back" driver.back() driver.implicitly_wait(10) time.sleep(2) result = driver.title assert result == 'FastPass | Entitlements - Active allocations for site - Default sort by end date',"The page did not be opened correct" print "\n" print "Test Case end with successfully!" def tearDown(self): self.driver.quit() self.assertEqual([], self.verificationErrors) if __name__ == '__main__': now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time())) testunit=unittest.TestSuite() testunit.addTest(FastPass_Agile("test_Case_SoftwareSubscription_VerifyLinks02")) filename="C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\"+now+" FastPass_Test_Case_SoftwareSubscription_VerifyLinks02.html" fp=file(filename,'wb') runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title='FastPass_Agile Test Case',description='This is SoftwareSubscription_VerifyLinks02 test case') runner.run(testunit)
print("Welcome " + name) time.sleep(1) print("\nPaper Stone Scissors Game Begins") time.sleep(1) print("==============================================") # This while loop holds the game number. while (game_counter < 10): # Local time is taken for each game. The second specified with "tm_sec". # "mod 10" of second is taken with mod mathematical operant. # The choice of computer is "stone" if the result is 0,3,6,9. # The choice of computer is "paper" if the result is 1,4,7. # The choice of computer is "scissor" if the result is 2,5,8. local_time = time.localtime() second_of_time = local_time.tm_sec mod_of_second = second_of_time % 10 if (mod_of_second == 0 or mod_of_second == 3 or mod_of_second == 6 or mod_of_second == 9): choice_of_computer = "stone" elif (mod_of_second == 1 or mod_of_second == 4 or mod_of_second == 7): choice_of_computer = "paper" else: choice_of_computer = "scissor" #This while loop was used for understand if the selection is valid or not. while (True): time.sleep(2)
def main():
    if not torch.cuda.is_available():
        print('no gpu device available')
        sys.exit(1)

    num_gpus = torch.cuda.device_count()
    np.random.seed(args.seed)
    args.gpu = args.local_rank % num_gpus
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.deterministic = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    group_name = 'search_space_shrinking'
    print('gpu device = %d' % args.gpu)
    print("args = %s", args)

    torch.distributed.init_process_group(backend='nccl', init_method='env://', group_name=group_name)
    args.world_size = torch.distributed.get_world_size()
    args.batch_size = args.batch_size // args.world_size

    criterion_smooth = CrossEntropyLabelSmooth(args.classes, args.label_smooth).cuda()
    total_iters = args.epochs * per_epoch_iters
    # Max shrinking iterations
    iters = config.op_num

    # Prepare data
    train_loader = get_train_dataloader(args.train_dir, args.batch_size, args.local_rank, total_iters)
    train_dataprovider = DataIterator(train_loader)

    operations = []
    for _ in range(config.edges):
        operations.append(list(range(config.op_num)))
    print('operations={}'.format(operations))

    # Prepare model
    base_model = Network_ImageNet().cuda(args.gpu)
    model, seed = get_warmup_model(train_dataprovider, criterion_smooth,
                                   operations, per_epoch_iters, args.seed, args)
    print('arch = {}'.format(model.module.architecture()))
    optimizer, scheduler = get_optimizer_schedule(model, args, total_iters)

    start_iter, ops_dim = 0, 0
    checkpoint_tar = config.checkpoint_cache
    if os.path.exists(checkpoint_tar):
        checkpoint = torch.load(checkpoint_tar,
                                map_location={'cuda:0': 'cuda:{}'.format(args.local_rank)})
        start_iter = checkpoint['iter'] + 1
        seed = checkpoint['seed']
        operations = checkpoint['operations']
        model.load_state_dict(checkpoint['state_dict'])
        now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        print('{} load checkpoint..., iter = {}, operations={}'.format(now, start_iter, operations))

        # Reset the scheduler
        cur_iters = (config.first_stage_epochs +
                     (start_iter - 1) * config.other_stage_epochs) * per_epoch_iters if start_iter > 0 else 0
        for _ in range(cur_iters):
            if scheduler.get_lr()[0] > args.min_lr:
                scheduler.step()

    # Save the base weights for computing angle
    if start_iter == 0 and args.local_rank == 0:
        torch.save(model.module.state_dict(), config.base_net_cache)
        print('save base weights ...')

    for i in range(start_iter, iters):
        print('search space size: {}'.format(get_search_space_size(operations)))
        # ABS finishes when the size of the search space is less than the threshold
        if get_search_space_size(operations) <= config.shrinking_finish_threshold:
            # Save the shrunk search space
            pickle.dump(operations, open(args.operations_path, 'wb'))
            break

        per_stage_iters = config.other_stage_epochs * per_epoch_iters if i > 0 \
            else config.first_stage_epochs * per_epoch_iters
        seed = train(train_dataprovider, optimizer, scheduler, model,
                     criterion_smooth, operations, i, per_stage_iters, seed, args)

        if args.local_rank == 0:
            # Search space shrinking
            load(base_model, config.base_net_cache)
            operations = ABS(base_model, model.module, operations, i)
            now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print('{} |=> iter = {}, operations={}, seed={}'.format(now, i + 1, operations, seed))
            save_checkpoint({'operations': operations,
                             'iter': i,
                             'state_dict': model.state_dict(),
                             'seed': seed}, config.checkpoint_cache)
            operations = merge_ops(operations)
            ops_dim = len(operations)

        # Synchronize variables across multiple processes
        ops_dim = broadcast(obj=ops_dim, src=0)
        if args.local_rank != 0:
            operations = np.zeros(ops_dim, dtype=np.int)
        operations = broadcast(obj=operations, src=0)
        operations = split_ops(operations)
def getNewFormatTime():
    """
    Return the current time, formatted.
    :return: formatted time string
    """
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
import re
import os
import time
import pymysql
import socket

# First, zero the traffic counters for every port
os.popen("iptables -Z INPUT")
os.popen("iptables -Z OUTPUT")

while True:
    # Run once every 4 hours
    now = time.strftime("%m-%d,%H:%M:%S", time.localtime(time.time()))
    hour = int(now.split(",")[1].split(":")[0])
    day = int(now.split(",")[0].split("-")[1])
    if hour % 4 != 0:
        time.sleep(1800)
        continue
    # Read the previous results from the database
    record = dict()
    db = pymysql.connect(host="58.205.208.72", port=8779, user="******",
                         password="******", database="thuproxy")
    cur = db.cursor()
    cur.execute("select * from thuproxy_proxyaccount")
    for r in cur:
def update_label(self, *args):
    self.label.text = time.strftime('%d-%b %H:%M:%S', time.localtime())
import time

from kivy.base import runTouchApp
from kivy.properties import ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.clock import triggered


class TestWidget(BoxLayout):
    def __init__(self, **kwargs):
        super(TestWidget, self).__init__(**kwargs)
        self.label = Label(font_size=60)
        self.add_widget(self.label)
        self.update_label()

    @triggered(3, interval=True)
    def update_label(self, *args):
        self.label.text = time.strftime('%d-%b %H:%M:%S', time.localtime())


if __name__ == '__main__':
    print time.strftime('%H%M%S', time.localtime())
    runTouchApp(widget=TestWidget())

# Builder.load_string("""
# <TestWidget>:
#     Label:
#         text: time.strftime('%H%M%S', time.localtime())
# """)
        #---------------------------------
        if strand:
            strandD[gene] = lineL[5]
        #if type in aDict[gene]:
        #    aDict[gene][type].append(lineL)
        #else:
        #    aDict[gene][type] = [lineL]
    #-------------END reading file----------
    #----close file handle for files-----
    if file != '-':
        fh.close()
    #-----------end close fh-----------
    #print aDict['NM_027855_3']
    #print aDict
    getBins(aDict, nBins, strandD)
    #---------------------------------------
    if verbose:
        print >>sys.stderr, \
            "--Successful %s" % strftime(timeformat, localtime())

if __name__ == '__main__':
    startTime = strftime(timeformat, localtime())
    main()
    endTime = strftime(timeformat, localtime())
    fh = open('python.log', 'a')
    print >>fh, "%s\n\tRun time : %s - %s " % \
        (' '.join(sys.argv), startTime, endTime)
    fh.close()
# _*_ coding: UTF-8 _*_
import time
import calendar

ticks = time.time()
print 'Current timestamp:', ticks

localtime = time.localtime(time.time())
print "Local time:", localtime

localtime = time.asctime(time.localtime(time.time()))
print "Local time:", localtime

'''
%y  two-digit year (00-99)
%Y  four-digit year (0000-9999)
%m  month (01-12)
%d  day of the month (01-31)
%H  hour, 24-hour clock (00-23)
%I  hour, 12-hour clock (01-12)
%M  minutes (00-59)
%S  seconds (00-59)
%a  abbreviated local weekday name
%A  full local weekday name
%b  abbreviated local month name
%B  full local month name
%c  local date and time representation
%j  day of the year (001-366)
%p  local equivalent of A.M. or P.M.
%U  week number of the year (00-53), Sunday as the first day of the week
%w  weekday (0-6), Sunday as the start of the week
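# A short example tying the directives above to time.strftime, in the same
# Python 2 style as the snippet (outputs depend on the current local time):
import time

now = time.localtime()
print time.strftime("%Y-%m-%d %H:%M:%S", now)   # e.g. 2016-03-20 11:45:39
print time.strftime("%a %b %d %I:%M %p", now)   # e.g. Sun Mar 20 11:45 AM
print time.strftime("Day %j of the year", now)  # e.g. Day 080 of the year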
class MysSpider(scrapy.Spider):
    startTime = int(time.time())
    name = 'pmys'
    allowed_domains = ['shopee.com.my']
    start_urls = []
    currentPage = 1
    custom_settings = {
        'ITEM_PIPELINES': {
            'myscrapy.mypipelines.pmysPipeline.Pipeline': 300,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'myscrapy.mymiddlewares.pmysMiddleware.DownloaderMiddleware': 300
        }
    }
    runId = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    shopUsername = '******'
    basePageUrl = ''

    def __init__(self):
        self.start_urls.append('https://shopee.com.my/' + self.shopUsername)
        driverPath = DRIVER_PATH
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')  # comment this out to get a visible browser window
        chrome_options.add_argument('--no-sandbox')  # no-sandbox mode
        chrome_options.add_argument(
            "service_args = ['–ignore - ssl - errors = true', '–ssl - protocol = TLSv1']"
        )  # Python2/3
        chrome_options.add_argument('window-size=1920x3000')  # set the browser resolution
        chrome_options.add_argument('--disable-gpu')  # Google's docs mention this flag works around a bug
        prefs = {
            "profile.managed_default_content_settings.images": 2,
            'permissions.default.stylesheet': 2
        }
        chrome_options.add_experimental_option("prefs", prefs)
        chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])  # evade automation detection
        self.browserPc = webdriver.Chrome(chrome_options=chrome_options, executable_path=driverPath)
        super(MysSpider, self).__init__()

    def parse(self, response):
        print('=== into parse ===')
        # Shop info:
        # https://shopee.com.my/api/v2/shop/get?is_brief=0&shopid=32563007
        # response = requests.get('https://shopee.com.my/api/v2/item/get?itemid=4854960706&shopid=118059163', params=json)
        # print(response.url)
        # print(type(response.text))
        # obj = json.loads(response.text)
        # print(obj['item']['itemid'])
        # return
        # All product links:
        # https://shopee.com.my/shop/118059163/search
        self.basePageUrl = response.xpath(
            '//a[@class="navbar-with-more-menu__item"][1]/@href'
        ).extract_first()
        yield response.follow(url=self.basePageUrl,
                              callback=self.parsePage,
                              meta={'sort': 1, 'p': self.currentPage})

    def parsePage(self, response):
        print('=== into parsePage ===')
        s = response.meta.get('sort')
        # Loop over the products
        for item in response.xpath('//div[@class="shop-search-result-view__item col-xs-2-4"]/div'):
            url = item.xpath('a/@href').extract_first()
            strlist = url.split('.')
            shopid = strlist[-2]
            itemid = strlist[-1]
            thisua = random.choice(UAPOOL)
            try:
                headers = {'content-type': 'application/json', 'User-Agent': thisua}
                goodsInfo = requests.get(
                    'https://shopee.com.my/api/v2/item/get?itemid=%s&shopid=%s' % (itemid, shopid),
                    headers=headers)
                goodsInfoJson = goodsInfo.json()
            except:
                try:
                    time.sleep(random.randint(2, 5) / 10)
                    headers = {'content-type': 'application/json', 'User-Agent': thisua}
                    goodsInfo = requests.get(
                        'https://shopee.com.my/api/v2/item/get?itemid=%s&shopid=%s' % (itemid, shopid),
                        headers=headers)
                    goodsInfoJson = goodsInfo.json()
                except:
                    try:
                        time.sleep(random.randint(10, 30) / 10)
                        headers = {'content-type': 'application/json', 'User-Agent': thisua}
                        goodsInfo = requests.get(
                            'https://shopee.com.my/api/v2/item/get?itemid=%s&shopid=%s' % (itemid, shopid),
                            headers=headers)
                        goodsInfoJson = goodsInfo.json()
                    except:
                        goodsInfoJson = None
                        print('goods info fetch failed, %s' % itemid)
            time.sleep(random.randint(1, 4) / 10)
            try:
                headers = {'content-type': 'application/json', 'User-Agent': thisua}
                shopInfo = requests.get(
                    'https://shopee.com.my/api/v2/shop/get?is_brief=0&shopid=%s' % shopid,
                    headers=headers)
                shopInfoJson = shopInfo.json()
            except:
                try:
                    time.sleep(random.randint(2, 5) / 10)
                    headers = {'content-type': 'application/json', 'User-Agent': thisua}
                    shopInfo = requests.get(
                        'https://shopee.com.my/api/v2/shop/get?is_brief=0&shopid=%s' % shopid,
                        headers=headers)
                    shopInfoJson = shopInfo.json()
                except:
                    try:
                        time.sleep(random.randint(10, 30) / 10)
                        headers = {'content-type': 'application/json', 'User-Agent': thisua}
                        shopInfo = requests.get(
                            'https://shopee.com.my/api/v2/shop/get?is_brief=0&shopid=%s' % shopid,
                            headers=headers)
                        shopInfoJson = shopInfo.json()
                    except:
                        shopInfoJson = None
                        print('shop info fetch failed, %s' % itemid)
            item = shopProductItem()
            item['run_id'] = self.runId
            item['query_name'] = self.shopUsername
            item['query_type'] = 'shop'
            if 'data' in shopInfoJson.keys():
                item['shop_add_time'] = shopInfoJson['data']['mtime']
                item['shop_location'] = shopInfoJson['data']['shop_location']
                item['shop_username'] = shopInfoJson['data']['account']['username']
            item['goods_id'] = itemid
            if 'item' in goodsInfoJson.keys():
                item['title'] = goodsInfoJson['item']['name']
                item['sales'] = goodsInfoJson['item']['historical_sold']
                if goodsInfoJson['item']['price_min_before_discount'] == -1:
                    item['price'] = 0
                else:
                    if goodsInfoJson['item']['price_min_before_discount'] != \
                            goodsInfoJson['item']['price_max_before_discount']:
                        item['price'] = str(int(goodsInfoJson['item']['price_min_before_discount']) / 100000) + \
                                        '-' + str(int(goodsInfoJson['item']['price_max_before_discount']) / 100000)
                    else:
                        item['price'] = str(int(goodsInfoJson['item']['price_min_before_discount']) / 100000)
                if goodsInfoJson['item']['price_min'] != goodsInfoJson['item']['price_max']:
                    item['discount_price'] = str(int(goodsInfoJson['item']['price_min']) / 100000) + \
                                             '-' + str(int(goodsInfoJson['item']['price_max']) / 100000)
                else:
                    item['discount_price'] = str(int(goodsInfoJson['item']['price_min']) / 100000)
                item['desc'] = goodsInfoJson['item']['description']
                item['add_time'] = goodsInfoJson['item']['ctime']
                if len(goodsInfoJson['item']['images']) > 1:
                    item['img_list'] = json.dumps([
                        'https://cf.shopee.com.my/file/' + goodsInfoJson['item']['images'][0],
                        'https://cf.shopee.com.my/file/' + goodsInfoJson['item']['images'][1]
                    ])
                else:
                    item['img_list'] = json.dumps([
                        'https://cf.shopee.com.my/file/' + goodsInfoJson['item']['images'][0]
                    ])
                item['liked_count'] = goodsInfoJson['item']['liked_count']
            item['url'] = 'https://shopee.com.my' + url
            item['sort'] = s
            item['page'] = response.meta.get('p')
            item['remark'] = ''
            item['created_at'] = str(int(time.time()))
            s = s + 1
            yield item
        # Loop over the pages
        if int(response.xpath('//span[@class="shopee-mini-page-controller__current"]/text()').extract_first()) < \
           int(response.xpath('//span[@class="shopee-mini-page-controller__total"]/text()').extract_first()):
            yield response.follow(url=self.basePageUrl + '?page=' + str(self.currentPage),
                                  callback=self.parsePage,
                                  meta={'sort': s, 'p': self.currentPage})
            self.currentPage = self.currentPage + 1

    def closed(self, reason):
        self.browserPc.quit()
        self.outputHtml(1)
        self.outputHtml(2)
        self.outputHtml(3)
        print('=== take time: ' + str(round((int(time.time()) - self.startTime) / 60, 2)) + ' minutes ===')

    def outputHtml(self, type):
        conn = pymysql.connect(host=DB_HOST, port=DB_PORT, user=DB_USER,
                               password=DB_PASSWORD, database=DB_DATABASE, charset=DB_CHARSET)
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        if type == 1:  # avgsold
            typename = 'avgsold'
            sql = '''
                select url,title,page,sort,discount_price,
                sales,liked_count,left(from_unixtime(add_time),10) add_time,
                ROUND((unix_timestamp(now())-add_time)/86400) days,
                img_list,`desc`,
                ROUND(sales/ROUND((unix_timestamp(now())-add_time)/86400),2) avgsold,
                ROUND(liked_count/ROUND((unix_timestamp(now())-add_time)/86400),2) avglike
                from shop_product where run_id = '%s' order by avgsold desc
            ''' % self.runId
        elif type == 2:  # avglike
            typename = 'avglike'
            sql = '''
                select url,title,page,sort,discount_price,
                sales,liked_count,left(from_unixtime(add_time),10) add_time,
                ROUND((unix_timestamp(now())-add_time)/86400) days,
                img_list,`desc`,
                ROUND(sales/ROUND((unix_timestamp(now())-add_time)/86400),2) avgsold,
                ROUND(liked_count/ROUND((unix_timestamp(now())-add_time)/86400),2) avglike
                from shop_product where run_id = '%s' order by avglike desc
            ''' % self.runId
        else:  # days
            typename = 'days'
            sql = '''
                select url,title,page,sort,discount_price,
                sales,liked_count,left(from_unixtime(add_time),10) add_time,
                ROUND((unix_timestamp(now())-add_time)/86400) days,
                img_list,`desc`,
                ROUND(sales/ROUND((unix_timestamp(now())-add_time)/86400),2) avgsold,
                ROUND(liked_count/ROUND((unix_timestamp(now())-add_time)/86400),2) avglike
                from shop_product where run_id = '%s' order by days
            ''' % self.runId
        cursor.execute(sql)
        results = cursor.fetchall()
        bodyHtml = ''
        for row in results:
            imgList = eval(row['img_list'])
            imgHtml = ''
            if len(imgList) > 0:
                for img in imgList:
                    imgHtml = imgHtml + '<img src="%s">' % img
            bodyHtml = bodyHtml + ('''
                <div><div class="title"><a href="%(href)s">%(title)s</a></div><div><span class="tag1">Page: %(page)s</span>
                <span class="tag1">Sort: %(sort)s</span><span class="tag2">Price: RM %(price)s</span>
                <span class="tag2">Solds: %(solds)s</span> <span class="tag2">Liked: %(liked)s</span>
                <span class="tag2">AddTime: %(add_time)s</span> <span class="tag2">Days: %(days)s</span>
                <span class="tag2">AvgLike: %(avglike)s</span></div>
                <span class="tag2">AvgSolds: %(avg_solds)s</span></div>
                <div class="cover"> %(img)s </div>
                <div class="desc"><pre> %(desc)s </pre></div></div><hr>''' % {
                "href": row['url'],
                "title": row['title'],
                "page": row['page'],
                "sort": row['sort'],
                "price": row['discount_price'],
                "solds": row['sales'],
                "liked": row['liked_count'],
                "add_time": row['add_time'],
                "days": row['days'],
                "avglike": row['avglike'],
                "avg_solds": row['avgsold'],
                "img": imgHtml,
                "desc": row['desc']
            })
        # filename = '/Users/mac/www/demo/pys/file/'+self.name+'_'+self.shopUsername+'_'+self.runId+'.html'
        filename = FILE_SAVE_PATH + '/' + self.name + '_' + self.shopUsername + '_' + self.runId + '_' + typename + '.html'
        headHtml = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<script src="http://lib.sinaapp.com/js/jquery/1.7.2/jquery.min.js"></script>
<style>
body{margin: 20px;}
.title{margin: 10px;}
.tag1{padding: 10px; margin-right: 10px; background-color: green;}
.tag2{padding: 10px; margin-right: 10px; background-color: brown;}
.cover{margin: 15px;}
.desc{margin: 15px;}
img{width: 200px;}
</style>
</head>
<body>'''
        footHtml = '</body></html>'
        with open(filename, 'w', encoding='utf-8') as file_object:
            file_object.write(headHtml + bodyHtml + footHtml)
        cursor.close()
        conn.close()
# 3
import time

mytime = time.localtime()
if mytime.tm_hour < 6 or mytime.tm_hour > 20:
    print('It is night-time')
else:
    print('It is day-time')
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpus is not None:
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support '
                      'single GPU mode in non-distributed training. '
                      'Use `gpus=1` now.')
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed training. Use the first GPU '
                      'in `gpu_ids` now.')
    if args.gpus is None and args.gpu_ids is None:
        cfg.gpu_ids = [args.gpu_id]

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        if len(cfg.gpu_ids) > 1:
            warnings.warn(
                f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
                f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
                'non-distribute training time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    meta['env_info'] = env_info

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    seed = init_random_seed(args.seed)
    seed = seed + dist.get_rank() if args.diff_seed else seed
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed

    model = build_posenet(cfg.model)
    datasets = [build_dataset(cfg.data.train)]

    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))

    if cfg.checkpoint_config is not None:
        # save mmpose version, config file content
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmpose_version=__version__ + get_git_hash(digits=7),
            config=cfg.pretty_text,
        )

    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
excel_train_line = 1  # next row index to write in the train excel sheet
excel_val_line = 1    # next row index to write in the val excel sheet

accumulation_steps = 2            # number of gradient-accumulation steps, similar to batch-size=64
itr_to_lr = 10000 // BATCH_SIZE   # after 10000 iterations the loss drops by 50%
itr_to_excel = 64 // BATCH_SIZE   # save stats to excel every 64 iterations
# Because there are so many losses, stepwise training is recommended to reduce GPU memory usage.
loss_num = 12  # number of losses: 2 reconstruction losses, 5 for the At network,
               # dehazing losses (forward and backward), and an intermediate-feature constraint
weight_At = [1, 1, 1, 1, 1]
weight_ed = [1, 1, 1, 1, 0.01]
weight_recon = [1, 1]
weight = weight_At + weight_ed + weight_recon

train_haze_path = '/home/aistudio/work/nyu/train/'  # dehazing training set
val_haze_path = '/home/aistudio/work/nyu/val/'      # dehazing validation set
gt_path = '/home/aistudio/work/nyu/gth/'
d_path = '/home/aistudio/work/nyu/depth/'

save_path = './result_nyu_' + time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime()) + '/'
save_model_ed_name = save_path + 'ed_model.pt'   # path for saving the ed model
save_model_At_name = save_path + 'At_model.pt'   # path for saving the At model
excel_save = save_path + 'result.xls'            # path for saving the excel sheet
mid_save_ed_path = './mid_model/ednet_model.pt'  # intermediate model, used for the next stage of ntire training
mid_save_At_path = './mid_model/Atnet_model.pt'

# Initialize the excel workbook
f, sheet_train, sheet_val = init_excel()

# Load the models
ednet_path = './pre_model/ednet_model.pt'
Atnet_path = './pre_model/Atnet_model.pt'
ednet = torch.load(ednet_path)
Atnet = torch.load(Atnet_path)
ednet = ednet.cuda()
Atnet = Atnet.cuda()
def isInTimer(self, eventid, begin, duration, service):
    returnValue = None
    type = 0
    time_match = 0
    isAutoTimer = False
    bt = None
    end = begin + duration
    refstr = str(service)
    for x in self.timer_list:
        if x.isAutoTimer == 1:
            isAutoTimer = True
        else:
            isAutoTimer = False
        check = x.service_ref.ref.toString() == refstr
        if not check:
            sref = x.service_ref.ref
            parent_sid = sref.getUnsignedData(5)
            parent_tsid = sref.getUnsignedData(6)
            if parent_sid and parent_tsid:
                # check for subservice
                sid = sref.getUnsignedData(1)
                tsid = sref.getUnsignedData(2)
                sref.setUnsignedData(1, parent_sid)
                sref.setUnsignedData(2, parent_tsid)
                sref.setUnsignedData(5, 0)
                sref.setUnsignedData(6, 0)
                check = sref.toCompareString() == refstr
                num = 0
                if check:
                    check = False
                    event = eEPGCache.getInstance().lookupEventId(sref, eventid)
                    num = event and event.getNumOfLinkageServices() or 0
                sref.setUnsignedData(1, sid)
                sref.setUnsignedData(2, tsid)
                sref.setUnsignedData(5, parent_sid)
                sref.setUnsignedData(6, parent_tsid)
                for cnt in range(num):
                    subservice = event.getLinkageService(sref, cnt)
                    if sref.toCompareString() == subservice.toCompareString():
                        check = True
                        break
        if check:
            timer_end = x.end
            if x.justplay and (timer_end - x.begin) <= 1:
                timer_end += 60
            if x.repeated != 0:
                if bt is None:
                    bt = localtime(begin)
                    et = localtime(end)
                    bday = bt.tm_wday
                    begin2 = bday * 1440 + bt.tm_hour * 60 + bt.tm_min
                    end2 = et.tm_wday * 1440 + et.tm_hour * 60 + et.tm_min
                if x.repeated & (1 << bday):
                    xbt = localtime(x.begin)
                    xet = localtime(timer_end)
                    xbegin = bday * 1440 + xbt.tm_hour * 60 + xbt.tm_min
                    xend = bday * 1440 + xet.tm_hour * 60 + xet.tm_min
                    if xend < xbegin:
                        xend += 1440
                    if begin2 < xbegin <= end2:
                        if xend < end2:
                            # recording within event
                            time_match = (xend - xbegin) * 60
                            type = 3
                        else:
                            # recording last part of event
                            time_match = (end2 - xbegin) * 60
                            type = 1
                    elif xbegin <= begin2 <= xend:
                        if xend < end2:
                            # recording first part of event
                            time_match = (xend - begin2) * 60
                            type = 4
                        else:
                            # recording whole event
                            time_match = (end2 - begin2) * 60
                            type = 2
            else:
                if begin < x.begin <= end:
                    if timer_end < end:
                        # recording within event
                        time_match = timer_end - x.begin
                        type = 3
                    else:
                        # recording last part of event
                        time_match = end - x.begin
                        type = 1
                elif x.begin <= begin <= timer_end:
                    if timer_end < end:
                        # recording first part of event
                        time_match = timer_end - begin
                        type = 4
                        if x.justplay:
                            type = 2
                    else:
                        # recording whole event
                        time_match = end - begin
                        type = 2
            if x.justplay:
                type += 5
            elif x.always_zap:
                type += 10
            if time_match:
                returnValue = (time_match, type, isAutoTimer)
                if type in (2, 7, 12):
                    # When full recording do not look further
                    break
    return returnValue
query_cam = result['query_cam'][0]
query_label = result['query_label'][0]
gallery_feature = torch.FloatTensor(result['gallery_f'])
gallery_cam = result['gallery_cam'][0]
gallery_label = result['gallery_label'][0]

ret_dict = dict()
max_index_200 = []
#print(query_label)
for i in range(len(query_label)):
    max_index_i = []
    query_filename = os.path.basename(image_datasets['query'].imgs[i][0])
    max_index_i = evaluate(query_feature[i], gallery_feature)
    max_index_200.append(max_index_i)
    ret_filenames = [os.path.basename(image_datasets['gallery'].imgs[j][0])
                     for j in max_index_i]
    ret_dict[query_filename] = ret_filenames

save_name = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) + '_submission_A.json'
with open(save_name, 'w', encoding='utf-8') as f:  # submission file
    json.dump(ret_dict, f)

# # print(opt.name)
# result = './model/%s/result.txt' % opt.name
# os.system('python evaluate_gpu.py | tee -a %s' % result)
#
# if opt.multi:
#     result = {'mquery_f': mquery_feature.numpy(), 'mquery_label': mquery_label, 'mquery_cam': mquery_cam}
#     scipy.io.savemat('multi_query.mat', result)
def _genDict(self, level, is_debug=0):
    """ Internal function. """
    check_domains = []
    simple_match = False

    if is_debug:
        _dict = self._debug_level
        _domains = self._debug_domains
        _label = self._debug_label
    else:
        _dict = self._level
        _domains = self._domains
        _label = self._label

    # no debug
    for domain in _dict:
        if domain == "*":
            # '*' matches everything: simple match
            if _dict[domain] >= level:
                simple_match = True
                if len(check_domains) > 0:
                    check_domains = []
                break
        else:
            if _dict[domain] >= level:
                check_domains.append(domain)

    if not simple_match and len(check_domains) < 1:
        return None

    if level not in _domains:
        return None

    f = inspect.currentframe()
    # go outside of logger module as long as there is a lower frame
    while f and f.f_back and f.f_globals["__name__"] == self.__module__:
        f = f.f_back
    if not f:
        raise ValueError("Frame information not available.")

    # get module name
    module_name = f.f_globals["__name__"]

    # simple module match test for all entries of check_domain
    point_module = module_name + "."
    for domain in check_domains:
        if point_module.startswith(domain):
            # found domain in module name
            check_domains = []
            break

    # get code
    co = f.f_code

    # optimization: bail out early if domain can not match at all
    _len = len(module_name)
    for domain in _domains[level]:
        i = domain.find("*")
        if i == 0:
            continue
        elif i > 0:
            d = domain[:i]
        else:
            d = domain
        if _len >= len(d):
            if not module_name.startswith(d):
                return None
        else:
            if not d.startswith(module_name):
                return None

    # generate _dict for format output
    level_str = ""
    if level in _label:
        level_str = _label[level]
    _dict = {
        'file': co.co_filename,
        'line': f.f_lineno,
        'module': module_name,
        'class': '',
        'function': co.co_name,
        'domain': '',
        'label': level_str,
        'level': level,
        'date': time.strftime(self._date_format, time.localtime())
    }
    if _dict["function"] == "?":
        _dict["function"] = ""

    # domain match needed?
    domain_needed = False
    for domain in _domains[level]:
        # standard domain, matches everything
        if domain == "*":
            continue
        # domain is needed
        domain_needed = True
        break

    # do we need to get the class object?
    if self._format.find("%(domain)") >= 0 or \
       self._format.find("%(class)") >= 0 or \
       domain_needed or \
       len(check_domains) > 0:
        obj = self._getClass(f)
        if obj:
            _dict["class"] = obj.__name__

    # build domain string
    _dict["domain"] = "" + _dict["module"]
    if _dict["class"] != "":
        _dict["domain"] += "." + _dict["class"]
    if _dict["function"] != "":
        _dict["domain"] += "." + _dict["function"]

    if len(check_domains) < 1:
        return _dict

    point_domain = _dict["domain"] + "."
    for domain in check_domains:
        if point_domain.startswith(domain) or \
           fnmatch.fnmatchcase(_dict["domain"], domain):
            return _dict

    return None
def main(): args = get_argparse().parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log') if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed seed_everything(args.seed) # Prepare NER task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() args.id2label = {i: label for i, label in enumerate(label_list)} args.label2id = {label: i for i, label in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using 
    # from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (model.module if hasattr(model, "module") else model
                         )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_vocabulary(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME,
                              recursive=True)))
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            model = model_class.from_pretrained(checkpoint, config=config)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            if global_step:
                result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
            results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.predict_checkpoints > 0:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(
                    glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME,
                              recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce logging
            checkpoints = [x for x in checkpoints
                           if x.split('-')[-1] == str(args.predict_checkpoints)]
        logger.info("Predict the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            model = model_class.from_pretrained(checkpoint, config=config)
            model.to(args.device)
            predict(args, model, tokenizer, prefix=prefix)
def log_to_file(err_content):
    # Append a timestamped error line; `with` closes the handle and avoids
    # shadowing the (Python 2) builtin `file`.
    with open(ERROR_LOG_FILE, "a+") as f:
        error_log = "error in build_server - %s : %s \n" % (
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), err_content)
        f.write(error_log)
def getText(self):
    no_desc = ''
    if self.type != self.EXTENDED_DESCRIPTION_EVENT:
        service = self.source.service
        if isinstance(service, iPlayableServicePtr):
            info = service and service.info()
            ref = None
        else:
            info = service and self.source.info
            ref = service
        if info is None:
            return no_desc
        if self.type == self.NAMEVENT:
            name = ref and info.getName(ref)
            if name is None:
                name = info.getName()
            name = name.replace('\xc2\x86', '').replace('\xc2\x87', '')
            act_event = info and info.getEvent(0)
            if not act_event and info:
                refstr = info.getInfoString(iServiceInformation.sServiceref)
                act_event = self.epgQuery(eServiceReference(refstr), -1, 0)
            if act_event is None:
                return '%s - %s' % (name, no_desc)
            else:
                return '%s - %s' % (name, act_event.getEventName())
        act_event = None
        try:
            act_event = self.epgQuery(eServiceReference(service.toString()), -1, 1)
        except:
            pass
        if act_event is None:
            return no_desc
    else:
        act_event = self.source.event
        if act_event is None:
            return no_desc
    if self.type == self.NEXTEVENT:
        return act_event.getEventName()
    elif self.type == self.STARTTIME:
        t = localtime(act_event.getBeginTime())
        return '%02d:%02d' % (t.tm_hour, t.tm_min)
    elif self.type == self.ENDTIME:
        t = localtime(act_event.getBeginTime() + act_event.getDuration())
        return '%02d:%02d' % (t.tm_hour, t.tm_min)
    elif self.type == self.DURATION:
        return '%d min' % int(act_event.getDuration() / 60)
    elif self.type == self.EXTENDED_DESCRIPTION or self.type == self.EXTENDED_DESCRIPTION_EVENT:
        short = act_event.getShortDescription()
        tmp = act_event.getExtendedDescription()
        if tmp == '' or tmp is None:
            tmp = short
            if tmp == '' or tmp is None:
                tmp = no_desc
            else:
                tmp = tmp.strip()
        else:
            tmp = tmp.strip()
        # The original test was `short != '' or short is not None`, which is
        # always true and crashes in len() when short is None; `and` was meant.
        if short is not None and short != '':
            if len(short) > 3:
                if short[:-2] not in tmp:
                    tmp = short.strip() + '...' + tmp
        tmp = tmp.replace('\r', ' ').replace('\n', ' ').replace('\xc2\x8a', ' ')
        return re.sub('[\\s\t]+', ' ', tmp)
    else:
        return 'Error reading EPG data'
def analyzeWebPages(webPageList, db_enable):
    # cx = sqlite3.connect('sra.db')
    # cu = cx.cursor()
    lineList = []
    for webPage in webPageList:
        weibo = ''
        dlList = []
        scriptStart = webPage.find('STK && STK.pageletM && STK.pageletM.view({"pid":"pl_wb_feedlist","js"')
        if scriptStart != -1:
            weibo = webPage[scriptStart:]
            scriptEnd = weibo.find('</script>')
            weibo = weibo[:scriptEnd]
        scriptStart = webPage.find('STK && STK.pageletM && STK.pageletM.view({"pid":"pl_weibo_direct","js"')
        if scriptStart != -1:
            weibo = webPage[scriptStart:]
            scriptEnd = weibo.find('</script>')
            weibo = weibo[:scriptEnd]
        while True:
            start = weibo.find('<dl class=\\"feed_list W_linecolor')
            if start == -1:
                break
            wbTemp = weibo[start+3:]
            dlStart = wbTemp.find('<dl')
            dlEnd = wbTemp.find('dl>')
            if dlStart < dlEnd:
                wbTemp = wbTemp[dlEnd+3:]
                end = start + 3 + dlEnd + 3 + wbTemp.find('dl>')
            else:
                end = start + 3 + dlEnd
            dl = weibo[start:end+3]
            dlList.append(dl)
            weibo = weibo[end+3:]
        for item in dlList:
            #print item
            title = '无'
            trans = item.find('transparent.gif')
            if trans != -1:
                titleTemp = item[trans:]
                titleStart = titleTemp.find('title')
                titleEnd = titleTemp.find('alt')
                title = titleTemp[titleStart+9:titleEnd-3]
                if title == '\\u5fae\\u535a\\u673a\\u6784\\u8ba4\\u8bc1':
                    title = '蓝V'
                elif title.find('\\u5fae\\u535a\\u4e2a\\u4eba\\u8ba4\\u8bc1') != -1:
                    title = '黄V'
                elif title == '\\u5fae\\u535a\\u8fbe\\u4eba':
                    title = '达人'
                comm = item.find('<dl class=\\"comment W_textc W_linecolor W_bgcolor')
                if comm != -1 and comm < trans:
                    title = '无'
            #if title == 'n/a':
            #    continue
            nicknameStart = item.find('<a nick-name=')
            nicknameEnd = nicknameStart + item[nicknameStart:].find('href=')
            nickname = item[nicknameStart+15:nicknameEnd-3].decode('unicode_escape')
            contentStart = item.find('<em>')
            item = item[contentStart+4:]
            contentEnd = item.find('<\\/em>')
            content = item[:contentEnd+6]
            content = content.replace('\\"', '"').replace("\\/", "/")
            contentTemp = ''
            # Strip HTML tags. The original exit test was
            # `if ltIndex == -1 and len(content) == 0: break`, which never
            # terminates when text remains but no '<' does; keep the
            # remainder and break instead.
            while True:
                ltIndex = content.find('<')
                if ltIndex == -1:
                    contentTemp = contentTemp + content
                    break
                contentTemp = contentTemp + content[:ltIndex]
                gtIndex = content.find('>')
                content = content[gtIndex+1:]
            content = contentTemp.decode('unicode_escape')
            praised = '0'
            emStart = item.find('<em class=\\"W_ico20 icon_praised_b\\">')
            emTemp = item[emStart:]
            praisedEnd = emTemp.find(')')
            ahrefIndex = emTemp.find('<\\/a>')
            if praisedEnd < ahrefIndex:
                praisedStart = emTemp.find('(')
                praised = emTemp[praisedStart+1:praisedEnd]
            forward = '0'
            actionStart = item.find('action-type=\\"feed_list_forward')
            actionTemp = item[actionStart:]
            forwardEnd = actionTemp.find(')')
            ahrefIndex = actionTemp.find('<\\/a>')
            if forwardEnd < ahrefIndex:
                forwardStart = actionTemp.find('(')
                forward = actionTemp[forwardStart+1:forwardEnd]
            favorite = '0'
            actionStart = item.find('action-type=\\"feed_list_favorite')
            actionTemp = item[actionStart:]
            favoriteEnd = actionTemp.find(')')
            ahrefIndex = actionTemp.find('<\\/a>')
            if favoriteEnd < ahrefIndex:
                favoriteStart = actionTemp.find('(')
                favorite = actionTemp[favoriteStart+1:favoriteEnd]
            comment = '0'
            actionStart = item.find('action-type=\\"feed_list_comment')
            actionTemp = item[actionStart:]
            commentEnd = actionTemp.find(')')
            if commentEnd != -1:
                ahrefIndex = actionTemp.find('<\\/a>')
                if commentEnd < ahrefIndex:
                    commentStart = actionTemp.find('(')
                    comment = actionTemp[commentStart+1:commentEnd]
            dateIndex = actionTemp.find('date=')
            datetime = actionTemp[dateIndex+7:dateIndex+17]
            # The time module is aliased as `t` because `time` is rebound as a
            # local variable just below.
            datespacetime = t.strftime('%Y-%m-%d %X', t.localtime(int(datetime)))
            dateAndTime = datespacetime.split(' ')
            date = dateAndTime[0]
time = dateAndTime[1] linkStart = actionTemp.find('<a href') linkEnd = actionTemp.find('title') link = actionTemp[linkStart+10:linkEnd-3] link = link.replace('\\/', '/') #print '昵称:%s\t头衔:%s\t赞:%s\t转发:%s\t收藏:%s\t评论:%s\t日期:%s\t时间:%s' % (nickname, title, praised, forward, favorite, comment, date, time) line = '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % (nickname, title, praised, forward, favorite, comment, date, time, link, content) try: print line except UnicodeEncodeError: pass lineList.append(line) # if (db_enable == 1): # sqlStr = 'INSERT INTO metaweibo (nickname, title, praised, forward, favorite, comment, date, time, datetime) VALUES ("%s", "%s", %s, %s, %s, %s, "%s", "%s", %s)' % (nickname, title, int(praised), int(forward), int(favorite), int(comment), date, time, int(datetime)) # cu.execute(sqlStr) # cx.commit() # cx.close() return lineList
def _run_interface(self, runtime): import csv from pathlib import Path from time import localtime, strftime # Read standard BIDS parcellation node description in TSV format with open(self.inputs.roi_bids_tsv, "r") as data: bids_dict_nodes = [] for line in csv.DictReader(data, delimiter="\t"): bids_dict_nodes.append(line) # Create colorLUT file, write header and parcellation node line color_lut_file = self._gen_output_filename(self.inputs.roi_bids_tsv, "colorlut") print("Create colorLUT file as %s" % color_lut_file) with open(color_lut_file, "w+") as f_color_lut: time_now = strftime("%a, %d %b %Y %H:%M:%S", localtime()) hdr_lines = [ "#$Id: {}_FreeSurferColorLUT.txt {} \n \n".format( Path(self.inputs.roi_bids_tsv).stem, time_now), "{:<4} {:<55} {:>3} {:>3} {:>3} {} \n \n".format( "#No.", "Label Name:", "R", "G", "B", "A"), ] f_color_lut.writelines(hdr_lines) del hdr_lines for bids_node in bids_dict_nodes: # Convert hexadecimal to RGB color h = bids_node["color"].lstrip("#") (r, g, b) = tuple(int(h[i:i + 2], 16) for i in (0, 2, 4)) line = [ "{:<4} {:<55} {:>3} {:>3} {:>3} {} \n".format( bids_node["index"], bids_node["name"], r, g, b, 0) ] f_color_lut.writelines(line) del line # Create graphml file, write header and parcellation node line graphml_file = self._gen_output_filename(self.inputs.roi_bids_tsv, "graphml") print("Create graphml_file as %s" % graphml_file) with open(graphml_file, "w+") as f_graphml: # Write header hdr_lines = [ "{}\n".format('<?xml version="1.0" encoding="utf-8"?>'), "{}\n".format( '<graphml xmlns="http://graphml.graphdrawing.org/xmlns" ' 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' 'xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">' ), "{}\n".format( '\t<key attr.name="dn_region" attr.type="string" for="node" id="d0" />' ), "{}\n".format( '\t<key attr.name="dn_fsname" attr.type="string" for="node" id="d1" />' ), "{}\n".format( '\t<key attr.name="dn_hemisphere" attr.type="string" for="node" id="d2" />' ), "{}\n".format( '\t<key attr.name="dn_multiscaleID" attr.type="int" for="node" id="d3" />' ), "{}\n".format( '\t<key attr.name="dn_name" attr.type="string" for="node" id="d4" />' ), "{}\n".format('\t<graph edgedefault="undirected" id="">'), ] f_graphml.writelines(hdr_lines) del hdr_lines for bids_node in bids_dict_nodes: # Write node description lines node_lines = [ "{}\n".format('\t\t<node id="%i">' % int(bids_node["index"])), "{}\n".format('\t\t\t<data key="d0">%s</data>' % "cortical"), "{}\n".format('\t\t\t<data key="d1">%s</data>' % bids_node["name"]), "{}\n".format('\t\t\t<data key="d2">%s</data>' % None), "{}\n".format('\t\t\t<data key="d3">%i</data>' % int(bids_node["index"])), "{}\n".format('\t\t\t<data key="d4">%s</data>' % bids_node["name"]), "{}\n".format("\t\t</node>"), ] f_graphml.writelines(node_lines) del node_lines # Write bottom lines bottom_lines = [ "{}\n".format("\t</graph>"), "{}\n".format("</graphml>") ] f_graphml.writelines(bottom_lines) del bottom_lines return runtime
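# A minimal example of the node TSV _run_interface expects: tab-separated with
# at least 'index', 'name' and a hex 'color' column (columns inferred from the
# keys read above; the file name and labels are illustrative only).
import csv

with open("example_nodes.tsv", "w", newline="") as f:
    w = csv.writer(f, delimiter="\t")
    w.writerow(["index", "name", "color"])
    w.writerow(["1", "ctx-lh-bankssts", "#196428"])
    w.writerow(["2", "ctx-rh-bankssts", "#C81919"])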
def get_timestamp_str(timestamp): now = int(time.time()) release_time = time.localtime(timestamp) release_time_str = time.strftime('%d %b %Y %H:%M:%S', release_time) ago = readable_time_duration(now - timestamp) return "%s (%s ago)" % (release_time_str, ago)
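# get_timestamp_str relies on an external readable_time_duration helper; the
# sketch below is only the behaviour it appears to assume (the name exists in
# the snippet, but this rendering is a guess, not the original implementation):
def readable_time_duration(seconds):
    # Render a duration using its largest whole unit, e.g. 90061 -> "1 day".
    for unit, size in (("day", 86400), ("hour", 3600), ("minute", 60)):
        if seconds >= size:
            n = int(seconds // size)
            return "%d %s%s" % (n, unit, "" if n == 1 else "s")
    return "%d seconds" % int(seconds)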
def save_data(data, name):
    # Build the output file name. The original format string '%Y-%m_%d' mixed
    # '-' and '_' separators, which reads like a typo for '%Y-%m-%d'.
    filename = name + '_' + time.strftime('%Y-%m-%d', time.localtime(time.time())) + ".csv"
    data.to_csv(filename, sep=",", encoding="utf-8-sig")
    print("File saved.")
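# Illustrative use of save_data (assumes pandas and the time import the
# function relies on; the DataFrame contents and name are made up):
import time
import pandas as pd

df = pd.DataFrame({"city": ["Beijing", "Shanghai"], "pm25": [80, 45]})
save_data(df, "air_quality")  # writes e.g. air_quality_2016-12-20.csv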
def do_chunks(self):
    # wait for chunk_size
    while True:
        t = int(time.time())
        if t % self.chunk_size:
            time.sleep(.1)
            continue
        self.log.debug("chunk [%d] fetcher pid [%s]" % (t, self.pid))
        ti = t - self.chunk_size
        period = 1 / float(self.fps)
        # make IMage Array index -> (imagename, imagetime)
        # save directories for removal
        imad = []
        ima = []
        while ti < t:
            # convert ti in gmtime and directory
            gmt = time.gmtime(ti)
            ddir = "%s/%04d/%02d/%02d/%02d/%02d/%02d" % (
                self.raw, gmt.tm_year, gmt.tm_mon, gmt.tm_mday,
                gmt.tm_hour, gmt.tm_min, gmt.tm_sec)
            imad.append(ddir)
            # self.log.debug("[%s]: directory for sec %d: [%s]" % (self.name, ti, ddir))
            for root, dirs, files in os.walk(ddir):
                for name in files:
                    if name.endswith(".jpg"):
                        usec = int(name[:-4])
                        # self.log.debug("[%s]: found FILE [%s], usec [%d]" % (self.name, name, usec))
                        f = "%s/%s" % (root, name)
                        ima.append((f, float(ti + float(usec) / 1000000)))
            ti += 1
        # self.log.debug("[%s]: images for chunk [%s]" % (self.name, ima))
        # links to images to make chunks
        if not len(ima):
            if self.pid:
                self.log.warning("killing pid [%d]" % self.pid)
                try:
                    os.system("pkill -P %d" % self.pid)
                    # The original ran `pkill %d`, which matches on process
                    # *name*, not pid; `kill %d` is the shell equivalent of
                    # the commented-out os.kill call below.
                    os.system("kill %d" % self.pid)
                    # os.kill(self.pid, signal.SIGTERM)  # SIGKILL ?
                except:
                    self.log.exception("while killing pid [%d]" % self.pid)
                self.pid = None
            time.sleep(1)
            continue
        t = float(t)
        ti = float(t - self.chunk_size)
        outim = []
        while ti < t:
            # search image nearest to "ti"
            td = 999.0
            imname = None
            for name, tim in ima:
                # self.log.debug("[%s]: searching for image nearest to [%f]" % (self.name, ti))
                dt = abs(tim - ti)
                if dt < td:
                    td = dt
                    imname = name
            if imname:
                # self.log.debug("[%s]: found [%s]" % (self.name, imname))
                outim.append(imname)
            ti += period
        # self.log.debug("[%s]: ordered images for chunk [%s]" % (self.name, outim))
        # now gen symlink in tmp dir
        i = 0
        for imax in outim:
            tmpfname = "%s/%d.jpg" % (self.tempdir, i)
            if os.path.lexists(tmpfname):
                os.remove(tmpfname)
            os.symlink(imax, tmpfname)
            i += 1
        # generate chunk with ffmpeg
        gmt = time.gmtime(t)
        chunkdir = "%04d-%02d-%02d_%02d" % (gmt.tm_year, gmt.tm_mon, gmt.tm_mday, gmt.tm_hour)
        chunkname = "%04d-%02d-%02d_%02d-%02d-%02d.ts" % (
            gmt.tm_year, gmt.tm_mon, gmt.tm_mday, gmt.tm_hour, gmt.tm_min, gmt.tm_sec)
        chunkdirpath = "%s/%s" % (self.chunks_path, chunkdir)
        chunkfullpath = "%s/%s" % (chunkdirpath, chunkname)
        chunkwebpath = "%s/%s/%s" % (self.webpath_chunks_prefix, chunkdir, chunkname)
        # make chunk dir if not exists
        Cam.mkdir(chunkdirpath)
        frames = len(outim)
        wtime = time.localtime(t)
        ptso = wtime.tm_hour * 3600 + wtime.tm_min * 60 + wtime.tm_sec
        # The drawbox colour was mangled to "[email protected]" by e-mail
        # obfuscation in the source this was recovered from;
        # "color=black@0.4" is a guess at the original value.
        cmd = "ffmpeg -loglevel panic -y -framerate %d -start_number 0 -i \"%s/%%d.jpg\" -frames %d " \
              "-vf %ssetpts=PTS+%d/TB,drawbox=t=20:x=0:y=0:width=120:height=30:color=black@0.4," \
              "drawtext=\"fontfile=%s:text=%02d\\\\\\:%02d\\\\\\:%d%%{expr_int_format\\\\\\:n/5\\\\\\:u\\\\\\:1}.%%{expr_int_format\\\\\\:mod(n\,5)*2\\\\\\:u\\\\\\:1}00:y=10:x=10:fontcolor=yellow\" " \
              "%s " \
              "%s" % (self.fps, self.tempdir, frames, self.ffmpeg_prefilter, ptso,
                      self.font, wtime.tm_hour, wtime.tm_min, int(wtime.tm_sec/10),
                      self.ffmpeg_options, chunkfullpath)
        self.log.debug("ffmpeg cmd: [%s]" % cmd)
        os.system(cmd)
        # update m3u8
        self.m3u8.addstream(chunkwebpath)
        self.m3u8.write()
        # remove work images
        for imax, imt in ima:
            if os.path.exists(imax):
                # self.log.debug("[%s]: removing : [%s]" % (self.name, imax))
                os.remove(imax)
        # remove dirs
        for dirname in imad:
            if os.path.exists(dirname):
                # self.log.debug("[%s]: removing dir: [%s]" % (self.name, dirname))
                os.rmdir(dirname)
        # wait for the chunk boundary second to pass before looping again
        while True:
            t = int(time.time())
            if t % self.chunk_size:
                break
            time.sleep(.1)
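# The nearest-frame search in do_chunks scans every captured image for each
# output frame (O(frames * images)). With the timestamps kept sorted, bisect
# finds the same frame in O(log n) per lookup; a sketch, not part of the
# original class:
import bisect

def nearest_index(times, ti):
    # Index of the value in sorted `times` closest to ti, or None if empty.
    if not times:
        return None
    i = bisect.bisect_left(times, ti)
    candidates = [j for j in (i - 1, i) if 0 <= j < len(times)]
    return min(candidates, key=lambda j: abs(times[j] - ti))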
def lsLine(name, s): """ Build an 'ls' line for a file ('file' in its generic sense, it can be of any type). """ mode = s.st_mode perms = array.array("B", b"-" * 10) ft = stat.S_IFMT(mode) if stat.S_ISDIR(ft): perms[0] = ord("d") elif stat.S_ISCHR(ft): perms[0] = ord("c") elif stat.S_ISBLK(ft): perms[0] = ord("b") elif stat.S_ISREG(ft): perms[0] = ord("-") elif stat.S_ISFIFO(ft): perms[0] = ord("f") elif stat.S_ISLNK(ft): perms[0] = ord("l") elif stat.S_ISSOCK(ft): perms[0] = ord("s") else: perms[0] = ord("!") # User if mode & stat.S_IRUSR: perms[1] = ord("r") if mode & stat.S_IWUSR: perms[2] = ord("w") if mode & stat.S_IXUSR: perms[3] = ord("x") # Group if mode & stat.S_IRGRP: perms[4] = ord("r") if mode & stat.S_IWGRP: perms[5] = ord("w") if mode & stat.S_IXGRP: perms[6] = ord("x") # Other if mode & stat.S_IROTH: perms[7] = ord("r") if mode & stat.S_IWOTH: perms[8] = ord("w") if mode & stat.S_IXOTH: perms[9] = ord("x") # Suid/sgid if mode & stat.S_ISUID: if perms[3] == ord("x"): perms[3] = ord("s") else: perms[3] = ord("S") if mode & stat.S_ISGID: if perms[6] == ord("x"): perms[6] = ord("s") else: perms[6] = ord("S") if isinstance(name, bytes): name = name.decode("utf-8") lsPerms = perms.tobytes() lsPerms = lsPerms.decode("utf-8") lsresult = [ lsPerms, str(s.st_nlink).rjust(5), " ", str(s.st_uid).ljust(9), str(s.st_gid).ljust(9), str(s.st_size).rjust(8), " ", ] # Need to specify the month manually, as strftime depends on locale ttup = localtime(s.st_mtime) sixmonths = 60 * 60 * 24 * 7 * 26 if s.st_mtime + sixmonths < time(): # Last edited more than 6mo ago strtime = strftime("%%s %d %Y ", ttup) else: strtime = strftime("%%s %d %H:%M ", ttup) lsresult.append(strtime % (_MONTH_NAMES[ttup[1]], )) lsresult.append(name) return "".join(lsresult)
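# Illustrative driver for lsLine (assumes its module-level imports: array,
# stat, and time's localtime/strftime/time). _MONTH_NAMES is referenced above
# but not shown; tm_mon runs 1-12, so index 0 here is just a placeholder.
import os

_MONTH_NAMES = ["", "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

if __name__ == "__main__":
    st = os.stat(__file__)
    print(lsLine(os.path.basename(__file__), st))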
def current_timestr(): return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
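# For comparison, the same string via datetime (illustrative; the two calls
# can disagree if they straddle a second boundary):
import time
from datetime import datetime

print(current_timestr())
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))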
def procfile(f): f.linenum = 0 menu = None # convert these to a dictionary. showfooter = True showsourcelink = False showlastupdated = True showlastupdatedtime = True nodefaultcss = False fwtitle = False css = [] js = [] title = None while pc(f, False) == '#': l = f.inf.readline().decode('utf-8') f.linenum += 1 if doincludes(f, l[1:]): continue if l.startswith('# jemdoc:'): l = l[len('# jemdoc:'):] a = l.split(',') # jem only handle one argument for now. for b in a: b = b.strip() if b.startswith('menu'): sidemenu = True r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) g = re.findall(r, b) if len(g) > 3 or len(g) < 2: raise SyntaxError('sidemenu error on line %d' % f.linenum) if len(g) == 2: menu = (f, g[0], g[1], '') else: menu = (f, g[0], g[1], g[2]) elif b.startswith('nofooter'): showfooter = False elif b.startswith('nodate'): showlastupdated = False elif b.startswith('notime'): showlastupdatedtime = False elif b.startswith('fwtitle'): fwtitle = True elif b.startswith('showsource'): showsourcelink = True elif b.startswith('nodefaultcss'): nodefaultcss = True elif b.startswith('addcss'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) css += re.findall(r, b) elif b.startswith('addjs'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) js += re.findall(r, b) elif b.startswith('addpackage'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) f.eqpackages += re.findall(r, b) elif b.startswith('addtex'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) f.texlines += re.findall(r, b) elif b.startswith('analytics'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) f.analytics = re.findall(r, b)[0] elif b.startswith('title'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) g = re.findall(r, b) if len(g) != 1: raise SyntaxError('addtitle error on line %d' % f.linenum) title = g[0] elif b.startswith('noeqs'): f.eqs = False elif b.startswith('noeqcache'): f.eqcache = False elif b.startswith('eqsize'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) g = re.findall(r, b) if len(g) != 1: raise SyntaxError('eqsize error on line %d' % f.linenum) f.eqdpi = int(g[0]) elif b.startswith('eqdir'): r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) g = re.findall(r, b) if len(g) != 1: raise SyntaxError('eqdir error on line %d' % f.linenum) f.eqdir = g[0] # Get the file started with the firstbit. out(f.outf, f.conf['firstbit']) if not nodefaultcss: out(f.outf, f.conf['defaultcss']) # Add per-file css lines here. for i in range(len(css)): if '.css' not in css[i]: css[i] += '.css' for x in css: hb(f.outf, f.conf['specificcss'], x) for x in js: hb(f.outf, f.conf['specificjs'], x) # Look for a title. if pc(f) == '=': # don't check exact number f.outf '=' here jem. t = br(nl(f), f)[:-1] if title is None: title = re.sub(' *(<br />)|( ) *', ' ', t) else: t = None #if title: hb(f.outf, f.conf['windowtitle'], title) out(f.outf, f.conf['bodystart']) if f.analytics: hb(f.outf, f.conf['analytics'], f.analytics) if fwtitle: out(f.outf, f.conf['fwtitlestart']) inserttitle(f, t) out(f.outf, f.conf['fwtitleend']) if menu: out(f.outf, f.conf['menustart']) insertmenuitems(*menu) out(f.outf, f.conf['menuend']) else: out(f.outf, f.conf['nomenu']) if not fwtitle: inserttitle(f, t) infoblock = False imgblock = False tableblock = False while 1: # wait for EOF. 
p = pc(f) if p == '': break elif p == '\\(': if not (f.eqs and f.eqsupport): break s = nl(f) # Quickly pull out the equation here: # Check we don't already have the terminating character in a whole-line # equation without linebreaks, eg \( Ax=b \): if not s.strip().endswith('\)'): while True: l = nl(f, codemode=True) if not l: break s += l if l.strip() == '\)': break r = br(s.strip(), f) r = mathjaxeqresub(r) out(f.outf, r) # look for lists. elif p == '-': dashlist(f, False) elif p == '.': dashlist(f, True) elif p == ':': colonlist(f) # look for titles. elif p == '=': (s, c) = nl(f, True) # trim trailing \n. s = s[:-1] hb(f.outf, '<h%d>|</h%d>\n' % (c, c), br(s, f)) # look for comments. elif p == '#': l = nl(f) elif p == '\n': nl(f) # look for blocks. elif p == '~': nl(f) if infoblock: out(f.outf, f.conf['infoblockend']) infoblock = False nl(f) continue elif imgblock: out(f.outf, '</td></tr></table>\n') imgblock = False nl(f) continue elif tableblock: out(f.outf, '</td></tr></table>\n') tableblock = False nl(f) continue else: if pc(f) == '{': l = allreplace(nl(f)) r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S) g = re.findall(r, l) else: g = [] # process jemdoc markup in titles. if len(g) >= 1: g[0] = br(g[0], f) if len(g) in (0, 1): # info block. out(f.outf, f.conf['infoblock']) infoblock = True if len(g) == 1: # info block. hb(f.outf, f.conf['blocktitle'], g[0]) out(f.outf, f.conf['infoblockcontent']) elif len(g) >= 2 and g[1] == 'table': # handles # {title}{table}{name} # one | two || # three | four || name = '' if len(g) >= 3 and g[2]: name += ' id="%s"' % g[2] out(f.outf, '<table%s>\n<tr class="r1"><td class="c1">' % name) f.tablerow = 1 f.tablecol = 1 tableblock = True elif len(g) == 2: codeblock(f, g) elif len(g) >= 4 and g[1] == 'img_left': # handles # {}{img_left}{source}{alttext}{width}{height}{linktarget}. g += ['']*(7 - len(g)) if g[4].isdigit(): g[4] += 'px' if g[5].isdigit(): g[5] += 'px' out(f.outf, '<table class="imgtable"><tr><td>\n') if g[6]: out(f.outf, '<a href="%s">' % g[6]) out(f.outf, '<img src="%s"' % g[2]) out(f.outf, ' alt="%s"' % g[3]) if g[4]: out(f.outf, ' width="%s"' % g[4]) if g[5]: out(f.outf, ' height="%s"' % g[5]) out(f.outf, ' />') if g[6]: out(f.outf, '</a>') out(f.outf, ' </td>\n<td align="left">') imgblock = True else: raise JandalError("couldn't handle block", f.linenum) else: s = br(np(f), f, tableblock) if s: if tableblock: hb(f.outf, '|\n', s) else: hb(f.outf, '<p>|</p>\n', s) if showfooter and (showlastupdated or showsourcelink): out(f.outf, f.conf['footerstart']) if showlastupdated: if showlastupdatedtime: ts = '%Y-%m-%d %H:%M:%S %Z' else: ts = '%Y-%m-%d' s = time.strftime(ts, time.localtime(time.time())) hb(f.outf, f.conf['lastupdated'], s) if showsourcelink: hb(f.outf, f.conf['sourcelink'], f.inname) out(f.outf, f.conf['footerend']) if menu: out(f.outf, f.conf['menulastbit']) else: out(f.outf, f.conf['nomenulastbit']) out(f.outf, f.conf['bodyend']) if f.outf is not sys.stdout: # jem: close file here. # jem: XXX this is where you would intervene to do a fast open/close. f.outf.close()
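# The curly-group regex procfile compiles repeatedly, shown in isolation: it
# captures {...} groups while skipping escaped \{ and \} braces.
import re

r = re.compile(r'(?<!\\){(.*?)(?<!\\)}', re.M + re.S)
print(re.findall(r, r'menu{main}{index.html}{} and \{literal\}'))
# -> ['main', 'index.html', '']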
def logCommand(self, client, message, **kwargs):
    # Renamed the local from `localtime` to avoid shadowing time.localtime.
    now = time.localtime(time.time())
    date = ("%s-%s-%s, %s:%s:%s" % (now[1], now[2], now[0], now[3], now[4], now[5]))
    f = open('admin.log', 'a')
    f.write("Timestamp: \"%s\", Admin: %s, Command: %s\n" % (date, client, message))
    f.close()  # the original referenced f.close without calling it
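# Illustrative aside: time.strftime produces the zero-padded equivalent of the
# hand-formatted "m-d-Y, H:M:S" stamp built in logCommand above, in one call.
import time

print(time.strftime("%m-%d-%Y, %H:%M:%S", time.localtime()))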
# The head of this snippet (including the start of testfunc) was lost; the
# sketch below is a reconstruction inferred from the surviving '/' branch and
# the calls that follow: the function guesses the date separator in `v` and
# substitutes it into a '*'-templated format string. The template itself is
# an assumption.
import time
from datetime import datetime, timedelta

def testfunc(v):
    format_str = "%Y*%m*%d %H:%M:%S"
    if '-' in v:
        format_str = format_str.replace("*", "-")
    elif '.' in v:
        format_str = format_str.replace("*", ".")
    elif '/' in v:
        format_str = format_str.replace("*", "/")
    print(v)
    print(format_str)

testfunc("2016-12-20 13:13:13")
testfunc("2016-12-20 131313")
testfunc("2016-12-20 13:13:13")
testfunc("2016.12.20 13:13:13")
testfunc("2016/12/20 13:13:13")

dt = datetime.now()
print("time".center(80, "-"))
print(dt)
print(dt.strftime("%Y%m%d%H%M%S%f"))
print(type(dt.strftime("%Y%m%d%H%M%S%f")))
print(time.strftime("%Y-%m-%d %X %b", time.localtime()))

dt = datetime.strptime('Thu, 26 May 2016 13:18:27 +0800', '%a, %d %b %Y %H:%M:%S %z')
print(dt.strftime("%Y-%m-%d %H:%M:%S %z"))

#testfunc("2016-12-20 13:13:13")
dt = datetime.now()
print(dt)
print(dt + timedelta(minutes=-30))
#scan_start_dt + datetime.timedelta(minutes=-30)).timestamp()

j_s = r"\\udsdsd\\u7868"
j_s = j_s.replace(r'\\n', r'\n')
print(j_s)
def handle(self):
    print 'Now connect from ', self.client_address[0]
    while True:
        recvdata = self.request.recv(1024)
        if not recvdata:
            # An empty read means the peer closed the socket; the original
            # `continue` here spun forever on a dead connection.
            break
        elif recvdata == 'Now client connect to Server':
            self.request.sendall('OK,I am ready')
        elif recvdata.startswith('username'):
            self.username = recvdata.split(':')[-1]
            if Myserver.userinfo.has_key(self.username):
                self.request.sendall('valid')
            else:
                self.request.sendall('invalid')
        elif recvdata.startswith('userpasswd'):
            self.userpasswd = recvdata.split(':')[-1]
            print self.userpasswd
            if Myserver.userinfo[self.username] == self.userpasswd:
                self.request.sendall('valid')
                time.sleep(0.5)
                self.request.sendall('%s broken connect with server' % time.strftime("%Y-%m-%d %X", time.localtime()))
                break
            else:
                self.request.sendall('invalid')
    print "broken connect with %s" % self.client_address[0]