def create_statement(self, cr, uid, line_invoice, partner, amount, journal,
                     date_bank=None, account_id=None):
    bank_stmt_id = self.acc_bank_stmt_model.create(
        cr, uid, {"journal_id": journal,
                  "date": date_bank or time.strftime("%Y") + "-07-01"})
    bank_stmt_line_id = self.acc_bank_stmt_line_model.create(
        cr, uid,
        {
            "name": "payment",
            "statement_id": bank_stmt_id,
            "partner_id": partner,
            "amount": amount,
            "date": date_bank or time.strftime("%Y") + "-07-01",
        })
    val = {
        "credit": amount > 0 and amount or 0,
        "debit": amount < 0 and amount * -1 or 0,
        "name": line_invoice and line_invoice.name or "cash flow",
    }
    if line_invoice:
        val.update({"counterpart_move_line_id": line_invoice.id})
    if account_id:
        val.update({"account_id": account_id})
    self.acc_bank_stmt_line_model.process_reconciliation(cr, uid, bank_stmt_line_id, [val])
    move_line_ids_complete = self.acc_bank_stmt_model.browse(cr, uid, bank_stmt_id).move_line_ids
    return move_line_ids_complete
def recomByBasewalker(graph, targetNode, newCoAuthorList, recom_count,
                      file_input, file_input_re, max_iterations, damping_factor):
    recom_list = []
    file_input.write('before BaseWalker time:' + time.strftime('%Y-%m-%d-%H-%M-%S',
                     time.localtime(time.time())) + '\n')
    pagerank = PageRank(0, graph, targetNode, damping_factor, max_iterations)
    file_input.write('after BaseWalker time:' + time.strftime('%Y-%m-%d-%H-%M-%S',
                     time.localtime(time.time())) + '\n')
    index = 0
    for k, v in pagerank:
        # if not graph.has_edge((targetNode, k)):
        recom_list.append(k)
        file_input.write('recom:' + '(' + targetNode + ':' + k + ')' + str(v) + '\n')
        index += 1
        if index >= recom_count - 1:
            break
    pagerank = []
    file_input.write(str(newCoAuthorList) + '\n')
    node_count_right = len(list(set(newCoAuthorList) & set(recom_list)))
    path_dis = find_shortest_path(graph, targetNode, recom_list)
    file_input_re.write('2' + str(len(newCoAuthorList)) + ' ' + str(node_count_right) +
                        ' ' + str(recom_count) + ' ' + str((1.0 * path_dis) / recom_count) + '\n')
    recom_list = []
    # Return the precision, recall and average shortest-path length.
    return (1.0 * node_count_right) / recom_count, \
           (1.0 * node_count_right) / len(newCoAuthorList), \
           (1.0 * path_dis) / recom_count
def create_batch():
    """
    Collects all the data from a trip and wraps it in the batch format.

    :return: The gathered trip data in the correct batch format.
    """
    arduino = serial.Serial('/dev/serial/by-id/usb-Gravitech_ARDUINO_NANO_13BP1066-if00-port0', 115200)
    batch_data = []
    # The GPS already has a fix when this function is executed, so this is the correct start time.
    starttime = time.strftime("%Y-%m-%dT%H:%M:%S")
    ard_read = arduino.readline().strip()  # read once up front to prevent an error on the first loop pass
    while ard_read != '1995':  # stop condition: the Arduino sends '1995' to the Pi
        # Adds accelerometer data and, most of the time, data from one sensor
        # (GPS, humidity, temperature or heartbeat) to the batch_data list.
        ard_read = arduino.readline().strip()
        if ard_read == '1234':
            batch_data += temphum_pointdata()
        if ard_read == '1337':
            batch_data += gps_pointdata()
        if ard_read == '1996':
            batch_data += beat_pointdata()
        batch_data += accelerometer_pointdata()
    endtime = time.strftime("%Y-%m-%dT%H:%M:%S")
    batch = [{"startTime": starttime, "endTime": endtime, "groupID": "cwa2",
              "userID": ID, "sensorData": batch_data, "meta": {}}]
    return batch
def wav_file_gen(encoding_type, ir_code, frequency, signal_strength, btn_name, brand):
    # Name
    today = datetime.date.today()
    today_name = (str2md5(str2md5(str(today))))[0:10]
    wav_src = btn_name + time.strftime('%Y%m%d', time.localtime(time.time()))
    wav_name = (str2md5(wav_src))[0:10]
    brand_src = brand + time.strftime('%m%d', time.localtime(time.time()))
    brand_name = (str2md5(brand_src))[0:10]
    # Path
    path_brand = brand_name + "/"
    path_header = "/var/www/weixin/wechat/static/media/"
    path_today = path_header + today_name + "/"
    # File
    raw_data = path_today + path_brand + wav_name
    pcm_file = raw_data + ".pcm"
    wav_file = raw_data + ".wav"
    relative_wav_file = "media/" + today_name + "/" + path_brand + wav_name + ".wav"
    # Delete older paths
    for day in range(1, 6):
        date_src = str(today - datetime.timedelta(days=day))
def recommender(recom_count=25, test_times=100, hotNode_degree=60, year_sta=2011):
    '''
    Run the recommendation experiment; the results are stored in txt files.
    @recom_count     size of the recommendation list
    @test_times      number of experiment runs
    @hotNode_degree  minimum neighbour count that defines a hot node
    '''
    file_input = open('/home/zhenchentl/out.txt', 'w+')
    file_input_re = open('/home/zhenchentl/out_re.txt', 'w+')
    file_input.write('recom_count:' + str(recom_count) + '\n')
    file_input.write('test_times:' + str(test_times) + '\n')
    file_input.write('hotNode_degree:' + str(hotNode_degree) + '\n')
    file_input.write('before get graph time:' + time.strftime('%Y-%m-%d-%H-%M-%S',
                     time.localtime(time.time())) + '\n')
    print 'before get graph time:' + time.strftime('%Y-%m-%d-%H-%M-%S',
                                                   time.localtime(time.time()))
    # Get the graph based on the co-author relationship.
    mD = DigraphByYear()
    mDigraph = mD.getDigraph()
    getGraphAttr(mDigraph, file_input)
    file_input.write('after get graph time:' + time.strftime('%Y-%m-%d-%H-%M-%S',
                     time.localtime(time.time())) + '\n')
    print 'after get graph time:' + time.strftime('%Y-%m-%d-%H-%M-%S',
                                                  time.localtime(time.time()))
    recom_count = 5
    while recom_count <= 100:
        exp_recom(mDigraph, file_input, file_input_re, recom_count)
        recom_count += 5
    file_input.close()
    file_input_re.close()
def saveVerbrauchsData(v_wp, v_sz, zs_wp, zs_sz, interval):
    y = time.strftime('%Y', time.localtime())
    m = time.strftime('%m', time.localtime())
    d = time.strftime('%d', time.localtime())
    f = open("/var/lib/heatpumpMonitor/verbrauch.%s-%s-%s" % (y, m, d), 'a')
    f.write("%s %04d %04d %d %d %d\n" % (time.strftime('%Y %m %d %a %H %H:%M:%S', time.localtime()),
                                         v_wp, v_sz, zs_wp, zs_sz, interval))
    f.close()
def main(argv):
    try:
        postTitle = argv[1]
        postCategory = argv[2]
    except IndexError:
        postTitle = "DEFAULT TITLE"
        postCategory = "DEFAULT CATEGORY"
    todayDate = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    currentTime = time.strftime('%H:%M', time.localtime(time.time()))
    fileNameWithoutDate = postTitle.lower().replace(' ', '-')
    fileName = todayDate + "-" + fileNameWithoutDate + ".markdown"
    # fileFullName = os.path.join(POST_PATH, fileName)
    with open(fileName, 'w+') as fin:
        fin.write("---\n")
        fin.write("layout: post\n")
        fin.write('title: "%s"\n' % postTitle)
        fin.write('date: %s %s\n' % (todayDate, currentTime))
        fin.write("comments: true\n")
        fin.write('categories: %s\n' % postCategory.capitalize())
        fin.write("---\n\n\n\n")
        fin.write("<!--more-->\n\n\n")
    print('"%s" was created successfully.' % fileName)
def delete(self, thema, id, beitragID=None):
    discussionpath = "./data/themen/" + thema + "/" + id + ".json"
    with open(discussionpath, "r") as discussionfile:
        discussion = json.load(discussionfile)
    if beitragID is None:
        if discussion["Status"] == "deleted":
            discussion["Status"] = " "
        else:
            discussion["Status"] = "deleted"
        discussion["Bearbeiter"] = cherrypy.session["Benutzername"]
        discussion["Bearbeitet"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    else:
        for post in discussion["Beitraege"]:
            if post["ID"] == beitragID:
                if post["Status"] == "deleted":
                    post["Status"] = " "
                else:
                    post["Status"] = "deleted"
                post["Bearbeiter"] = cherrypy.session["Benutzername"]
                post["Bearbeitet"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    with open(discussionpath, "w") as discussionfile:
        json.dump(discussion, discussionfile, indent=4)
def generateur(presence):  # 'a' if the file already exists, 'w' otherwise
    global cible
    global nom
    global type
    pasdefichier = open(cible + ".txt", presence)  # open the file
    fdebug("Opened / created the file")
    continueraecrire = "c"
    while continueraecrire != "q":
        if type == "a":
            action = raw_input(VERT + "Enter the action to record in the register >>>" + NORMAL)
            while action == "":
                print (ROUGE + "Enter something" + NORMAL)
                action = raw_input(VERT + "Enter the action to record in the register >>>" + NORMAL)
            pasdefichier.write(time.strftime('%d/%m/%y %H:%M', time.localtime()) + " *** " + action + " (" + nom + ")" + "\n")
        else:
            commantaire = raw_input(VERT + "Enter the comment to record in the file >>>" + NORMAL)
            while commantaire == "":
                print (ROUGE + "Enter something" + NORMAL)
                commantaire = raw_input(VERT + "Enter the comment to record in the file >>>" + NORMAL)
            pasdefichier.write(time.strftime('%d/%m/%y %H:%M', time.localtime()) + " >>> " + commantaire + " (" + nom + ")" + "\n")
        fdebug("Saved to the file")
        warn = raw_input(VERT + "Did you warn " + cible + " about the fault? (o/n) " + NORMAL)
        if "o" in warn:
            pasdefichier.write(time.strftime('%d/%m/%y %H:%M', time.localtime()) + " *** " + "warn " + "(" + nom + ")" + "\n")
        continueraecrire = raw_input(VERT + "Keep writing about the same person? (q to quit, c to continue)" + NORMAL)
    pasdefichier.close()  # close the file behind us
def run_once(self, test_name):
    if test_name == 'setup':
        return
    #
    # We need to be sure we run this on the right target machines
    # as this is really quite destructive!
    #
    if not os.uname()[1] in self.valid_clients:
        return
    date_start = time.strftime("%Y-%m-%d")
    time_start = time.strftime("%H%M")
    output = ''
    #
    # Test 3 different I/O schedulers:
    #
    for iosched in ['cfq', 'deadline', 'noop']:
        #
        # Test 5 different file systems, across 20+ tests..
        #
        os.chdir(self.fio_tests_dir)
        cmd = './test.sh'
        cmd += ' -d ' + self.dev + '1 -m 8G -S -s ' + iosched + ' -f ext2,ext3,ext4,xfs,btrfs'
        cmd += ' -D ' + date_start + ' -T ' + time_start
        output += utils.system_output(cmd, retain_output=True)
    #
    # Move the results from the src tree into the autotest results tree, where
    # they will automatically get picked up and copied over to the jenkins server.
    #
    os.rename(os.path.join(self.srcdir, 'fs-test-proto'),
              os.path.join(self.resultsdir, 'fs-test-proto'))
def add_automatic_comment(self):
    if self.fixed is True:
        text = (
            "This %s has been scheduled for fixed downtime from %s to %s. "
            "Notifications for the %s will not be sent out during that time period."
            % (self.ref.my_type,
               time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
               time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
               self.ref.my_type)
        )
    else:
        hours, remainder = divmod(self.duration, 3600)
        minutes, seconds = divmod(remainder, 60)
        text = (
            "This %s has been scheduled for flexible downtime starting between %s and %s "
            "and lasting for a period of %d hours and %d minutes. "
            "Notifications for the %s will not be sent out during that time period."
            % (self.ref.my_type,
               time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
               time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
               hours, minutes, self.ref.my_type)
        )
    if self.ref.my_type == 'host':
        comment_type = 1
    else:
        comment_type = 2
    c = Comment(self.ref, False, "(Nagios Process)", text, comment_type, 2, 0, False, 0)
    self.comment_id = c.id
    self.extra_comment = c
    self.ref.add_comment(c)
def createTestWorkspace(self):
    """ Create a workspace for testing against, with ideal log values
    """
    from mantid.simpleapi import CreateWorkspace
    from mantid.simpleapi import AddSampleLog
    from time import gmtime, strftime, mktime
    import numpy as np

    # Create a matrix workspace
    x = np.array([1., 2., 3., 4.])
    y = np.array([1., 2., 3.])
    e = np.sqrt(np.array([1., 2., 3.]))
    wksp = CreateWorkspace(DataX=x, DataY=y, DataE=e, NSpec=1, UnitX='TOF')

    # Add run_start
    tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime())))
    AddSampleLog(Workspace=wksp, LogName='run_start', LogText=str(tmptime))

    tsp_a = kernel.FloatTimeSeriesProperty("SensorA")
    tsp_b = kernel.FloatTimeSeriesProperty("SensorB")
    tsp_c = kernel.FloatTimeSeriesProperty("SensorC")
    for i in np.arange(25):
        tmptime = strftime("%Y-%m-%d %H:%M:%S", gmtime(mktime(gmtime()) + i))
        tsp_a.addValue(tmptime, 1.0 * i * i)
        tsp_b.addValue(tmptime, 2.0 * i * i)
        tsp_c.addValue(tmptime, 3.0 * i * i)

    wksp.mutableRun()['SensorA'] = tsp_a
    wksp.mutableRun()['SensorB'] = tsp_b
    wksp.mutableRun()['SensorC'] = tsp_c

    return wksp
def _fix_review_dates(self, item):
    ''' Convert dates so ES detects them '''
    for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
        if date_field in item.keys():
            date_ts = item[date_field]
            item[date_field] = time.strftime('%Y-%m-%dT%H:%M:%S',
                                             time.localtime(date_ts))
    if 'patchSets' in item.keys():
        for patch in item['patchSets']:
            pdate_ts = patch['createdOn']
            patch['createdOn'] = time.strftime('%Y-%m-%dT%H:%M:%S',
                                               time.localtime(pdate_ts))
            if 'approvals' in patch:
                for approval in patch['approvals']:
                    adate_ts = approval['grantedOn']
                    approval['grantedOn'] = \
                        time.strftime('%Y-%m-%dT%H:%M:%S',
                                      time.localtime(adate_ts))
    if 'comments' in item.keys():
        for comment in item['comments']:
            cdate_ts = comment['timestamp']
            comment['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S',
                                                 time.localtime(cdate_ts))
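# Side note (illustrative, not from the original source): the conversion used
# throughout _fix_review_dates — epoch seconds to a local-time ISO-8601 string
# that Elasticsearch can auto-detect — reduces to a small helper:
import time

def epoch_to_iso(ts):
    # Local-time ISO-8601 string, matching the '%Y-%m-%dT%H:%M:%S' format above.
    return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(ts))

print(epoch_to_iso(1500000000))  # e.g. '2017-07-14T04:40:00' (zone-dependent)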
def cmd_list(self, args):
    """
    @G%(name)s@w - @B%(cmdname)s@w
      list timers and the plugins they are defined in
      @CUsage@w: list
    """
    tmsg = []
    match = args['match']
    tmsg.append('Local time is: %s' % time.strftime('%a %b %d %Y %H:%M:%S',
                                                    time.localtime()))
    tmsg.append('%-20s : %-13s %-9s %-8s %s' %
                ('Name', 'Defined in', 'Enabled', 'Fired', 'Next Fire'))
    for i in self.timerlookup:
        if not match or match in i:
            timerc = self.timerlookup[i]
            tmsg.append('%-20s : %-13s %-9s %-8s %s' % (
                timerc.name, timerc.plugin.sname, timerc.enabled,
                timerc.timesfired,
                time.strftime('%a %b %d %Y %H:%M:%S',
                              time.localtime(timerc.nextcall))))
    return True, tmsg
def FullTextQuery(calendar_service):
    print 'Full text query for events on Primary Calendar: \'%s\'' % (q)
    query = GServ.CalendarEventQuery(calendar, 'private', 'full', q)
    query.start_min = date        # beginning of the query range: the present day
    query.start_max = endDate     # limit the query range to the next 14 days; change timedelta(days) to set the range
    query.singleevents = 'true'   # expand recurring events into single instances
    query.orderBy = 'startTime'   # sort by event start time
    query.sortorder = 'a'         # sort order: ascending
    feed = calendar_service.CalendarQuery(query)
    for i, an_event in enumerate(feed.entry):
        for a_when in an_event.when:
            print " "
            print an_event.title.text, "Scheduled:", i, "For:", \
                time.strftime('%d-%m-%Y %H:%M', time.localtime(tf_from_timestamp(a_when.start_time))), \
                "Current Time:", time.strftime('%d-%m-%Y %H:%M')
            if time.strftime('%d-%m-%Y %H:%M', time.localtime(tf_from_timestamp(a_when.start_time))) == time.strftime('%d-%m-%Y %H:%M'):
                print "Waking you up!"
                print "---"
                # choose a random .mp3 file from the directory
                songfile = random.choice(os.listdir(mp3_path))
                print "Now Playing:", songfile
                # Play the MP3 in its entirety. As long as the file is longer
                # than a minute it will only be played once:
                command = "mpg321" + " " + mp3_path + "'" + songfile + "'" + " -g 100"
                print command
                os.system(command)  # play the song
            else:
                print "Wait for it..."  # the event's start time is not the system's current time
def on_data(self, data):
    if time.time() >= self.started + self.duration:
        stats = open('{0}-sample.stats'.format(int(self.started)), 'w+')
        stats.write("================= STATISTICS =================" + "\n")
        stats.write("Start time: " + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.started)) + "\n")
        stats.write("End time: " + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) + "\n")
        stats.write("First Tweet ID: " + self.first_tweet_id + "\n")
        stats.write("Last Tweet ID: " + self.last_tweet_id + "\n")
        stats.write("Language: " + self.lang + "\n")
        stats.write("Language classification threshold: " + str(self.lang_threshold) + "\n")
        stats.write("Above threshold: " + str(self.counter[self.lang + '-above']) + "\n")
        stats.write("Below threshold: " + str(self.counter[self.lang + '-below']) + "\n")
        stats.write("Excluded: " + str(self.counter['excluded']) + "\n")
        return False
    elif 'in_reply_to_status_id' in data:
        status = Status.parse(self.api, json.loads(data))
        langclass = langid.classify(status.text)
        if self.counter == {self.lang + '-above': 0, self.lang + '-below': 0, 'excluded': 0}:
            self.first_tweet_id = str(status.id)
        self.last_tweet_id = str(status.id)
        if langclass[0] == self.lang:
            if langclass[1] >= self.lang_threshold:
                self.above_output.write(data)
                self.counter[self.lang + '-above'] += 1
            else:
                self.below_output.write(data)
                self.counter[self.lang + '-below'] += 1
        else:
            self.excl_output.write(data)
            self.counter['excluded'] += 1
        return True
def main():
    # message
    zprava = matrix([[20], [17], [2], [5], [6]])
    # key
    klic = matrix([[18,  0, 19, 12, 23],
                   [22, 30, 32, 19, 10],
                   [19, 17,  2, 32, 32],
                   [11, 24, 20, 22,  5],
                   [30,  0, 19, 26, 22]])
    print "Brute force started at", strftime("%H:%M:%S", gmtime())
    for a in range(26):
        print ""
        print a,
        for b in range(26):
            print ".",
            for c in range(26):
                for d in range(26):
                    for e in range(26):
                        matice = matrix([[a], [b], [c], [d], [e]])
                        nasobek = klic * matice
                        if ((nasobek[0] % 33 == 28) & (nasobek[1] % 33 == 9) &
                                (nasobek[2] % 33 == 8) & (nasobek[3] % 33 == 4) &
                                (nasobek[4] % 33 == 14)):
                            print matice
    print ""
    print "Brute force ended at", strftime("%H:%M:%S", gmtime())
def make_payment(self, invoice_record, bank_journal, amount=0.0, amount_currency=0.0, currency_id=None):
    bank_stmt = self.acc_bank_stmt_model.create({
        'journal_id': bank_journal.id,
        'date': time.strftime('%Y') + '-07-15',
    })
    bank_stmt_line = self.acc_bank_stmt_line_model.create({
        'name': 'payment',
        'statement_id': bank_stmt.id,
        'partner_id': self.partner_agrolait_id,
        'amount': amount,
        'amount_currency': amount_currency,
        'currency_id': currency_id,
        'date': time.strftime('%Y') + '-07-15',
    })
    # Reconcile the payment with the invoice.
    for l in invoice_record.move_id.line_ids:
        if l.account_id.id == self.account_rcv.id:
            line_id = l
            break
    amount_in_widget = currency_id and amount_currency or amount
    bank_stmt_line.process_reconciliation(counterpart_aml_dicts=[{
        'move_line': line_id,
        'debit': amount_in_widget < 0 and -amount_in_widget or 0.0,
        'credit': amount_in_widget > 0 and amount_in_widget or 0.0,
        'name': line_id.name,
    }])
    return bank_stmt
def _createSearchRequest(self, search=None, tags=None, notebooks=None,
                         date=None, exact_entry=None, content_search=None):
    request = ""
    if notebooks:
        for notebook in tools.strip(notebooks.split(',')):
            if notebook.startswith('-'):
                request += '-notebook:"%s" ' % tools.strip(notebook[1:])
            else:
                request += 'notebook:"%s" ' % tools.strip(notebook)
    if tags:
        for tag in tools.strip(tags.split(',')):
            if tag.startswith('-'):
                request += '-tag:"%s" ' % tag[1:]
            else:
                request += 'tag:"%s" ' % tag
    if date:
        date = tools.strip(date.split('-'))
        try:
            dateStruct = time.strptime(date[0] + " 00:00:00", "%d.%m.%Y %H:%M:%S")
            request += 'created:%s ' % time.strftime("%Y%m%d", time.localtime(time.mktime(dateStruct)))
            if len(date) == 2:
                dateStruct = time.strptime(date[1] + " 00:00:00", "%d.%m.%Y %H:%M:%S")
                request += '-created:%s ' % time.strftime("%Y%m%d", time.localtime(time.mktime(dateStruct) + 60 * 60 * 24))
        except ValueError, e:
            out.failureMessage('Incorrect date format in --date attribute. '
                               'Format: %s' % time.strftime("%d.%m.%Y", time.strptime('19991231', "%Y%m%d")))
            return tools.exitErr()
def createSatellitesXMLfile(self, tp_list, save_xml_dir):
    pos = self.orb_position
    if pos > 1800:
        pos -= 3600
    if pos < 0:
        pos_name = '%dW' % (abs(int(pos)) / 10)
    else:
        pos_name = '%dE' % (abs(int(pos)) / 10)
    location = '%s/dmm_blindscan_%s_%s.xml' % (save_xml_dir, pos_name,
                                               strftime("%d-%m-%Y_%H-%M-%S"))
    tuner = nimmanager.nim_slots[self.feid].friendly_full_description
    xml = ['<?xml version="1.0" encoding="iso-8859-1"?>\n\n']
    xml.append('<!--\n')
    xml.append('    File created on %s\n' % (strftime("%A, %d of %B %Y, %H:%M:%S")))
    try:
        xml.append('    using %s receiver running Enigma2 image, version %s,\n' %
                   (boxtype, about.getEnigmaVersionString()))
        xml.append('    image %s, with the Blind scan plugin\n\n' %
                   (about.getImageTypeString()))
    except:
        xml.append('    using %s receiver running Enigma2 image (%s), with the Dreambox blind scan plugin\n\n' %
                   (boxtype, tuner))
    xml.append('-->\n\n')
    xml.append('<satellites>\n')
    xml.append('    <sat name="%s" flags="0" position="%s">\n' %
               (self.sat_name.replace('&', '&amp;'), self.orb_position))
    for tp in tp_list:
        xml.append('        <transponder frequency="%d" symbol_rate="%d" polarization="%d" fec_inner="%d" system="%d" modulation="%d"/>\n' %
                   (tp.frequency, tp.symbol_rate, tp.polarisation, tp.fec, tp.system, tp.modulation))
    xml.append('    </sat>\n')
    xml.append('</satellites>')
    f = open(location, "w")
    f.writelines(xml)
    f.close()
    return location
def watchRunFolder(run, sleep):
    """
    Args:
        run -> A folder that contains an RTAComplete.txt
    Method:
        The file is polled every `sleep` seconds. Once the file exists and its
        first line is non-empty, the loop kicks out and the rest of the BCL
        pipeline runs.
    """
    RTAcomplete = run + "/RTAComplete.txt"
    iteration = 0
    while True:
        if not os.path.isfile(RTAcomplete):
            print("Real Time Analysis has not begun yet. Time: %s" %
                  time.strftime("%m-%d-%y %H:%M:%S", time.localtime()))
        else:
            with open(RTAcomplete, "r") as input_file:
                first_line = input_file.readline().strip()
                if not first_line:
                    print("Real Time Analysis in process. Time %s" %
                          time.strftime("%m-%d-%y %H:%M:%S", time.localtime()))
                else:
                    print("Checked file at %s and the RTAComplete.txt shows that RTA has finished" %
                          time.strftime("%m-%d-%y %H:%M:%S", time.localtime()))
                    print("Moving on to Bcl Analysis")
                    break
        time.sleep(sleep)
def assignmentsHTML():
    html = ""
    bytes = 0
    for i in assignments:
        fdata = os.stat(i[1])
        # Get last modified date, and format to DOS format
        mdate = time.strftime("%m-%d-%y", time.localtime(fdata.st_mtime))
        # Get last modified time, and format to DOS format
        mtime = time.strftime("%I", time.localtime(fdata.st_mtime)).strip("0") + \
                time.strftime(":%M", time.localtime(fdata.st_mtime)) + \
                time.strftime("%p", time.localtime(fdata.st_mtime)).lower()[0]
        # Get file size, and format to DOS format
        fsize = '{:,}'.format(fdata.st_size)
        elem = '{}{:>13}{:>9}{:>8}'.format(a('{:<21}'.format(i[0]), i[1]), fsize, mdate, mtime)
        html = html + elem + "\n"
        bytes = bytes + os.path.getsize(i[1])
    files = len(assignments)
    free = 8589869056 - bytes
    html = html + '{:>18} file(s){:>14,} bytes\n'.format(files, bytes)
    html = html + '{:>40} bytes free\n'.format('{:,}'.format(free))
    return html
def handle(self, data, fulltext, tokens, slackclient, channel, user):
    slackclient.post_message(channel, 'UTC: `' +
                             time.strftime('%Y/%m/%d-%H:%M:%S', time.gmtime(self._epoch)) + '`')
    if self._additional_location and 'modules.google_tz_handler' in sys.modules:
        handler_module = sys.modules['modules.google_tz_handler']
        handler_class = getattr(handler_module, 'google_tz_handler')
        handler_instance = handler_class(self._config)
        city = handler_instance.get_cities(self._additional_location.replace(' ', '+'))[0]
        local_time = handler_instance.get_raw_local_time(city, self._epoch)
        slackclient.post_message(channel, self._additional_location + ': `' +
                                 time.strftime('%Y/%m/%d-%H:%M:%S', time.gmtime(local_time)) + '`')
def strftime(dt, fmt):
    if dt.year >= 1900:
        return super(type(dt), dt).strftime(fmt)
    illegal_formatting = _illegal_formatting.search(fmt)
    if illegal_formatting:
        msg = 'strftime of dates before 1900 does not handle {0}'
        raise TypeError(msg.format(illegal_formatting.group(0)))

    year = dt.year
    # For every non-leap-year century, advance by
    # 6 years to get into the 28-year repeat cycle.
    delta = 2000 - year
    off = 6 * (delta // 100 + delta // 400)
    year += off

    # Move to around the year 2000.
    year += ((2000 - year) // 28) * 28
    timetuple = dt.timetuple()
    s1 = time.strftime(fmt, (year,) + timetuple[1:])
    sites1 = _findall(s1, str(year))

    s2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
    sites2 = _findall(s2, str(year + 28))

    sites = []
    for site in sites1:
        if site in sites2:
            sites.append(site)

    s = s1
    syear = "%04d" % (dt.year,)
    for site in sites:
        s = s[:site] + syear + s[site + 4:]
    return s
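# Usage sketch (hypothetical wiring, not from the original source): bind the
# pre-1900 workaround above as the strftime of a datetime.date subclass, so
# old dates go through the 28-year-cycle shift while modern dates fall through
# to the stock implementation. Assumes the module-level _illegal_formatting
# regex and _findall helper referenced above are defined.
import datetime
import time

class HistoricDate(datetime.date):
    pass

HistoricDate.strftime = strftime  # the function defined above

print(HistoricDate(1850, 7, 1).strftime("%Y-%m-%d"))  # formats despite year < 1900
print(HistoricDate(1999, 7, 1).strftime("%Y-%m-%d"))  # delegates to datetime.date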
def _format_data(self, start_time, timestamp, name, units, values):
    fields = _fields[:]
    file_timestamp = time.strftime('%Y%m%d%H%M', time.gmtime(start_time))
    value_timestamp = time.strftime('%Y%m%d%H%M', time.gmtime(timestamp))
    fields[_field_index['units']] = units
    fields[_field_index['commodity']] = self.commodity
    meter_id = name + '|1'
    if units:
        meter_id += '/%s' % units
    fields[_field_index['meter_id']] = meter_id
    fields[_field_index['receiver_id']] = ''
    fields[_field_index['receiver_customer_id']] = self.customer_name + '|' + self.account_name
    fields[_field_index['timestamp']] = file_timestamp
    # interval put into "MMDDHHMM" with MMDD = 0000
    fields[_field_index['interval']] = '0000%02d%02d' % (self.period / 3600,
                                                         (self.period % 3600) / 60)
    fields[_field_index['count']] = str(len(values))
    value_sets = []
    for value in values:
        try:
            value = '%f' % value
            protocol_text = ''
        except ValueError:
            value = ''
            protocol_text = 'N'
        value_set = (value_timestamp, protocol_text, value)
        value_sets.append(string.join(value_set, ','))
        value_timestamp = ''
    fields[_field_index['interval_data']] = string.join(value_sets, ',')
    return string.join(fields, ',')
def banIP(IP, dport, service, timer=BANTIMER):
    """Returns 1 if IP is already BANNED/UNBANNED
       Returns 0 if BANNED/UNBANNED successfully
    """
    print 'banIP:'
    if (IP, service) in bannedIPs:
        print 'IP:' + IP + ' is already BANNED'
        logging.info('IP:' + IP + ' is already BANNED')
        return 1
    else:
        ip = bannedIP(IP, time.time(), service, timer)
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
        ip.rule = iptc.Rule(chain=chain)
        ip.rule.src = ip.IP + "/255.255.255.255"
        ip.rule.protocol = "tcp"
        ip.rule.target = iptc.Target(ip.rule, "REJECT")
        ip.rule.target.reject_with = "icmp-admin-prohibited"
        match = iptc.Match(ip.rule, "tcp")
        match.dport = dport
        ip.rule.add_match(match)
        bannedIPs[(ip.IP, service)] = ip
        chain.insert_rule(ip.rule)
        print 'IP:' + ip.IP + ' BANNED at ' + time.strftime("%b %d %H:%M:%S")
        logging.info('IP:' + ip.IP + ' BANNED at ' + time.strftime("%b %d %H:%M:%S"))
        resp = {"action": BANNEDIP,
                "data": {"IP": ip.IP,
                         "time": time.strftime("%b %d %H:%M:%S", time.localtime(ip.time)),
                         "timer": ip.timer,
                         "service": ip.service}}
        server.send_message_to_all(json.dumps(resp))
def exec_cmd_servers(username):
    print '\nInput the \033[32mHost IP(s)\033[0m, Separated by Commas, q/Q to Quit.\n'
    while True:
        hosts = raw_input('\033[1;32mip(s)>: \033[0m')
        if hosts in ['q', 'Q']:
            break
        hosts = hosts.split(',')
        hosts.append('')
        hosts = list(set(hosts))
        hosts.remove('')
        ip_all, ip_all_dict = ip_all_select(username)
        no_perm = set(hosts) - set(ip_all)
        if no_perm:
            print "You have NO PERMISSION on %s..." % list(no_perm)
            continue
        print '\nInput the \033[32mCommand\033[0m , The command will be Executed on servers, q/Q to quit.\n'
        while True:
            cmd = raw_input('\033[1;32mCmd(s): \033[0m')
            if cmd in ['q', 'Q']:
                break
            exec_log_dir = os.path.join(log_dir, 'exec_cmds')
            if not os.path.isdir(exec_log_dir):
                os.mkdir(exec_log_dir)
                os.chmod(exec_log_dir, 0777)
            filename = "%s/%s.log" % (exec_log_dir, time.strftime('%Y%m%d'))
            f = open(filename, 'a')
            f.write("DateTime: %s User: %s Host: %s Cmds: %s\n" %
                    (time.strftime('%Y/%m/%d %H:%M:%S'), username, hosts, cmd))
            for host in hosts:
                remote_exec_cmd(host, username, cmd)
def lastlogExit(self):
    starttime = time.strftime("%a %b %d %H:%M", time.localtime(self.logintime))
    endtime = time.strftime("%H:%M", time.localtime(time.time()))
    duration = utils.durationHuman(time.time() - self.logintime)
    f = file("%s/lastlog.txt" % self.env.cfg.get("honeypot", "data_path"), "a")
    f.write("root\tpts/0\t%s\t%s - %s (%s)\n" %
            (self.clientIP, starttime, endtime, duration))
    f.close()
def set_filter_date(self):
    dialog = xbmcgui.Dialog()
    if self.start_date == '':
        self.start_date = str(datetime.datetime.now())[:10]
    if self.end_date == '':
        self.end_date = str(datetime.datetime.now())[:10]
    try:
        d = dialog.numeric(1, common.getstring(30117),
                           strftime("%d/%m/%Y", strptime(self.start_date, "%Y-%m-%d")))
        if d != '':
            self.start_date = strftime("%Y-%m-%d", strptime(d.replace(" ", "0"), "%d/%m/%Y"))
        else:
            self.start_date = ''
        common.log('', str(self.start_date))
        d = dialog.numeric(1, common.getstring(30118),
                           strftime("%d/%m/%Y", strptime(self.end_date, "%Y-%m-%d")))
        if d != '':
            self.end_date = strftime("%Y-%m-%d", strptime(d.replace(" ", "0"), "%d/%m/%Y"))
        else:
            self.end_date = ''
        common.log('', str(self.end_date))
    except:
        pass
    if self.start_date != '' or self.end_date != '':
        self.getControl(BUTTON_DATE).setLabel(self.start_date + ' ... ' + self.end_date)
    else:
        self.getControl(BUTTON_DATE).setLabel(common.getstring(30164))
    self.getControl(BUTTON_DATE).setVisible(False)
    self.getControl(BUTTON_DATE).setVisible(True)
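# Side note (illustrative, not from the original source): the snippet above
# round-trips dates between ISO "%Y-%m-%d" and day-first "%d/%m/%Y" via
# strptime/strftime. The conversion in isolation:
import time

def iso_to_ddmmyyyy(s):
    return time.strftime("%d/%m/%Y", time.strptime(s, "%Y-%m-%d"))

def ddmmyyyy_to_iso(s):
    return time.strftime("%Y-%m-%d", time.strptime(s, "%d/%m/%Y"))

print(iso_to_ddmmyyyy("2023-07-01"))  # 01/07/2023
print(ddmmyyyy_to_iso("01/07/2023"))  # 2023-07-01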
def do_export(_):
    left_idx = g_pool.seek_control.trim_left
    right_idx = g_pool.seek_control.trim_right
    export_range = left_idx, right_idx + 1  # exclusive range.stop
    export_ts_window = pm.exact_window(g_pool.timestamps, (left_idx, right_idx))

    export_dir = os.path.join(g_pool.rec_dir, "exports")
    export_dir = next_export_sub_dir(export_dir)

    os.makedirs(export_dir)
    logger.info('Created export dir at "{}"'.format(export_dir))

    export_info = {
        "Player Software Version": str(g_pool.version),
        "Data Format Version": meta_info["Data Format Version"],
        "Export Date": strftime("%d.%m.%Y", localtime()),
        "Export Time": strftime("%H:%M:%S", localtime()),
        "Frame Index Range": g_pool.seek_control.get_frame_index_trim_range_string(),
        "Relative Time Range": g_pool.seek_control.get_rel_time_trim_range_string(),
        "Absolute Time Range": g_pool.seek_control.get_abs_time_trim_range_string(),
    }
    with open(os.path.join(export_dir, "export_info.csv"), "w") as csv:
        write_key_value_file(csv, export_info)

    notification = {
        "subject": "should_export",
        "range": export_range,
        "ts_window": export_ts_window,
        "export_dir": export_dir,
    }
    g_pool.ipc_pub.notify(notification)
def date() -> str:
    '''Return the current local date and time as a formatted string.'''
    now = time.strftime('%Y-%m-%d %H:%M', time.localtime(time.time()))
    return now
print("\n*** Episode %i *** \ \nAv.reward: [last %i]: %.2f, [last 100]: %.2f, [all]: %.2f \ \nepsilon: %.2f, frames_total: %i" % ( i_episode, report_interval, sum(reward_total[-report_interval:])/report_interval, mean_reward_100, sum(reward_total)/len(reward_total), epsilon, frames_total ) ) elapsed_time = time.time() - start_time print("Elapsed time: ", time.strftime("%H:%M:%S", time.gmtime(elapsed_time))) break print("\n\n\n\nAverage reward: %.2f" % (sum(reward_total)/num_episodes)) print("Average reward (last 100 episodes): %.2f" % (sum(reward_total[-100:])/100)) if solved: print("Solved after %i episodes" % solved_after) plt.figure(figsize=(12,5)) plt.title("Rewards") plt.bar(torch.arange(len(reward_total)), reward_total, alpha=0.6, color='green') plt.show()
def __set__(self, instance, value):
    self.date = t.strftime('%c')
    self.val = value
    with open('record.txt', 'a') as f:
        # Log that the variable was modified, with the (Beijing) local time.
        f.writelines('Variable %s was modified at Beijing time %s, %s = %s\n'
                     % (self.name, self.date, self.name, self.val))
def __get__(self, instance, owner):
    self.date = t.strftime('%c')
    with open('record.txt', 'a') as f:
        # Log that the variable was read, with the (Beijing) local time.
        f.writelines('Variable %s was read at Beijing time %s, %s = %s\n'
                     % (self.name, self.date, self.name, self.val))
    return self.val
def StartBackup(Host, TomcatBase):
    timestr = time.strftime("%Y_%m_%d_%H_%M_%S")
    Command = '/bin/mv' + ' ' + TomcatBase + ' ' + TomcatBase + '_' + timestr
    operation = CallProcess(Host, Command)
    return operation
save = True
# search = driver.find_element_by_xpath(
#     "/html/body/div[1]/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/label/input")
search = driver.find_element_by_xpath(
    "/html/body/div[1]/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/label/div/div[2]")
search.click()
sleep(5)
name = "Dummy"
search.send_keys(name)
sleep(5)
_open = driver.find_element_by_xpath(
    "/html/body/div[1]/div/div/div[2]/div[1]/span/div/span/div/div[2]/div[1]/div/div/div[1]/div/div/div[2]")
_open.click()
sleep(5)
print("NOW TRACKING IS LIVE")
t = strftime("%d/%m/%Y %H:%M:%S")
speako, speakf, speakOthers = True, True, True
while True:
    try:
        status = driver.find_element_by_xpath(
            "/html/body/div[1]/div/div/div[4]/div/header/div[2]/div[2]/span").text
        t = strftime("%Y-%m-%d %H:%M:%S")
        print("{1} : {2} is {0}".format(status, t[11:], name))
        if speako and status == 'online':
            notify.send('📱 ' + name + ' is online 📱 ')
            print("{} {} : online".format(t[11:], name) + '\n')
            push_notify('{} is OnLinE'.format(name))
            android_noti_result = push_service.notify_single_device(
                registration_id=registration_id,
                message_title='WaMon_Notification',
                message_body='{} is OnLinE'.format(name))
import HTMLTestRunnerNew
import time
import unittest

from package_005.hwk5_30_testcase import RegisterTestCase

# Create a test suite
suite = unittest.TestSuite()
# Create a runner
runner = unittest.TextTestRunner()
loader = unittest.TestLoader()
# Add the test cases
suite.addTest(loader.loadTestsFromTestCase(RegisterTestCase))

date2display = time.strftime('%y_%m_%d_%H_%M_%S', time.localtime())
with open('report_{}.html'.format(date2display), 'wb') as fb:
    test_run = HTMLTestRunnerNew.HTMLTestRunner(stream=fb,
                                                verbosity=2,
                                                title='py18_%s_report' % date2display,
                                                description='Parameterized report',
                                                tester='july')
    test_run.run(suite)
# runner.run(suite)
function_name = ''
duong_link = ''
function_csv = ''

# ------ delete trailing spaces, \n and \r ------
def delete(a):
    b = a.replace("\r", '')
    b = b.replace("\n", '')
    b = b.replace(' ', '')
    return b

wb = openpyxl.load_workbook(excel)

# ----------- write the date of testing -----------
sheet = wb['フォーマット変更来歴']
sheet['C5'] = strftime("%m/%d/%Y", gmtime())
print('write date of testing Done')
print('-------------------------')

# --------------- write the test coverage log ---------------
sheet = wb['カバレッジ結果']
temp1 = 'A'
temp2 = 5
for line in codecs.open(file, encoding="utf-8"):
    # print(line)
    sheet[temp1 + str(temp2)] = line
    if temp2 == 5:
        chuoi_ten_function = line
def PostUsinglog(date, count):
    list_platform = readfile.readtext("platform.txt", "fakedata/")
    list_appkey_android = readfile.readtext("android_appkey.txt", "fakedata/")
    list_appkey_iphone = readfile.readtext("iphone_appkey.txt", "fakedata/")
    list_activities_android = readfile.readtext("android_activities.txt", "fakedata/")
    list_activities_iphone = readfile.readtext("iphone_activities.txt", "fakedata/")
    list_version = readfile.readtext("version.txt", "fakedata/")
    list_hours = readfile.readtext("hour.txt", "fakedata/")
    sdate = time.strptime(date, "%Y-%m-%d")
    startdate = datetime.datetime(sdate[0], sdate[1], sdate[2])
    d = startdate
    print d.strftime("%Y-%m-%d") + " UsingLog"
    for i in range(0, int(count)):
        postitems = []
        c = {}
        platform = getfakedata.getdata(list_platform)
        c["version"] = getfakedata.getdata(list_version)
        c["session_id"] = str(uuid.uuid4().hex)
        if platform == "android":
            c["appkey"] = getfakedata.getdata(list_appkey_android)
        else:
            c["appkey"] = getfakedata.getdata(list_appkey_iphone)
        usepages = random.randint(1, 12)
        tempdate = ""
        for j in range(0, usepages):
            if platform == "android":
                c["activities"] = getfakedata.getdata(list_activities_android)
            else:
                c["activities"] = getfakedata.getdata(list_activities_iphone)
            hour = getfakedata.getdata(list_hours)
            minute = random.randint(0, 59)
            second = random.randint(0, 59)
            if j == 0:
                starttime = time.strptime(
                    d.strftime("%Y-%m-%d") + " " + str(hour) + ":" + str(minute) + ":" + str(second),
                    "%Y-%m-%d %H:%M:%S")
                c["start_millis"] = time.strftime("%Y-%m-%d %H:%M:%S", starttime)
                startdate = datetime.datetime(starttime[0], starttime[1], starttime[2],
                                              starttime[3], starttime[4], starttime[5])
            else:
                starttime = tempdate
                c["start_millis"] = str(tempdate)
                startdate = tempdate
            random_second = random.randint(1, 220)
            enddate = startdate + datetime.timedelta(seconds=random_second)
            tempdate = enddate
            c["end_millis"] = str(enddate)
            c["duration"] = str(random_second * 1000)
            postitems.append(c)
            print "    [" + c["start_millis"] + "]-[" + c["end_millis"] + "]" + \
                  " Activity Usage:" + str(j) + "/" + str(usepages)
        # end activities loop
        print i, "/", count, " Usinglog start"
        for ii in range(0, usepages):
            print ii, "Usinglog Start thread"
            thread = myThread_usinglog(ii, postitems[ii])
            thread.start()
        print "    [" + d.strftime("%Y-%m-%d") + "]" + " Post UsingLog One Time Usage " + str(i) + "/" + str(count)
def extract(self, values, request, folder, **args):
    user = get_current()
    site = get_site_folder(True)
    bodies = {}
    has_date_classification = any(isinstance(c, DateClassification)
                                  for c in self.all_classifications())
    values = list(values)
    filter_parameters = args.get('filters', [])
    filter_parameters = filter_parameters[0] if filter_parameters else {}
    substitutions = []
    inverse_substitutions = {}
    for value in values:
        object_values = {'object': value,
                         'current_user': user,
                         'state': None,
                         'has_date_classification': has_date_classification,
                         'has_only_one_date': has_only_one_date,
                         'date_normalize': date_normalize,
                         'filter_parameters': filter_parameters,
                         'text_normalize': french_normalize,
                         'site': site}
        # @TODO generalise to apply the language-specific normalization function, not always French
        body = renderers.render(value.templates.get('extraction', None),
                                object_values, request)
        value_substitutions = value.substitutions
        inverse_substitutions.update({substitution: value
                                      for substitution in value_substitutions})
        bodies.update({substitution: body
                       for substitution in value_substitutions})
        substitutions.extend(value_substitutions)

    validated = args.get('validated', filter_parameters)
    if isinstance(validated, list) and validated:
        validated = validated[0]

    attributes = self.getattributes(
        substitutions, extraction_processing=True,
        inverse_substitutions=inverse_substitutions, **validated)
    items_classified = self.classification(substitutions, attributes, **validated)
    args.update({'extraction': True})
    odt_content = self._render(items_classified, bodies, request, folder,
                               inverse_substitutions=inverse_substitutions,
                               **args).replace('\\n', '').\
        replace('\\r', ' ').\
        replace("\\'", "'").\
        replace("\x1f", "")
    odt_content.encode("utf-8", 'replace')
    user_name = user.name
    extraction_template_file = site.extraction_template
    s_out = None
    if not extraction_template_file:
        raise ExecutionError(msg=_("You must configure the extraction"
                                   " ODT Pattern File"))

    with zipfile.ZipFile(extraction_template_file.fp) as template_zip:
        odt_meta = etree.parse(
            io.BytesIO(template_zip.read('meta.xml')), etree.XMLParser())
        # Update metadata
        tag_generator = odt_meta.xpath("//meta:generator",
                                       namespaces=ODT_NAME_SPACES)
        tag_generator[0].text = \
            "ecreall.com/CreationCulturelle/Extraction.0.2".encode("utf-8")
        tag_creator = odt_meta.xpath("//dc:creator",
                                     namespaces=ODT_NAME_SPACES)
        tag_creator[0].text = user_name.encode("utf-8")
        tag_date = odt_meta.xpath("//dc:date", namespaces=ODT_NAME_SPACES)
        tag_date[0].text = time.strftime("%Y-%m-%dT%H:%M:%S",
                                         time.localtime()).encode("utf-8")
        tag_editingCycle = odt_meta.xpath("//meta:editing-cycles",
                                          namespaces=ODT_NAME_SPACES)
        tag_editingCycle[0].text = '1'
        # Generate a new odt file from the template, with the meta file
        # updated and the extracted content
        s_out = io.BytesIO()
        with zipfile.ZipFile(s_out, 'w') as zip_out:
            for a_file in template_zip.infolist():
                if a_file.filename == 'content.xml':
                    zip_out.writestr(a_file, odt_content)
                elif a_file.filename == 'meta.xml':
                    zip_out.writestr(a_file, etree.tostring(
                        odt_meta, encoding="UTF-8",
                        xml_declaration=True, pretty_print=True))
                else:
                    zip_out.writestr(a_file,
                                     template_zip.read(a_file.filename))
    s_out.seek(0)
    return s_out
def start(self, detected_callback=play_audio_file,
          interrupt_check=lambda: False,
          sleep_time=0.03):
    """
    Start the voice detector. For every `sleep_time` second it checks the
    audio buffer for triggering keywords. If detected, then call
    corresponding function in `detected_callback`, which can be a single
    function (single model) or a list of callback functions (multiple
    models). Every loop it also calls `interrupt_check` -- if it returns
    True, then breaks from the loop and return.

    :param detected_callback: a function or list of functions. The number of
                              items must match the number of models in
                              `decoder_model`.
    :param interrupt_check: a function that returns True if the main loop
                            needs to stop.
    :param float sleep_time: how much time in second every loop waits.
    :return: None
    """
    self.audio = pyaudio.PyAudio()
    self.stream_in = self.audio.open(
        input=True, output=False,
        format=self.audio.get_format_from_width(
            self.detector.BitsPerSample() / 8),
        channels=self.detector.NumChannels(),
        rate=self.detector.SampleRate(),
        frames_per_buffer=2048,
        stream_callback=audio_callback)

    if interrupt_check():
        logger.debug("detect voice return")
        return

    tc = type(detected_callback)
    if tc is not list:
        detected_callback = [detected_callback]
    if len(detected_callback) == 1 and self.num_hotwords > 1:
        detected_callback *= self.num_hotwords

    assert self.num_hotwords == len(detected_callback), \
        "Error: hotwords in your models (%d) do not match the number of " \
        "callbacks (%d)" % (self.num_hotwords, len(detected_callback))

    logger.debug("detecting...")
    while True:
        if interrupt_check():
            logger.debug("detect voice break")
            break
        data = self.ring_buffer.get()
        if len(data) == 0:
            time.sleep(sleep_time)
            continue

        ans = self.detector.RunDetection(data)
        if ans == -1:
            logger.warning("Error initializing streams or reading audio data")
        elif ans > 0:
            message = "Keyword " + str(ans) + " detected at time: "
            message += time.strftime("%Y-%m-%d %H:%M:%S",
                                     time.localtime(time.time()))
            logger.info(message)
            callback = detected_callback[ans - 1]
            if callback is not None:
                callback()

    logger.debug("finished.")
def printCurrentTime():
    return strftime("%Y-%m-%d %H:%M:%S", gmtime())
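# Side note (illustrative, not from the original source): the helper above
# formats in UTC via gmtime(); contrast with localtime() for the same instant:
import time

fmt = "%Y-%m-%d %H:%M:%S"
epoch = time.time()
print("UTC:  ", time.strftime(fmt, time.gmtime(epoch)))     # wall clock in UTC
print("local:", time.strftime(fmt, time.localtime(epoch)))  # same instant, local zone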
m = cv2.moments(cnt)
cx = int(m['m10'] / m['m00'])
cy = int(m['m01'] / m['m00'])
x, y, w, h = cv2.boundingRect(cnt)
new = True
if cy in range(up_limit, down_limit):
    for i in cars:
        if abs(x - i.getX()) <= w and abs(y - i.getY()) <= h:
            new = False
            i.updateCoords(cx, cy)
            if i.going_UP(line_down, line_up):
                cnt_up += 1
                print("ID:", i.getId(), 'crossed going up at', time.strftime("%c"))
                cv2.imwrite("captured/up/up{}.jpg".format(cnt_up), frame)
            elif i.going_DOWN(line_down, line_up):
                cnt_down += 1
                print("ID:", i.getId(), 'crossed going down at', time.strftime("%c"))
                cv2.imwrite("captured/down/down{}.jpg".format(cnt_down), frame)
            break
        if i.getState() == '1':
            if i.getDir() == 'down' and i.getY() > down_limit:
                i.setDone()
            elif i.getDir() == 'up' and i.getY() < up_limit:
import os
import re
import sys

from biddingeye_1_0_0.utils.htmlparse import PyEventParser
from biddingeye_1_0_0.utils.log import blog

sys.path.insert(0, '..')

import time
import MySQLdb
import logging
from scrapy import Spider

g_max_idx = 0  # highest index collected in this round

bid_home = os.getcwd()
log_name = os.path.join(
    bid_home + "/output",
    "bee" + "-" + "scrapy" + "-" +
    time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".log")
logger = blog("d", log_name, logging.INFO).getLog()

# Chinese field labels used to match scraped announcement content.
name_buyer = "采购人"
name_agent = "采购代理"
name_papertm = "标书购买截止时间"
name_betm = "投标截止时间"
name_budget = "项目预算"
name_ntype = "公告类型"
name_keylist = "关键字列表"
name_aptitude = "资质要求"
name_actualize = "实施内容"
name_stitle = "包标题"

_buyer = re.compile("(?<=(采购人为))[^-~]+?公司(?=[,。])")
# Import libraries
import requests
import json
import time
import datetime
import smtplib

# Define constants
PINCODE = "560037"                   # example: 600040
MY_EMAIL = "<ENTER YOUR EMAIL ID>"   # the alerts will be sent from this mail id
MY_PASSWORD = "******"               # enter the email id's password

# Derive the date and url
# url source is the Cowin API - https://apisetu.gov.in/public/api/cowin
today = time.strftime("%d/%m/%Y")
url = f"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode={PINCODE}&date={today}"

# Loop, checking every 1000 seconds
while True:
    # Start a session
    with requests.session() as session:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
        response = session.get(url, headers=headers)
        # Receive the response
        response = response.json()
        for center in response['centers']:
            for session in center['sessions']:
                # Sessions with a minimum age limit of 45 and capacity above zero
                if session['min_age_limit'] == 45 and session['available_capacity'] > 0:
#!/usr/bin/python
import time
import os

now = time.strftime("%c")

## date and time representation
print "Current date & time " + time.strftime("%c")

## Only date representation
print "Current date " + time.strftime("%x")

## Only time representation
print "Current time " + time.strftime("%X")

## Display current date and time from now variable
print ("Current time %s" % now)

f = open('./examples/er-rest-example/compile_time.h', 'w')
f.write('#define COMPILE_TIME "')
# python will convert \n to os.linesep
f.write("%s" % now)
f.write('"')
f.close()

print("Path at terminal when executing this file")
print(os.getcwd() + "\n")
full_path = os.getcwd()
print(full_path + "\n")
def buildVideoLink(self, displayObject, mediathek, objectCount):
    if displayObject.subTitle == "" or displayObject.subTitle == displayObject.title:
        title = transformHtmlCodes(displayObject.title)
    else:
        title = transformHtmlCodes(displayObject.title + " - " + displayObject.subTitle)
    if displayObject.date is not None:
        title = "(%s) %s" % (time.strftime("%d.%m", displayObject.date), title)
    if displayObject.picture is not None:
        listItem = xbmcgui.ListItem(title, iconImage="DefaultFolder.png",
                                    thumbnailImage=displayObject.picture)
    else:
        listItem = xbmcgui.ListItem(title, iconImage="DefaultFolder.png")

    if displayObject.isPlayable:
        if displayObject.isPlayable == "PlayList":
            link = displayObject.link[0]
            url = "%s?type=%s&action=openPlayList&link=%s" % (
                sys.argv[0], mediathek.name(), urllib.quote_plus(link.basePath))
            listItem.setProperty('IsPlayable', 'true')
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                        listitem=listItem, isFolder=False,
                                        totalItems=objectCount)
        elif displayObject.isPlayable == "JsonLink":
            link = displayObject.link
            url = "%s?type=%s&action=openJsonLink&link=%s" % (
                sys.argv[0], mediathek.name(), urllib.quote_plus(link))
            listItem.setProperty('IsPlayable', 'true')
            listItem.setInfo("video", {
                "title": title,
                "plot": transformHtmlCodes(displayObject.description),
                "duration": displayObject.duration
            })
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                        listitem=listItem, isFolder=False,
                                        totalItems=objectCount)
        else:
            self.log(displayObject.title)
            link = self.extractLink(displayObject.link)
            if type(link).__name__ == "ComplexLink":
                self.log("PlayPath:" + link.playPath)
                listItem.setProperty("PlayPath", link.playPath)
            self.log("URL:" + link.basePath)
            try:
                listItem.setInfo("video", {
                    "size": link.size,
                    "date": time.strftime("%d.%m.%Y", displayObject.date),
                    "year": int(time.strftime("%Y", displayObject.date)),
                    "title": title,
                    "plot": transformHtmlCodes(displayObject.description),
                    "duration": displayObject.duration
                })
            except:
                pass
            listItem.setProperty('IsPlayable', 'true')
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=link.basePath,
                                        listitem=listItem, isFolder=False,
                                        totalItems=objectCount)
    else:
        url = "%s?type=%s&action=openTopicPage&link=%s" % (
            sys.argv[0], mediathek.name(), urllib.quote_plus(displayObject.link))
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                    listitem=listItem, isFolder=True,
                                    totalItems=objectCount)
# passrate = 100 - len(TestResult.failures) / TestResult.testsRun * 100
#
# logging.info("fail the test: " + str(TestResult.failures))
# logging.info("total fail: " + str(len(TestResult.failures)))
# logging.info("total run: " + str(TestResult.testsRun))
# logging.info("TestCases Pass Rate: " + str(passrate) + "%")
#
if __name__ == "__main__":
    if not os.path.exists("./picture"):
        os.mkdir("./picture")
    if not os.path.exists("./data"):
        os.mkdir("./data")
    else:
        os.rename("data", "data" + time.strftime("%Y%m%d%H%M%S", time.localtime()))
        os.mkdir("./data")
    # Define the test cases that make up a test scene
    # testscene1 = ['testcase1', 'testcase2']
    # testscene1 = ['testcase3']
    # testscene2 = ['testcase2', 'testcase3']
    # testpath = "E:\python_space/xingneng/test_login1.py"
    # testpath = ['testlogin']
    # Run the test concurrently with threads; the three parameters are the test
    # scene, the concurrency count and the duration (None means a single run)
    thread1 = testThread(10)
    # thread2 = testThread(testscene2, 100, 100)
    monitor()
    thread1.start()
    thread1.join()
    teardownps()
def get_current_time():
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
def parse_comment_update(self, response):
    aid = response.meta['aid']
    mid = response.meta['mid']
    last_pn = response.meta['last_pn']
    text = response.text
    data = json.loads(text)
    data = data['data']
    if last_pn == 20:
        coms = data['replies']
    else:
        coms = data['replies'][:last_pn]
    comments = []
    for com in coms:
        content = com['content']
        message = content['message']
        member = com['member']
        time_local = time.localtime(com['ctime'])
        pubdate = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
        user_id = member['mid']
        user_name = member['uname']
        level = member['level_info']['current_level']
        sex = member['sex']
        vip = member['vip']['vipStatus']
        rcount = com['rcount']
        comment = [aid, mid, message, user_id, user_name, level, vip, sex,
                   rcount, pubdate]
        comments.append(comment)
        if rcount > 0 and rcount <= 3:
            com_replies = com['replies']
            for comm in com_replies:
                content = comm['content']
                time_local = time.localtime(comm['ctime'])
                pubdate = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
                message = content['message']
                member = comm['member']
                user_id = member['mid']
                user_name = member['uname']
                level = member['level_info']['current_level']
                sex = member['sex']
                vip = member['vip']['vipStatus']
                rcount = comm['rcount']
                comment = [aid, mid, message, user_id, user_name, level, vip,
                           sex, rcount, pubdate]
                comments.append(comment)
        if rcount > 3:
            pns = math.ceil(rcount / 10)
            rpid = com['rpid']
            for pn in range(1, pns + 1):
                yield scrapy.Request(
                    'https://api.bilibili.com/x/v2/reply/reply?pn=' + str(pn) +
                    '&type=1&oid=' + str(aid) + '&ps=10&root=' + str(rpid),
                    meta={'aid': aid, 'mid': mid, 'last_pn': 20},
                    callback=self.parse_comment_update)
    item = av_comment()
    item['comment'] = comments
    yield item
while True:
    type = input("\n Enter a number:\n 1: query the time\n 2: restore the time\n 3: modify the time\n")
    if type == "1":
        # Fetch the server time for the idiom-guessing game
        urls = "http://106.75.7.235/update_time.php?f=get"
        request = urllib.request.Request(urls)
        response = urllib.request.urlopen(request)
        openurl = response.read()
        timenow = openurl.decode()
        print("\033[1;31m Current server time: \033[0m")
        print(timenow)
        # break
    if type == "2":
        nowtime = time.strftime('%Y-%m-%d%%20%H:%M:%S')
        fullurl2 = url + "&datetime=" + nowtime
        # print(fullurl2)
        request = urllib.request.Request(fullurl2)
        response = urllib.request.urlopen(request)
        fullurl2bytes = response.read()
        defullurl2 = fullurl2bytes.decode()
        print("\033[1;31m Restored successfully \033[0m")
        print(defullurl2)
        # break
    if type == "3":
        while True:  # infinite loop
            nowtime = time.strftime('%Y-%m-%d%%20%H:%M:%S')
            print("Example: " + nowtime)
            yeartime = input("Enter the time you want to set: \033[1;31m!!!Please copy the example and edit it!!!\033[0m\nTo restore the time, enter: 0\n")
import json
import sys
import time

if not (sys.path[0] + '/modules/dysms_python') in sys.path:
    sys.path.append(sys.path[0] + '/modules/dysms_python')

from demo_sms_send import send_sms

if __name__ == '__main__':
    # Use a distinct name here: rebinding `time` would shadow the time module.
    now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    params = {'time': now, 'seconds': 55}
    params = json.dumps(params)
    print(
        send_sms("13052316968", sign_name='ATIsys',
                 template_code='SMS_143712599', template_param=params))
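# Side note (illustrative, not from the original source): why the rebinding
# fixed above is risky — once a string is assigned to the name `time`, any
# later attribute access through that name fails:
import time

stamp = time.strftime('%Y-%m-%d %H:%M:%S')  # fine: `time` is still the module
time = stamp                                # rebinds the name, shadowing the module
try:
    time.strftime('%H:%M')                  # AttributeError: str has no strftime
except AttributeError as err:
    print(err)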
def now():
    # stamp = time.strftime('[%Y-%m-%d %H:%M:%S]: ')
    # msg = '\033[92m' + stamp + '\033[0m'  # time in green
    stamp = time.strftime(' [%H:%M:%S] ')
    msg = stamp + ' '
    return msg
async def purge_data(self, config):
    """Purge readings table based on the set configuration.

    :return: total rows removed, rows removed that were not sent to any historian
    """
    total_rows_removed = 0
    unsent_rows_removed = 0
    unsent_retained = 0
    start_time = time.strftime('%Y-%m-%d %H:%M:%S.%s', time.localtime(time.time()))

    payload = PayloadBuilder().AGGREGATE(["min", "last_object"]).payload()
    result = await self._storage_async.query_tbl_with_payload("streams", payload)
    last_object = result["rows"][0]["min_last_object"]
    if result["count"] == 1:
        # FIXME: Remove the check below once fixed at the storage layer.
        # The check is required because, if no streams entry exists in the DB,
        # the storage layer returns a response like:
        #   {'rows': [{'min_last_object': ''}], 'count': 1}
        # whereas it should return an integer, i.e. 0, not a string.
        last_id = 0 if last_object == '' else last_object
    else:
        last_id = 0
    flag = "purge" if config['retainUnsent']['value'].lower() == "false" else "retain"
    try:
        if int(config['age']['value']) != 0:
            result = await self._readings_storage_async.purge(age=config['age']['value'],
                                                              sent_id=last_id, flag=flag)
            total_rows_removed = result['removed']
            unsent_rows_removed = result['unsentPurged']
            unsent_retained = result['unsentRetained']
    except ValueError:
        self._logger.error("Configuration item age {} should be integer!".format(config['age']['value']))
    except StorageServerError as ex:
        # Skip logging, as it is already done in detail for this operation on error.
        # FIXME: if ex.error jdoc has retryable True then retry the operation, else move on
        pass

    try:
        if int(config['size']['value']) != 0:
            result = await self._readings_storage_async.purge(size=config['size']['value'],
                                                              sent_id=last_id, flag=flag)
            total_rows_removed += result['removed']
            unsent_rows_removed += result['unsentPurged']
            unsent_retained += result['unsentRetained']
    except ValueError:
        self._logger.error("Configuration item size {} should be integer!".format(config['size']['value']))
    except StorageServerError as ex:
        # Skip logging, as it is already done in detail for this operation on error.
        # FIXME: if ex.error jdoc has retryable True then retry the operation, else move on
        pass

    end_time = time.strftime('%Y-%m-%d %H:%M:%S.%s', time.localtime(time.time()))

    if total_rows_removed > 0:
        # Only write an audit log entry when rows are removed.
        await self._audit.information('PURGE', {"start_time": start_time,
                                                "end_time": end_time,
                                                "rowsRemoved": total_rows_removed,
                                                "unsentRowsRemoved": unsent_rows_removed,
                                                "rowsRetained": unsent_retained})
    else:
        self._logger.info("No rows purged")

    return total_rows_removed, unsent_rows_removed
def setPower_mW(self, mW):
    # Clamp to the maximum power (note the call parentheses; comparing
    # against the bound method itself would clamp nothing).
    mW = min(mW, self.getMaxPower_mW())
    self.logger.log("Setting laser power to %.4fW at %s" %
                    (mW / 1000.0, time.strftime('%Y-%m-%d %H:%M:%S')))
    return self.send("@cobasp %.4f" % (mW / 1000.0))
#!/usr/bin/python
__plugin__ = "poppy.plugin"

from scapy.all import *
import sys
import os
import time
import logging

RescoursesDir = os.getcwd()
dandtime = time.strftime("%H:%M:%S")
logfile = "%s/storage/logs/%s.log" % (RescoursesDir, dandtime)


class Tee(object):
    def __init__(self):
        self.file = open(logfile, 'a')
        self.stdout = sys.stdout

    def __del__(self):
        sys.stdout = self.stdout
        self.file.close()

    def write(self, data):
        self.file.write(data)
        self.stdout.write(data)
def AStockHisData(self, symbols, start_date, end_date, step=0):
    '''
    Query quote data stock code by stock code.
    The wsd call can be auto-generated with WindNavigator and copied in. If no
    date argument is set, the current date is used by default (which may be a
    non-trading day with no data); with one date argument it is taken as the
    start date and the end date defaults to today; with two date arguments
    they are the start and end dates respectively.
    '''
    print self.getCurrentTime(), ": Download A Stock Starting:"
    for symbol in symbols:
        w.start()
        try:
            # stock = w.wsd(symbol, 'trade_code,open,high,low,close,volume,amt', start_date, end_date)
            table_name = 'table_3M_data'  # first place to modify
            stock = w.wsd(symbol, "profit_ttm,bps,fcff,cfps_ttm,wgsd_oper_cf,wgsd_assets,yoy_or,qfa_yoysales,yoyprofit,qfa_yoyprofit,grossprofitmargin,roe_ttm2,roa_ttm2,yoybps,yoy_assets,wgsd_yoyocf,roa,roe,grossprofitmargin_ttm2,assetsturn,faturn,op_ttm2,current,cashtocurrentdebt,quick,wgsd_com_eq_paholder,longdebttodebt,tot_liab,debttoassets,mkt_cap_ard,wgsd_com_eq,close,trade_status", start_date, end_date, "unit=1;currencyType=;rptType=1;Period=Q;Days=Alldays;PriceAdj=F")
            index_data = pd.DataFrame()
            index_data['trade_date'] = stock.Times
            index_data['stock_code'] = symbol
            index_data['profit_ttm'] = stock.Data[0]
            index_data['bps'] = stock.Data[1]
            index_data['fcff'] = stock.Data[2]
            index_data['cfps_ttm'] = stock.Data[3]
            index_data['wgsd_oper_cf'] = stock.Data[4]
            index_data['wgsd_assets'] = stock.Data[5]
            index_data['yoy_or'] = stock.Data[6]
            index_data['qfa_yoysales'] = stock.Data[7]
            index_data['yoyprofit'] = stock.Data[8]
            index_data['qfa_yoyprofit'] = stock.Data[9]
            index_data['grossprofitmargin'] = stock.Data[10]
            index_data['roe_ttm2'] = stock.Data[11]
            index_data['roa_ttm2'] = stock.Data[12]
            index_data['yoybps'] = stock.Data[13]
            index_data['yoy_assets'] = stock.Data[14]
            index_data['wgsd_yoyocf'] = stock.Data[15]
            index_data['roa'] = stock.Data[16]
            index_data['roe'] = stock.Data[17]
            index_data['grossprofitmargin_ttm2'] = stock.Data[18]
            index_data['assetsturn'] = stock.Data[19]
            index_data['faturn'] = stock.Data[20]
            index_data['op_ttm2'] = stock.Data[21]
            index_data['current'] = stock.Data[22]
            index_data['cashtocurrentdebt'] = stock.Data[23]
            index_data['quick'] = stock.Data[24]
            index_data['wgsd_com_eq_paholder'] = stock.Data[25]
            index_data['longdebttodebt'] = stock.Data[26]
            index_data['tot_liab'] = stock.Data[27]
            index_data['debttoassets'] = stock.Data[28]
            index_data['mkt_cap_ard'] = stock.Data[29]
            index_data['wgsd_com_eq'] = stock.Data[30]
            index_data['close'] = stock.Data[31]
            index_data['trade_status'] = stock.Data[32]
            index_data['data_source'] = 'Wind'
            index_data['created_date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            # index_data.fillna(0)
            try:
                index_data.to_sql(table_name, engine, if_exists='append')
            except Exception as e:
                # If the database write fails, log to the error table for later analysis.
                error_log = pd.DataFrame()
                error_log['trade_date'] = stock.Times
                error_log['stock_code'] = symbol
                error_log['start_date'] = start_date
                error_log['end_date'] = end_date
                error_log['table'] = table_name
                error_log['args'] = 'Symbol: ' + symbol + ' From ' + start_date + ' To ' + end_date
                error_log['error_info'] = e
                error_log['created_date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                error_log.to_sql('stock_error_log', engine, if_exists='append')
                print self.getCurrentTime(), ": SQL Exception :%s" % (e)
                continue
            w.start()
        except Exception as e:
            # A failure here may be a network outage, rate limiting or missing
            # history; log the details to the error table for a later backfill.
            error_log = pd.DataFrame()
            error_log['trade_date'] = stock.Times
            error_log['stock_code'] = symbol
            error_log['start_date'] = start_date
            error_log['end_date'] = end_date
            error_log['table'] = table_name
            error_log['args'] = 'Symbol: ' + symbol + ' From ' + start_date + ' To ' + end_date
            error_log['error_info'] = e
            error_log['created_date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            error_log.to_sql('stock_error_log', engine, if_exists='append')
            print self.getCurrentTime(), ":index_data %s : Exception :%s" % (symbol, e)
            time.sleep(sleep_time)
            w.start()
            continue
        print self.getCurrentTime(), ": Downloading [", symbol, "] From " + start_date + " to " + end_date
    print self.getCurrentTime(), ": Download A Stock Has Finished ."
def export_mode(humble_session, order_details):
    cls()
    export_key_headers = [
        'human_name', 'redeemed_key_val', 'is_gift', 'key_type_human_name',
        'is_expired', 'steam_ownership'
    ]
    steam_session = None
    reveal_unrevealed = False
    confirm_reveal = False
    owned_app_details = None
    keys = []

    print("Please configure your export:")
    export_steam_only = prompt_yes_no("Export only Steam keys?")
    export_revealed = prompt_yes_no("Export revealed keys?")
    export_unrevealed = prompt_yes_no("Export unrevealed keys?")
    if not export_revealed and not export_unrevealed:
        print("That leaves 0 keys...")
        exit()
    if export_unrevealed:
        reveal_unrevealed = prompt_yes_no(
            "Reveal all unrevealed keys? (This will remove your ability to claim gift links on these)")
        if reveal_unrevealed:
            extra = "Steam " if export_steam_only else ""
            confirm_reveal = prompt_yes_no(
                f"Please CONFIRM that you would like ALL {extra}keys on Humble to be revealed, this can't be undone.")
    steam_config = prompt_yes_no(
        "Would you like to sign into Steam to detect ownership on the export data?")
    if steam_config:
        steam_session = steam_login()
        if verify_logins_session(steam_session)[1]:
            owned_app_details = get_owned_apps(steam_session)

    desired_keys = "steam_app_id" if export_steam_only else "key_type_human_name"
    keylist = list(find_dict_keys(order_details, desired_keys, True))

    for idx, tpk in enumerate(keylist):
        revealed = "redeemed_key_val" in tpk
        export = (export_revealed and revealed) or (export_unrevealed and not revealed)
        if export:
            if export_unrevealed and confirm_reveal:
                # Redeem the key if the user requested that all keys be revealed
                tpk["redeemed_key_val"] = redeem_humble_key(humble_session, tpk)
            if owned_app_details is not None and "steam_app_id" in tpk:
                # User requested Steam ownership info
                owned = tpk["steam_app_id"] in owned_app_details.keys()
                if not owned:
                    # Do a search to see if the user owns it
                    best_match = match_ownership(owned_app_details, tpk)
                    owned = best_match[1] is not None and best_match[1] in owned_app_details.keys()
                tpk["steam_ownership"] = owned
            keys.append(tpk)

    ts = time.strftime("%Y%m%d-%H%M%S")
    filename = f"humble_export_{ts}.csv"
    with open(filename, 'w', encoding="utf-8-sig") as f:
        f.write(','.join(export_key_headers) + "\n")
        for key in keys:
            row = []
            for col in export_key_headers:
                if col in key:
                    row.append("\"" + str(key[col]) + "\"")
                else:
                    row.append("")
            f.write(','.join(row) + "\n")
    print(f"Exported to {filename}")
def getCurrentTime(self):
    # Get the current time, bracketed for log output.
    return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))
def disable(self):
    self.logger.log("Turning laser OFF at %s" % time.strftime('%Y-%m-%d %H:%M:%S'))
    self.write('l0')
    return self.readline()