def get_takeout_list():
    """Fetch the takeout home-page list and record how long it takes to load.

    Polls (up to 15s) for the list title element, then writes the elapsed
    time plus a timestamp into row i+1 of the '列表页' sheet.  On any
    failure an alert webhook fires, a screenshot and adb process dump are
    captured, and the app is stopped.  Relies on module globals: d, logger,
    app_name, target_file, i, takeout_webhook, t_screenshot_path,
    EexeclData, send_msg, get_adb_all_process.
    """
    try:
        takeout_click_time = time.time()
        # Fix: original left h_cost_time unbound (NameError swallowed by the
        # except) when the list element already exists on arrival.
        h_cost_time = 0
        while not d(resourceId="com.yiwosi.kbb:id/tVTitle").exists:
            h_appear_time = time.time()
            h_cost_time = h_appear_time - takeout_click_time
            if h_cost_time > 15:
                # Fix: message said 10s but the actual threshold is 15s.
                logger.warning('等待外卖列表超15s,返回上个页面重新操作')
                d.app_stop(app_name)
                takeout_click_time = time.time()
                break
        logger.info('外卖列表页耗时{}'.format(h_cost_time))
        e = EexeclData(file=target_file, sheet_name='列表页')
        e.write_cell(i + 1, 1, i)
        e.write_cell(i + 1, 2, h_cost_time)
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        e.write_cell(i + 1, 3, now_time)
        logger.info('外卖列表页表格数据更新成功')
    except BaseException as e:
        # On failure: alert, screenshot, process dump, kill the app.
        logger.error('外卖列表页出错{}'.format(e))
        send_msg(takeout_webhook, '外卖列表页出错')
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        d.screenshot(t_screenshot_path + '{}.jpg'.format(now_time))
        get_adb_all_process(now_time)
        time.sleep(1)
        d.app_stop(app_name)
def get_menu():
    """Pick a random takeout shop from the list and time its menu page load.

    Scrolls a random number of screens, taps the a-th shop title, then polls
    (up to 15s) for a price element and records the elapsed time in the
    '详情页' sheet.  Relies on module globals: d, logger, app_name,
    target_file, i, takeout_webhook, t_screenshot_path.
    """
    try:
        a = randint(1, 5)
        # Scroll 'a' screens down before indexing the a-th visible title.
        for m in range(a):
            d.swipe(500, 1000, 500, 300)
            time.sleep(1)
        takeout_name = d(resourceId="com.yiwosi.kbb:id/tVTitle")[a].get_text()
        logger.info('点击的外卖是{}'.format(takeout_name))
        d(resourceId="com.yiwosi.kbb:id/tVTitle")[a].click()
        takeout_get_menu_time = time.time()
        # Poll until a price element appears, i.e. the menu has rendered.
        while not d(resourceId="com.yiwosi.kbb:id/tv_price").exists:
            h_appear_time = time.time()
            h_cost_time = h_appear_time - takeout_get_menu_time
            if h_cost_time > 15:
                logger.warning('等待店铺商品超15s,返回上个页面重新操作')
                d.app_stop(app_name)
                takeout_get_menu_time = time.time()
                break
        # NOTE(review): h_cost_time is unbound if the price element already
        # exists on arrival -- the NameError is swallowed by the except below.
        logger.info('外卖店铺商品详情页耗时{}'.format(h_cost_time))
        e = EexeclData(file=target_file, sheet_name='详情页')
        e.write_cell(i + 1, 1, i)  # 'i' is a module-level iteration counter
        e.write_cell(i + 1, 2, h_cost_time)
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        e.write_cell(i + 1, 3, now_time)
        logger.info('外卖店铺商品详情页表格数据更新成功')
    except BaseException as e:
        # On failure: alert, screenshot, process dump, kill the app.
        logger.error('外卖店铺商品详情页出错{}'.format(e))
        send_msg(takeout_webhook, '外卖店铺商品详情页出错')
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        d.screenshot(t_screenshot_path + '{}.jpg'.format(now_time))
        get_adb_all_process(now_time)
        time.sleep(1)
        d.app_stop(app_name)
def getStatus(session_id, host_id):
    """Return a dictionary describing the status of the host.

    Starts from the CFengine status dict and folds in the host-state
    attributes plus two human-readable timestamps.
    """
    # Resolve the host and its cached state record.
    hoststatus = getHostStatus(getHostName(session_id, host_id))
    # Base dict comes from the host's CFengine status.
    status = getCfengineHostStatus(session_id, hoststatus.host._properties)
    # Plain state attributes, copied key-by-key from the status record.
    attribute_map = (
        ("reachable", "reachable"),
        ("operating_rev", "operatingRevision"),
        ("operating_rev_status", "operatingRevisionStatus"),
        ("operating_rev_text", "operatingRevisionText"),
        ("operating_rev_hint", "operatingRevisionHint"),
        ("active_rev", "activeRevision"),
        ("generated_rev", "generatedRevision"),
        ("current_load", "currentLoad"),
        ("uptime", "uptime"),
    )
    for key, attr in attribute_map:
        status[key] = getattr(hoststatus, attr)
    # Timestamps rendered in dd/mm/yyyy local time for display.
    for key, stamp in (("infoUpdatedAt", hoststatus.infoUpdatedAt),
                       ("lastCheckedAt", hoststatus.lastCheckedAt)):
        status[key] = time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(stamp))
    return status
def InsertKeyWordToDB(fromSubDir,toSubDir):
    """Parse expert keyword files under test/keyword/<n>/ and insert them.

    Python 2 code (print statements).  For each sub-directory index in
    [fromSubDir, toSubDir), builds an Expert per file named
    '<id>_<name>_<x>.html' and inserts it if not already in the DB.
    One commit per sub-directory; progress is appended to KeywordsToDB.log.
    """
    db = DB()
    parser = Parser()
    for index in range(fromSubDir,toSubDir):
        # each subdir: 1000 records
        for root,dirs,files in os.walk('test/keyword/'+str(index)+"/"):
            start = time.time()
            for afile in files:
                if afile == '.DS_Store':  # skip macOS metadata files
                    continue
                # filename encodes: <id>_<name>_<something>.html
                words = afile.split('_')
                aExpert = Expert(words[0].strip(),words[1].strip(),words[2].replace(".html","").strip())
                aExpert.setKeyword(parser.parseKeyword(root,afile))
                aExpert.ChangeKeywordsToString()
                #print aExpert.keywordsList
                if not db.isExpertExist(aExpert):
                    db.insertExpert(aExpert)
            end = time.time()
            db.conn.commit()  # one commit per sub-directory batch
            # trailing comma keeps the timing info on the same line (py2)
            print ("KeywordSubDir %d is Done!"%index),
            print time.strftime('%m-%d %H:%M:%S',time.localtime(time.time())),"total:",end-start
            # append progress to the log file
            f = open("KeywordsToDB.log","a")
            f.write(time.strftime('%m-%d %H:%M:%S',time.localtime(time.time()))+" keywordSubDir"+str(index)+" is Done! "+"total"+str(end-start) )
            f.close()
    db.close()
def InsertPaperToDB(fromSubDir,toSubDir):
    """Parse paper files under test/paper/<n>/ and insert them (Python 2).

    'n' is only a running counter for progress printing; one DB commit per
    sub-directory.  Progress is appended to PaperToDB.log.
    """
    db = DB()
    parser = Parser()
    for index in range(fromSubDir,toSubDir):
        for root,dirs,files in os.walk('test/paper/'+str(index)+"/"):
            n = 1000*index
            start = time.time()
            for afile in files:
                if afile == '.DS_Store':  # skip macOS metadata files
                    continue
                words = afile.split('_')  # words[0] is the expert id
                papers = (parser.parsePaper(root,afile))
                # 'eachPapaer' is the original misspelling, kept unchanged
                for eachPapaer in papers:
                    if not db.isPaperExist(eachPapaer):
                        db.insertPaper(eachPapaer)
                print "n:",n,
                print "Expert_ID %s is done"%words[0]
                n = n + 1
            db.conn.commit()
            end = time.time()
            print ("PaperSubDir %d is Done!"%index),
            print time.strftime('%m-%d %H:%M:%S',time.localtime(time.time())),"time:",end-start,
            f = open("PaperToDB.log","a")
            f.write(time.strftime('%m-%d %H:%M:%S',time.localtime(time.time()))+" paperSubDir"+str(index)+" is Done! "+"total"+str(end-start) )
            f.close()
    db.close()
def create_data_no_feature_selection(self):
    """Train on all feature groups at once (no feature selection).

    Builds self.X_train from every group in self.group_dict plus the
    'group_number' CV-fold column, runs self.models_iteration(), and
    returns a DataFrame with one row per model:
    [classifier_name, score, auc, train_time, features_list, k_fold].
    """
    selected_features = list(self.group_dict.keys())
    features_group = [
        self.group_dict[group][0] for group in selected_features
    ]
    # Flatten the group lists once (the original built the identical list
    # twice with two separate comprehensions).
    self.features = [
        item for sublist in features_group for item in sublist
    ]
    # The training frame additionally carries the CV fold column; built as
    # a new list so self.features itself stays free of 'group_number'.
    features = self.features + ['group_number']
    self.X_train = self.featuresDF[features]
    features_names = [
        self.group_dict[feature][1] for feature in selected_features
    ]
    print('{}: Start training with the groups: {}'.format(
        (time.asctime(time.localtime(time.time()))), features_names))
    logging.info('{}: Start training with the groups: {}'.format(
        (time.asctime(time.localtime(time.time()))), features_names))
    group_results = self.models_iteration()
    # Append bookkeeping columns to each model's result row.
    for model in group_results:
        model.append(features_names)
        model.append(opts.k_fold)
    columns_names = [
        'classifier_name', 'score', 'auc', 'train_time', 'features_list',
        'k_fold'
    ]
    group_results_df = pd.DataFrame(group_results, columns=columns_names)
    return group_results_df
def __init__(self):
    """Load FinalFeatures.xlsx and define the five feature groups.

    X_train / feature_names are filled later by the training code; labels
    holds the 'IsEfficient' column; group_dic maps a group index to
    [feature_name_list, group_display_name].
    """
    self.X_train = None
    self.feature_names = None
    print('{}: Loading the data '.format((time.asctime(time.localtime(time.time())))))
    self.featuresDF = pd.read_excel('FinalFeatures.xlsx')
    self.labels = self.featuresDF['IsEfficient']
    # Features describing the submission author.
    self.submission_author_features = ['submission_author_number_original_subreddit',
                                       'submission_author_number_recommend_subreddit',
                                       'submission_created_time_hour']
    # Similarity features relating submission, comment and their authors.
    self.sub_comment_author_relation_features = ['cosine_similarity_subreddits_list',
                                                 'comment_submission_similarity',
                                                 'comment_title_similarity']
    # Features describing the comment author.
    self.comment_author_features =['comment_author_number_original_subreddit',
                                   'comment_author_number_recommend_subreddit',
                                   'percent_efficient_references_comment_author',
                                   'number_of_references_comment_author']
    # NOTE(review): 'submission_created_time_hour' appears both here and in
    # submission_author_features -- confirm the duplication is intended.
    self.comment_features = ['comment_created_time_hour', 'submission_created_time_hour',
                             'time_between_messages', 'comment_len', 'number_of_r',
                             'number_of_references_to_submission']
    # Features describing the recommended subreddit.
    self.subreddit_features = ['number_of_references_to_recommended_subreddit',
                               'subreddits_similarity']
    # self.subreddit_features = self.featuresDF['number_of_references_to_recommended_subreddit']
    self.group_dic = {0: [self.submission_author_features, 'submission_author_features'],
                      1: [self.sub_comment_author_relation_features, 'sub_comment_author_relation_features'],
                      2: [self.comment_author_features, 'comment_author_features'],
                      3: [self.comment_features, 'comment_features'],
                      4: [self.subreddit_features, 'subreddit_features']}
    print('{}: Data loaded '.format((time.asctime(time.localtime(time.time())))))
    return
def __init__(self):
    """Load FinalFeatures_with_comment_time.xlsx and define feature groups.

    featuresDF / labels stay None until split_relevant_data() fills them.
    group_dic maps group index -> [feature_list, display_name]; group 5
    holds the text-embedding column indices (first 100 numeric columns).
    """
    self.X_train = None
    self.features = None
    self.feature_names = None
    print('{}: Loading the data: FinalFeatures_with_comment_time'.format(
        (time.asctime(time.localtime(time.time())))))
    self.original_data = pd.read_excel(
        'FinalFeatures_with_comment_time.xlsx')
    self.labels = None
    self.featuresDF = None
    # self.featuresDF['percent_efficient_references_comment_author'].astype(str)
    # self.featuresDF.to_csv('sorted_group.csv', encoding='utf-8')
    # Features describing the submission author.
    self.submission_author_features = [
        'submission_author_number_original_subreddit',
        'submission_author_number_recommend_subreddit',
        'submission_created_time_hour'
    ]
    # Similarity features relating submission, comment and their authors.
    self.sub_comment_author_relation_features = [
        'cosine_similarity_subreddits_list', 'comment_submission_similarity',
        'comment_title_similarity'
    ]
    # Features describing the comment author (Peff deliberately excluded --
    # it is the split criterion in split_relevant_data()).
    self.comment_author_features = [
        'comment_author_number_original_subreddit',
        'comment_author_number_recommend_subreddit',
        # 'percent_efficient_references_comment_author',
        'number_of_references_comment_author'
    ]
    # Features describing the comment itself.
    self.comment_features = [
        'comment_created_time_hour', 'time_between_messages', 'comment_len',
        'number_of_r', 'number_of_references_to_submission'
    ]
    # Features describing the recommended subreddit.
    self.subreddit_features = [
        'number_of_references_to_recommended_subreddit',
        'subreddits_similarity'
    ]
    # for 50Doc2Vec:
    # self.text_features = range(50)
    # for Word2Vec and 100Doc2Vec:
    self.text_features = range(100)
    self.group_dic = {
        0: [self.submission_author_features, 'submission_author_features'],
        1: [
            self.sub_comment_author_relation_features,
            'sub_comment_author_relation_features'
        ],
        2: [self.comment_author_features, 'comment_author_features'],
        3: [self.comment_features, 'comment_features'],
        4: [self.subreddit_features, 'subreddit_features'],
        5: [self.text_features, 'text_features']
    }
    print('{}: Data loaded '.format(
        (time.asctime(time.localtime(time.time())))))
    return
def friendtime(dt, format='%Y-%m-%d %H:%M'):
    """Render a Unix timestamp as a friendly string.

    Today's timestamps become '今天HH:MM', yesterday's '昨天HH:MM',
    anything else is formatted with *format* (local time).
    """
    now = time.localtime(time.time())
    # Local midnight of today, as a Unix timestamp.
    midnight = time.mktime(
        time.strptime(time.strftime('%Y-%m-%d 00:00:00', now),
                      '%Y-%m-%d %H:%M:%S'))
    local_dt = time.localtime(dt)
    if dt > midnight:
        return u'今天' + time.strftime('%H:%M', local_dt)
    if midnight - 3600 * 24 < dt < midnight:
        return u'昨天' + time.strftime('%H:%M', local_dt)
    return time.strftime(format, local_dt)
def split_relevant_data(self, Peff_up_threshold, Peff_down_threshold):
    """Filter rows by the author's Peff range and assign CV group numbers.

    Rows are split into ~opts.k_fold equal-sized groups with the constraint
    that all rows of one comment_author land in the same group (a group may
    overflow to keep an author together).  Side effects: rewrites
    self.featuresDF and self.labels, and sets opts.k_fold to the number of
    groups actually created.
    """
    self.featuresDF = self.original_data.loc[
        (self.original_data['percent_efficient_references_comment_author']
         <= Peff_up_threshold)
        & (self.original_data['percent_efficient_references_comment_author']
           >= Peff_down_threshold)]
    # Split the data into k groups, each comment_author in one group only.
    i = 0  # current group number
    number_sample_group = 0  # rows assigned to the current group so far
    # Smaller k for the wide Peff ranges.
    if Peff_up_threshold == 50.0 or Peff_up_threshold == 60.0 or Peff_up_threshold == 100.0:
        opts.k_fold = 4
    sample_per_group = self.featuresDF.shape[0] / opts.k_fold
    last_comment_author = ''
    for index, row in self.featuresDF.iterrows():
        if number_sample_group < sample_per_group:
            # Current group still has room.
            self.featuresDF.set_value(index, 'group_number', i)
            number_sample_group += 1
            last_comment_author = row['comment_author']
        else:
            if last_comment_author != row['comment_author']:
                # Quota reached and the author changed: open a new group.
                i += 1
                self.featuresDF.set_value(index, 'group_number', i)
                print(
                    '{}: finish split samples for group number {} with {} samples'
                    .format((time.asctime(time.localtime(time.time()))),
                            i - 1, number_sample_group))
                print('{}: start split samples for group number {}'.format(
                    (time.asctime(time.localtime(time.time()))), i))
                logging.info(
                    '{}: finish split samples for group number {} with {} samples'
                    .format((time.asctime(time.localtime(time.time()))),
                            i - 1, number_sample_group))
                logging.info(
                    '{}: start split samples for group number {}'.format(
                        (time.asctime(time.localtime(time.time()))), i))
                last_comment_author = row['comment_author']
                number_sample_group = 1
            else:
                # Same author: overflow the group to keep the author whole.
                self.featuresDF.set_value(index, 'group_number', i)
                number_sample_group += 1
                last_comment_author = row['comment_author']
                print('{}: {} group is larger, number of samples is: {}'.
                      format((time.asctime(time.localtime(time.time()))), i,
                             number_sample_group))
    print('{}: finish split samples for group number {} with {} samples'.
          format((time.asctime(time.localtime(time.time()))), i,
                 number_sample_group))
    logging.info(
        '{}: finish split samples for group number {} with {} samples'.
        format((time.asctime(time.localtime(time.time()))), i,
               number_sample_group))
    opts.k_fold = i + 1  # actual number of groups created
    self.labels = self.featuresDF[['IsEfficient', 'group_number']]
    print('{}: Finish split the data for Peff between: {} and {}'.format(
        (time.asctime(time.localtime(time.time()))), Peff_down_threshold,
        Peff_up_threshold))
    logging.info(
        '{}: Finish split the data for Peff between: {} and {}'.format(
            (time.asctime(time.localtime(time.time()))), Peff_down_threshold,
            Peff_up_threshold))
    # NOTE(review): DataFrame.set_value is deprecated in modern pandas
    # (.at[] is the replacement) -- left unchanged here.
def add_menu():
    """Add items to the cart, open checkout, and time the order page load.

    Empties an existing cart first, adds one item per screen of scrolling,
    then taps checkout and polls (up to 15s) for the delivery-time element,
    recording the elapsed time in the '提交订单页' sheet.  Relies on module
    globals: d, logger, app_name, target_file, i, takeout_webhook,
    t_screenshot_path.
    """
    try:
        time.sleep(1)
        if d(resourceId="com.yiwosi.kbb:id/car_badge").exists:
            # Empty the shopping cart first (original comment had a typo:
            # 情况购物车 -> 清空购物车).
            d(resourceId="com.yiwosi.kbb:id/iv_shop_car").click()
            time.sleep(1)
            d(resourceId="com.yiwosi.kbb:id/tViewClear").click()
            time.sleep(1)
            d(text='清空').click()
            time.sleep(2)
        for m in range(4):
            d.swipe(500, 1500, 500, 300)
            time.sleep(1)
            # Tap the add-to-cart button.
            d(resourceId="com.yiwosi.kbb:id/addbutton")[m].click()
            time.sleep(1)
            # Confirm a spec-selection dialog if one appears.
            if d(resourceId="com.yiwosi.kbb:id/tViewOK").exists:
                d(resourceId="com.yiwosi.kbb:id/tViewOK").click()
                time.sleep(1)
            m+=1  # no effect: 'm' is rebound by the for loop each iteration
        logger.info('购物车数据添加成功')
        time.sleep(1)
        # Tap the checkout button.
        d(resourceId="com.yiwosi.kbb:id/car_limit").click()
        takeout_add_menu_time = time.time()
        while not d(resourceId="com.yiwosi.kbb:id/text_show_time").exists:
            # Wait until the delivery-time element exists.
            h_appear_time = time.time()
            h_cost_time = h_appear_time - takeout_add_menu_time
            if h_cost_time > 15:
                logger.warning('等待提交订单页面超15s,返回上个页面重新操作')
                takeout_add_menu_time = time.time()
                break
        # NOTE(review): h_cost_time is unbound if the element already exists
        # on arrival -- the NameError is swallowed by the except below.
        logger.info('提交订单页面耗时{}'.format(h_cost_time))
        e = EexeclData(file=target_file, sheet_name='提交订单页')
        e.write_cell(i + 1, 1, i)
        e.write_cell(i + 1, 2, h_cost_time)
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        e.write_cell(i + 1, 3, now_time)
        logger.info('提交订单页面表格数据更新成功')
        time.sleep(1)
    except BaseException as e:
        # On failure: alert, screenshot, process dump, kill the app.
        logger.error('提交订单页面出错{}'.format(e))
        send_msg(takeout_webhook, '提交订单页面出错')
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        d.screenshot(t_screenshot_path + '{}.jpg'.format(now_time))
        get_adb_all_process(now_time)
        time.sleep(1)
        d.app_stop(app_name)
def iterate_over_features_groups(self, peff_up_threshold,
                                 peff_down_threshold):
    """Train models on every feature-group combination containing group 5.

    All 1..6-sized combinations of the six groups are generated but only
    those including group 5 (the text features) are trained.  Returns a
    DataFrame with one row per (model, combination); also dumps the
    accumulated results to test_results.csv.
    """
    all_groups_results = pd.DataFrame()
    for number_of_groups in range(1, 7):
        feature_list = list(combinations(range(0, 6), number_of_groups))
        for groups in feature_list:
            if 5 not in groups:  # only combinations with the text features
                continue
            # compare 2 features in group 2:
            # if groups != (2,3):
            #     continue
            features_group = [self.group_dic[group][0] for group in groups]
            self.features = [
                item for sublist in features_group for item in sublist
            ]
            # The training frame additionally carries the CV fold column.
            features = [
                item for sublist in features_group for item in sublist
            ]
            features.append('group_number')
            self.X_train = self.featuresDF[features]
            group_names = [self.group_dic[group][1] for group in groups]
            print('{}: Start training with the groups: {} '.format(
                (time.asctime(time.localtime(time.time()))), group_names))
            logging.info('{}: Start training with the groups: {} '.format(
                (time.asctime(time.localtime(time.time()))), group_names))
            # NOTE(review): name differs from models_iteration() used by
            # create_data_no_feature_selection -- confirm both exist.
            group_results = self.ModelsIteration()
            print('{}: Finish training with the groups: {}'.format(
                (time.asctime(time.localtime(time.time()))), group_names))
            logging.info('{}: Finish training with the groups: {}'.format(
                (time.asctime(time.localtime(time.time()))), group_names))
            # Append bookkeeping columns to each model's result row.
            for model in group_results:
                model.append(group_names)
                model.append(opts.k_fold)
                model.append(peff_up_threshold)
                model.append(peff_down_threshold)
            columns_names = [
                'classifier_name', 'score', 'auc', 'train_time',
                'group_list', 'k_fold', 'Peff_up_threshold',
                'Peff_down_threshold'
            ]
            group_resultsDF = pd.DataFrame(group_results,
                                           columns=columns_names)
            # group_results.append(group_names).append([opts.k_fold])
            all_groups_results = all_groups_results.append(
                group_resultsDF, ignore_index=True)
    all_groups_results.to_csv('test_results.csv', encoding='utf-8')
    # all_groups_results.to_csv('test_results_final_both.csv', encoding='utf-8')
    return all_groups_results
def __init__(self):
    """Load 100w2v_scale_2_causality.xlsx and map each single feature.

    Unlike the grouped loaders, every group_dic entry here wraps exactly
    one feature column, so feature importance can be tested per feature.
    featuresDF / labels stay None until split_relevant_data() fills them.
    """
    self.X_train = None
    self.features = None
    self.feature_names = None
    print('{}: Loading the data: 100w2v_scale_2_causality'.format(
        (time.asctime(time.localtime(time.time())))))
    self.original_data = pd.read_excel('100w2v_scale_2_causality.xlsx')
    self.labels = None
    self.featuresDF = None
    # for 50Doc2Vec:
    # self.text_features = range(50)
    # for Word2Vec and 100Doc2Vec:
    self.text_features = range(100)
    # group index -> [single-feature list, display name]
    self.group_dic = {
        0: [['submission_author_number_original_subreddit'],
            'submission_author_number_original_subreddit'],
        1: [['submission_author_number_recommend_subreddit'],
            'submission_author_number_recommend_subreddit'],
        2: [['submission_created_time_hour'],
            'submission_created_time_hour'],
        3: [['cosine_similarity_subreddits_list'],
            'cosine_similarity_subreddits_list'],
        4: [['comment_submission_similarity'],
            'comment_submission_similarity'],
        5: [['comment_title_similarity'], 'comment_title_similarity'],
        6: [['comment_author_number_original_subreddit'],
            'comment_author_number_original_subreddit'],
        7: [['comment_author_number_recommend_subreddit'],
            'comment_author_number_recommend_subreddit'],
        8: [['number_of_references_comment_author'],
            'number_of_references_comment_author'],
        9: [['comment_created_time_hour'], 'comment_created_time_hour'],
        10: [['time_between_messages'], 'time_between_messages'],
        11: [['comment_len'], 'comment_len'],
        12: [['number_of_r'], 'number_of_r'],
        13: [['number_of_references_to_submission'],
             'number_of_references_to_submission'],
        14: [['number_of_references_to_recommended_subreddit'],
             'number_of_references_to_recommended_subreddit'],
        15: [['subreddits_similarity'], 'subreddits_similarity'],
        16: [['treated'], 'treated']
        # 16: [self.text_features, 'text_features']
    }
    print('{}: Data loaded '.format(
        (time.asctime(time.localtime(time.time())))))
    return
def formattime(dt, format='%Y-%m-%d %H:%M'):
    """Render a Unix timestamp as a relative age string.

    < 1 min: 'N 秒前'; < 1 h: 'N 分钟前'; < 1 day: 'N 小时前';
    < 3 days: 'N 天前'; otherwise formatted with *format* ('%m-%d %H:%M'
    for timestamps within the current year).
    """
    # Elapsed seconds (the original name 'mins' was misleading).
    secs = int(time.time()) - dt
    if secs < 60:
        return u'%s 秒前' % secs
    elif secs < 3600:
        # Fix: plain '/' under Python 3 produced '2.5 分钟前'-style output;
        # floor division restores the Python 2 integer behavior.
        return u'%s 分钟前' % (secs // 60)
    elif secs < 24 * 3600:
        return u'%s 小时前' % (secs // 3600)
    elif secs < 3 * 24 * 3600:
        return u'%s 天前' % (secs // (3600 * 24))
    _time = time.localtime(dt)
    _now = time.localtime(time.time())
    # Drop the year for dates within the current year.
    if _time.tm_year == _now.tm_year:
        format = '%m-%d %H:%M'
    return time.strftime(format, time.localtime(dt))
def setRTCoffset(forsleep=None):
    """Write the UTC/local-time offset (seconds) to the STB's RTC driver.

    The *forsleep* parameter is effectively ignored: it is always
    recomputed from the current DST state and time.timezone.
    """
    import time
    if time.localtime().tm_isdst == 0:
        forsleep = 7200 + time.timezone
    else:
        forsleep = 3600 - time.timezone
    # NOTE(review): the sign of time.timezone differs between the two
    # branches (+ vs -); kept as-is -- confirm against the receiver's
    # expected rtc_offset semantics.
    t_local = time.localtime(int(time.time()))  # computed but unused
    # Set RTC OFFSET (diff. between UTC and Local Time)
    try:
        open("/proc/stb/fp/rtc_offset", "w").write(str(forsleep))
        print("[StbHardware] set RTC offset to %s sec." % (forsleep))
    except IOError:
        print("[StbHardware] setRTCoffset failed!")
def get_file_by_date(*arg):
    """Copy document files newer than a cutoff into a destination tree.

    :param arg: arg[0] source root directory; arg[1] cutoff timestamp as a
                'YYYYMMDDHHMMSS' string; arg[2] destination root passed to
                get_will_dest_name().
    :return: None (files are copied as a side effect)
    """
    for dir_path, dir_names, file_names in os.walk(arg[0]):
        for filename in file_names:
            absolute_file_name = os.path.join(dir_path, filename)
            # ===== 1. time filter ============================================
            # NOTE: os.path.getctime is the inode-change/creation time, not
            # the modification time the original docstring claimed.
            date = time.strftime(
                '%Y%m%d%H%M%S',
                time.localtime(os.path.getctime(absolute_file_name)))
            if arg[1] > date:
                continue
            try:
                # Guard against oversized/unwanted files by extension.
                file_split = os.path.splitext(filename)
                # Fix: Office temp files look like '~$report.docx', so the
                # '~$' marker lives in the name part (file_split[0]); the
                # original tested the extension, which never matches.
                if (file_split[1].lower() not in [
                        '.pdf', '.txt', '.doc', '.html', '.docx', '.ppt'
                ]) or ('~$' in file_split[0]):
                    continue
            except Exception as e:
                print(e)
            # ===== 2. build the destination file name ========================
            will_copy_file = get_will_dest_name(arg[2], absolute_file_name)
            print(absolute_file_name)
            # ===== 3. copy if not already present ============================
            if not os.path.isfile(will_copy_file):
                GetFile.do_copy(old=absolute_file_name, new=will_copy_file)
def do_sync_adcs_sequence(self):
    '''Synchronize the ADCs and pick up per-chip configuration results.

    Runs the femb_sync_adcs executable through self.runner, then reads each
    chip's results.json to refresh self.params['config_list'].
    '''
    print("SYNCHRONIZING ADCS")
    self.sync_adcs_sequence_result["text"] = "SYNCHRONIZING ADCS - IN PROGRESS"
    self.update_idletasks()  # refresh the Tk UI before the long-running step
    self.test_result = 0
    now = time.strftime("%Y%m%dT%H%M%S", time.localtime(time.time()))
    start_time = time.time()
    # NOTE(review): 'now' and 'start_time' are not used below -- the runner
    # receives self.now instead; confirm which timestamp is intended.
    resolved = self.runner(**self.params,
                           datasubdir="sync_adcs",
                           executable="femb_sync_adcs",
                           test_start_time = self.now,
                           outlabel = "{datasubdir}-{test_start_time}",
                           chip_list = self.chiplist,
                           argstr="{paramfile}",
                           save_results = self.save_sync_results)
    # look to see if any chips failed the configuration and skip over them
    # for the future tests
    for chip_name in self.chiplist:
        file_name = os.path.join(resolved['datadir'], chip_name[1],
                                 "sync_adcs", "results.json")
        print("file_name for sync results check: {}".format(file_name))
        if os.path.isfile(file_name):
            with open(file_name, 'r') as f:
                jsondata = json.load(f)
                self.params['config_list'] = jsondata['config_list']
def ind_horario(self, horario):
    """Return the index in *horario* of today's two-letter day tag.

    The current weekday abbreviation (Spanish or English locale) is mapped
    to the English tag used in the schedule string; -1 if no tag matches.
    """
    import time
    # Today's abbreviation, first two letters, capitalized (e.g. 'Mo'/'Lu').
    today = time.strftime("%a", time.localtime())[0:2].capitalize()
    # (spanish_alias, english_alias) -> tag searched in the schedule string.
    day_table = (
        (("Lu", "Mo"), "Mo"),
        (("Ma", "Tu"), "Tu"),
        (("Mi", "We"), "We"),
        (("Ju", "Th"), "Th"),
        (("Vi", "Fr"), "Fr"),
        (("Sa", "Sa"), "Sa"),
        (("Do", "Su"), "Su"),
    )
    for aliases, tag in day_table:
        if any(today.find(alias) != -1 for alias in aliases):
            return horario.find(tag)
    return -1
def click_bookdate(start_date, end_date):
    """Pick the booking start/end dates in the hotel calendar.

    start_date / end_date are used as the literal day-of-month text to tap
    (the dynamic date computation below is disabled).  Relies on module
    globals: d, logger, app_name, hotel_webhook, h_screenshot_path.
    """
    try:
        d(resourceId="com.yiwosi.kbb:id/tv_total_day").click()
        time.sleep(1)
        d.swipe(500, 1600, 500, 500)
        time.sleep(1)
        # Disabled dynamic-date computation, kept for reference:
        # temp_date = datetime.datetime.now()
        # start_date = (temp_date + datetime.timedelta(days=+start_date)).strftime("%d")
        # if int(start_date)>27:
        #     print(0)
        #     start_date = '27'
        # if start_date.startswith('0'):
        #     start_date = start_date.replace('0', '')
        # end_date = (temp_date + datetime.timedelta(days=+end_date)).strftime("%d")
        # if end_date.startswith('0'):
        #     end_date = end_date.replace('0', '')
        d(text=start_date).click()
        time.sleep(1)
        d(text=end_date).click()
        time.sleep(1)
        logger.info('选择的预订起止日期:{}号-{}号'.format(start_date, end_date))
    except BaseException as e:
        # On failure: alert, screenshot, process dump, kill the app.
        send_msg(hotel_webhook, '预订日期选择出错')
        logger.error('预订日期选择出错{}'.format(e))
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        d.screenshot(h_screenshot_path + '{}.jpg'.format(now_time))
        get_adb_all_process(now_time)
        time.sleep(1)
        d.app_stop(app_name)
def parse_chejiahao_comments(more_url):
    """Scrape the comment list of an Autohome 'chejiahao' article page.

    Iterates the comment <dt> nodes, stops at the first comment older than
    START_TIME, and persists each comment dict via save_to_db().
    """
    html = parse_art(more_url)
    if html:
        tree = etree.HTML(html)
        title = tree.xpath('//div[@class="all_com_title"]/span/a/text()')[0]
        bbsname = None
        for index, each in enumerate(tree.xpath('//dl[@class="rev_dl"]//dt')):
            try:
                username = each.xpath('.//span[@class="rmembername-span"]/text()')[0]
                # The timestamp's text-node position varies between layouts.
                try:
                    pushtime = each.xpath('./span[1]/text()[3]')[0].strip()
                except:
                    pushtime = each.xpath('./span[1]/text()[2]')[0].strip()
                pushtime = parse_time(pushtime)
                if pushtime < START_TIME:  # comments are newest-first
                    break
                comtstr = tree.xpath('//dl[@class="rev_dl"]//dd[{}]/p/text()'.format(index+1))[0]
                item = {}
                item['title'] = title
                item['bbs_name'] = '汽车之家'
                item['sonbbs_name'] = bbsname
                item['username'] = username
                item['comment_detail'] = comtstr
                item['comment_url'] = more_url
                item['push_time'] = pushtime
                item['catch_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                item['car_type'] = None
                item['collection'] = "(汽车之家文章)自动驾驶"  # destination table name
                item['usergender'] = None
                item['userlocation'] = None
                item['userage'] = None
                save_to_db(item)
            except Exception as e:
                print(e.__traceback__.tb_lineno,e)
def messbox():
    """Show a one-shot 'suspicious activity' alert popup with the time.

    Guarded by the module-level 'msgbox' flag: the popup fires only while
    msgbox == 1 and the flag is cleared afterwards, suppressing further
    popups until it is reset elsewhere.
    """
    global msgbox
    if msgbox == 1:
        pop = Toplevel(root)
        pop.title("ALERT!")
        pop.geometry("300x100")  # w x h
        pop.maxsize(300, 100)
        global pop_label
        label_1 = Label(pop, text="Suspicious Activity Detected!", fg='red', font=("helvetica", 12))
        label_1.pack(pady=5)
        f1 = Frame(pop)
        f1.pack(pady=10)
        time_text = Label(f1, text='Time:', font=("helvetica", 11))
        time_text.grid(row=2, column=1)
        pop_label = Label(f1, text="", font=("helvetica", 10))
        pop_label.grid(row=2, column=2)
        # Current wall-clock time shown next to the alert.
        t = time.localtime()
        current_time = time.strftime("%H:%M:%S", t)
        pop_label.config(text=current_time)
        # Arm-once: suppress further popups until reset elsewhere.
        msgbox = 0
def output_latex_code(self, result):
    """Write a one-row LaTeX metrics table to '<model>_<date>_latex_code.tex'.

    :param result: mapping of metric name -> value; insertion order is the
                   column order.
    :return: None (the .tex file is written as a side effect)
    """
    import time
    evaluate_index = list(result.keys())
    values = list(result.values())
    print(evaluate_index)
    print(values)
    # Header cells, value cells and the column spec, built with joins
    # (the original accumulated single characters into lists).
    index = ''.join(' & ' + str(key) for key in evaluate_index)
    tmp_values = ''.join(' & $' + str(value) + '$' for value in values)
    c_control = 'c' * len(evaluate_index)
    text = '\\begin{tabular}{l|' + c_control + '}\n\t\hline\n\tEvaluation metrics' + index + ' \\\\\n\t\hline\n\t\hline\n\t' + self.config[
        'model'] + tmp_values + ' \\\\\n\t\hline\n\end{tabular}'
    save_path = self.config['model'] + '_' + time.strftime(
        "%Y-%m-%d", time.localtime()) + '_latex_code.tex'
    # Fix: the original never closed the handle; 'with' guarantees it.
    with open(save_path, mode='w') as file_handle:
        file_handle.write(text)
def getTime(daterange=1):
    '''Return the Unix timestamp (int) of the start of a date range.

    daterange: 1=today, 2=this week, 3=this month, 4=yesterday,
    5=day before yesterday; anything else returns 0.
    '''
    fields = list(time.localtime(time.time()))
    # Clamp hour/minute/second to midnight for every range.
    fields[3] = fields[4] = fields[5] = 0
    if daterange == 1:      # today: midnight as-is
        pass
    elif daterange == 2:    # this week: back up tm_wday days to Monday
        fields[2] -= fields[6]
    elif daterange == 3:    # this month: first day of the month
        fields[2] = 1
    elif daterange == 4:    # yesterday
        fields[2] -= 1
    elif daterange == 5:    # day before yesterday
        fields[2] -= 2
    else:
        return 0
    # mktime normalizes out-of-range day values (e.g. day 0 or -1).
    return int(time.mktime(tuple(fields)))
def datestring(t=None, sec=False):
    """
    Datestring

    Inputs:
      (optional)
      t - time.localtime()
      sec - bool - whether to include sec [SS] in output

    Outputs:
      ds - str - date in YYYYMMDD-HHMM[SS] format

    by Sam Burden 2012
    """
    if t is None:
        import time
        t = time.localtime()
    # Zero-padded fields assembled in one format pass.
    stamp = '%04d%02d%02d-%02d%02d' % (
        t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min)
    if sec:
        stamp += '%02d' % t.tm_sec
    return stamp
def to_sql(self):
    """Build the INSERT statement for this event (Python 2 code).

    Fills event_id/date first, then interpolates every attribute into the
    VALUES list; the raw log is base64-encoded.
    WARNING(review): values are string-interpolated, not bound parameters --
    SQL injection risk if any field is untrusted.
    """
    event_attr = [
        "event_id", "plugin_id", "plugin_sid", "protocol", "src_ip",
        "src_port", "dst_ip", "dst_port", "date", "log", "binary_data"
    ]
    self.event['event_id'] = Event.__get_uuid()
    self.event['date'] = time.strftime("%Y-%m-%d %H:%M:%S",
                                       time.localtime(time.time()))
    query = 'INSERT INTO event ('
    for attr in event_attr:
        query += '%s,' % attr
    query = query.rstrip(',')  # drop the trailing comma
    query += ') VALUES ('
    for attr in event_attr:
        value = ''
        if self.event[attr] is not None:
            if attr == "log":
                value = b64encode(self.event[attr])  # raw log stored base64
            else:
                value = self.event[attr]
                print self.event[attr]
        else:
            value = ""  # NULL-ish attributes become empty strings
        query += "'%s'," % value
    query = query.rstrip(',')
    query += ');'
    #debug(query)
    return query
def parse_time(pushtime):
    """Normalize a relative Chinese timestamp into an absolute one.

    'N天前' becomes 'YYYY-MM-DD'; 'N小时前' / 'N分钟前' become
    'YYYY-MM-DD HH:MM:SS'; any other string is returned unchanged.
    """
    if re.search(r"天前", pushtime):
        days = int(re.search(r"\d+", pushtime).group())
        pushtime = time.strftime(
            "%Y-%m-%d", time.localtime(time.time() - days * 86400))
    if re.search('小时前', pushtime):
        hours = int(re.search('\d+', pushtime).group())
        pushtime = time.strftime(
            "%Y-%m-%d %H:%M:%S", time.localtime(time.time() - hours * 60 * 60))
    if re.search('分钟前', pushtime):
        minutes = int(re.search('\d+', pushtime).group())
        pushtime = time.strftime(
            "%Y-%m-%d %H:%M:%S", time.localtime(time.time() - minutes * 60))
    return pushtime
def get_gld_info(self, name):
    """Return (JSON string) the GLD work-order records matching *name*.

    copy_users is queried only to flag whether the session user is a CC
    recipient ('yes') or an approver ('no') for the rendered records.
    """
    temp_type = request.env['syt.oa.gld'].sudo().search([('name', '=', name.lstrip())])
    copy_users = request.env['syt.oa.gld'].sudo().search([
        ('copy_users.user_id', '=', request.session['uid'])
    ])
    temp_type_list = []
    if temp_type:
        for value in temp_type:
            temp_item = {}
            temp_item['name'] = value.name  # order number
            temp_item['company_name'] = value.company_id.name  # company
            temp_item['dept'] = value.dept  # department
            temp_item['id'] = value.create_uid.id  # creator employee id
            temp_item['user_name'] = value.create_uid.name  # creator name
            timeArray = time.strptime(str(value.create_date), "%Y-%m-%d %H:%M:%S")
            timeStamp = int(time.mktime(timeArray))
            create_time = timeStamp + 8 * 60 * 60  # shift stored time to UTC+8
            timeArray = time.localtime(create_time)
            otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
            temp_item['write_date'] = otherStyleTime  # creation time, shifted
            temp_item['state'] = value.state  # status
            temp_item['subject'] = value.subject  # title
            temp_item['content'] = value.content  # body
            if copy_users:
                temp_item['copy_users'] = 'yes'  # session user is a CC recipient
            else:
                temp_item['copy_users'] = 'no'  # session user is an approver
            temp_type_list.append(temp_item)
    return JSONEncoder().encode(temp_type_list)
def get_sns(self, gld_name):
    """Return (JSON string) the chatter messages attached to GLD *gld_name*.

    Each entry carries the posting employee's id/name/email, the body with
    its <p> wrapper stripped, and the post time shifted to UTC+8.
    """
    temp_list = []
    gld = request.env['syt.oa.gld'].sudo().search([('name', '=', gld_name)
                                                   ])
    message = request.env['mail.message'].sudo().search([
        ('res_id', '=', gld.id), ('model', '=', 'syt.oa.gld')
    ])
    if message:
        for value in message:
            temp_item = {}
            # Resolve the posting user to an HR employee record.
            employee = request.env['hr.employee'].sudo().search([
                ('user_id', '=', int(value.create_uid))
            ])
            # temp_item['operator'] = employee.name
            temp_item['id'] = employee.id  # employee id
            temp_item['name'] = employee.name  # poster's name
            temp_item['email'] = employee.work_email  # work email
            temp_item['body'] = str(value.body).replace("<p>", "").replace(
                "</p>", "")  # message body without the <p> wrapper
            timeArray = time.strptime(str(value.create_date),
                                      "%Y-%m-%d %H:%M:%S")
            timeStamp = int(time.mktime(timeArray))
            create_time = timeStamp + 8 * 60 * 60  # shift stored time to UTC+8
            timeArray = time.localtime(create_time)
            otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
            temp_item['time'] = otherStyleTime  # post time, shifted
            temp_list.append(temp_item)
    return JSONEncoder().encode(temp_list)
def parse_news_comments(url):
    """Scrape the reply list of an Autohome news article page.

    Iterates the reply <dt> nodes, stops at the first reply older than
    START_TIME, and persists each reply dict via save_to_db().
    """
    html = parse_art(url)
    if html:
        tree = etree.HTML(html)
        title = tree.xpath('//h1/a[1]/text()')[0]
        bbsname = None
        for index, each in enumerate(tree.xpath('//dl[@id="reply-list"]//dt')):
            try:
                username = each.xpath('./span[1]/a[1]/text()')[0]
                # Timestamp is rendered like '[2020-01-01 ...' -- strip the bracket.
                pushtime = each.xpath('./span[2]/text()[1]')[0].replace("[", '')
                pushtime = parse_time(pushtime)
                if pushtime < START_TIME:  # replies are newest-first
                    break
                comtstr = tree.xpath('//dl[@id="reply-list"]//dd[{}]/@datacontent'.format(index+1))[0]
                item = {}
                item['title'] = title
                item['bbs_name'] = '汽车之家'
                item['sonbbs_name'] = bbsname
                item['username'] = username
                item['comment_detail'] = comtstr
                item['comment_url'] = url
                item['push_time'] = pushtime
                item['catch_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                item['car_type'] = None
                item['collection'] = "(汽车之家文章)自动驾驶"  # destination table name
                item['usergender'] = None
                item['userlocation'] = None
                item['userage'] = None
                save_to_db(item)
            except Exception as e:
                print(e.__traceback__.tb_lineno,e)
def addDailyAction(self, hour, minute, task, name):
    """
    Register *task* to run every day at hour:minute.

    If a task with the given name is already registered with the
    scheduler, addPeriodicAction re-registers it as a periodic task.
    The first run is scheduled at the next occurrence of hour:minute
    (possibly right now if that time is the current minute), then
    repeated every 24 hours.
    """
    import time
    current = time.localtime(time.time())
    # Minutes until the target wall-clock time, wrapping past midnight.
    # Fix: the original computed hour and minute differences independently,
    # which over-counted by one hour whenever the minute difference
    # borrowed (e.g. 10:30 -> 11:20 scheduled 110 min instead of 50).
    now_minutes = current.tm_hour * 60 + current.tm_min
    target_minutes = hour * 60 + minute
    delay = ((target_minutes - now_minutes) % (24 * 60)) * 60
    self.addPeriodicAction(time.time() + delay, 24 * 60 * 60, task, name)
def fillModels(cv,mname,fname,comment=None):
    """Insert a PMML model file plus its filesystem metadata (Python 2).

    cv: DB cursor; mname: model name; fname: path to the PMML file.
    NOTE(review): this file contains a second, reformatted copy of this
    function; 'pmmlfile' and 'date' are computed but never used, and
    getlogin() is assumed in scope (os.getlogin?) -- confirm.
    """
    import os
    import time
    import stat
    if (comment==None):
        comment=" "
    pmmlfile=file(fname)
    sql='SELECT CURDATE()'
    cv.execute(sql)
    date=cv.fetchone()[0]
    # stat times rendered as asctime strings for the models table
    atime=os.stat(fname)[stat.ST_ATIME]
    atime=time.asctime(time.localtime(atime))
    ctime=os.stat(fname)[stat.ST_CTIME]
    ctime=time.asctime(time.localtime(ctime))
    mtime=os.stat(fname)[stat.ST_MTIME]
    mtime=time.asctime(time.localtime(mtime))
    mode=os.stat(fname)[stat.ST_MODE]
    mode=oct(mode & 0777)  # permission bits only (py2 octal literal)
    # we were using the mysql specific LOAD_FILE, but it
    # wasn't working in Korea, so we're doing the file load
    # the hard way
    load_file = file (fname, "rb")
    file_content = load_file.read ()
    load_file.close ()
    sql="INSERT INTO models VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    try:
        login=getlogin()
    except:
        login='******';
    cv.execute (sql, ( mname \
        , file_content \
        , comment \
        , str(os.getuid()) \
        , login \
        , str(os.getgid()) \
        , atime \
        , mtime \
        , ctime \
        , str(mode) \
        ))
    return
def fillModels(cv, mname, fname, comment=None):
    """Insert a PMML model file plus its filesystem metadata (Python 2).

    Reformatted duplicate of the fillModels defined earlier in this file --
    still Python 2 only (file(), 0777 octal literal).
    cv: DB cursor; mname: model name; fname: path to the PMML file.
    """
    import os
    import time
    import stat
    if (comment == None):
        comment = " "
    pmmlfile = file(fname)  # opened but unused (see review note above)
    sql = 'SELECT CURDATE()'
    cv.execute(sql)
    date = cv.fetchone()[0]  # fetched but unused
    # stat times rendered as asctime strings for the models table
    atime = os.stat(fname)[stat.ST_ATIME]
    atime = time.asctime(time.localtime(atime))
    ctime = os.stat(fname)[stat.ST_CTIME]
    ctime = time.asctime(time.localtime(ctime))
    mtime = os.stat(fname)[stat.ST_MTIME]
    mtime = time.asctime(time.localtime(mtime))
    mode = os.stat(fname)[stat.ST_MODE]
    mode = oct(mode & 0777)  # permission bits only (py2 octal literal)
    # we were using the mysql specific LOAD_FILE, but it
    # wasn't working in Korea, so we're doing the file load
    # the hard way
    load_file = file(fname, "rb")
    file_content = load_file.read()
    load_file.close()
    sql = "INSERT INTO models VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    try:
        login = getlogin()
    except:
        login = '******'
    cv.execute (sql, ( mname \
        , file_content \
        , comment \
        , str(os.getuid()) \
        , login \
        , str(os.getgid()) \
        , atime \
        , mtime \
        , ctime \
        , str(mode) \
        ))
    return
def benchmark(self, clf, clf_name='default'):
    """Cross-validate classifier *clf* and report accuracy, AUC and timing.

    Runs ``opts.k_fold``-fold cross-validated prediction on
    ``self.X_train`` / ``self.labels``, printing and logging the elapsed
    time, a confusion matrix (when ``opts.print_cm``), accuracy and ROC AUC.

    Returns ``[classifier name, accuracy score, auc, cross-validation time]``.

    NOTE(review): ``score`` is a single accuracy value; the later
    ``score.mean()`` / ``score.std()`` only work if ``accuracy_score``
    returns a NumPy scalar — confirm, or switch to ``cross_val_score``
    for genuine per-fold scores.
    """
    print('_' * 80)
    print('{}: Traininig: {}'.format((time.asctime(time.localtime(time.time()))), clf))
    logging.info('_' * 80)
    logging.info('{}: Traininig: {}'.format((time.asctime(time.localtime(time.time()))), clf))
    t0 = time.time()
    # Cross validation part
    k = opts.k_fold
    # GaussianNB cannot consume a sparse matrix; densify first.
    if clf_name == 'GaussianNB':
        self.X_train = self.X_train.toarray()
    predicted = cross_val_predict(clf, self.X_train, self.labels, cv=k)
    score = metrics.accuracy_score(self.labels, predicted)
    train_time = time.time() - t0
    print("cross validation time: {}".format(train_time))
    logging.info("cross validation time: {}".format(train_time))
    # if hasattr(clf, 'coef_'):
    #     print("dimensionality: %d" % clf.coef_.shape[1])
    #     print("density: %f" % density(clf.coef_))
    #     if opts.print_top10 and self.feature_names is not None:
    #         print("top 10 keywords per class:")
    #         for i, label in enumerate(self.labels):
    #             top10 = np.argsort(clf.coef_[i])[-10:]
    #             print(trim("%s: %s" % (label, " ".join(self.feature_names[top10]))))
    #     print()
    # if True:  # opts.print_report:
    #     print("classification report:")
    #     print(metrics.classification_report(self.labels, predicted,
    #                                         self.labels=self.labels))
    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(self.labels, predicted, labels=[-1, 1]))
        logging.info("confusion matrix:")
        logging.info(metrics.confusion_matrix(self.labels, predicted, labels=[-1, 1]))
    clf_descr = str(clf).split('(')[0]
    print("Accuracy: {} (+/- {})".format(score.mean(), score.std() * 2))
    logging.info("Accuracy: {} (+/- {})".format(score.mean(), score.std() * 2))
    # NOTE(review): average='samples' is intended for multilabel targets —
    # confirm it is deliberate for the [-1, 1] labels used above.
    auc = metrics.roc_auc_score(self.labels, predicted, average='samples')
    print('AUC: {}'.format(auc))
    logging.info('AUC: {}'.format(auc))
    return [clf_descr, score, auc, train_time]
def runTest(self):
    """Selenium UI test: navigate Finance Management > General Ledger >
    Auxiliary Balance sheet, query the current month, then trigger the
    toolbar dropdown action.  Asserts the opened tab carries the expected
    title before querying.
    """
    driver = self.getDriver()
    param = self.param
    tool = utils
    driver.refresh()
    # Top-left shared navigation node
    driver.find_element_by_class_name('lebra-navbar-left-icon').click()
    sleep(3)
    # Enter "财务管理" (finance management)
    driver.find_element_by_xpath('//*[text()="财务管理"]').click()
    sleep(3)
    # Enter the first-level node ("总账", general ledger)
    menu2 = driver.find_element_by_css_selector('span[title="总账"]')
    actions = ActionChains(driver)
    actions.move_to_element(menu2)
    actions.click(menu2)
    actions.perform()
    sleep(3)
    # Enter the second-level node ("辅助余额表", auxiliary balance sheet)
    menu3 = driver.find_element_by_css_selector(
        'li[class="bottomBar"][title="辅助余额表"]')
    actions.move_to_element(menu3)
    actions.click(menu3)
    actions.perform()
    sleep(6)
    titleName = driver.find_element_by_css_selector(
        '#home_header > div > div.tab--38iB- > ul > li > p').get_attribute(
        'title')
    assert u"辅助余额表" in titleName, u"页面源码中不存在该关键字!"
    sleep(5)
    # The report lives inside an iframe; switch into it before querying.
    iframe = driver.find_element_by_id('help-balance')
    driver.switch_to.frame(iframe)
    # Query: select the ledger book
    driver.find_element_by_xpath(
        '//label[text()="账簿"]/..//span[@class="ant-select-arrow"]/i'
    ).click()
    driver.find_element_by_xpath('//li[text()="yontest云创股份"]').click()
    # Set both period selectors to the current month (YYYY-MM).
    nowtime = time.strftime('%Y-%m', time.localtime(time.time()))
    driver.find_element_by_xpath(
        '//span[text()="至"]/../div[2]//span[@class="ant-select-arrow"]'
    ).click()
    driver.find_element_by_xpath(
        '//li[text()="{}"]'.format(nowtime)).click()
    driver.find_element_by_xpath(
        '//span[text()="至"]/../div[3]//span[@class="ant-select-arrow"]'
    ).click()
    driver.find_element_by_xpath(
        '//div[6]//li[text()="{}"]'.format(nowtime)).click()
    # Click the query button via JS — presumably to avoid click
    # interception by overlays; confirm.
    search_button = driver.find_element_by_xpath('//*[text()="查 询"]')
    driver.execute_script("arguments[0].click();", search_button)
    driver.switch_to.default_content()
    driver.find_element_by_class_name('u-button').click()
    sleep(3)
    driver.find_element_by_class_name('u-dropdown-menu-item').click()
    sleep(3)
def calc(self):
    """Slide a 29-point, minute-spaced window over the 'busi_yyt.csv'
    series and run the selected skyline anomaly-detection algorithm on
    each window; prints candidate anomalies whose last value is below
    70% of the recent same-time-of-day mean.
    """
    load = ld.load_data()
    data = load.csv('busi_yyt.csv')
    # Map timestamp -> value for O(1) lookups below.
    dict_data = dict(data)
    starttime = 1522080000
    endtime = 1523030340
    for i in range(starttime, endtime, 60):
        tmptime = []
        tmpdata = []
        tmpvalue = []
        j = i
        # Build the 29-sample window starting at i (one point per minute).
        for j in range(i, i + 60 * 29, 60):
            tmpdata.append((j, dict_data[j]))
            tmptime.append(j)
            tmpvalue.append(dict_data[j])
        anomalous, ensemble, datatime, datapoint = skyline_algorithms.run_selected_algorithm(
            tmpdata, 'test')
        time_local = time.localtime(datatime)
        if anomalous == True:
            data1 = []
            data2 = []
            data3 = []
            data_new = []
            '''
            for t in tmptime:
                data1.append((t - 86400 * 1,dict_data[t - 86400 * 1]))
                data2.append((t - 86400 * 2,dict_data[t - 86400 * 2]))
                data3.append((t - 86400 * 3,dict_data[t - 86400 * 3]))
                data_new.append((t - 86400 * 1,(dict_data[t - 86400 * 1] + dict_data[t - 86400 * 2] + dict_data[t - 86400 * 1])/3))
            e1 = euctsd_euclidean_metrictE(data1,data2)
            e2 = euctsd_euclidean_metrictE(data2,data3)
            e3 = euctsd_euclidean_metrictE(data1,data3)
            '''
            # Mean of the same minute on previous days.
            # NOTE(review): range(1, 10) sums only 9 days but the mean
            # divides by 10 — off-by-one? confirm intent.
            v_mean = 0
            for t in range(1, 10):
                v_mean += dict_data[tmpdata[-1][0] - 86400 * t]
            v_mean = v_mean / 10
            print(v_mean)
            # Flag only drops below 70% of the historical mean.
            if tmpdata[-1][1] < v_mean * 0.7:
                print(
                    str(ensemble) + ':' +
                    str(time.strftime("%Y-%m-%d %H:%M:%S", time_local)) + ':'
                    + str(datapoint))
            '''
def formatDate(cls, date):
    """Normalize *date* to an ISO-8601 'YYYY-MM-DDTHH:MM:SS' string.

    Accepts a ``datetime`` (formatted directly), an ``int`` epoch
    (formatted as local time), or anything else (returned unchanged).
    """
    # Fix: was "%Y-%m-%d\T%H:%M:%S" — '\T' is not an escape, so a literal
    # backslash ended up in the output instead of the ISO 'T' separator.
    _format = "%Y-%m-%dT%H:%M:%S"
    if isinstance(date, datetime):
        d = date.strftime(_format)
    elif isinstance(date, int):
        # Fix: datetime.strptime(time.localtime(date), ...) raised
        # TypeError (strptime wants a string); format the epoch directly.
        d = time.strftime(_format, time.localtime(date))
    else:
        d = date
    return d
def adjust_system_clock():
    """Best-effort sync of the system clock from pool.ntp.org.

    Queries an NTP server and, on success, shells out to ``date`` with the
    server's transmit time (local time, MMDDhhmmYYYY.SS format).  Any
    failure (network, DNS, NTP) is deliberately ignored — the caller treats
    clock adjustment as optional.
    """
    c = ntplib.NTPClient()
    try:
        response = c.request('pool.ntp.org', version=3)
        os.system(
            'date ' +
            time.strftime('%m%d%H%M%Y.%S', time.localtime(response.tx_time)))
    except Exception:
        # Fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort behaviour itself is kept.
        pass
def daybeforehollyday(d):
    """Return whether the day after *d* is a holiday.

    *d* (a time struct) is converted to an epoch, shifted forward by
    86400 seconds, and the resulting local date is checked with
    isHollyday().
    """
    epoch_next_day = time.mktime(d) + 86400
    return isHollyday(time.localtime(epoch_next_day))
def getTimeOClockOfToday(year, month, day):
    """Return today's midnight (00:00:00 local time) as an int Unix timestamp.

    NOTE(review): the year/month/day parameters are ignored — the result is
    always computed for *today*; confirm callers expect this.
    """
    import time
    midnight_text = time.strftime('%Y-%m-%d 00:00:00', time.localtime(time.time()))
    midnight_struct = time.strptime(midnight_text, '%Y-%m-%d %H:%M:%S')
    return int(time.mktime(midnight_struct))
def getHostStatusSummary(session_id, host_name):
    """Return an overall status summary dict for *host_name*.

    The dict carries "status" (one of the STATUS_* constants),
    "status_text", "status_hint" and — when host status is available —
    "infoUpdatedAt" / "lastCheckedAt" timestamps.  Checks, in order:
    reachability, operating revision, SSH key, CFengine key.
    """
    status = {}
    try:
        hoststatus = getHostStatus(host_name)
    except Exception:
        # Fix: was a bare `except:`; keep the best-effort fallback but stop
        # swallowing SystemExit/KeyboardInterrupt.
        status = {"status": STATUS_UNKNOWN, "status_text": "UNKNOWN",
                  "status_hint": "No status information available"}
        return status
    status["infoUpdatedAt"] = time.strftime("%d/%m/%Y %H:%M:%S",
                                            time.localtime(hoststatus.infoUpdatedAt))
    status["lastCheckedAt"] = time.strftime("%d/%m/%Y %H:%M:%S",
                                            time.localtime(hoststatus.lastCheckedAt))
    # Optimistic default; downgraded by the checks below.
    status["status"] = STATUS_OK
    status["status_text"] = "UP"
    status["status_hint"] = "Last checked at %s" % status["lastCheckedAt"]
    if not hoststatus.reachable:
        status["status"] = STATUS_CRITICAL
        status["status_text"] = "Down"
        status["status_hint"] = "Not seen since %s" % status["infoUpdatedAt"]
        return status
    if hoststatus.operatingRevisionStatus != STATUS_OK:
        status["status"] = hoststatus.operatingRevisionStatus
        status["status_text"] = hoststatus.operatingRevisionText
        # Fix: key was misspelled "statux_hint", so the hint was lost.
        status["status_hint"] = hoststatus.operatingRevisionHint
        return status
    cstatus = getCfengineHostStatus(session_id, hoststatus.host._properties)
    if cstatus["ssh_key_status"] != STATUS_OK:
        status["status"] = cstatus["ssh_key_status"]
        status["status_text"] = "SSH Key Error"
        status["status_hint"] = cstatus["ssh_key_text"]
        return status
    if cstatus["cfengine_key_status"] != STATUS_OK:
        status["status"] = cstatus["cfengine_key_status"]
        status["status_text"] = "CFengine Key Error"
        status["status_hint"] = cstatus["cfengine_key_text"]
        return status
    return status
def d_print(*args): import time if MDEBUG: if MDEBUG_TIMESTAMP: s = '%s - ' % time.strftime('%H:%M:%S',time.localtime()) else: s = '' for arg in args: s += str(arg) print s
def get_border_time():
    """Return the three daily boundary times, each parsed through
    get_date_time_str():

    - today 10:00:00 (start of the first window)
    - today 23:00:00 (end of the first window)
    - tomorrow 02:00:00 (end of the second, overnight window)
    """
    today = time.strftime('%Y-%m-%d', time.localtime())
    tomorrow = time.strftime('%Y-%m-%d', time.localtime(time.time() + 86400))
    ten_clock_am = get_date_time_str(today + ' 10:00:00')
    ten_clock_pm = get_date_time_str(today + ' 23:00:00')
    two_clock = get_date_time_str(tomorrow + ' 02:00:00')
    return ten_clock_am, ten_clock_pm, two_clock
def procdt(self,args=None):
    """Replace the placeholder token for *args* ('dt', 'date' or 'time')
    in the current view with the corresponding current-time string.
    Does nothing when *args* is None.
    """
    if args == None:
        return
    # Placeholder token in the buffer, keyed by command kind.
    tokens = {'dt':'TM_TH_DTTM','date':'TM_TH_DATE','time':'TM_TH_TIME'}
    # strftime format for the replacement text, keyed the same way.
    # NOTE(review): the 'time' format uses '/' separators ('%H/%M/%S'),
    # which looks unintended — confirm before changing.
    formats = {'dt':'%Y/%m/%d %H:%M:%S','date':'%Y/%m/%d','time':'%H/%M/%S'}
    now = time.localtime(time.time())
    region = self.view.find('#' + tokens[args], 0)
    self.view.replace(self.edit, region, time.strftime(formats[args], now))
def d_print(*args): import time if MDEBUG: if MDEBUG_TIMESTAMP: s = '%s - ' % time.strftime('%H:%M:%S', time.localtime()) else: s = '' for arg in args: s += str(arg) print s
def insertExpert(self,expert):
    """Insert *expert* (expertID, name, keywordsList) into GeniusExpert.

    Fix: values are now bound as query parameters instead of being
    interpolated into the SQL string — the old form broke on quotes in
    the data and was an SQL-injection hole.  On failure the error, the
    statement and a timestamp are appended to ExpertError.log, as before.
    """
    sql = "insert into GeniusExpert(Expert_ID,Expert_name,Expert_keywords) values(%s,%s,%s)"
    try:
        self.cursor.execute(sql, (expert.expertID, expert.name, str(expert.keywordsList)))
    except Exception as e:
        # Best-effort error log; close the handle even if a write fails.
        f = open("ExpertError.log","a")
        try:
            f.write(str(e)+"\n")
            f.write(sql+"\n")
            f.write(time.strftime('%m-%d %H:%M:%S',time.localtime(time.time())))
        finally:
            f.close()
def insertPaper(self,paper):
    """Insert *paper*'s metadata into the Paper table.

    Fix: the nine column values are now bound as query parameters instead
    of being interpolated into the SQL string — the old form broke on
    quotes in titles/authors and was an SQL-injection hole.  On failure
    the error, the statement and a timestamp are appended to
    PaperError.log, as before.
    """
    sql = ("insert into Paper(Expert_ID,Paper_ID,Paper_Title,Paper_Authorslist,"
           "Paper_Url,Paper_Origin,Paper_Pubtime,Paper_Database,Paper_Citation) "
           "values(%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    try:
        self.cursor.execute(sql, (paper.expertID, paper.paperID, paper.title,
                                  paper.authorsList, paper.url, paper.origin,
                                  paper.pubtime, paper.database, paper.citation))
    except Exception as e:
        # Best-effort error log; close the handle even if a write fails.
        f = open("PaperError.log","a")
        try:
            f.write(str(e)+"\n")
            f.write(sql+"\n")
            f.write(time.strftime('%m-%d %H:%M:%S',time.localtime(time.time())))
        finally:
            f.close()
def procautoflush(self):
    """Refresh the trailing timestamp of the '>>> ... <<<YYYY/MM/DD
    HH:MM:SS' block that contains the current selection.

    Scans the whole view for blocks of that shape and, for the one whose
    region strictly spans the first selection, rewrites its final
    19-character timestamp with the current time.
    """
    view=self.view
    edit=self.edit
    curtime=time.strftime('%Y/%m/%d %H:%M:%S',time.localtime(time.time()))
    sel=view.sel()[0]
    # Block: '>>>' opener, lazy body, then '<<<' followed by a timestamp.
    reg=r"^>{3}[^>][\r\n\S\s]*?[^<]<{3}\d{4}/\d{2}/\d{2}\s\d{2}:\d{2}:\d{2}"
    reg_bb=view.find(reg,0)
    while not reg_bb.empty():
        # Only rewrite the block that strictly contains the selection.
        if reg_bb.begin()<sel.begin() and reg_bb.end()>sel.end():
            # The last 19 characters of the match are the old timestamp.
            treg=sublime.Region(reg_bb.end()-19,reg_bb.end())
            view.replace(edit,treg,curtime)
        # Continue scanning after this match.
        reg_bb=view.find(reg,reg_bb.end())
def SetTime():
    """Write the current time and a UTC/local offset into the Gigabox
    front-panel RTC via /proc/stb/fp (Enigma2, Python 2).

    NOTE(review): the non-DST branch uses 7200+time.timezone while the
    DST branch uses 3600-time.timezone — the inconsistent signs look
    suspicious; confirm against the receiver's rtc_offset semantics.
    NOTE(review): the open().write() handles are never explicitly closed.
    """
    print "[VFD-GIGA] Set RTC time"
    import time
    # Offset (seconds) between UTC and local time expected by the frontpanel.
    if time.localtime().tm_isdst == 0:
        forsleep = 7200+time.timezone
    else:
        forsleep = 3600-time.timezone
    t_local = time.localtime(int(time.time()))
    print "set Gigabox RTC to %s (rtc_offset = %s sec.)" % (time.strftime("%Y/%m/%d %H:%M", t_local), forsleep)
    # Set RTC OFFSET (diff. between UTC and Local Time)
    try:
        open("/proc/stb/fp/rtc_offset", "w").write(str(forsleep))
    except IOError:
        print "[VFD-GIGA] set RTC Offset failed!"
    # Set RTC
    try:
        open("/proc/stb/fp/rtc", "w").write(str(int(time.time())))
    except IOError:
        print "[VFD-GIGA] set RTC time failed!"
def addDailyAction(self, hour, minute, task, name):
    """Add an action to be run every day at a specific time.

    If a task with the given name is already registered with the scheduler,
    that task will be removed from the scheduling queue and registered anew
    as a periodic task.

    Can we make this addCalendarAction? What if we want to run something
    once a week? We probably don't need that for Webware, but this is a
    more generally useful module. This could be a difficult function,
    though. Particularly without mxDateTime.
    """
    import time
    now = time.localtime(time.time())
    nowHour, nowMinute = now[3], now[4]
    # Minutes from now until the next occurrence of hour:minute.  The
    # modulo wraps past midnight and yields 0 when the target equals the
    # current time (run immediately) — identical to the original
    # case-by-case branch logic.
    wait_minutes = ((hour - nowHour) * 60 + (minute - nowMinute)) % (24 * 60)
    self.addPeriodicAction(time.time() + wait_minutes * 60,
                           24 * 60 * 60, task, name)
def __init__(self, month=None, day=None, year=None, epoch=None):
    """Initialize either from an epoch timestamp or from year/month/day
    (each defaulting to today's value from the module-level `nowdict`).
    Stores year, month, day and the epoch on the instance.
    """
    import time
    if epoch is not None:
        # Derive the calendar fields from the timestamp (local time).
        year, month, day = time.localtime(epoch)[:3]
    else:
        if year is None:
            year = nowdict["year"]
        if day is None:
            day = nowdict["day"]
        if month is None:
            month = nowdict["month"]
        # Midnight of that date.
        # NOTE(review): tm_isdst is forced to 0, which can be an hour off
        # during DST — confirm callers expect this.
        epoch = time.mktime( (year,month,day, 0, 0, 0, 0, 0, 0) )
    self.year, self.month, self.day, self.epoch = year, month, day, epoch
def getDayOfTheWeek(timestamp, as_day_short = True, my_local = "de_DE"):
    """Return the weekday name for *timestamp*: an English 3-letter
    abbreviation when *as_day_short* is true, otherwise the full German
    day name.

    NOTE(review): *my_local* is unused — the locale handling it belonged
    to is commented out.
    """
    # Round-trip through localtime/mktime to normalize to whole seconds.
    normalized = int(time.mktime(time.localtime(timestamp)))
    # '%w' yields 0..6 with Sunday == 0.
    index = int(datetime.datetime.fromtimestamp(int(normalized)).strftime('%w'))
    if as_day_short:
        names = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    else:
        names = ['Sonntag','Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag']
    return names[index]
def _gen_headers(self, code):
    """Generate the HTTP response headers, including the status line.

    Fix: HTTP/1.1 requires CRLF ('\\r\\n') line endings in the header
    section (RFC 7230 section 3); the old bare '\\n' endings are rejected
    by strict clients.

    NOTE(review): *code* is effectively ignored — a 200 status line is
    always emitted (the 404 branch was already commented out).  The Date
    header uses local time with no zone; RFC 7231 wants GMT — confirm
    clients tolerate the current form before changing it.
    """
    current_date = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
    h = 'HTTP/1.1 200 OK\r\n'
    h += 'Date: ' + current_date + '\r\n'
    h += 'Server: Roku UPnP/1.0 MiniUPnPd/1.4\r\n'
    # Blank line terminates the header section; connection closes after
    # the request completes.
    h += 'Connection: close\r\n\r\n'
    return h
def currentThreadResults(self):
    """Join all connection worker threads and report successful logins.

    Prints each successful (status == 1) connection's target/credentials
    to stdout and, when an output file is configured (self.outfile is not
    the sentinel single space), appends a timestamped header and one line
    per hit.  In single-target mode, finishes via self.completed(); always
    clears finished threads at the end.
    """
    if self.outfile != " ":
        # Timestamped section header for this batch of results.
        appendLineToFile('[+]'+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),self.outfile)
    for connection in self.connections:
        # Wait for the worker to finish before reading its result fields.
        connection.join()
        # status == 1 marks a successful authentication attempt.
        if connection.status == 1:
            print "[#] TargetIp: %s " % connection.targetIp
            print "[#] Username: %s " % connection.userName
            print "[#] Password: %s " % connection.passWord
            if self.outfile != " ":
                appendLineToFile("ip: %s username: %s password: %s " % (connection.targetIp,connection.userName,connection.passWord), self.outfile)
                # Blank separator line between recorded hits.
                appendLineToFile(" ",self.outfile)
    if self.singleMode:
        self.completed()
    else:
        pass
    self.clearOldThreads()
def preprocess_time(data):
    """Expand 'start_timestamp' into 'month', 'wday' and 'hour' columns
    (local time) and drop 'start_timestamp' and 'row_id'.  Mutates *data*
    (a pandas DataFrame) in place.
    """
    months, hours, wdays = [], [], []
    for ts in data['start_timestamp']:
        local = time.localtime(ts)
        months.append(local.tm_mon)
        hours.append(local.tm_hour)
        wdays.append(local.tm_wday)
    data['month'] = pd.Series(months)
    data['wday'] = pd.Series(wdays)
    data['hour'] = pd.Series(hours)
    del data['start_timestamp']
    del data['row_id']
def go_next_screen(self):
    """Advance to the next available screen (wrapping), sliding it in
    from the right.  Screen index 2 additionally takes a timestamped
    photo and starts the Apex RS-232 process; index 3 stops it.
    """
    self.index = (self.index + 1) % len(self.available_screens)
    screen = self.load_screen(self.index)
    sm = self.root.ids.sm
    sm.switch_to(screen, direction='left')
    self.current_title = screen.name
    if self.index == 2:
        # take photo
        camera = picamera.PiCamera()
        try:
            now = time.localtime()
            timestamp = "%04d-%02d-%02d__%02d_%02d_%02d" % (now.tm_year, now.tm_mon, now.tm_mday,
                                                            now.tm_hour, now.tm_min, now.tm_sec)
            camera.capture(timestamp + '.jpg')
        finally:
            # Fix: the camera was never closed, leaving the Pi camera
            # device locked until process exit.
            camera.close()
        time.sleep(1)
        apex_rs232_proc_start(port_num)
    if self.index == 3:
        apex_rs232_proc_stop()
def getCurrentTime():
    """Return the current local wall-clock time as
    {"status": True, "time": "HH:MM:SS"} — hour space-padded ("%2d"),
    minutes and seconds zero-padded.
    """
    now = time.localtime()
    formatted = "%2d:%02d:%02d" % (now.tm_hour, now.tm_min, now.tm_sec)
    return {
        "status": True,
        "time": formatted,
    }
Created on 1 déc. 2015 @author: takiguchi ''' from time import time # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Variables # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ''' Liste des opérations que peut exécuter le serveur DST ''' listeOperations = ["start", "stop", "restart", "install", "update", "save"] ''' Format de date pour le nom du fichier de log qui sera créé ''' logFormatDate = '%d-%m-%y_%H-%M' ''' Date au format définit précédamment afin de créer le fichier de logs ''' dateExecution = str(time.strftime(logFormatDate, time.localtime())) ''' Le nom du screen qui sera lancé pour démarrer le serveur DontStarveTogether ''' valDstScreen = "DontStarveServer" ''' Le chemin du dossier contenant la configration et la partie du serveur, seulement à partir du chemin "/home/steam/.klei" ''' valDstConfDir = "configs/server" # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Chemins # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ''' Le chemin du dossier contenant les fichiers du serveur DST ''' pathDst = "/home/steam/steamapps/DST" ''' Le chemin du dossier contenant l'exécutable permettant de démarrer le serveur DST '''
def getDateTimeFromTimestamp2(timestamp):
    """Format a Unix timestamp as 'DD.MM.YY HH:MM' in local time."""
    # Round-trip through localtime/mktime to normalize to whole seconds.
    whole_seconds = int(time.mktime(time.localtime(timestamp)))
    return datetime.datetime.fromtimestamp(int(whole_seconds)).strftime('%d.%m.%y %H:%M')
def getCurrentTimestamp():
    """Return the current Unix timestamp as an int (whole seconds,
    via a localtime/mktime round trip, as in the original)."""
    current = time.localtime()
    return int(time.mktime(current))
def getCurrentTime():
    '''Return {'status': True, 'time': 'HH:MM:SS'} for the current local
    time; the hour field is space-padded ('%2d').'''
    hour, minute, second = time.localtime()[3:6]
    return {'status': True,
            'time': '%2d:%02d:%02d' % (hour, minute, second)}