def _check_div_popup(self):
    start = utils.current_time()
    body = self.driver.find_elements_by_tag_name('body')[0]
    body_height = body.size['height']
    body_width = body.size['width']
    try:
        # body_z_index = body.value_of_css_property('z-index')
        # find_elements (plural) is required here: the result is iterated below
        div_list = self.driver.find_elements_by_xpath(
            "//div[@position='fixed']")
        count = 0
        for div in div_list:
            div_height = div.size['height']
            div_width = div.size['width']
            if div.is_displayed() and div_width != 0 and div_height != 0:
                if (body_height / div_height) < 1.1 and (body_width / div_width) < 1.1:
                    # all_children = div.find_elements_by_xpath(".//*")
                    self.driver.execute_script(
                        "var element = arguments[0];element.parentNode.removeChild(element);",
                        div)
                    # self.removed_blocks[str(count)] = (div, all_children)
                    count += 1
    except NoSuchElementException:
        pass  # print('no element found')
    elapsed = utils.current_time() - start
    print("elapsed time: " + str(elapsed))
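# The snippet above subtracts two utils.current_time() values to compute an
# elapsed duration, so that helper presumably returns a numeric timestamp.
# A minimal sketch under that assumption (the real utils module is not shown):
import time

def current_time():
    """Return the current time as epoch seconds (assumed helper)."""
    return time.time()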
def get_website(et_date):
    # Process one record
    global i
    i += 1
    print utils.current_time(), "updating record #%s" % i
    r = r"(https\:\/\/www(?:\.+[\w-]+)+)|(http\:www(?:\.+[\w-]+)+)|" \
        r"(http\:\/\/[\w-]+(?:\.+[\w-]+)+)|" \
        r"(www(?:\.+[\w-]+)+)"
    res = et_date['etwebsite']
    # print res
    # print type(res)
    website = re.search(r, res)
    if website:
        website = website.group()
    else:
        # fall back to a site-restricted search
        # NOTE: the list must match the branches below; the original listed
        # 'liepin' (which has no branch) instead of 'shixiseng' (which does)
        domains = ['51job', 'dajie', 'chinahr', 'zhilian', 'shixiseng']
        # print('company: %s' % company[1])
        for domain in domains:
            url = 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1' \
                  '&tn=93153557_hao_pg&wd=site%3A' + domain + '.com%20"' + et_date['etname'] + '"'
            if domain == '51job':
                website = qiancheng.anal_html(url)
                if website:
                    break
            if domain == 'dajie':
                website = dajie.anal_html(url)
                if website:
                    break
            if domain == 'chinahr':
                website = chinahr.anal_html(url)
                if website:
                    break
            if domain == 'zhilian':
                website = zhilian.anal_html(url)
                if website:
                    break
            if domain == 'shixiseng':
                website = shixiseng.anal_html(url)
                if website:
                    break
    # print website
    if website:
        # discard results that still point at a job board
        doms = ['51job.com', 'dajie.com', 'chinahr.com', 'liepin.com',
                'zhilian.com', 'lagou.com']
        for dom in doms:
            if dom in website:
                website = ""
                break
        et_date["etwebsite"] = website
    else:
        et_date["etwebsite"] = ""
    # print website
    return et_date
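# Standalone check of what the URL pattern above matches, using hypothetical
# inputs (not part of the original module):
import re

URL_RE = (r"(https\:\/\/www(?:\.+[\w-]+)+)|(http\:www(?:\.+[\w-]+)+)|"
          r"(http\:\/\/[\w-]+(?:\.+[\w-]+)+)|"
          r"(www(?:\.+[\w-]+)+)")

for text in ["https://www.example.com", "www.example.com", "no url here"]:
    m = re.search(URL_RE, text)
    print(text, '->', m.group() if m else None)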
def get_etid():
    """
    Fetch unprocessed etids.
    :return:
    """
    print utils.current_time(), 'reading a batch of unprocessed etids from the local table...'
    logging.info('%s reading all unprocessed etids from the local table...' % utils.current_time())
    conn = utils.get_local_db()
    result = conn.query(
        "select etid from et_info_status where email_status=1 limit 200")
    conn.close()
    return result
def get_companys(etids):
    # Fetch company info for each etid
    companys = []
    conn = utils.get_read_db()
    print utils.current_time(), 'reading info for the etid batch from the online table'
    logging.info('%s reading info for the etid batch from the online table' % utils.current_time())
    for etid in etids:
        # print etid
        sql = 'select etid,etname,etwebsite,etfullname from et_info where etid={}'.format(
            etid['etid'])
        result = conn.query(sql)
        companys.append(result)
    # close the connection before returning
    conn.close()
    return companys
def update_status_db(et_info_url):
    # status 3 = website found, status 2 = not found
    status = 3 if et_info_url['etwebsite'] else 2
    print utils.current_time(), 'updating status table, status=%s......' % status
    conn = utils.get_local_db()
    et_status = {}
    et_status['etid'] = et_info_url['etid']
    et_status['url_status'] = status
    update_db(conn, et_status, 'et_info_status')
def settings_context(context):
    return {
        'STATIC_URL': settings.STATIC_URL,
        'PROJECT_NAME': settings.PROJECT_NAME,
        'PROJECT_NAME_SHORT': settings.PROJECT_NAME_SHORT,
        'current_time': current_time(),
    }
def soil_sensor_check(self, n_samples=10, rate=0.5):
    try:
        samples = self.read_samples(n_samples, rate)
        sampled_adc = average(samples)
        self._soilmoistperc = adc_map(
            sampled_adc,
            self.config["moisture_sensor_cal"]["dry"],
            self.config["moisture_sensor_cal"]["wet"],
        )
        if self._soilmoistperc <= 100:
            print("[DEBUG] Current Soil moisture: %s%%" % self._soilmoistperc)
            self.ubidots.post_request({"soil_moisture": self._soilmoistperc})
        if self._soilmoistperc <= self.config["moisture_sensor_cal"].get(
                "Threshold", 50):
            self._water_me = True
            self.message_send(
                "[INFO] Soil Moisture Sensor: %.2f%% \t %s"
                % (self._soilmoistperc, current_time()),
                True,
            )
        else:
            self._water_me = False
    except Exception as exc:
        print("Exception: %s" % exc)
    finally:
        force_garbage_collect()
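# adc_map is not shown above; a sketch assuming it linearly rescales a raw ADC
# reading between the dry and wet calibration points onto a 0-100% scale
# (the signature matches both call sites in this file):
def adc_map(value, dry, wet, out_min=0, out_max=100):
    """Linearly map a raw ADC reading onto a percentage scale (assumed helper)."""
    pct = (value - dry) * (out_max - out_min) / float(wet - dry) + out_min
    # clamp so readings outside the calibration range stay within bounds
    return max(out_min, min(out_max, pct))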
def soldier_import(db):
    data = request.files.data
    error = ''
    if data and data.file:
        tmp_root = './tmp/'
        if not isdir(tmp_root):  # create tmp_root if it does not exist
            os.mkdir(tmp_root)
        tmp_filename = os.path.join(
            tmp_root, current_time('tmpsoldier_%Y%m%d%H%M%S.xls'))
        tmp_file = open(tmp_filename, 'w')
        # create an .xls file and copy the uploaded excel content into it
        rows = data.file.readlines()
        if not rows:  # empty file
            error = 'data format error [2]'
            return template('error', error=error)
        for row in rows:
            tmp_file.write(row)
        tmp_file.close()
        # before importing, export the existing data to tmp as a backup,
        # so it can be restored if the import fails
        export_sqls = SOLDIER_EXPORT_SQLS
        try:
            # if a backup file already exists, delete it and rewrite
            if os.path.exists(os.path.join(tmp_root, SOLDIER_BACK_FILE)):
                os.remove(os.path.join(tmp_root, SOLDIER_BACK_FILE))
            excel_export(export_sqls, tmp_root, SOLDIER_BACK_FILE, db)
        except Exception, e:
            print 'soldier data backup error: %s' % e
        all_sqls = SOLDIER_IMPORT_SQLS
        error = excel_import(all_sqls, tmp_filename, db)
        os.remove(tmp_filename)  # remove the uploaded temp file
def act_stopped(self, activity):
    """called by the core when an act has finished the levels."""
    self.logger.debug("act_stopped called")
    self.act_score = self.act_score / int(self.currentactdata['cycles'])
    stime = utils.calculate_time(self.act_start_time,
                                 utils.current_time())
    self.dbmapper.insert('start_time', self.act_start_time)
    self.dbmapper.insert('end_time', utils.current_time())
    self.dbmapper.insert('timespend', stime)
    self.dbmapper.insert('score', self.act_score)
    self.dbmapper.insert('done', 1)
    self.dbmapper.commit()
    # TODO: when we have a proper dbase setup we can get the results from the dbase
    # but for now we keep it local for use in the end display.
    self.sessionresults[activity.get_helptitle()] = '%4.2f' % self.act_score
    self.sessionresults_raw[activity.get_name()] = self.act_score
def __init__(self, config):
    self.path = config.get('path')
    self.action = config.get('action', 'delete')
    self.threshold = config.get('age_threshold')
    self.delete_empty_folder = config.get('delete_empty_folder')
    self.current_time = current_time()
    if self.action == 'archive':
        self.archive_path = config.get('archive_path')
    self.total_directories = 0
    self.total_objects = 0
    self.old_objects = 0
    self.empty_folders = 0
    # generator that will contain all the items in the path
    # items can either be files or folders
    self.items = self._list_items()
    # list of items to be deleted
    self.delete_file_list = []
    self.delete_folder_list = []
    # list of items to be archived
    self.archive_file_list = []
def route_message(request):
    username = current_user(request)
    # '游客' means "guest": the user is not logged in, so redirect to login
    if username == '游客':
        return redirect('/login')
    # handle POST requests
    if request.method == 'POST':
        # load the submitted form data
        form = request.form()
        t = Message.new(form)
        # stamp it with the current time
        t.time = current_time()
        item = t.saveMessage()
        save(item, 'data/Message.txt')
        # render the message list into the response body
        body = templateM('message.html', messages=item)
    elif request.method == 'GET':
        # e.g. the first visit to http://localhost:3000/messages sends a GET
        # request, possibly with a query string such as ?message=gua.
        # Load the existing messages regardless of the query string so that
        # `body` is always bound.
        path = 'data/Message.txt'
        data = load(path)
        body = templateM('message.html', messages=data)
    header = 'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n'
    r = header + '\r\n' + body
    return r.encode(encoding='utf-8')
def main():
    et_dingding = []
    num = 0
    a = 0  # rows written to et_contact; stays 0 when there is nothing to insert
    etids = opetating_db.get_etid()
    et_dates = opetating_db.get_companys(etids)
    pool = Pool(15)
    pool.map(spider, et_dates)
    pool.join()
    print "et_cons:%s" % et_cons
    print "et_exts:%s" % et_exts
    lss = []
    for et_con in et_cons:
        # build a list of value tuples
        ls = tuple(et_con.values())
        lss.append(ls)
    # drop rows that already exist
    chachongs = utils.query(lss)
    for chachong in chachongs:
        if chachong in lss:
            lss.remove(chachong)
    if lss:
        conn2 = utils.open_line_db()
        a = utils.insert_update_con(conn2, lss, 'et_contact')
    if et_exts:
        conn3 = utils.open_line_db()
        utils.insert_update_many(conn3, et_exts, 'et_email_extend')
    conn1 = utils.open_local_db()
    num = utils.insert_update_many(conn1, et_statuss, "et_info_status")
    logging.info("%s adding DingTalk data" % utils.current_time())
    type = 2
    et_dingding.append(type)
    et_dingding.append(num)
    et_dingding.append(a)
    utils.insert_one(et_dingding)
def soldier_restrain_import(db):
    data = request.files.data
    error = ''
    if data and data.file:
        tmp_root = './tmp/'
        if not isdir(tmp_root):
            os.mkdir(tmp_root)
        tmp_filename = os.path.join(
            tmp_root, current_time('tmpsoldierrestrain_%Y%m%d%H%M.xls'))
        tmp_file = open(tmp_filename, 'w')
        rows = data.file.readlines()
        if not rows:
            error = 'data count error [2]'
            return template('error', error=error)
        for row in rows:
            tmp_file.write(row)
        tmp_file.close()
        # back up the current database contents before importing
        export_sqls = SOLDIER_RESTRAIN_EXPORT_SQLS
        try:
            if os.path.exists(os.path.join(tmp_root, SOLDIER_RESTRAIN_BACK_FILE)):
                os.remove(os.path.join(tmp_root, SOLDIER_RESTRAIN_BACK_FILE))
            excel_export(export_sqls, tmp_root, SOLDIER_RESTRAIN_BACK_FILE, db)
        except Exception, e:
            print 'soldier restrain data backup error: %s' % e
        all_sqls = SOLDIER_RESTRAIN_IMPORT_SQLS
        error = excel_import(all_sqls, tmp_filename, db)
        os.remove(tmp_filename)
def generate_asg_instance(tz):
    asgList = {"1": "name"}
    print "creating autoscaling instance map"
    response = asgclient.describe_auto_scaling_groups()
    # print response
    # nextToken = response['NextToken']
    asgs = response['AutoScalingGroups']
    for asg in asgs:
        name = asg['AutoScalingGroupName']
        tags = asg["Tags"]
        # resume suspended asgs based on time
        v = utils.get_time(utils.CONST_ASG_RESUME_KEY, tags)
        if not v:
            print "no asg schedule (nothing to resume)"
        else:
            c = utils.current_time(tz)
            if c[0] > v[0]:
                resume_asg(name)
            elif c[0] == v[0] and c[1] >= v[1]:
                resume_asg(name)
        # end asg stuff
        # print asg['AutoScalingGroupName'], '\n'
        for instance in asg['Instances']:
            iid = instance['InstanceId']
            asgList[iid] = name
    return asgList
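# utils.current_time(tz) is indexed as c[0]/c[1] above and as current[2] in the
# RDS/EC2 snippets below, so it presumably returns something like
# (hour, minute, formatted_string) for the given timezone. A sketch under that
# assumption (pytz is assumed for the timezone handling):
from datetime import datetime
import pytz

def current_time(tz):
    """Return (hour, minute, formatted string) in timezone tz (assumed helper)."""
    now = datetime.now(pytz.timezone(tz))
    return (now.hour, now.minute, now.strftime("%Y-%m-%d %H:%M:%S"))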
def soil_sensor_check(self):
    try:
        samples = self.read_samples()
        sampled_adc = self.average(samples)
        SoilMoistPerc = self.adc_map(
            sampled_adc,
            self.config["moisture_sensor_cal"]["dry"],
            self.config["moisture_sensor_cal"]["wet"],
            0,
            100,
        )
        self.ubidots.post_request({"soil_moisture": SoilMoistPerc})
        if SoilMoistPerc <= self.config["moisture_sensor_cal"].get(
                "Threshold", 15):
            msg = "Soil Moisture Sensor: %.2f%% \t %s" % (
                SoilMoistPerc,
                current_time(),
            )
            self.slack(msg)
            print(msg)
        elif SoilMoistPerc <= 50:
            msg = "Soil Moisture is at 50%. You should probably water the plant."
            self.slack(msg)
            print(msg)
        force_garbage_collect()
    except Exception as exc:
        print("Exception: %s" % exc)
def error_import(db):
    data = request.files.data
    error = ''
    if data and data.file:
        tmp_root = './tmp/'
        if not isdir(tmp_root):
            os.mkdir(tmp_root)
        tmp_filename = os.path.join(
            tmp_root, current_time('tmperror_%Y%m%d%H%M.xls'))
        tmp_file = open(tmp_filename, 'w')
        # create an .xls file and copy the uploaded excel content into it
        rows = data.file.readlines()
        if not rows:  # empty file
            error = 'data count error [2]'
            return template('error', error=error)
        for row in rows:
            tmp_file.write(row)
        tmp_file.close()
        # back up the database contents before importing
        export_sqls = ERROR_EXPORT_SQLS
        try:
            # if a backup file already exists, delete it and rewrite
            if os.path.exists(os.path.join(tmp_root, ERROR_BACK_FILE)):
                os.remove(os.path.join(tmp_root, ERROR_BACK_FILE))
            excel_export(export_sqls, tmp_root, ERROR_BACK_FILE, db)
        except Exception, e:
            print 'error data backup error: %s' % e
        all_sqls = ERROR_IMPORT_SQLS
        error = excel_import(all_sqls, tmp_filename, db)
        os.remove(tmp_filename)  # remove the temp file
def check_buy_conditions(self):
    date = utils.current_time().date()
    price = self.item.price
    if (self.date_limit and self.date_limit < date) or \
            (self.price_limit and self.price_limit < price):
        return False
    return True
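# Unlike the epoch-seconds variant earlier, this snippet (and the Django-style
# save() methods below) calls .date() on the result, so here utils.current_time()
# presumably returns a datetime. A sketch under that assumption:
from datetime import datetime

def current_time():
    """Return the current time as a datetime (assumed helper)."""
    return datetime.now()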
def total_time(self):
    """
    Returns the total time this item has been checked out in seconds.
    """
    if self.checkin_time is not None:
        return self.checkin_time - self.checkout_time
    return utils.current_time() - self.checkout_time
def getHistory(self):
    date = current_time()
    try:
        history = ItemHistory.objects.filter(
            date__lte=date, item=self).order_by('-date')[0]
    except IndexError:
        return None
    else:
        return history
def update(request):
    if request.method == 'POST':
        form = request.form()
        todo_id = int(str(form.get('id', -1)))
        t = Todo.find_by(id=todo_id)
        t.title = form.get('title', t.title)
        t.updated_time = current_time()
        t.save()
    return redirect('/todo')
def tcpdump_end(output_dir, package=None, current_time=None):
    # the `current_time` parameter shadows the module-level helper of the same
    # name, so a bare call here would invoke None; `utils.current_time()` is
    # assumed to break the shadowing
    current_time = current_time if current_time is not None else utils.current_time()
    package = package if package is not None else 'collect'
    UIExerciser.run_adb_cmd(
        'shell ps | grep tcpdump | awk \'{print $2}\' | xargs adb -s '
        + UIExerciser.series + ' shell kill')
    # keep the local filename consistent with the on-device name
    out_pcap = output_dir + package + '_' + current_time + '.pcap'
    cmd = 'pull /sdcard/' + package + '_' + current_time + '.pcap ' + out_pcap
    UIExerciser.run_adb_cmd(cmd)
def tcpdump_begin(package=None, current_time=None, nohup=False):
    # as in tcpdump_end, the parameter shadows the helper of the same name;
    # `utils.current_time()` is assumed here
    current_time = current_time if current_time is not None else utils.current_time()
    package = package if package is not None else 'collect'
    if not nohup:
        cmd = ' shell /data/local/tcpdump -w /sdcard/' + package + '_' + current_time + '.pcap'
    else:
        sub = "nohup /data/local/tcpdump -w /sdcard/" + package + "_" + current_time + ".pcap"
        cmd = ' shell "' + sub + '"'
    UIExerciser.run_adb_cmd(cmd)
def train_pools(_):
    print(FLAGS)
    pools = load_pickle(FLAGS.pool_dir)
    start_pool_idx = int(FLAGS.start_pool)
    end_pool_idx = int(FLAGS.end_pool)
    now = datetime.datetime.now()
    time = current_time(now)
    if not os.path.exists(FLAGS.save_model_dir):
        os.makedirs(FLAGS.save_model_dir)
    if not os.path.exists(FLAGS.result_dir):
        os.makedirs(FLAGS.result_dir)
    trained_models_info = []
    for idx in range(start_pool_idx, end_pool_idx + 1):
        pool = pools['data'][str(idx)]
        print('pool idx: ', idx)
        print('****************')
        print_split_report('train', pool['train_report'])
        print_split_report('val', pool['val_report'])
        print_split_report('test', pool['test_report'])
        print('-----------------')
        name = pools['pool_name'] + '_' + str(idx)
        log_path = os.path.join(FLAGS.log_dir, name, FLAGS.architecture)
        save_model_path = os.path.join(
            FLAGS.save_model_dir, name + '_' + str(FLAGS.architecture))
        results = train_single_pool(pool, FLAGS.image_dir, log_path,
                                    FLAGS.architecture, save_model_path,
                                    FLAGS.train_batch, FLAGS.test_batch,
                                    FLAGS.is_augmented)
        model_info = {
            'hyper_param_setting': sgd_hyper_params,
            'pool_idx': str(idx),
            'pool_name': pool['data_name'],
            'time': time,
            'architecture': FLAGS.architecture,
            'train_batch': FLAGS.train_batch,
            'test_batch': FLAGS.test_batch,
            'log_path': log_path,
            'save_model_path': save_model_path,
            'results': results,
            'final_results': results['final_result']
        }
        trained_models_info.append(model_info)
    # save results to .pickle
    trained_models_info_pickle_name = (pools['pool_name'] + '_' +
                                       str(start_pool_idx) + '_' +
                                       str(end_pool_idx))
    dump_pickle(
        trained_models_info,
        os.path.join(FLAGS.result_dir, trained_models_info_pickle_name))
    return trained_models_info
def save(self, force_insert=False, force_update=False, using=None):
    if not self.id:
        # new record: set the creation date before saving so it is persisted
        self.created = utils.current_time().date()
    result = super(WalletItem, self).save(force_insert, force_update, using)
    return result
def start_instance(dbinstance, tz):
    identifier = dbinstance['DBInstanceIdentifier']
    arn = dbinstance['DBInstanceArn']
    current = utils.current_time(tz)
    rds.add_tags_to_resource(ResourceName=arn,
                             Tags=[{
                                 "Key": "stopinator:start:time",
                                 "Value": current[2]
                             }])
    response = rds.start_db_instance(DBInstanceIdentifier=identifier)
def blob_from_request(r):
    f = r.files['file']
    fr = f.read()
    ts = current_time()
    rb = Blob(item=fr,
              filename=f.filename,
              extension=f.content_type,
              size=len(fr),
              created_at=ts,
              last_sync=ts)
    if r.form:
        if 'timestamp' in r.form:
            rb.last_sync = string_to_timestamp(r.form['timestamp'])
        if 'global_id' in r.form:
            rb.global_id = r.form['global_id']
    return rb
def error_export(db):
    tmp_root = './tmp/'
    filename = current_time("error_%Y%m%d%H%M.xls")
    error = ''
    if not isfile(tmp_root + filename):
        all_sqls = ERROR_EXPORT_SQLS
        error = excel_export(all_sqls, tmp_root, filename, db)
    if error:
        return template('error', error=error)
    else:
        return static_file(filename, root=tmp_root, download=filename)
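# The import/export helpers in this file call current_time() with a filename
# pattern, so that variant is presumably a thin strftime wrapper. A sketch:
import time

def current_time(fmt):
    """Format the current time with an strftime pattern (assumed helper)."""
    return time.strftime(fmt)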
def soldier_restrain_export(db):
    tmp_root = './tmp/'
    filename = current_time('soldierrestrain_%Y%m%d%H%M.xls')
    error = ''
    if not isfile(tmp_root + filename):
        all_sqls = SOLDIER_RESTRAIN_EXPORT_SQLS
        error = excel_export(all_sqls, tmp_root, filename, db)
    if error:
        return template('error', error=error)
    else:
        return static_file(filename, root=tmp_root, download=filename)
def stop_instance(instance, asglist, tz):
    iid = instance.get("InstanceId")
    current = utils.current_time(tz)
    ec2.create_tags(Resources=[iid],
                    Tags=[{
                        "Key": "stopinator:stop:time",
                        "Value": current[2]
                    }])
    if iid in asglist:
        print "has associated asg, need to suspend it first"
        asg = asglist[iid]
        suspend_asg(asg)
    print ec2.stop_instances(InstanceIds=[iid])
def spider(et_date):
    # print et_date
    if et_date:
        et_info_url = get_website(et_date[0])
        et_info_url['etupdatetime'] = time.time()
        et_info_urls.append(et_info_url)
        et_status = {}
        if et_info_url['etwebsite']:
            print utils.current_time(), 'updating status table, status=3......'
            et_status['etid'] = et_info_url['etid']
            et_status["addtime"] = time.time()
            et_status['url_status'] = 3
            global a
            a += 1
            # print et_status
            et_statuss.append(et_status)
        else:
            print utils.current_time(), 'updating status table, status=2......'
            et_status['etid'] = et_info_url['etid']
            et_status["addtime"] = time.time()
            et_status['url_status'] = 2
            # print et_status
            et_statuss.append(et_status)
def old_soldier_export(db):
    tmp_root = './tmp/'
    filename = current_time("soldier_%Y%m%d%H%M.csv")
    error = ''
    if not isfile(tmp_root + filename):
        columns = ('soldier_id,soldier_type_id,soldier_quantity,soldier_race,soldier_name,'
                   'soldier_description,asset_id,food_need,wood_need,stone_need,ore_need,'
                   'recruit_time,food_per_hour,population_need,attack,hp,might,attack_level,'
                   'hp_level,res_carry,move_rate')
        sql = 'SELECT ' + columns + ' FROM tb_soldier'
        all_result = db.select(sql)
        csv = [columns + '\n']
        for data in all_result:
            # all 21 selected fields, stripped and comma-joined
            row = ','.join(str(field).strip() for field in data)
            csv.append(row + '\n')
        if csv:
            try:
                file(tmp_root + filename, 'wb').writelines(csv)
            except Exception, e:
                error = str(e)
def checkouts_checkin(_id):
    """
    Check-in an item.

    @path '/checkouts/:id/checkin'
    @param 'id'
    """
    checkouts = Checkout.query.filter_by(_id=_id)
    if checkouts.count() == 0:
        abort(404)
    checkout = checkouts[0]
    checkout.checkin_time = utils.current_time()
    checkout.save()
    return redirect(url_for("checkouts_list"))
def epilogue(self):
    global taskloads
    self.wtime_ended = MPI.Wtime()
    taskloads = self.circle.comm.gather(self.reduce_items)
    if self.circle.rank == 0:
        if self.totalsize == 0:
            print("\nZero filesize detected, done.\n")
            return
        tlapse = self.wtime_ended - self.wtime_started
        rate = float(self.totalsize) / tlapse
        print("\nFCP Epilogue:\n")
        print("\t{:<20}{:<20}".format("Ending at:", utils.current_time()))
        print("\t{:<20}{:<20}".format("Completed in:", utils.conv_time(tlapse)))
        print("\t{:<20}{:<20}".format("Transfer Rate:", "%s/s" % bytes_fmt(rate)))
        print("\t{:<20}{:<20}".format("FCP Loads:", "%s" % taskloads))
def saveInfo(self, fpath):
    t = current_time()
    with open(fpath + '_%s.tot-race' % t, 'w') as fd:
        for inst_race in self.tot_inst_race_.keys():
            fd.write(str(inst_race))
            fd.write('\n')
    summary = {}
    summary['nr-acc-distinct-race'] = self.nrAccDistinctRaceEachFile()
    summary['nr-distinct-race'] = self.nrDistinctRaceEachFile()
    summary['nr-normal-race'] = self.nrNormalRaceEachFile()
    summary['nr-total-race'] = self.nrTotalRaceEachFile()
    yaml_dump(fpath + '_%s.tot-summary' % t, summary)
def save(self, force_insert=False, force_update=False, using=None):
    if not self.id:
        # new record: set the audit dates before saving so they are persisted
        self.created = utils.current_time().date()
        self.updated = utils.current_time().date()
    result = super(Transaction, self).save(force_insert, force_update, using)
    if self.state != 1:
        return result
    if self.type == 'C':
        self.__chargeWallet()
    elif self.type == 'B':
        if self.check_buy_conditions():
            self.__addWalletItem()
    elif self.type == 'S':
        self.__sellWalletItem()
    return result
def get_orders(cookies, order_status='16'):
    '''
    Query the desired work orders.
    order_status: id of the order state; pending dispatch: 10,
    pending acceptance: 11, dispatched: 16, all: '', booked: 15
    '''
    api_url = 'https://cs.midea.com/c-css/wom/serviceorderunit/listdata'
    headers = {
        'Accept': '*/*',
        'User-Agent': 'Mozilla/5.0(Windows;U;WindowsNT5.1;zh-CN;rv:1.9.1.5)Gecko/20091102Firefox/3.5.5',
        'Host': 'cs.midea.com',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://cs.midea.com/c-css/wom/serviceorderunit/list',
        'Content-Type': 'application/json',
        'Cookie': cookies
    }
    CONTACT_TIME_end, CONTACT_TIME = current_time()  # the query window ends at the current time
    form_data = {
        'page': '1',
        'rows': '50',
        'pageIndex': '0',
        'pageSize': '50',
        'formConditions': {
            'CONTACT_TIME': CONTACT_TIME,  # five days before the current date
            'CONTACT_TIME_end': CONTACT_TIME_end,  # the current date
            'SERVICE_ORDER_NO': '',
            'SERVICE_CUSTOMER_TEL1': '',
            'SERVICE_ORDER_STATUS': order_status,  # pending dispatch: 10, pending acceptance: 11, all: ''
            'IMPLEMENT_SUB_TYPE_CODE': '',
            'ORDER_ORIGIN': '',
            'SERVICE_PROCESS_TIME_OUT': '',
            'URGE_COMPLAINT_ADDITIONAL': '',
            'CONTAIN_EJFWS': 'N',
            'PROD_CODE': '',
            'refreshRowFlag': 'false',
            'data': ''
        }
    }
    form_data = json.dumps(form_data)
    r = request(api_url, method='post', data=form_data, headers=headers)
    # print(r)
    if r and r['content']['status'] is False:
        errorReport(api_url + ':' + 'get api data fail')
    return r
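# This variant of current_time() unpacks into an (end, start) pair of date
# strings, with the start five days back per the inline comments. A sketch
# under that assumption (the exact date format is a guess):
from datetime import datetime, timedelta

def current_time():
    """Return (today, five days ago) as date strings (assumed helper)."""
    now = datetime.now()
    return (now.strftime("%Y-%m-%d"),
            (now - timedelta(days=5)).strftime("%Y-%m-%d"))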
def epilogue(self):
    global taskloads
    self.wtime_ended = MPI.Wtime()
    taskloads = self.circle.comm.gather(self.reduce_items)
    if self.circle.rank == 0:
        if self.totalsize == 0:
            print("\nZero filesize detected, done.\n")
            return
        tlapse = self.wtime_ended - self.wtime_started
        rate = float(self.totalsize) / tlapse
        print("\nFCP Epilogue:\n")
        print("\t{:<20}{:<20}".format("Ending at:", utils.current_time()))
        print("\t{:<20}{:<20}".format("Completed in:", utils.conv_time(tlapse)))
        print("\t{:<20}{:<20}".format("Transfer Rate:", "%s/s" % bytes_fmt(rate)))
        print("\t{:<20}{:<20}".format("Use store chunksums:", "%s" % self.use_store))
        print("\t{:<20}{:<20}".format("Use store workq:", "%s" % self.circle.use_store))
        print("\t{:<20}{:<20}".format("FCP Loads:", "%s" % taskloads))
def add(request):
    # verify the user is logged in before adding an item
    uname = current_user(request)
    u = User.find_by(username=uname)
    if u is None:
        return redirect('/login')
    if request.method == 'POST':
        form = request.form()
        t = Todo.new(form)
        t.user_id = u.id
        t.created_time = current_time()
        t.save()
    '''
    The page-refresh flow is described in README.md, networking section,
    item 5: request and response data (the raw messages for each stage).
    '''
    return redirect('/todo')
def main():
    et_dingding = []
    etids = opetating_db.get_etid()
    et_dates = opetating_db.get_companys(etids)
    pool = Pool(15)
    pool.map(spider, et_dates)
    pool.join()
    conn = utils.open_line_db()
    num = utils.insert_update_many(conn, et_info_urls, 'et_info')
    con = utils.open_local_db()
    utils.insert_update_many(con, et_statuss, 'et_info_status')
    logging.info("%s adding DingTalk data" % utils.current_time())
    type = 1
    et_dingding.append(type)
    et_dingding.append(num)
    et_dingding.append(a)
    utils.insert_one(et_dingding)
    logging.info("----- finished updating official website URLs -----")
def gen_signature(bfsign, totalsize):
    """ Generate a signature for the dataset; assumes the checksum option
    is set and done """
    if comm.rank == 0:
        print("\nAggregating dataset signature ...\n")
    tbegin = MPI.Wtime()
    sig = aggregate_checksums(bfsign)
    tend = MPI.Wtime()
    if comm.rank == 0:
        # print("\t{:<20}{:<20}".format("Aggregated chunks:", size))
        print("\t{:<20}{:<20}".format("Running time:", utils.conv_time(tend - tbegin)))
        print("\t{:<20}{:<20}".format("SHA1 Signature:", sig))
        with open(args.output, "w") as f:
            f.write("sha1: %s\n" % sig)
            f.write("chunksize: %s\n" % fcp.chunksize)
            f.write("fcp version: %s\n" % __version__)
            f.write("src: %s\n" % fcp.src)
            f.write("destination: %s\n" % fcp.dest)
            f.write("date: %s\n" % utils.current_time())
            f.write("totalsize: %s\n" % utils.bytes_fmt(totalsize))
def main():
    global args, log, circle, fcp, treewalk
    # This might be an overkill function
    signal.signal(signal.SIGINT, sig_handler)
    args = parse_and_bcast(comm, gen_parser)
    tally_hosts()
    G.loglevel = args.loglevel
    G.fix_opt = False if args.no_fixopt else True
    G.preserve = args.preserve
    G.resume = True if args.cpid else False
    G.reduce_interval = args.reduce_interval
    G.verbosity = args.verbosity
    G.am_root = True if os.geteuid() == 0 else False

    if args.signature:  # with signature implies doing verify as well
        args.verify = True

    G.src, G.dest = check_source_and_target(args.src, args.dest)
    dbname = get_workq_name()

    circle = Circle()
    circle.dbname = dbname

    if args.rid:
        circle.resume = True
        args.signature = False  # when recovering, no signature

    if not args.cpid:
        ts = utils.timestamp()
        args.cpid = circle.comm.bcast(ts)

    if circle.rank == 0:
        print("Running Parameters:\n")
        print("\t{:<25}{:<20}".format("Starting at:", utils.current_time()))
        print("\t{:<25}{:<20}".format("FCP version:", __version__))
        print("\t{:<25}{:<20}".format("Source:", utils.choplist(G.src)))
        print("\t{:<25}{:<20}".format("Destination:", os.path.abspath(args.dest)))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Num of Hosts:", num_of_hosts, "|",
            "Num of Processes:", comm.size))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Overwrite:", "%r" % args.force, "|",
            "Copy Verification:", "%r" % args.verify))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Dataset signature:", "%r" % args.signature, "|",
            "Stripe Preserve:", "%r" % G.preserve))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Checkpoint interval:", "%s" % utils.conv_time(args.cptime), "|",
            "Checkpoint ID:", "%s" % args.cpid))
        # if args.verbosity > 0:
        print("\t{:<25}{:<20}".format("Copy Mode:", G.copytype))

    fcp_start()

    if args.pause and args.verify:
        if circle.rank == 0:
            # raw_input("\n--> Press any key to continue ...\n")
            print("Pause, resume after %s seconds ..." % args.pause)
            sys.stdout.flush()
        time.sleep(args.pause)
        circle.comm.Barrier()

    # do checksum verification
    if args.verify:
        circle = Circle()
        pcheck = PVerify(circle, fcp, G.totalsize)
        circle.begin(pcheck)
        tally = pcheck.fail_tally()
        tally = comm.bcast(tally)
        if circle.rank == 0:
            print("")
            if tally == 0:
                print("\t{:<20}{:<20}".format("Result:", "PASS"))
            else:
                print("\t{:<20}{:<20}".format("Result:", "FAILED"))
        comm.Barrier()

        if args.signature and tally == 0:
            gen_signature(fcp, G.totalsize)

    # fix permission
    comm.Barrier()
    if G.fix_opt and treewalk:
        if comm.rank == 0:
            print("\nFixing ownership and permissions ...")
        fix_opt(treewalk)

    if treewalk:
        treewalk.cleanup()
    if fcp:
        fcp.epilogue()
        fcp.cleanup()
    # if circle:
    #     circle.finalize(cleanup=True)
    # TODO: a close file error can happen when circle.finalize()
    # if isinstance(circle.workq, DbStore):
    #     circle.workq.cleanup()
            else:
                level = self.prev_data[self.currentactname]['level']
        except Exception, msg:
            self.logger.warning("Failed to query previous dt data: %s" % msg)
            self.logger.warning("Possible dbase corruption, prevdata was: %s" % self.prev_data)
            self.logger.warning("setting level to 2 and continue")
            level = 2
        self.logger.debug("old level: %s new level: %s" %
                          (self.prev_data[self.currentactname]['level'], level))
    else:
        self.logger.warning("No prevdata found for %s, getting level from current data"
                            % self.currentactname)
        level = self.actdatahash[self.currentactname]['level']
    cycles = self.currentactdata['cycles']
    self.dbmapper.insert('cycles', cycles)
    self.dbmapper.insert('level', level)
    self.act_start_time = utils.current_time()
    self.dbmapper.insert('activity', self.currentactname)
    self.SPG.tellcore_show_level_indicator()
    self.SPG.tellcore_enable_score_button()
    self.SPG._menu_activity_userchoice(self.currentactname, level, cycles)

def next_level(self, level, dbmapper):
    """Mandatory method. Return True if there are levels left,
    False when no more levels are left."""
    if not self.runme:  # set in case of an error or when no acts are left
        self.logger.debug("DT ends, no more acts")
        self.display_results()
        return False
    self.logger.debug("nextlevel called with: %s" % level)
    self.SPG.tellcore_disable_level_indicator()
def main():
    global args, comm
    signal.signal(signal.SIGINT, sig_handler)
    args = parse_and_bcast(comm, gen_parser)
    try:
        G.src = utils.check_src(args.path)
    except ValueError as e:
        err_and_exit("Error: %s not accessible" % e)
    G.loglevel = args.loglevel
    G.use_store = args.use_store
    G.reduce_interval = args.interval
    hosts_cnt = tally_hosts()

    circle = Circle()
    if circle.rank == 0:
        print("Running Parameters:\n")
        print("\t{:<20}{:<20}".format("FSUM version:", __version__))
        print("\t{:<20}{:<20}".format("Num of hosts:", hosts_cnt))
        print("\t{:<20}{:<20}".format("Num of processes:", MPI.COMM_WORLD.Get_size()))
        print("\t{:<20}{:<20}".format("Root path:", utils.choplist(G.src)))

    fwalk = FWalk(circle, G.src)
    circle.begin(fwalk)
    if G.use_store:
        fwalk.flushdb()

    totalsize = fwalk.epilogue()
    circle.finalize()

    # by default, we use adaptive chunksize
    chunksize = utils.calc_chunksize(totalsize)
    if args.chunksize:
        chunksize = conv_unit(args.chunksize)

    if circle.rank == 0:
        print("Chunksize = ", chunksize)

    circle = Circle()
    fcheck = Checksum(circle, fwalk, chunksize, totalsize)
    circle.begin(fcheck)
    circle.finalize()

    if circle.rank == 0:
        sys.stdout.write("\nAggregating ... ")

    chunkl = circle.comm.gather(fcheck.chunkq)

    if circle.rank == 0:
        chunks = [item for sublist in chunkl for item in sublist]
        chunks.sort()
        sys.stdout.write("%s chunks\n" % len(chunks))
        sha1val = do_checksum(chunks)
        with open(args.output, "w") as f:
            f.write("sha1: %s\n" % sha1val)
            f.write("chunksize: %s\n" % chunksize)
            f.write("fwalk version: %s\n" % __version__)
            f.write("src: %s\n" % utils.choplist(G.src))
            f.write("date: %s\n" % utils.current_time())
            f.write("totalsize: %s\n" % totalsize)
        print("\nSHA1: %s" % sha1val)
        print("Signature file: [%s]" % args.output)
        if args.export_block_signatures:
            export_checksum2(chunks, args.output)
            print("Exporting block signatures ... \n")

    fcheck.epilogue()
def main():
    global args, comm
    signal.signal(signal.SIGINT, sig_handler)
    args = parse_and_bcast(comm, gen_parser)
    try:
        G.src = utils.check_src(args.path)
    except ValueError as e:
        err_and_exit("Error: %s not accessible" % e)
    G.loglevel = args.loglevel
    # G.use_store = args.use_store
    G.reduce_interval = args.interval
    G.memitem_threshold = args.item
    hosts_cnt = tally_hosts()

    circle = Circle()
    if circle.rank == 0:
        print("Running Parameters:\n")
        print("\t{:<20}{:<20}".format("FSUM version:", __version__))
        print("\t{:<20}{:<20}".format("Num of hosts:", hosts_cnt))
        print("\t{:<20}{:<20}".format("Num of processes:", MPI.COMM_WORLD.Get_size()))
        print("\t{:<20}{:<20}".format("Root path:", utils.choplist(G.src)))
        print("\t{:<20}{:<20}".format("Items in memory:", G.memitem_threshold))

    fwalk = FWalk(circle, G.src)
    circle.begin(fwalk)
    if G.use_store:
        fwalk.flushdb()

    fwalk.epilogue()
    circle.finalize()

    # by default, we use adaptive chunksize
    chunksize = utils.calc_chunksize(T.total_filesize)
    if args.chunksize:
        chunksize = conv_unit(args.chunksize)

    if circle.rank == 0:
        print("Chunksize = ", chunksize)

    circle = Circle()
    fcheck = Checksum(circle, fwalk, chunksize, T.total_filesize, T.total_files)
    circle.begin(fcheck)
    circle.finalize()

    if circle.rank == 0:
        sys.stdout.write("\nAggregating ... ")

    """
    chunkl = circle.comm.gather(fcheck.chunkq)
    if circle.rank == 0:
        chunks = [item for sublist in chunkl for item in sublist]
        chunks.sort()
        sys.stdout.write("%s chunks\n" % len(chunks))
        sha1val = do_checksum(chunks)
        with open(args.output, "w") as f:
            f.write("sha1: %s\n" % sha1val)
            f.write("chunksize: %s\n" % chunksize)
            f.write("fwalk version: %s\n" % __version__)
            f.write("src: %s\n" % utils.choplist(G.src))
            f.write("date: %s\n" % utils.current_time())
            f.write("totalsize: %s\n" % T.total_filesize)
        print("\nSHA1: %s" % sha1val)
        print("Signature file: [%s]" % args.output)
        if args.export_block_signatures:
            export_checksum2(chunks, args.output)
            print("Exporting block signatures ... \n")
    """

    if circle.rank > 0:
        circle.comm.send(fcheck.bfsign.bitarray, dest=0)
    else:
        for p in xrange(1, circle.comm.size):
            other_bitarray = circle.comm.recv(source=p)
            fcheck.bfsign.or_bf(other_bitarray)
    circle.comm.Barrier()

    if circle.comm.rank == 0:
        sha1val = fcheck.bfsign.gen_signature()
        with open(args.output, "w") as f:
            f.write("sha1: %s\n" % sha1val)
            f.write("chunksize: %s\n" % chunksize)
            f.write("fwalk version: %s\n" % __version__)
            f.write("src: %s\n" % utils.choplist(G.src))
            f.write("date: %s\n" % utils.current_time())
            f.write("totalsize: %s\n" % T.total_filesize)
        print("\nSHA1: %s" % sha1val)
        print("Signature file: [%s]" % args.output)

    fcheck.epilogue()

    if circle.comm.rank == 0:
        if os.path.exists(G.tempdir):
            shutil.rmtree(G.tempdir, ignore_errors=True)
def main():
    global args, log, circle, fcp, treewalk
    # This might be an overkill function
    signal.signal(signal.SIGINT, sig_handler)
    args = parse_and_bcast(comm, gen_parser)
    tally_hosts()
    G.loglevel = args.loglevel
    G.fix_opt = False if args.no_fixopt else True
    G.preserve = args.preserve
    G.resume = True if args.cpid else False
    G.reduce_interval = args.reduce_interval
    G.verbosity = args.verbosity
    G.am_root = True if os.geteuid() == 0 else False
    G.memitem_threshold = args.item

    if args.signature:  # with signature implies doing verify as well
        args.verify = True

    if args.rid:
        G.resume = True
        args.force = True
        G.rid = args.rid
        args.signature = False  # when recovering, no signature

    if not args.cpid:
        ts = utils.timestamp()
        args.cpid = MPI.COMM_WORLD.bcast(ts)

    G.tempdir = os.path.join(os.getcwd(), (".pcircle" + args.cpid))
    if not os.path.exists(G.tempdir):
        try:
            os.mkdir(G.tempdir)
        except OSError:
            pass

    G.src, G.dest = check_source_and_target(args.src, args.dest)
    dbname = get_workq_name()

    circle = Circle(dbname="fwalk")
    # circle.dbname = dbname

    global oflimit
    if num_of_hosts != 0:
        max_ofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        procs_per_host = circle.size // num_of_hosts
        oflimit = ((max_ofile - 64) // procs_per_host) // 2
        if oflimit < 8:
            oflimit = 8

    if circle.rank == 0:
        print("Running Parameters:\n")
        print("\t{:<25}{:<20}".format("Starting at:", utils.current_time()))
        print("\t{:<25}{:<20}".format("FCP version:", __version__))
        print("\t{:<25}{:<20}".format("Source:", utils.choplist(G.src)))
        print("\t{:<25}{:<20}".format("Destination:", os.path.abspath(args.dest)))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Num of Hosts:", num_of_hosts, "|",
            "Num of Processes:", comm.size))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Overwrite:", "%r" % args.force, "|",
            "Copy Verification:", "%r" % args.verify))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Dataset signature:", "%r" % args.signature, "|",
            "Stripe Preserve:", "%r" % G.preserve))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Checkpoint interval:", "%s" % utils.conv_time(args.cptime), "|",
            "Checkpoint ID:", "%s" % args.cpid))
        print("\t{:<25}{:<10}{:5}{:<25}{:<10}".format(
            "Items in memory:", "%r" % G.memitem_threshold, "|",
            "O file limit:", "%s" % oflimit))
        # if args.verbosity > 0:
        print("\t{:<25}{:<20}".format("Copy Mode:", G.copytype))

    fcp_start()

    if args.pause and args.verify:
        if circle.rank == 0:
            # raw_input("\n--> Press any key to continue ...\n")
            print("Pause, resume after %s seconds ..." % args.pause)
            sys.stdout.flush()
        time.sleep(args.pause)
        circle.comm.Barrier()

    # do checksum verification
    if args.verify:
        circle = Circle(dbname="verify")
        pcheck = PVerify(circle, fcp, G.total_chunks, T.total_filesize, args.signature)
        circle.begin(pcheck)
        circle.finalize()
        tally = pcheck.fail_tally()
        tally = comm.bcast(tally)
        if circle.rank == 0:
            print("")
            if tally == 0:
                print("\t{:<20}{:<20}".format("Verify result:", "PASS"))
            else:
                print("\t{:<20}{:<20}".format("Verify result:", "FAILED"))
        comm.Barrier()

        if args.signature and tally == 0:
            gen_signature(pcheck.bfsign, T.total_filesize)

    # fix permission
    comm.Barrier()
    if G.fix_opt and treewalk:
        if comm.rank == 0:
            print("\nFixing ownership and permissions ...")
        fix_opt(treewalk)

    if treewalk:
        treewalk.cleanup()
    if fcp:
        fcp.cleanup()
    # if circle:
    #     circle.finalize(cleanup=True)

    comm.Barrier()
    if comm.rank == 0:
        try:
            os.rmdir(G.tempdir)
        except:
            pass