def content_handler(self, msg):
    # Clean the data, then process, tag, and store it in the database
    database = 'DATABASE'
    mysql_obj1 = mysql_ORM(conf.get(database, 'outer_server'),
                           conf.get(database, 'user'),
                           conf.get(database, 'pwd'),
                           int(conf.get(database, 'port')),
                           "businessdata")
    mysql_conn1 = mysql_obj1.connect_fc()
    db_sql1 = ('insert into chatdata_classify(date,content,msgType,owner_wxid,'
               'realChatUsr,fromUsr,toUsr,repeat_num,account,classify) '
               'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
    mysql_obj1.mult_add(mysql_conn1, [[
        msg.message_id, msg.message_body_md5, msg.message_tag,
        msg.consumed_times, msg.publish_time, msg.message_body,
        msg.next_consume_time, msg.receipt_handle, '111', '111'
    ]], db_sql1)
    # print("Receive, MessageId: %s\nMessageBodyMD5: %s\nMessageTag: %s\n"
    #       "ConsumedTimes: %s\nPublishTime: %s\nBody: %s\n"
    #       "NextConsumeTime: %s\nReceiptHandle: %s" %
    #       (msg.message_id, msg.message_body_md5, msg.message_tag,
    #        msg.consumed_times, msg.publish_time, msg.message_body,
    #        msg.next_consume_time, msg.receipt_handle))
    time.sleep(1)
def weixin_alert(self, content):
    if not conf.get('SC_KEY'):
        return True
    url = "https://sc.ftqq.com/{KEY}.send?text={TEXT}&desp={DESP}".format(
        KEY=conf.get('SC_KEY'),
        TEXT=content.get('title'),
        DESP=content.get('context')
    )
    requests.get(url)
    return True
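# Hedged usage sketch for weixin_alert() above: `notifier` stands in for
# whatever instance owns the method (an assumption, not part of the original),
# and the message text is illustrative. The 'title'/'context' keys are the
# ones the method actually reads.
notifier.weixin_alert({
    'title': 'backup finished',
    'context': 'nightly weibo backup completed without errors',
})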
def _update_version(version):
    # Create the version folder if it does not exist
    if not path.isdir(version):
        makedirs(version)
    lst_repo = conf.get(version, 'list_repository').split(',')
    for item in lst_repo:
        repo_dir = './%s/%s' % (version, item)
        if not path.isdir(repo_dir):
            if conf.get(version, item):
                print "Getting %s" % (conf.get(version, item))
                if conf.get(version, item)[0:4] == 'git:':
                    my_repo = conf.get(version, item).split(' -b ')
                    Repo.clone_from(my_repo[0], repo_dir,
                                    branch=my_repo[1], depth=1)
                elif conf.get(version, item)[0:3] == 'lp:':
                    remote_branch = Branch.open(conf.get(version, item))
                    local_branch = remote_branch.bzrdir.sprout(
                        repo_dir).open_branch()
                else:
                    raise NotImplementedError(
                        'Unavailable to get %s. Not implemented' %
                        (conf.get(version, item)))
def base_url(with_slash=False):
    without = conf.get("server", "base_url").rstrip("/")
    if with_slash:
        return without + "/"
    else:
        return without
def sync_bid_list(self, data, key="list.json"):
    try:
        TOKEN = conf.get('Github').get('Token')
        REPO = "tylzh97/weibo-backup"
        g = Github(TOKEN)
        repo = g.get_repo(REPO)
        # Cache the client and repository objects for reuse
        self.github = g
        self.repo = repo
        # Remote content object; `key` is the path of the ID file in the repo
        remote_contents = repo.get_contents(key, ref="main")
        # Push the updated ID list
        resp = repo.update_file(
            path=remote_contents.path,
            # Commit message
            message="Update list: " + str(datetime.now()),
            # String content; base64-encoded for the API
            content=data,
            sha=remote_contents.sha,
            branch="main"
        )
        # Raw URL of the updated file
        url = resp.get('content').download_url
        logging.info('Successfully updated the ID list in the GitHub hosting repository')
        return url
    except Exception as e:
        logging.info('Exception while syncing the ID list to GitHub: ' + str(e))
        return ''
def init_history_ids(self, retry=3, timeout=20):
    url = conf.get('History_Tweets_ids')
    if not url:
        return {}
    url = url + '?time=' + str(int(time.time() * 1000))
    # Try to download the content up to `retry` times
    resp = None
    for i in range(retry):
        try:
            resp = requests.get(url, headers=self.header, timeout=timeout)
            break
        except Exception as e:
            logger.warning('Exception while syncing the ID list: ' + str(e))
    ret = {}
    if resp and resp.status_code:
        if resp.status_code != 200:
            logger.warning('Server responded with an unexpected status while '
                           'syncing the ID list, status code: ' + str(resp.status_code))
        else:
            logger.info('History tweet ID list synced successfully')
            ret = json.loads(resp.content.decode('UTF-8'))
    else:
        logger.warning('Exceeded the maximum number of retries while syncing '
                       'the ID list; stopping the backup')
    return ret
def generate_dancecard():
    participants = []
    labels = defaultdict(list)
    column = 0
    previous_part = None
    stickers_per_row = 4

    for p in session.query(Participant).order_by(Participant.final_part).order_by(Participant.lastname):
        name = "%s %s" % (p.firstname, p.lastname)
        part = parts[p.final_part]
        column += 1
        if part != previous_part:
            column = 0
            previous_part = part
        elif column >= stickers_per_row:
            column = 0
            labels[part].append("")
        labels[part].append(name)

    tex = render_template("printed/dancecard.tex",
                          shortname=conf.get("application", "shortname"),
                          tenors=labels["Tenor"],
                          leads=labels["Lead"],
                          baris=labels["Bari"],
                          basses=labels["Bass"])
    return compile_and_send_pdf("dancecard.pdf", tex)
def main(argv):
    ### usage: e.g. `python import_graph 4 Papers`
    ### use csplit to split the huge file into small files.
    ### need raw files under filedir/Papers/
    filedir = os.path.join(conf.get("data.filedir"), conf.get("data.version"))
    numthreads = int(argv[1])
    data_type = argv[2]
    print("Importing", data_type)
    p = Pool(numthreads)
    if data_type in options:
        filepath = os.path.join(filedir, data_type)
        print("Reading files in dir", filepath)
        files = [os.path.join(filedir, data_type, f)
                 for f in sorted(os.listdir(filepath))]
        print(files)
        p.map(graph_import, [(data_type, f) for f in files])
def setTitle(self, title=None):
    self.moduleH1.removeAllChildren()
    if title is None:
        title = conf.get("vi.name")
    if title:
        self.moduleH1.appendChild(
            html5.TextNode(html5.utils.unescape(title)))
def word_count(self, path):
    reducers = self.reducers
    threshold = conf.get("mapper.phase-one.message-buffer")

    prev_perc = 0
    prev_docid = -1
    prev_time = time.time()
    num_msg = 0
    buffers = []

    def humanize_time(secs):
        mins, secs = divmod(secs, 60)
        hours, mins = divmod(mins, 60)
        return '%02d:%02d:%02d' % (hours, mins, secs)

    for reducer in xrange(len(reducers)):
        buffers.append([])

    for perc, doc_id, word in DocumentExtractor(path).get_words():
        # Distribute on word basis
        buffers[hash(word) % len(reducers)].append((word, doc_id))
        num_msg += 1

        if prev_docid != doc_id:
            self.tasks += 1
            prev_docid = doc_id

        if perc - prev_perc >= 0.05:
            now = time.time()
            diff = now - prev_time
            eta = (diff / (perc - prev_perc)) * (1.0 - perc)
            log.info(
                "Mapper at {:02d}% - {:d} documents - ETA: {:s}".format(
                    int(perc * 100), self.tasks, humanize_time(eta)
                )
            )
            prev_time = time.time()
            prev_perc = perc

        if num_msg >= threshold:
            num_msg = 0
            for id, buffer in enumerate(buffers):
                if buffer:
                    comm.send(buffer, dest=reducers[id])
                    buffers[id] = []

    # Flush whatever is left in the per-reducer buffers
    for id, buffer in enumerate(buffers):
        if buffer:
            comm.send(buffer, dest=reducers[id])
            buffers[id] = []
def setTitle(self, title=None):
    if title:
        title = [title]
    else:
        title = []

    addendum = conf.get("vi.name")
    if addendum:
        title.append(addendum)

    html5.document.title = conf["vi.title.delimiter"].join(title)
def onShowSelector(self, *args, **kwargs):
    """
    Opens a TreeWidget so that the user can select new values
    """
    if not self.currentSelector:
        fileSelector = conf.get("fileSelector")

        if not fileSelector or conf["mainWindow"].containsWidget(fileSelector):
            fileSelector = FileWidget(self.destModule, isSelector="leaf")

        if not conf.get("fileSelector"):
            conf["fileSelector"] = fileSelector

        self.currentSelector = fileSelector

    self.currentSelector.selectionReturnEvent.register(self, reset=True)
    conf["mainWindow"].stackWidget(self.currentSelector)
    self.parent().addClass("is_active")
def update(self):
    user = conf.get("currentUser")
    if not user:
        NetworkService.request("user", "view/self",
                               successHandler=self.onCurrentUserAvailable,
                               cacheable=False)
        return

    if "root" in user["access"]:
        self.show()
def load_auth(self):
    """ Loads up the API object from twitter module. """
    section = 'Twitter'
    consumer_key = conf.get(section, 'consumer_key')
    consumer_secret = conf.get(section, 'consumer_secret')
    access_token_key = conf.get(section, 'access_token_key')
    access_token_secret = conf.get(section, 'access_token_secret')

    api = twitter.Api(consumer_key=consumer_key,
                      consumer_secret=consumer_secret,
                      access_token_key=access_token_key,
                      access_token_secret=access_token_secret)

    if not api.VerifyCredentials():
        raise TwitterAuthError()

    self.twitter = api
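# A minimal sketch of the ConfigParser-backed `conf` that load_auth() above
# reads. The section and option names come from the snippet; the file name
# and values are assumptions for illustration only.
from configparser import ConfigParser

conf = ConfigParser()
conf.read('twitter.conf')
# twitter.conf is expected to contain:
# [Twitter]
# consumer_key = <consumer-key>
# consumer_secret = <consumer-secret>
# access_token_key = <access-token>
# access_token_secret = <access-token-secret>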
async def linkff14(self, ctx, first_name: str = None, last_name: str = None, server: str = None):
    params = {
        'name': '{} {}'.format(first_name, last_name),
        'server': server,
        'private_key': conf.get("FF14_TOKEN")
    }
    channel = ctx.message.channel
    author_id = ctx.message.author.id

    if not first_name or not last_name or not server:
        await channel.send(
            "Incorrect command format. Usage: `-linkff14 FIRSTNAME LASTNAME SERVER`"
        )
        return

    try:
        r = requests.get(url="http://xivapi.com/character/search",
                         params=params).json()
        player = r['Results'][0]
        if player['Name'].lower() != "{} {}".format(first_name, last_name).lower():
            await channel.send(
                "Player not found, did you mean {}? Please try the command again."
                .format(player['Name']))
            return
        else:
            embed = discord.Embed()
            embed.set_thumbnail(url=player['Avatar'])
            embed.add_field(name="Name:", value=player['Name'], inline=False)
            embed.add_field(name="Server:",
                            value=player['Server'].replace('\\xa0', ' '),
                            inline=False)
            await channel.send("Your character has been linked!", embed=embed)
            with closing(self.db.cursor()) as cursor:
                # Insert the character ID, or update it if this user already has one
                cursor.execute(
                    'INSERT INTO ffxiv (id, ffxivid) VALUES(%s, %s) ON DUPLICATE KEY UPDATE id=%s, ffxivid=%s',
                    (author_id, player['ID'], author_id, player['ID']))
            return
    except Exception as e:
        track = traceback.format_exc()
        print(track)
        await channel.send(
            "Something went wrong while reaching the FFXIV servers. Please try again later."
        )
        return
def rollpoll(self):
    param_str = 'PARAM'
    # With long polling, if the topic has no message the request is held on
    # the server for up to 3s; if a message becomes consumable within that
    # window it is returned immediately.
    # Long-polling wait time: 3 seconds (can be set up to 30 seconds)
    wait_seconds = int(conf.get(param_str, 'wait_seconds'))
    # Consume at most 3 messages per call (can be set up to 16)
    batch = int(conf.get(param_str, 'batch'))
    print(
        "%sConsume And Ack Message From Topic%s\nTopicName:%s\nMQConsumer:%s\nWaitSeconds:%s\n"
        % (10 * "=", 10 * "=", self.topic_name, self.group_id, wait_seconds))
    while True:
        try:
            # Consume messages with long polling
            recv_msgs = self.consumer.consume_message(batch, wait_seconds)
            for msg in recv_msgs:
                self.content_handler(msg)
        except MQExceptionBase as e:
            if e.type == "MessageNotExist":
                print("No new message! RequestId: %s" % e.req_id)
                time.sleep(3)
                continue
            print("Consume Message Fail! Exception:%s\n" % e)
            time.sleep(3)
            continue

        # If consumption is not acknowledged before msg.next_consume_time,
        # the message will be delivered again. Receipt handles carry a
        # timestamp, so the same message gets a different handle on each
        # delivery.
        try:
            receipt_handle_list = [msg.receipt_handle for msg in recv_msgs]
            self.consumer.ack_message(receipt_handle_list)
            print("Ack %s Message Succeed.\n\n" % len(receipt_handle_list))
        except MQExceptionBase as e:
            # Some handles may have expired, which makes the acknowledgement fail
            print("\nAck Message Fail! Exception:%s" % e)
def update(self):
    user = conf.get("currentUser")
    if not user:
        NetworkService.request("user", "view/self",
                               successHandler=self.onCurrentUserAvailable,
                               cacheable=False)
        return

    aa = html5.A()
    aa["title"] = user["name"]
    aa["class"].append("icon accountmgnt")
    aa.appendChild(html5.TextNode(user["name"]))
    self.appendChild(aa)
def __init__(self):
    self.cookie = conf.get('Cookie')
    self.header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/61.0',
    }
    self.url_pattern = 'https://api.weibo.cn/2/profile/statuses/tab?from=10AA393010&c=iphone&s=0ddddddd&containerid=2304136826971661_-_WEIBO_SECOND_PROFILE_WEIBO&count=20&page={}'
    self.url_detail_pattern = 'https://api.weibo.cn/2/statuses/show?from=10AB193010&c=iphone&s=2c81ab39&id={}'
    # Set the cookie
    if self.cookie:
        self.header['Cookie'] = self.cookie
    # Initialize the record of historical tweet IDs
    self.history_ids = self.init_history_ids()
    # Initialize the OSS-related objects
    self.init_oss()
    self.new_cards = []
    self.new_images = []
    self.github = None
    self.repo = None
def get_mqtt_client():
    """Returns a mqtt client based on conf.py settings"""
    sslcontext = None
    if conf.get('ssl'):
        sslcontext = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        # `password` gives the passphrase for the key files. The key can be
        # in the CA file, but it has to come first.
        sslcontext.load_cert_chain(
            certfile=conf.get('certfile', ''),
            keyfile=conf.get('keyfile'),
            password=conf.get('password'),
        )
    mc = mqtt_client.Mqtt_Client("mqtt_tree")
    mc.configure(conf.get('mqtt_server'), conf.get('mqtt_port'),
                 sslcontext=sslcontext)
    mc.run()
    return mc
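# Illustrative only: a dict-style `conf` that would satisfy get_mqtt_client()
# above. The key names are taken from the snippet; every value here is a
# placeholder assumption.
conf = {
    'ssl': True,
    'certfile': '/etc/mqtt/client.crt',
    'keyfile': '/etc/mqtt/client.key',
    'password': None,            # passphrase for the key file, if any
    'mqtt_server': 'mqtt.example.org',
    'mqtt_port': 8883,
}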
def reduce_word_count(self):
    heap = []
    length = 0
    threshold = conf.get("reducer.phase-one.threshold")
    remaining = self.num_workers

    while remaining > 0:
        msg = comm.recv(source=MPI.ANY_SOURCE, status=status)

        if msg == MSG_COMMAND_QUIT:
            remaining -= 1
            log.debug("Received termination message from %d" %
                      status.Get_source())
        else:
            for word, doc_id in msg:
                length += len(str(doc_id)) + len(word) + 1
                heappush(heap, (word, doc_id))

            if length > threshold or remaining == 0:
                self.write_partition(heap)
                heap = []
                length = 0

    self.write_partition(heap)
def __init__(self):
    # Initialize the client
    rocket_str = 'ROCKETMQ'
    self.mq_client = MQClient(
        # HTTP endpoint (here: the public-cloud production environment)
        conf.get(rocket_str, 'HTTP_ENDPOINT'),
        # AccessKey for Aliyun authentication, created in the Aliyun console
        conf.get(rocket_str, 'ACCESS_KEY'),
        # SecretKey for Aliyun authentication, created in the Aliyun console
        conf.get(rocket_str, 'SECRET_KEY'))
    # Topic the consumer belongs to
    self.topic_name = conf.get(rocket_str, 'TOPIC')
    # Consumer ID (Group ID) created in the console
    self.group_id = conf.get(rocket_str, 'GROUP_ID')
    # Instance ID owning the topic; None for the default instance
    self.instance_id = conf.get(rocket_str, 'INSTANCE_ID')

    self.consumer = self.mq_client.get_consumer(self.instance_id,
                                                self.topic_name,
                                                self.group_id)
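# Sketch of the INI section the RocketMQ wrapper above reads. The option
# names match the conf.get() calls; every value below is a placeholder.
#
# [ROCKETMQ]
# HTTP_ENDPOINT = http://<instance>.mqrest.<region>.aliyuncs.com
# ACCESS_KEY = <access-key>
# SECRET_KEY = <secret-key>
# TOPIC = <topic-name>
# GROUP_ID = GID_<group-id>
# INSTANCE_ID = <instance-id, empty for the default instance>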
from paypal import Paypal, find_payment, ParticipantNotFoundException, PaymentFailedException, PaymentNotFoundException, DuplicatePaymentException
from tables import *
from itertools import groupby
from helpers import *
from flask import request, flash
from wtforms import Form, StringField, validators, SelectField, IntegerField, TextAreaField, BooleanField
from config import conf, currency_symbol
from discount import *

pp1 = Paypal(1,
             lambda url: redirect(url, code=302),
             lambda id, message: applyWithPaypalError(id, message),
             "/payment-success.html",
             "/payment-cancelled.html")

application_fee = float(conf.get("paypal", "fee"))
event_name = conf.get("application", "name")
event_shortname = conf.get("application", "shortname")

conf_data = {"name": event_name,
             "shortname": event_shortname,
             "s_application_fee": str(application_fee)
             }


# @app.route("/apply.html", methods=["GET",])
class CodeNotFoundException(Exception):
    def __init__(self, code):
        self.code = code
def init_oss(self):
    oss = conf.get('OSS')
    auth = oss2.Auth(oss.get('A_K'), oss.get('A_S'))
    self.bucket = oss2.Bucket(auth, oss.get('Region'), oss.get('Bucket'))
    return True
def analyse():
    file_name = conf.get('general', 'input_file')
    version_from = conf.get('general', 'version_from')
    version_to = conf.get('general', 'version_to')
    lst_repo_from = conf.get(version_from, 'list_repository').split(',')
    lst_repo_to = conf.get(version_to, 'list_repository').split(',')
    main_analysis_file = conf.get('general', 'main_analysis_file')

    fo = open(main_analysis_file, "r+")
    analysis_lines = fo.readlines()
    fo.close()

    res = []
    sum_res_from = {}
    sum_res_to = {}

    with open(file_name, 'rb') as csv_file:
        spamreader = csv.reader(
            csv_file,
            delimiter=conf.get('general', 'input_delimiter'),
            quotechar=conf.get('general', 'input_quotechar'))
        for row in spamreader:
            value = {}
            module_name = row[conf.getint('general', 'input_module_col')]
            value['name'] = module_name

            # Manage FROM
            apath_from = False
            repo_found = False
            for repo_from in lst_repo_from:
                if path.isdir('./%s/%s/%s' % (version_from, repo_from, module_name)):
                    apath_from = './%s/%s/%s' % (version_from, repo_from, module_name)
                elif path.isdir('./%s/%s/addons/%s' % (version_from, repo_from, module_name)):
                    apath_from = './%s/%s/addons/%s' % (version_from, repo_from, module_name)
                elif path.isdir('./%s/%s/openerp/addons/%s' % (version_from, repo_from, module_name)):
                    apath_from = './%s/%s/openerp/addons/%s' % (version_from, repo_from, module_name)
                if apath_from:
                    repo_found = repo_from
                    break
            if not repo_found:
                value['version_from'] = 'ERROR - NOT FOUND'
            else:
                value['version_from'] = repo_found
                if sum_res_from.get(repo_found):
                    sum_res_from[repo_found] += 1
                else:
                    sum_res_from[repo_found] = 1

            # Manage TO
            repo_found = False
            check_main_analysis = False
            for repo_to in lst_repo_to:
                if path.isdir('./%s/%s/%s' % (version_to, repo_to, module_name)):
                    apath_to = './%s/%s/%s' % (version_to, repo_to, module_name)
                    repo_found = repo_to
                elif path.isdir('./%s/%s/addons/%s' % (version_to, repo_to, module_name)):
                    apath_to = './%s/%s/addons/%s' % (version_to, repo_to, module_name)
                    repo_found = repo_to
                    check_main_analysis = True
                elif path.isdir('./%s/%s/openerp/addons/%s' % (version_to, repo_to, module_name)):
                    apath_to = './%s/%s/openerp/addons/%s' % (version_to, repo_to, module_name)
                    repo_found = repo_to
                    check_main_analysis = True
            if not repo_found:
                if value['version_from'] == 'ERROR - NOT FOUND':
                    value['version_to'] = 'ERROR - NOT FOUND'
                else:
                    value['version_to'] = 'TODO - PORT MODULE'
                value['analysis_state'] = 'TODO - ANALYSE'
            else:
                value['version_to'] = repo_found
                if sum_res_to.get(repo_found):
                    sum_res_to[repo_found] += 1
                else:
                    sum_res_to[repo_found] = 1

                if check_main_analysis:
                    value['analysis_state'] = 'RENAMED ?'
                for analysis_line in analysis_lines:
                    if analysis_line.startswith('|' + module_name + ' '):
                        analyse = analysis_line.split('|')[2].strip()
                        if analyse != '':
                            value['analysis_state'] = analyse
                        else:
                            value['analysis_state'] = 'TODO - UPGRADE'
                        break
                else:
                    value['analysis_state'] = 'TODO - ANALYSE'

            # Size of the analysis file
            value['upgrade_size'] = ''
            if value['analysis_state'] == 'TODO - UPGRADE':
                if path.isdir(apath_to + '/migrations'):
                    for dir in listdir(apath_to + '/migrations'):
                        if path.isfile(apath_to + '/migrations/' + dir +
                                       '/openupgrade_analysis.txt'):
                            num_lines = sum(
                                1 for line in open(
                                    apath_to + '/migrations/' + dir +
                                    '/openupgrade_analysis.txt'))
                            if value['upgrade_size'] == '':
                                value['upgrade_size'] = num_lines
                            else:
                                value['upgrade_size'] += num_lines
                if value['upgrade_size'] == '':
                    value['upgrade_size'] = 'ERROR'
                else:
                    value['upgrade_size'] = str(value['upgrade_size'])

            # Size of the module
            if value['version_from'] == 'ERROR - NOT FOUND':
                value['technic_size'] = 'ERROR'
            elif value['analysis_state'] in ['TODO - ANALYSE', 'TODO - UPGRADE']:
                num_lines = 0
                num_class = 0
                for (dirpath, dirnames, filenames) in walk(apath_from):
                    for filename in filenames:
                        if filename[-3:] == '.py' and filename != '__openerp__.py':
                            for line in open(dirpath + '/' + filename):
                                if not line.strip().startswith('#') and line.strip() != '':
                                    num_lines += 1
                                if line.strip().startswith('class '):
                                    num_class += 1
                value['technic_size'] = str(num_class) + ' / ' + str(num_lines)
            else:
                value['technic_size'] = ''

            res.append(value)
    csv_file.close()

    MODULE_SIZE = 40
    FROM_SIZE = 30
    TO_SIZE = 30
    ANALYSIS_SIZE = 20
    UPGRADE_SIZE = 5
    TECHNIC_SIZE = 10
    INTERLINE = "-" * (7 + MODULE_SIZE + FROM_SIZE + TO_SIZE + ANALYSIS_SIZE +
                       UPGRADE_SIZE + TECHNIC_SIZE)

    print INTERLINE
    print "|"\
        + 'Module Name' + " " * (MODULE_SIZE - len('Module Name')) + "|"\
        + 'Repository (' + version_from + ')'\
        + " " * (FROM_SIZE - len('Repository (' + version_from + ')')) + "|"\
        + 'Repository (' + version_to + ')'\
        + " " * (TO_SIZE - len('Repository (' + version_to + ')')) + "|"\
        + 'Analysis File' + " " * (ANALYSIS_SIZE - len('Analysis File')) + "|"\
        + 'Size' + " " * (UPGRADE_SIZE - len('Size')) + "|"\
        + 'Cls/lines' + " " * (TECHNIC_SIZE - len('Cls/lines')) + "|"
    print INTERLINE

    for item in res:
        print "|" + item['name'][0:MODULE_SIZE] \
            + " " * (MODULE_SIZE - len(item['name'][0:MODULE_SIZE])) \
            + "|" + item['version_from'][0:FROM_SIZE] \
            + " " * (FROM_SIZE - len(item['version_from'][0:FROM_SIZE]))\
            + "|" + item['version_to'][0:TO_SIZE] \
            + " " * (TO_SIZE - len(item['version_to'][0:TO_SIZE]))\
            + "|" + item['analysis_state'][0:ANALYSIS_SIZE] \
            + " " * (ANALYSIS_SIZE - len(item['analysis_state'][0:ANALYSIS_SIZE]))\
            + "|" + item['upgrade_size'][0:UPGRADE_SIZE] \
            + " " * (UPGRADE_SIZE - len(item['upgrade_size'][0:UPGRADE_SIZE]))\
            + "|" + item['technic_size'][0:TECHNIC_SIZE] \
            + " " * (TECHNIC_SIZE - len(item['technic_size'][0:TECHNIC_SIZE]))\
            + "|"
    print INTERLINE

    total_from = 0
    total_to = 0
    for k, v in sum_res_from.items():
        print " - FROM : %s : %d (%s)" % (k, v, conf.get(version_from, k))
        total_from += v
    print " - FROM : NOT FOUND : %d " % (len(res) - total_from)
    print INTERLINE
    for k, v in sum_res_to.items():
        print " - TO : %s : %d (%s)" % (k, v, conf.get(version_to, k))
        total_to += v
    print " - TO : NOT FOUND : %d " % (len(res) - total_to)
    print INTERLINE
def update_both_version():
    version_from = conf.get("general", "version_from")
    version_to = conf.get("general", "version_to")
    _update_version(version_from)
    _update_version(version_to)
signal.signal(signal.SIGTERM, trap_signal)
signal.signal(signal.SIGINT, trap_signal)

# create global scheduler
# scheduler = sched.sched()

while bRunning:
    scheduler.clean()
    serverConnect()
    getConfig()
    # job to refresh the configuration from the server every hour
    scheduler.add("get configuration", conf.get("scheduler", "get_conf"),
                  getConfig, None, 2)
    # push the results stored in the redis queue every 8 seconds
    scheduler.add("push results", conf.get("scheduler", "push_results"),
                  popResults, db, 2)
    # ping the server for connectivity check every minute
    scheduler.add("ping server", conf.get("scheduler", "ping_server"),
                  ping, None, 2)
    # check if probe process has exited every 30"
    scheduler.add("check probes process", conf.get("scheduler", "check_probes"),
                  checkProbes, probeProcess)
def eur(eval_ctx, value):
    return "%s %.2f" % (conf.get("application", "currency_symbol"), value)
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from config import conf


def to_dict(self):
    return {
        c.name: getattr(self, c.name, None)
        for c in self.__table__.columns
    }


Base = declarative_base()
Base.to_dict = to_dict

db_conf = conf.get('DB').get('mysql')
engine = create_engine(
    'mysql+mysqlconnector://{user}:{password}@{ip}:{port}/{database}'.format(
        **{
            'user': db_conf.get('user'),
            'password': db_conf.get('password'),
            'ip': db_conf.get('ip'),
            'port': db_conf.get('port'),
            'database': db_conf.get('database'),
        }))

# Create the DBSession class
DBSession = sessionmaker(bind=engine)
session = DBSession()
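# Hedged usage sketch for the session/Base wiring above. `Series` is a
# hypothetical mapped class added for illustration; it is not part of the
# original module.
from sqlalchemy import Column, Integer, String


class Series(Base):
    __tablename__ = 'series'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))


row = session.query(Series).first()
if row:
    print(row.to_dict())  # to_dict() was attached to Base above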
def send(recipients, subject, bodies, sent_from, replyto=None, dryrun=False,
         charset="iso-8859-15", delay=0):
    """
    Sends an email using the connection specified in ehb.conf.

    :param recipients: list of participant IDs of the recipients
    :param subject: email subject
    :param bodies: list of email bodies, one for each recipient
    :param sent_from: specification of where this email originated (e.g. through mail tool, as automatic confirmation of application, etc.)
    :param replyto: (optional) specify a reply-to address which differs from the sender in the config file
    :param dryrun: generate email, but do not send it
    :param charset: charset in which the email body will be encoded
    :param delay: the delay (in milliseconds) between any two subsequent messages
    :return: the list of email messages that were generated, as tables.Email objects
    """
    server = conf.get("email", "server")
    sender = conf.get("email", "sender")
    name = conf.get("email", "name")
    password = conf.get("email", "password")
    shortname = conf.get("application", "shortname")

    sent_emails = []

    if not dryrun:
        server = smtplib.SMTP(server)
        server.starttls()
        try:
            server.login(sender, password)
        except SMTPAuthenticationError:
            # if authentication fails, log failure and continue silently with dry-run
            log_email(0, subject,
                      "ERROR: SMTP authentication failed in email sending",
                      replyto, sent_from)
            dryrun = True

    first_message = True
    for recipient, body in zip(recipients, bodies):
        prt = lp(recipient)  # type: Participant
        if not prt:
            raise Exception("Could not find participant for ID %d" % recipient)

        encoded = body.encode(charset)
        full_subject = "[%s] %s" % (shortname, subject)

        msg = MIMEText(encoded, _charset=charset)
        msg["Subject"] = full_subject
        msg["From"] = "%s <%s>" % (name, sender)
        msg["To"] = prt.email
        if replyto:
            msg.add_header('reply-to', replyto)

        if not dryrun:
            # sleep for the specified number of milliseconds before sending next email
            if first_message:
                first_message = False
            else:
                time.sleep(delay / 1000.0)
            server.sendmail(sender, prt.email, msg.as_string())

        em = log_email(recipient, full_subject, body, replyto or None,
                       sent_from, dryrun=dryrun)
        sent_emails.append(em)

    if not dryrun:
        server.quit()

    return sent_emails
def main():
    args = parser.parse_args()
    print('args.data_url', args.data_url)
    if conf.cloud:
        mox.file.copy_parallel(args.data_url, '/cache/face_train/')
        args.data_url = '/cache/face_train/'
        conf.use_data_folder = args.data_url
    if args.work_path:
        conf.work_path = Path(args.work_path)
        conf.model_path = conf.work_path / 'models'
        conf.log_path = conf.work_path / 'log'
        conf.save_path = conf.work_path / 'save'
    else:
        args.work_path = conf.work_path
    conf.update(args.__dict__)

    if conf.local_rank is not None:
        torch.cuda.set_device(conf.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method="env://")
        if torch.distributed.get_rank() != 0:
            set_stream_logger(logging.WARNING)

    # if osp.exists(conf.save_path):
    #     logging.info('ok')
    #     exit(1)
    # simplify_conf(conf)
    # exit(0)
    from Learner import face_learner
    # decs = msgpack_load('decs.pk')
    # conf.decs = decs
    learner = face_learner(conf, )
    # fstrs = learner.list_fixed_strs('work_space/sglpth.casia/models')
    # stps = learner.list_steps('work_space/sglpth.casia/models')
    # fstr = fstrs[np.argmax(stps)]
    # stt_dct = torch.load('work_space/sglpth.casia/models/model_' + fstr)
    # learner.model.module.load_state_dict_sglpth(stt_dct)
    # print(fstrs, stps, fstr, )

    if conf.get('load_from'):
        # p = 'r100.128.retina.clean.arc', 'hrnet.retina.arc.3',
        # 'mbv3.retina.arc', 'mbfc.lrg.retina.arc.s48', 'effnet.casia.arc',
        # 'mbfc.retina.cl.distill.cont2', 'mbfc2', 'r18.l2sft', 'r18.adamrg',
        # 'mbfc.se.elu.ms1m.radam.1',
        # 'mbfc.se.elu.specnrm.allbutdw.ms1m.adam.1',
        # 'mbfc.se.prelu.specnrm.ms1m.cesigsft.1', 'irse.elu.ms1m',
        # 'irse.elu.casia.arc.2048',
        p = Path(conf.load_from)
        print('try to load from ', p, )
        learner.load_state(
            resume_path=p,
            load_optimizer=False,
            load_head=conf.head_load,  # todo note!
            load_imp=False,
            latest=True,
            strict=False,
        )
    # simplify_conf(conf)
    learner.cloud_sync_log()
    # res = learner.validate_ori(conf, valds_names=('cfp_fp', ))
    # exit(0)
    # learner.calc_img_feas(out='work_space/mbfc.crash.h5')
    # log_lrs, losses = learner.find_lr(num=999, bloding_scale=1000)
    # losses[np.isnan(losses)] = 999
    # best_lr = 10 ** (log_lrs[np.argmin(losses)])
    # print('best lr is ', best_lr)
    # conf.lr = best_lr
    # exit(0)
    # learner.init_lr()
    # conf.tri_wei = 0
    # log_conf(conf)
    # learner.train(conf, 1, name='xent')

    learner.init_lr()
    simplify_conf(conf)
    if conf.head_init:
        learner.head_initialize()
    if conf.warmup:
        learner.warmup(conf, conf.warmup)
    learner.train_simple(conf, conf.epochs)
    # learner.train_dist(conf, conf.epochs)
    if conf.net_mode == 'sglpth':
        decs = learner.model.module.get_decisions()
        msgpack_dump(decs, 'decs.pk')
    # learner.train_cotching(conf, conf.epochs)
    # learner.train_cotching_accbs(conf, conf.epochs)
    # learner.train_ghm(conf, conf.epochs)
    # learner.train_with_wei(conf, conf.epochs)
    # learner.train_use_test(conf, conf.epochs)
    # res = learner.validate_ori(conf, )

    if not conf.cloud:
        from tools.test_ijbc3 import test_ijbc3
        res = test_ijbc3(conf, learner)
        tpr6, tpr4, tpr3 = res[0][1], res[1][1], res[2][1]
        learner.writer.add_scalar('ijbb/6', tpr6, learner.step)
        learner.writer.add_scalar('ijbb/4', tpr4, learner.step)
        learner.writer.add_scalar('ijbb/3', tpr3, learner.step)
    learner.writer.close()

    if conf.never_stop:
        img = torch.randn((conf.batch_size // 2, 3, conf.input_size,
                           conf.input_size)).cuda()
        learner.model.eval()
        logging.info('never stop')
        while True:
            _ = learner.model(img)
def countdown():
    input_msg = conf.get('msg', '')
    input_date = conf.get('date', '')
    return render_template('countdown.html', msg=input_msg, date=input_date)
def setTitle(self):
    title = conf.get("vi.name")
    if title:
        self.modulH1.appendChild(
            html5.TextNode(html5.utils.unescape(title)))
import ast
import datetime
import json
import re

# https://github.com/paypal/PayPal-Python-SDK
import paypalrestsdk
from paypalrestsdk.payments import Payment

from config import conf
from __init__ import *
from helpers import PP_TOKEN, PP_ERROR, PP_APPROVED, PP_SUCCESS
from tables import PaypalHistory, Participant, Extra

paypalrestsdk.configure({
    "mode": conf.get("paypal", "mode"),  # sandbox or live
    "client_id": conf.get("paypal", "client_id"),
    "client_secret": conf.get("paypal", "client_secret")
})

callback = conf.get("server", "base_url").rstrip("/")
currency = conf.get("paypal", "currency")

payment_steps = {1: Participant, 2: Extra}


def log(id, payment_step, status, message):
    ph = PaypalHistory(participant_id=id,
                       timestamp=datetime.datetime.now(),
                       _paypal_status=status,
                       data=message,
                       payment_step=payment_step)
    pst = payment_steps[payment_step]
import hashlib

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

from __init__ import app, session
from flask import request, flash, render_template
from wtforms import Form, validators, IntegerField
from config import conf

Base = declarative_base()
metadata = Base.metadata

event_shortname = conf.get("application", "shortname")
application_fee = str(conf.get("paypal", "fee"))


class DiscountCode(Base):
    # user_id is participant code, default is NULL
    __tablename__ = 'discounts'
    id = Column(Integer, primary_key=True)
    amount = Column(Integer)
    code = Column(String)
    user_id = Column(String(16))


def generate_discount(i):
    s = "%s #%d" % (event_shortname, i)
    return hashlib.sha224(s.encode()).hexdigest()[:8]


class DiscountForm(Form):
    discount = IntegerField("Discount",
                            validators=[validators.NumberRange(min=0)],
        if no_more_inputs and source not in mappers:
            log.info("Sending termination messages for the second "
                     "phase to mapper to %d" % source)
            comm.send(MSG_COMMAND_QUIT, dest=source)
            mappers.add(source)
        else:
            comm.send(MSG_COMMAND_WAIT, dest=source)


def initialize_indexer(input_path, output_path, num_mappers, num_reducers):
    if size != 2 + num_mappers + num_reducers:
        print "Error: size does not match"
        sys.exit(-1)
    else:
        input_path = os.path.abspath(input_path)
        output_path = os.path.abspath(output_path)

        if rank == 0:
            master = Master(input_path, output_path, num_mappers, num_reducers)
            master.start()
        elif rank == 1:
            Combiner(num_mappers, output_path)
        elif rank < num_reducers + SNODES:
            Reducer(output_path, num_mappers)
        else:
            Mapper([i + SNODES for i in range(num_reducers)])


if __name__ == "__main__":
    initialize_indexer(conf.get("main.input"), conf.get("main.output"),
                       conf.get("main.mapper"), conf.get("main.reducer"))
from helpers import *
from flask import request, flash
from flask import session as flask_session
from wtforms import Form, StringField, validators, SelectField, IntegerField, TextAreaField, BooleanField
from config import conf, currency_symbol
from discount import *
from confirmation_token import generate_confirmation_token, confirm_token
import urllib.parse

pp1 = Paypal(1,
             lambda url: redirect(url, code=302),
             lambda id, message: applyWithPaypalError(id, message),
             "/payment-success.html",
             "/payment-cancelled.html")

application_fee = float(conf.get("paypal", "fee"))
event_name = conf.get("application", "name")
event_shortname = conf.get("application", "shortname")
base_url = conf.get("server", "base_url")

conf_data = {"name": event_name,
             "shortname": event_shortname,
             "s_application_fee": str(application_fee),
             }


# @app.route("/apply.html", methods=["GET",])
class CodeNotFoundException(Exception):
    def __init__(self, code):
        self.code = code
import time

from flask import Flask, render_template
from flask_sqlalchemy_session import flask_scoped_session
from jinja2 import evalcontextfilter
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session

from config import conf
from tables import Base

__author__ = 'koller'

# set up Flask
app = Flask(__name__, static_url_path='')
app.secret_key = conf.get("server", "secret")
app.config['UPLOAD_FOLDER'] = "/tmp"
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

# set up database connection
db_url = conf.get("database", "url")
engine = create_engine(db_url, pool_recycle=3600)

# flask-sqlalchemy-session integration
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = flask_scoped_session(DBSession, app)  # type: Session
import erppeek

from config import conf


def init_openerp(url, login, password, database):
    openerp = erppeek.Client(url)
    uid = openerp.login(login, password=password, database=database)
    return openerp, uid


openerp, uid = init_openerp(
    conf.get('openerp', 'url'),
    conf.get('auth', 'user_login'),
    conf.get('auth', 'user_password'),
    conf.get('openerp', 'database'),
)
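# Sketch of the INI layout init_openerp() above expects. Section and option
# names come from the conf.get() calls; the values are placeholders.
#
# [openerp]
# url = http://localhost:8069
# database = <database-name>
#
# [auth]
# user_login = <login>
# user_password = <password>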
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pyodbc

from config import conf

server = conf.get('db', 'server')
username = conf.get('db', 'username')
password = conf.get('db', 'password')
database = conf.get('db', 'database')
driver = conf.get('db', 'driver')


def get_connect_str():
    return 'DRIVER={{{0}}};SERVER={1};DATABASE={2};UID={3};PWD={4}'.format(
        driver, server, database, username, password)


def read_series():
    series = []
    cursor.execute('SELECT Code, Description FROM dbo.Catalog')
    for row in cursor:
        item = {}
        item['id'] = row[0]
        item['name'] = row[1]
        series.append(item)
    return series


conn = pyodbc.connect(get_connect_str())
cursor = conn.cursor()
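# Hedged usage sketch for the module above; the main guard is an addition
# for illustration and not part of the original file.
if __name__ == '__main__':
    for item in read_series():
        print(item['id'], item['name'])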
def ehbrev(eval_ctx, value):
    return conf.get("application", "name")
def generate_badges(pdf_filename, template_filename, table):
    dirpath = tempfile.mkdtemp()
    logger().info("Temp directory for badges is %s" % (dirpath))

    # ensure paths exist
    iqdir = os.path.join(dirpath, "iq")
    if not os.path.exists(iqdir):
        os.makedirs(iqdir)
    flagdir = os.path.join(dirpath, "flags")
    if not os.path.exists(flagdir):
        os.makedirs(flagdir)

    shutil.copyfile("static/Logo.pdf", os.path.join(dirpath, "Logo.pdf"))
    songs = read_songs()

    i = 0
    prev_part = -1
    just_ended = MutableBoolean()  # type: MutableBoolean
    just_ended.val = False
    f = None

    for p in session.query(table).order_by(table.final_part):
        # download flag if necessary
        flag_filename = os.path.join(flagdir, "%s.png" % p.country)
        if not os.path.exists(flag_filename):
            to_download = p.country.lower()
            if to_download == "hk":
                to_download = "cn"
            download_binary_file("http://flagpedia.net/data/flags/normal/%s.png" % to_download,
                                 flag_filename)

        fp = p.final_part
        if fp != prev_part:
            if f:
                f.flush()
                close(f, i, just_ended)
            local_tex_name = "%s.tex" % parts[fp]
            f = open(os.path.join(dirpath, local_tex_name), "w", encoding="utf-8")
            prev_part = fp
            i = 0

        if i % 2 == 0:
            f.write(start.replace("PPPPP", parts[prev_part]) + "\n")
            just_ended.val = False

        f.write(makebadge(i, p, iqdir) + "\n")
        just_ended.val = False
        i = i + 1
        if i % 2 == 0:
            print_end(f, just_ended)

    f.flush()
    close(f, i, just_ended)

    template = texenv.get_template(template_filename)
    tex = template.render(event_name=conf.get("application", "name"), songs=songs)
    return compile_and_send_pdf(pdf_filename, tex, runs=2, dirpath=dirpath)
def init_es():
    connections.create_connection(hosts=conf.get("elasticsearch.hostname"),
                                  timeout=60)
    print("Elasticsearch connections initialized")
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from flask import Flask, jsonify

from config import APP_MODE, conf
from db import read_series

app_name = conf.get('app', 'name')
app_host = conf.get('app', 'host')
app_port = conf.get('app', 'port')
is_debug = APP_MODE == 'dev' or APP_MODE is None

app = Flask(app_name)
app.config['JSON_AS_ASCII'] = False


@app.route('/')
def home():
    return 'welcome to tac!'


@app.route('/get-series')
def get_series():
    series = read_series()
    return jsonify(series)


app.run(host=app_host, port=app_port, debug=is_debug)