def _get_flow_results(self):
    """Pull the day's ML flow results from HDFS, then build self._flow_scores:
    a header row followed by [sev=0, filtered columns..., min(lda_ab, lda_ba), rank]."""
    self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
    flow_results = "{0}/flow_results.csv".format(self._data_path)
    # get hdfs path from conf file
    HUSER = self._oni_conf.get('conf','HUSER').replace("'","").replace('"','')
    hdfs_path = "{0}/flow/scored_results/{1}/scores/flow_results.csv".format(HUSER,self._date)
    # get results file from hdfs ("form" is a typo in the helper's name, kept as defined elsewhere)
    get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
    self._logger.info("{0}".format(get_command))
    # validate the file landed locally
    if os.path.isfile(flow_results):
        # read number of results based on the limit specified.
        self._logger.info("Reading {0} flow results file: {1}".format(self._date,flow_results))
        self._flow_results = Util.read_results(flow_results,self._limit,self._results_delimiter)
        if len(self._flow_results) == 0: self._logger.error("There are not flow results.");sys.exit(1)
    else:
        self._logger.error("There was an error getting ML results from HDFS")
        sys.exit(1)
    # add headers.
    self._logger.info("Adding headers based on configuration file: score_fields.json")
    self._flow_scores = [ [ str(key) for (key,value) in self._conf['flow_score_fields'].items()] ]
    # column indexes of the two LDA scores inside each raw result row
    ldaab_index = self._conf["flow_results_fields"]["lda_score_ab"]
    ldaba_index = self._conf["flow_results_fields"]["lda_score_ba"]
    # filter results, add sev (initially 0), the lower LDA score, and rank.
    self._logger.info("Filtering required columns based on configuration")
    self._flow_scores.extend([ [0] + [ conn[i] for i in self._conf['column_indexes_filter'] ] + [(conn[ldaab_index] if (conn[ldaab_index]<= conn[ldaba_index]) else conn[ldaba_index])] + [n] for n, conn in enumerate(self._flow_results) ])
def prepare(self, word_type):
    """
    Generates trie data containing all words from clp.
    :param word_type: part of speech of which kind read words from clp data
    :return: zipped (reversed inflected form, (suffix to remove, suffix to add)) pairs
    """
    # Python 2 code (uses `unicode`). `index` only counts processed forms.
    old = ""
    index = 0
    keys = []
    values = []
    # NOTE(review): the id range 16777231..18663982 looks like a CLP dictionary
    # id window — confirm against the plp library's documentation.
    for i in range(16777231, 18663982):
        if Util.is_word_unneeded(i):
            continue
        # NOTE(review): words of the *appropriate* type are skipped here — looks
        # inverted, but may be intentional; confirm Util.is_word_appropriate_type.
        if Util.is_word_appropriate_type(self.plp.label(i)[0], word_type):
            continue
        form = self.plp.bform(i)
        if old != form:
            for s in self.plp.forms(i):
                if len(s) > 0:
                    # common prefix of inflected form and basic form
                    a = Util.substring(s, form)
                    to_remove = s[len(a): len(s)]
                    to_add = form[len(a): len(form)]
                    # keys are reversed so lookups can match by suffix (a tergo)
                    keys.append(Util.reverse(s))
                    a = unicode(to_remove).encode('utf-8')
                    b = unicode(to_add).encode('utf-8')
                    values.append((a, b))
                    index += 1
            old = self.plp.bform(i)
    return zip(keys, values)
def _get_dns_results(self):
    """Pull the day's ML DNS results from HDFS into self._dns_results, then
    populate self._dns_scores_headers and self._dns_scores (deep row copies).
    Exits the process if the file is missing or empty."""
    self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
    dns_results = "{0}/dns_results.csv".format(self._data_path)
    # get hdfs path from conf file.
    HUSER = self._oni_conf.get('conf','HUSER').replace("'","").replace('"','')
    hdfs_path = "{0}/dns/scored_results/{1}/scores/dns_results.csv".format(HUSER,self._date)
    # get results file from hdfs.
    get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
    self._logger.info("{0}".format(get_command))
    # validate the file landed locally
    if os.path.isfile(dns_results):
        # read number of results based on the limit specified.
        self._logger.info("Reading {0} dns results file: {1}".format(self._date,dns_results))
        self._dns_results = Util.read_results(dns_results,self._limit,self._results_delimiter)[:]
        if len(self._dns_results) == 0:
            # fixed copy-paste bug: this message previously said "flow results".
            self._logger.error("There are not dns results.")
            sys.exit(1)
    else:
        self._logger.error("There was an error getting ML results from HDFS")
        sys.exit(1)
    # add headers.
    self._logger.info("Adding headers")
    self._dns_scores_headers = [ str(key) for (key,value) in self._conf['dns_score_fields'].items() ]
    # add dns content (copy each row so later edits don't alias _dns_results).
    self._dns_scores = [ conn[:] for conn in self._dns_results][:]
def _get_flow_results(self):
    """Pull the day's ML flow results from HDFS, then append to self._flow_scores
    one row per result: the configured filtered columns plus a rank index."""
    self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
    flow_results = "{0}/flow_results.csv".format(self._data_path)
    # get hdfs path from conf file
    HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
    hdfs_path = "{0}/flow/scored_results/{1}/scores/flow_results.csv".format(HUSER,self._date)
    # get results file from hdfs
    get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
    self._logger.info("{0}".format(get_command))
    # validate the file landed locally
    if os.path.isfile(flow_results):
        # read number of results based on the limit specified.
        self._logger.info("Reading {0} flow results file: {1}".format(self._date,flow_results))
        self._flow_results = Util.read_results(flow_results,self._limit,self._results_delimiter)
        if len(self._flow_results) == 0: self._logger.error("There are not flow results.");sys.exit(1)
    else:
        self._logger.error("There was an error getting ML results from HDFS")
        sys.exit(1)
    # filter results, add rank.
    # NOTE(review): self._flow_scores must already exist (initialized elsewhere) — confirm.
    self._logger.info("Filtering required columns based on configuration")
    self._flow_scores.extend([ [ conn[i] for i in self._conf['column_indexes_filter'] ] + [n] for n, conn in enumerate(self._flow_results) ])
def _initialize_members(self,date,limit,logger):
    """Set up logger, state fields, app configuration, score-field config and the
    data engine for the flow OA component.

    :param date: analysis date string (YYYYMMDD)
    :param limit: max number of results to read
    :param logger: truthy to reuse the named 'OA.Flow' logger, falsy to create one
    """
    # get logger if exists. if not, create new instance.
    self._logger = logging.getLogger('OA.Flow') if logger else Util.get_logger('OA.Flow',create_file=False)
    # initialize required parameters.
    # NOTE(review): "_scrtip_path" is a typo for "_script_path", but other methods
    # reference it by this name — renaming would break them.
    self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
    self._date = date
    self._table_name = "flow"
    self._flow_results = []
    self._limit = limit
    self._data_path = None
    self._ipynb_path = None
    self._ingest_summary_path = None
    self._flow_scores = []
    self._results_delimiter = '\t'
    # get app configuration.
    self._oni_conf = Util.get_oni_conf()
    # get scores fields conf (OrderedDict so column order is preserved)
    conf_file = "{0}/flow_conf.json".format(self._scrtip_path)
    self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)
    # initialize data engine
    self._db = self._oni_conf.get('conf','DBNAME').replace("'","").replace('"','')
    self._engine = Data(self._db, self._table_name,self._logger)
def _is_ip_internal(self,ip, ranges):
    """Return 1 if `ip` falls inside any internal range, else 0.

    :param ip: dotted-quad IP string
    :param ranges: iterable of rows whose first two items are (start_int, end_int)
    """
    # convert once; the original converted the same ip twice per row.
    ip_int = Util.ip_to_int(ip)
    for row in ranges:
        if row[0] <= ip_int <= row[1]:
            return 1
    return 0
def _create_flow_scores_csv(self):
    """Persist self._flow_scores as flow_scores.csv plus a flow_scores_bu.csv backup."""
    # both files get the same data; the "_bu" copy is a backup of the original scores.
    for file_name in ("flow_scores.csv", "flow_scores_bu.csv"):
        Util.create_csv_file("{0}/{1}".format(self._data_path, file_name), self._flow_scores)
def view(self):
    """Render one frame: set up the 3D viewport, draw map and model, then flush
    GL commands and swap the double buffer."""
    Util.setup3d()
    self.drawMap()
    self.drawModel()
    glFlush()
    glutSwapBuffers()
def sign_for_jspay(self, prepay_id):
    """Build the signed parameter set the JS-SDK needs to invoke payment.

    :param prepay_id: prepay id returned by the unified-order API
    :return: dict with timestamp, nonceStr, package, signType and paySign
    """
    ts = Util.timestamp()
    nonce = Util.generate_nonce(15)
    pkg = 'prepay_id=%s' % prepay_id
    # WeChat requires MD5 signing over these exact field names.
    signature = self._generate_sign(appId=self._appid,
                                    timeStamp=ts,
                                    nonceStr=nonce,
                                    package=pkg,
                                    signType='MD5')
    return {'timestamp': ts,
            'nonceStr': nonce,
            'package': pkg,
            'signType': 'MD5',
            'paySign': signature}
def draw():
    """Draw the 2D background: dark blue upper half and green lower half
    (presumably sky over ground — confirm against the scene)."""
    Util.setup2D()
    glColor(.1,.1,.5)
    glRectf(0,Model.getWinfo().h/2,Model.getWinfo().w,Model.getWinfo().h)
    glColor(.1,.5,.1)
    glRectf(0,0,Model.getWinfo().w,Model.getWinfo().h/2)
    Util.finish2D()
def motion(self,x,y):
    """Mouse-drag handler: turn the camera proportionally to drag distance.

    :param x: current mouse x
    :param y: current mouse y
    """
    # drag deltas scaled by window width (0.31 is an empirical sensitivity factor)
    turnh = -(x-Model.getMouseInfo().clickedx)/(Model.getWinfo().w*.31)
    turnv = (y-Model.getMouseInfo().clickedy)/(Model.getWinfo().w*.31)
    # NOTE(review): both branches restore+turn identically; right-drag additionally
    # resets the player — confirm the duplication is intentional.
    if Model.getMouseInfo().side == "left":
        Util.restoreCamera()
        Util.turnCamera(turnh,turnv)
    elif Model.getMouseInfo().side == "right":
        Util.restoreCamera()
        Util.turnCamera(turnh,turnv)
        Util.resetPlayer()
def _create_dns_scores_csv(self):
    """Write dns scores (timestamp column moved, header row prepended) to
    dns_scores.csv and a dns_scores_bu.csv backup."""
    final_rows = self._move_time_stamp(self._dns_scores)
    final_rows.insert(0, self._dns_scores_headers)
    # primary file and its backup contain the same rows.
    for file_name in ("dns_scores.csv", "dns_scores_bu.csv"):
        Util.create_csv_file("{0}/{1}".format(self._data_path, file_name), final_rows)
def change(self, name):
    """Switch camera state for the named view ("maingame" or "mainmenu").

    :param name: target view name
    """
    if name == "maingame":
        # NOTE(review): this branch sets `horizontal` but the mainmenu branch sets
        # `horizon` — one of the two is probably a typo; confirm which attribute
        # the camera code reads.
        self.horizontal = 0
        self.vertical = 0.25
        Util.resetCamera()
    elif name == "mainmenu":
        # fixed menu camera: position, look-at target, up vector and orbit params
        self.location = [30, 30, 0]
        self.lookAt = [-70, 10, 0]
        self.up = [0, 1, 0]
        self.horizon = 0
        self.vertical = -1.0
        self.distance = 50
def draw():
    """Draw a horizontally-centered 400px bar near the bottom of the window
    (looks like a HUD/progress bar — confirm)."""
    Util.setup2D()
    glColor(.15,.21,.41)
    barwidth = 400
    # center the bar horizontally
    x1 = (Model.getWinfo().w/2)-(barwidth/2)
    x2 = (Model.getWinfo().w/2)+(barwidth/2)
    y1 = 20
    y2 = 50
    glRectf(x1,y1,x2,y2)
    Util.finish2D()
def view(self):
    """Render a full game frame: camera update, background, map, player model,
    other entities, target and interface, then flush/swap buffers."""
    Util.setup3d()
    Util.updateCamera()
    self.drawBackground()
    self.drawMap()
    self.drawModel(Model.getGame().getPlayer())
    self.drawOthers()
    self.drawTarget()
    self.drawInterface()
    glFlush()
    glutSwapBuffers()
def _generate_sign(self, **kwargs):
    """Compute the uppercase MD5 signature over the given parameters.

    Empty-valued entries and any pre-existing 'sign' key are excluded; remaining
    keys are sorted, joined as k=v pairs, and the merchant key is appended last.
    """
    usable = sorted(k for k in kwargs if kwargs[k] and k != 'sign')
    pairs = ['%s=%s' % (k, kwargs[k]) for k in usable]
    pairs.append('%s=%s' % ('key', self._sign_key))
    encoded = Util.encode_data('&'.join(pairs))
    return Util.md5(encoded).upper()
def unified_order(self, trade_type, out_trade_no, body, total_fee, notify_url, **kwargs):
    """
    WeChat unified-order API (doc: http://pay.weixin.qq.com/wiki/doc/api/index.php?chapter=9_1)
    :param trade_type: one of 'JSAPI', 'APP', 'NATIVE'
    :param out_trade_no: merchant order number, max 32 chars
    :param body: short description of the goods / payment
    :param total_fee: amount in fen (CNY cents); other currencies via kwargs
    :param notify_url: URL for the asynchronous payment-result notification
    :return: tuple(code, result) from the unified-order call
    :raises WxPayError: on any missing required parameter
    """
    if not trade_type:
        raise WxPayError(u"缺少统一支付接口必填参数trade_type!")
    if not out_trade_no:
        raise WxPayError(u"缺少统一支付接口必填参数out_trade_no!")
    if not body:
        raise WxPayError(u"缺少统一支付接口必填参数body!")
    if not total_fee:
        raise WxPayError(u"缺少统一支付接口必填参数total_fee!")
    if not notify_url:
        raise WxPayError(u"异步通知url未设置")
    # check client ip: NATIVE can fall back to the local ip, the others must supply it
    if not kwargs.get('spbill_create_ip'):
        if trade_type == 'NATIVE':
            kwargs.update(spbill_create_ip=Util.get_local_ip())
        else:
            raise WxPayError(u'APP和网页支付需提交用户端ip')
    # per-trade-type required extras
    if trade_type == 'NATIVE':
        assert kwargs.get('product_id'), u'trade_type为NATIVE时,product_id为必填参数'
    elif trade_type == 'JSAPI':
        assert kwargs.get('openid'), u'trade_type为JSAPI时,openid为必填参数!'
    elif trade_type == 'APP':
        pass
    else:
        raise WxPayError(u"支付类型trade_type错误,为('JSAPI', 'APP', 'NATIVE')之一")
    # assemble the request payload; sign must be computed last, over everything else
    kwargs.update(device_info='WEB')
    kwargs.update(appid=self._appid, mch_id=self._mchid)
    kwargs.update(trade_type=trade_type, out_trade_no=out_trade_no, body=body, total_fee=total_fee, notify_url=notify_url)
    kwargs.update(nonce_str=Util.generate_nonce(20))
    kwargs.update(sign=self._generate_sign(**kwargs)) #sign
    return self._post('/pay/unifiedorder', kwargs)
def collide(self, other_object):
    """Return True when this object's bounding circle overlaps other_object's.

    :param other_object: anything exposing get_position() and get_radius()
    """
    distance = Util.dist(self.get_position(), other_object.get_position())
    sum_r = self.get_radius() + other_object.get_radius()
    # circles overlap when center distance does not exceed the radii sum
    # (replaces the verbose if/else returning booleans).
    return distance <= sum_r
def pack_news(self, item_list):
    """
    Build a news (rich-media) reply message.
    :param item_list: list of dicts: {'title':'', 'description':'', 'picurl':'', 'url':''}
    :return: xml string (encrypted if the account requires it)
    """
    article_count = len(item_list)
    # WeChat allows 1..10 articles per news reply
    assert article_count in range(1, 11), 'news count should be in [1,10]'
    # reply header: note from/to are swapped relative to the inbound message
    xml_data = '''
        <xml>
        <ToUserName><![CDATA[%s]]></ToUserName>
        <FromUserName><![CDATA[%s]]></FromUserName>
        <CreateTime>%s</CreateTime>
        <MsgType><![CDATA[news]]></MsgType>
        <ArticleCount>%s</ArticleCount>
        <Articles>
        ''' % (self.message.fromUserName, self.message.toUserName, Util.timestamp(), article_count)
    for item in item_list:
        # one <item> per article; missing keys render as None
        item_xml = '''
            <item>
            <Title><![CDATA[%s]]></Title>
            <Description><![CDATA[%s]]></Description>
            <PicUrl><![CDATA[%s]]></PicUrl>
            <Url><![CDATA[%s]]></Url>
            </item>
            ''' % (item.get('title'), item.get('description'), item.get('picurl'), item.get('url'))
        xml_data += item_xml
    xml_data += "</Articles></xml>"
    return self._ensure_encrypt(xml_data)
def find_basic_form(self, strange_form):
    """
    Method finds basic form for given inflectional form.
    :param strange_form: inflectional form of word
    :return: object with .basic_form (reconstructed base form) and .word_labels
             (reversed similar words grouped by suffix rule)
    """
    # Python 2 code (iteritems). Vote among similar words' (remove, add) suffix
    # rules; the most frequent rule wins.
    similar_words = self.find_similar_words(strange_form)
    how_many_forms = dict()
    word_labels = dict()
    for word in similar_words:
        form = self.atergo_trie[word]
        if form[0] in how_many_forms:
            how_many_forms[form[0]] += 1
        else:
            how_many_forms[form[0]] = 1
        if not form[0] in word_labels:
            word_labels[form[0]] = []
        word_labels[form[0]].append(Util.reverse(word))
    max_form = max(how_many_forms.iteritems(), key=operator.itemgetter(1))[0]
    # strip trailing NUL padding before decoding the suffixes
    max_form0 = max_form[0].split('\x00')[0].decode('utf-8')
    max_form1 = max_form[1].split('\x00')[0].decode('utf-8')
    # NOTE(review): attributes are set on the namedtuple CLASS, not an instance —
    # works for a one-shot return but is fragile; confirm callers.
    result = collections.namedtuple('result', ['basic_form', 'word_labels'])
    # drop the suffix to remove, append the suffix to add
    result.basic_form = strange_form[:len(strange_form) - len(max_form0)] + max_form1
    result.word_labels = word_labels[max_form]
    return result
def refund(self, out_refund_no, total_fee, refund_fee, transaction_id='', out_trade_no='', **kwargs):
    """
    Refund request; requires the merchant's client certificate (two-way TLS).
    :param out_refund_no: merchant-side refund number (required)
    :param total_fee: original order total in fen (required)
    :param refund_fee: amount to refund in fen (required)
    :param transaction_id: WeChat order id (this or out_trade_no required)
    :param out_trade_no: merchant order id (this or transaction_id required)
    :return: tuple(code, result) from the refund call
    :raises WxPayError: on missing required parameters
    """
    if not out_refund_no:
        raise WxPayError(u"退款申请接口中,缺少必填参数out_refund_no(商户系统内部退款单号)!")
    if not total_fee:
        raise WxPayError(u"退款申请接口中,缺少必填参数total_fee(订单总金额,单位为分, 整数)!")
    if not refund_fee:
        raise WxPayError(u"退款申请接口中,缺少必填参数refund_fee(退款金额,单位为分, 整数)!")
    if not (transaction_id or out_trade_no):
        raise WxPayError(u"订单查询接口中,out_trade_no、transaction_id至少填一个")
    kwargs.update(
        appid=self._appid,
        mch_id=self._mchid,
        out_refund_no=out_refund_no,
        total_fee=total_fee,
        refund_fee=refund_fee,
        transaction_id=transaction_id,
        out_trade_no=out_trade_no,
        nonce_str=Util.generate_nonce(20)
    )
    # sign last, over the complete payload; cert=True sends the client certificate
    kwargs.update(sign=self._generate_sign(**kwargs)) #sign
    return self._post('/secapi/pay/refund', kwargs, cert=True)
def pack_music(self, musicurl='', hqmusicurl='', thumb_media_id='', title='', description=''):
    """
    Build a music reply message.
    :param musicurl: music link
    :param hqmusicurl: high-quality music link, preferred on WIFI
    :param thumb_media_id: media id of the thumbnail
    :param title: title
    :param description: description
    :return: xml string (encrypted if the account requires it)
    """
    # reply template: from/to are swapped relative to the inbound message
    template = '''
        <xml>
        <ToUserName><![CDATA[%s]]></ToUserName>
        <FromUserName><![CDATA[%s]]></FromUserName>
        <CreateTime>%s</CreateTime>
        <MsgType><![CDATA[music]]></MsgType>
        <Music>
        <Title><![CDATA[%s]]></Title>
        <Description><![CDATA[%s]]></Description>
        <MusicUrl><![CDATA[%s]]></MusicUrl>
        <HQMusicUrl><![CDATA[%s]]></HQMusicUrl>
        <ThumbMediaId><![CDATA[%s]]></ThumbMediaId>
        </Music>
        </xml>
        '''
    result = template % (self.message.fromUserName, self.message.toUserName, Util.timestamp(), title, description, musicurl, hqmusicurl, thumb_media_id)
    return self._ensure_encrypt(result)
def GET(self):
    """Return one lottery (by ?id=...) or a paged JSON list (?ps=start, ?pn=count)."""
    util = Util()
    inputdata = web.input()
    if hasattr(inputdata, 'id'):
        # single-item lookup
        lottery_id = inputdata.id  # NOTE(review): unused — item uses inputdata.id directly
        item = util.getLottery(_id = inputdata.id)
        if item:
            return item.stringify()
        else:
            return web.notfound("lottery not found")
    else:
        # paged listing: ps = start index (default 0), pn = page size (default 10)
        ps = int(inputdata.ps) if hasattr(inputdata, 'ps') else 0
        pn = int(inputdata.pn) if hasattr(inputdata, 'pn') else 10
        pe = ps + pn - 1  # inclusive end index
        items = util.getAllLotteries(ps, pe)
        lottery_cnt = util.getLotteryCount()
        if not items is None:
            # JSON is hand-assembled from item.stringify() fragments; relies on
            # stringify() producing valid JSON objects.
            return '{' + '"data": [{0}], "ps": {1}, "pn": {2}, "total": {3}'.format(", ".join([i.stringify() for i in items]), ps, pn, lottery_cnt) + '}'
        else:
            return web.notfound("lotteries not found")
def initialize(self):
    """Load source config, connect the DB and cache the sources.

    :return: True on success, False when no source content is available
    """
    sources = Util.get_source_content()
    # guard clause: nothing to initialize without source content
    if sources is None:
        return False
    self.db = SqlConnector()
    self.sources_cache = sources
    print("started..")
    return True
def shoot(self, missile_group):
    """Spawn a Bullet at the ship's nose, inheriting the ship's velocity.

    :param missile_group: group the new bullet is added to
    """
    forward = Util.angle_to_vector(self.angle)
    radius = self.get_radius()
    # y components are negated: screen y grows downward
    missile_pos = [self.pos[0] + radius * forward[0], self.pos[1] - radius * forward[1]]
    missile_speed = [15 * forward[0], -15 * forward[1]]
    missile_vel = [self.vel[0] + missile_speed[0], self.vel[1] + missile_speed[1]]
    a_missile = Bullet(missile_pos, missile_vel, self.angle, 0, self.canvas)
    missile_group.add(a_missile)
def parse_notify_result(self, body):
    """
    Parse the XML body WeChat posts when notifying a payment result.
    Returns the fields as a dict, or None when the signature does not verify.
    """
    notify = Util.xml_to_dict(body)
    expected_sign = self._generate_sign(**notify)
    return notify if expected_sign == notify.get('sign') else None
def do_GET(self):
    """Serve the graph data wrapped in a JSONP callback ('jsonpCallback')."""
    # NOTE(review): the local name `json` shadows the json module within this method.
    json = str(GraphGrapper().get_data(Util.get_source_content()))
    response = "jsonpCallback({0})".format(json)
    self.send_response(200)
    self.send_header("Content-type", "application/json")
    self.end_headers()
    self.wfile.write(bytes(response, "utf-8"))
def handleKey(self,key,x,y):
    """Keyboard dispatch: map single keypresses to game actions.

    :param key: pressed key character ('\\x20' space, '\\t' tab, '\\x1b' escape)
    :param x: mouse x at keypress (unused here, GLUT callback signature)
    :param y: mouse y at keypress (unused here)
    """
    if key == 'c': Model.getGame().getPlayer().toggleAuto()   # toggle autopilot
    if key == 'z': Util.resetCamera()
    if key == '\x20': Model.getGame().getPlayer().jump()      # space
    if key == '\t': Model.getGame().toggleTarget()
    if key == '\x1b': Model.getGame().clearTarget()           # escape
    if key == 'q': sys.exit()
    if key == 'g': Model.getGame().getPlayer().blink()
    if key == 't': Model.changeView("mainmenu")
    # also feed held-key handling
    self.handleConstantDown(key)
def pack_transfer_kf(self):
    """Build the reply that forwards the conversation to the customer-service system."""
    # from/to are swapped relative to the inbound message
    template = '''
        <xml>
        <ToUserName><![CDATA[%s]]></ToUserName>
        <FromUserName><![CDATA[%s]]></FromUserName>
        <CreateTime>%s</CreateTime>
        <MsgType><![CDATA[transfer_customer_service]]></MsgType>
        </xml>'''
    xml_data = template % (self.message.fromUserName, self.message.toUserName, Util.timestamp())
    return self._ensure_encrypt(xml_data)
def sign(cls, appid, jsapi_ticket, page_url):
    """
    Sign a page for the JS-SDK.
    page_url: the page URL without the '#' fragment (stripped here automatically).
    :return: {'appid':'', 'timestamp':123456789, 'nonce_str':'', 'signature':''}
    """
    params = {
        'noncestr': Util.generate_nonce(15),
        'timestamp': Util.timestamp(),
        'jsapi_ticket': jsapi_ticket,
        'url': page_url.partition('#')[0],
    }
    # WeChat requires key=value pairs sorted by lowercase key, joined with '&'
    string = '&'.join(['%s=%s' % (key.lower(), params[key]) for key in sorted(params.keys())])
    signature = Util.sha1(string)
    return {
        'appid': appid,
        'timestamp': params['timestamp'],
        'nonce_str': params['noncestr'],
        'signature':signature
    }
def close_order(self, out_trade_no):
    """
    Close a transaction.
    :param out_trade_no: merchant order number (required)
    :return: tuple(code, result)
    :raises WxPayError: if out_trade_no is missing
    """
    if not out_trade_no:
        raise WxPayError("订单查询接口中,out_trade_no必填!")
    payload = {
        'appid': self._appid,
        'mch_id': self._mchid,
        'out_trade_no': out_trade_no,
        'nonce_str': Util.generate_nonce(20),
    }
    # sign is computed last, over the complete payload
    payload['sign'] = self._generate_sign(**payload)
    return self._post('/pay/closeorder', payload)
async def backupLocal(self, ctx):
    """Owner-only: dump every quote document from MongoDB to quoteBackup.json."""
    if not botUtil.isVexrax(ctx.message.author.id):
        return
    db = self.mongoClient["Skynet"]
    collection = db["Quotes"]
    # Build one fresh dict per document. The previous version reused a single
    # dict and hand-wrote JSON with a trailing comma, producing an invalid file.
    quotes = [
        {
            'quote': document['quote'],
            'year': document['year'],
            'author': document['author'],
            'context': document['context'],
        }
        for document in collection.find()
    ]
    # "w" (not "a"): repeated backups must overwrite, not concatenate.
    with open("quoteBackup.json", "w") as f:
        json.dump(quotes, f)
def rotateAllTarget(target, labels):
    """Augment character images by rotating each through [LEFT_ANGLE, RIGHT_ANGLE).

    :param target: list of images, each a list of character sub-images
    :param labels: flat label list, one label per character
    :return: (rotated images grouped per source image, flat expanded label list)
    """
    new_target = []
    new_labels = []
    print(labels)
    for i in range(len(target)):
        img = []
        #img_labels = []
        for j in range(len(target[i])):
            for angle in range(Constants.LEFT_ANGLE, Constants.RIGHT_ANGLE):
                newChar = Util.rotate(target[i][j], angle)
                img.append(newChar)
                # assumes exactly 4 characters per image (i * 4 + j) — TODO confirm
                new_labels.append(labels[i * 4 + j])
        new_target.append(img)
        #new_labels.append(img_labels)
    print(len(new_target[0]))
    print(new_labels)
    return new_target, new_labels
def on_data(self, data):
    """Extract the tweet text from a raw stream payload and forward the
    normalized text to a local TCP socket on port 9999. (Python 2 code.)"""
    # crude extraction: slice between the "text" and "source" fields of the raw
    # payload — assumes a fixed 8-char offset past "text"; fragile, confirm format.
    text_i = data.find("text")
    source_i = data.find("source")
    text = data[text_i + 8:source_i].decode('utf_8')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = ('localhost', 9999)
    print >> sys.stderr, 'connecting to %s port %s' % server_address
    sock.connect(server_address)
    try:
        print >> sys.stderr, 'sending "%s"' % text
        sock.sendall(Util.normalize(text) + '\n')
    finally:
        # always release the socket, even if sendall fails
        print >> sys.stderr, 'closing socket'
        sock.close()
def testAllPoints():
    """Debug helper: run corner-point/path extraction over every image in the
    Tencent training dir, printing results. Returns two empty lists (ret and
    labels are never populated)."""
    ret = []
    labels = []
    dir_path = Constants.IMG_DIR_TENCENT_TRAIN
    for lists in os.listdir(dir_path):
        #print(lists)
        path = os.path.join(dir_path, lists)
        if not os.path.isdir(path):
            # read as grayscale, then binarize before point extraction
            oriImg = cv2.imread(path, 0)
            img = Util.binaryzation(oriImg)
            points, paths = getCornerPointsAndPaths(img)
            print("----------------RESULT----------------")
            print(path)
            print(points)
            print(len(paths))
    #target = readOneImg(dir_path + "YbUB_39_68_85.jpg")
    return ret, labels
def __init__(self,config,logger=None):
    """Load IANA lookup file paths from config and build the code->name dicts.

    :param config: dict possibly containing COL_CLASS/COL_QTYPE/COL_RCODE/COL_PRESP paths
    :param logger: truthy to reuse the named 'OA.IANA' logger, falsy to create one
    """
    self._logger = logging.getLogger('OA.IANA') if logger else Util.get_logger('OA.IANA',create_file=False)
    # each lookup file is optional; only configured ones are loaded
    if COL_CLASS in config:
        self._qclass_file_path = config[COL_CLASS]
    if COL_QTYPE in config:
        self._qtype_file_path = config[COL_QTYPE]
    if COL_RCODE in config:
        self._rcode_file_path = config[COL_RCODE]
    if COL_PRESP in config:
        self._http_rcode_file_path = config[COL_PRESP]
    self._qclass_dict = {}
    self._qtype_dict = {}
    self._rcode_dict = {}
    self._http_rcode_dict = {}
    self._init_dicts()
def query_order(self, transaction_id='', out_trade_no=''):
    """
    Query-order API, doc: http://pay.weixin.qq.com/wiki/doc/api/index.php?chapter=9_2
    :param transaction_id: WeChat order id, takes precedence
    :param out_trade_no: merchant-side order id
    :return: tuple(code, result)
    :raises WxPayError: when neither identifier is supplied
    """
    if not (transaction_id or out_trade_no):
        raise WxPayError(u"订单查询接口中,out_trade_no、transaction_id至少填一个")
    payload = {
        'appid': self._appid,
        'mch_id': self._mchid,
        'transaction_id': transaction_id,
        'out_trade_no': out_trade_no,
        'nonce_str': Util.generate_nonce(20),
    }
    # sign is computed last, over the complete payload
    payload['sign'] = self._generate_sign(**payload)
    return self._post('/pay/orderquery', payload)
def save_csv(self, _path=None):
    """Save the open/high/low/close/turnover/outstanding DataFrames as CSV files.

    :param _path: target directory; defaults to self.dir_data. Created if missing.
    """
    _path = self.dir_data if _path is None else _path
    if not os.path.exists(_path):
        os.makedirs(_path)
    # File name suffix: "_<adjust_type>.csv" when an adjustment type is set.
    # Fixed: with no adjust_type the old code used "" and wrote files WITHOUT
    # a .csv extension; now plain ".csv" is used.
    suffix = ".csv" if self.adjust_type == "" else "_" + self.adjust_type + ".csv"
    self.open_df.to_csv(Util.path_add_file(_path, "open" + suffix))
    self.high_df.to_csv(Util.path_add_file(_path, "high" + suffix))
    self.low_df.to_csv(Util.path_add_file(_path, "low" + suffix))
    self.close_df.to_csv(Util.path_add_file(_path, "close" + suffix))
    self.turnover_df.to_csv(Util.path_add_file(_path, "turnover" + suffix))
    self.outstanding_df.to_csv(
        Util.path_add_file(_path, "outstanding" + suffix))
    log.info("历史行情csv保存完毕,路径:{}".format(_path))
class Claptcha:
    """Captcha helper: splits a captcha image into per-character crops."""

    def __init__(self):
        # Util provides image loading and the vertical-projection splitter
        self.util = Util()

    def start(self):
        """Split images/1.png along vertical cut points and save each slice as cut<i>.png."""
        # for x in range(1,6):
        #     image = self.util.loadImage()
        #     image.save('images/%d.png'%x)
        image = Image.open('images/1.png')
        # # code = pytesseract.image_to_string(image)
        # # print(code)
        # cuts: list of (start_x, end_x) column ranges; 10 is presumably a
        # threshold/min-width parameter — TODO confirm against Util.vertical
        cuts = self.util.vertical(image, 10)
        w, h = image.size  # w unused; h bounds the crop vertically
        print(cuts)
        for i, n in enumerate(cuts, 1):
            temp = image.crop([n[0], 0, n[1], h])
            temp.save("cut%s.png" % i)
def _create_dns_scores(self):
    """Insert the current dns scores into the Impala dns_scores table,
    partitioned by the y/m/d derived from self._date (YYYYMMDD)."""
    # get date parameters.
    yr = self._date[:4]
    mn = self._date[4:6]
    dy = self._date[6:]
    value_string = ""
    dns_scores_final = self._move_time_stamp(self._dns_scores)
    self._dns_scores = dns_scores_final
    # build a VALUES list of tuples; trailing comma stripped below
    for row in dns_scores_final:
        value_string += str(tuple(Util.cast_val(item) for item in row)) + ","
    # HACK: SQL assembled by string formatting — safe only because values pass
    # through Util.cast_val and come from our own pipeline, but parameterized
    # queries would be preferable.
    load_into_impala = ("""
         INSERT INTO {0}.dns_scores partition(y={2}, m={3}, d={4}) VALUES {1}
    """).format(self._db, value_string[:-1], yr, mn, dy)
    impala.execute_query(load_into_impala)
def get_image(self, resolution: int):
    """Return the pixel-art image for `resolution`, converting via the web API
    on first use and caching the result.

    :raises ImageWasntDetectedException: when no source image was detected
    :raises WrongConvertringException: when the conversion request fails
    """
    # cached result from a previous conversion
    if self.__pixelArt is not None:
        return self.__pixelArt[int(resolution)]["image"]
    if not self.__success:
        raise ImageWasntDetectedException()
    resp = requests.post(self.CONVERT_URL, headers=self.__headers, json=self.__image)
    content = Util.parse_json(resp.content)
    if self.DEBUG:
        # keep the raw response around for inspection
        with open("temp.txt", "wb") as f:
            f.write(resp.content)
    if resp.status_code == 200 and content['meta']['status'] == "ok":
        self.__pixelArt = content['data']['images']
        # NOTE(review): the cache branch above indexes with int(resolution) but
        # this path uses resolution.value (enum?) — the two disagree; confirm
        # the intended key type before changing either.
        return self.__pixelArt[resolution.value]["image"]
    else:
        raise WrongConvertringException(resp.status_code, resp.content)
def __make_conspicuity_map(self, srcs):
    """Combine normalized feature maps into per-channel conspicuity maps
    (Itti-style saliency: intensity, color, orientation). Python 2 code:
    map() returns lists and xrange is used."""
    util = Util()
    intensity = self.__scale_add(map(util.normalize, srcs['intensity']))
    for key in srcs['colors'].keys():
        srcs['colors'][key] = map(util.normalize, srcs['colors'][key])
    # color conspicuity: element-wise sum of blue/yellow and red/green opponent maps
    color = self.__scale_add([
        srcs['colors']['bg'][x] + srcs['colors']['ry'][x] for x in xrange(len(srcs['colors']['bg']))
    ])
    # orientation conspicuity: accumulate over all orientation channels
    orientation = np.zeros(intensity.shape)
    for key in srcs['orientations'].keys():
        orientation += self.__scale_add(
            map(util.normalize, srcs['orientations'][key]))
    return {
        'intensity': intensity,
        'color': color,
        'orientation': orientation
    }
def update(self):
    """Per-frame ship update: sprite selection, rotation, screen-wrapping
    movement, thrust acceleration and drag."""
    # change image: image2 shows the thrust flame
    if self.thrust == False:
        self.image = self.image1
        self.original = self.image1
    else:
        self.image = self.image2
        self.original = self.image2
    # update angle
    self.angle += self.angle_vel
    self.angle %= 360
    # update position
    # move by move_ip will move by upleft coord,
    # but it is not good
    # TODO: there are some bugs about rect.move,
    # the move is not smooth, make it smooth
    self.rect.move_ip(self.vel)
    # make ship in screen (toroidal wrap on all four edges)
    if self.rect.right < self.area.left:
        self.rect.move_ip([self.area.width, 0])
    elif self.rect.left > self.area.right:
        self.rect.move_ip([-self.area.width, 0])
    elif self.rect.bottom < self.area.top:
        self.rect.move_ip([0, self.area.height])
    elif self.rect.top > self.area.bottom:
        self.rect.move_ip([0, -self.area.height])
    # rotate, not smooth, bad for pygame
    self.rotate_img()
    # update velocity: accelerate along heading while thrusting, then apply drag
    if self.thrust == True:
        acc = Util.angle_to_vector(self.angle)
        self.vel[0] += acc[0] * .2
        self.vel[1] -= acc[1] * .2   # y negated: screen y grows downward
    self.vel[0] *= .99
    self.vel[1] *= .99
    # slow the spin on the game-over screen
    if self.game_on == False:
        self.angle_vel *= 0.98
def update(self):
    """Per-frame ship update: spin, wrap-around movement, thrust and drag."""
    # update angle
    self.angle += self.angle_vel
    # update position with toroidal wrap on the canvas
    self.pos[0] = (self.pos[0] + self.vel[0]) % self.canvas.CANVAS_WIDTH
    self.pos[1] = (self.pos[1] + self.vel[1]) % self.canvas.CANVAS_HEIGHT
    # update velocity: accelerate along heading while thrusting, then apply drag
    if self.thrust == True:
        acc = Util.angle_to_vector(self.angle)
        self.vel[0] += acc[0] * .2
        self.vel[1] -= acc[1] * .2   # y negated: screen y grows downward
    self.vel[0] *= .99
    self.vel[1] *= .99
    # slow the spin on the game-over screen
    if self.game_on == False:
        self.angle_vel *= 0.99
def post(self, path, rfile, headers):
    """Handle a multipart POST: read file-name/content/saved-status fields and
    persist them via Util.save_file; respond 200 on success, 403 otherwise."""
    form = cgi.FieldStorage(fp=rfile, headers=headers, environ={
        'REQUEST_METHOD': 'POST',
        'CONTENT_TYPE': headers['Content-Type'],
    })
    filename = form.getvalue("file-name")
    content = form.getvalue("content")
    file_status = form.getvalue("saved-status")
    if Util.save_file(filename, content, file_status):
        self.server.send_response(200)
        self.server.send_header('Content-type', 'text/html')
        self.server.end_headers()
        # NOTE(review): on Python 3 wfile.write expects bytes; writing a str here
        # would raise — confirm which interpreter this server runs under.
        self.server.wfile.write("Success")
    else:
        self.server.send_response(403)
        self.server.send_header('Content-type', 'application/json')
        self.server.end_headers()
def train(self, directory):
    """Fit the email classifier on <directory>/train.csv and return the model.

    :param directory: folder containing train.csv (email bodies, labels)
    :return: the trained tf.keras model (also stored on self.model)
    """
    train_email_bodies, train_labels = Util.read_csv(directory + "/train.csv")
    # text -> padded integer sequences
    self.tokenizer.fit_on_texts(train_email_bodies)
    word_index = self.tokenizer.word_index
    train_sequences = self.tokenizer.texts_to_sequences(train_email_bodies)
    train_padded = pad_sequences(
        train_sequences,
        maxlen=self.max_length,
        padding=self.padding_type,
        truncating=self.trunc_type,
    )
    # labels -> integer ids
    self.label_tokenizer.fit_on_texts(train_labels)
    training_label_seq = np.array(
        self.label_tokenizer.texts_to_sequences(train_labels)
    )
    # label_tokenizer.word_index
    # NOTE(review): a 12-unit sigmoid output combined with
    # sparse_categorical_crossentropy is unusual — softmax is the conventional
    # pairing; confirm this was intentional before changing.
    self.model = tf.keras.Sequential(
        [
            tf.keras.layers.Embedding(self.vocab_size, 64),
            tf.keras.layers.Conv1D(128, 5, activation="relu"),
            tf.keras.layers.GlobalAveragePooling1D(),
            tf.keras.layers.Dense(64, activation="relu"),
            tf.keras.layers.Dense(12, activation="sigmoid"),
        ]
    )
    # `lr` is the legacy argument name (newer Keras uses learning_rate)
    opt = tf.keras.optimizers.Adam(lr=0.01, decay=1e-6)
    self.model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=opt,
        metrics=["accuracy"],
    )
    history = self.model.fit(
        train_padded, training_label_seq, epochs=self.num_epochs, verbose=2
    )
    return self.model
async def temprole(self, ctx: commands.Context, user: discord.Member, durationNumber: int, durationIdentifier: str, *, rolename: str):
    """Gives a role to someone temporarily.

    :param user: member to receive the role
    :param durationNumber: how long, in units of durationIdentifier
    :param durationIdentifier: time unit (e.g. s/m/h/d per Util.convertToSeconds)
    :param rolename: case-insensitive substring of the role name
    """
    role = discord.utils.find(lambda m: rolename.lower() in m.name.lower(), ctx.guild.roles)
    duration = Util.convertToSeconds(durationNumber, durationIdentifier)
    until = time.time() + duration
    if not role:
        await ctx.send("That role doesn't exist, try again?")
        # fixed: previously fell through and called add_roles(None)
        return
    try:
        await user.add_roles(role)
        await ctx.send(
            f"Added {role.name} to {user} for {durationNumber}{durationIdentifier}."
        )
        # hold the role for the requested duration, then remove it
        await asyncio.sleep(duration)
        await user.remove_roles(role)
    except discord.Forbidden:
        await ctx.send("I need **Manage Roles** for this.")
def start_crawl(self):
    """Run every per-site crawler (_crawl_site_<key>) listed in CRAWLED_SITES.
    Failures are logged and crawling continues with the next site."""
    sites = CRAWLED_SITES.keys()
    for k in sites:
        func_name = '_crawl_site_%s' % k
        try:
            # getattr raises AttributeError when a site has no crawler method
            if getattr(self, func_name):
                func = getattr(self, func_name)
                func()
        except AttributeError:
            Util.log_to_file(traceback.format_exc(), 1)
            pass
        except:
            # deliberate best-effort: any crawler failure is logged, not fatal
            Util.log_to_file('Exception occured when crawl website {0}.'.format(CRAWLED_SITES[k]), 0)
            Util.log_to_file(traceback.format_exc(), 1)
def get_ip_geo_localization(self, ip):
    """Look up geo location and domain for `ip` in the localization CSV.

    :param ip: dotted-quad IP string (None or blank returns None)
    :return: {"geo_loc": ..., "domain": ...} or None for empty/None input
    """
    self._logger.debug("Getting {0} geo localization ".format(ip))
    # check None BEFORE calling .strip(); the old order raised AttributeError
    # when ip was None.
    if ip is not None and ip.strip() != "":
        # bisect over precomputed range starts selects the matching CSV line
        result = linecache.getline(
            self._ip_localization_file,
            bisect.bisect(self._ip_localization_ranges, Util.ip_to_int(ip)))
        # fixed: the stripped value was previously discarded (no-op call)
        result = result.strip('\n')
        reader = csv.reader([result])
        # next(reader) works on both py2 and py3 (was reader.next(), py2-only)
        row = next(reader)
        geo_loc = ";".join(row[4:6]) + " " + ";".join(row[8:9])
        domain = row[9:10][0]
        result = {"geo_loc": geo_loc, "domain": domain}
        return result
def get(self, inputs, operation):
    """Split ';'-separated inputs into raster URLs (downloaded to local files)
    and plain texts, POST both to the local processing service, and return its
    decoded JSON reply.

    :param inputs: ';'-separated mixed list of http URLs and text entries
    :param operation: operation name forwarded to the service
    """
    entries = str(inputs).split(";")
    paths = []
    texts = []
    # renamed loop variable: `input` shadowed the builtin
    for entry in entries:
        # NOTE(review): only "http://" is matched — https URLs fall through to
        # the text bucket; confirm whether that is intended.
        if "http://" in entry:
            path = utils.downloadRasterFile(entry)
            path = "file:///" + path["file"].replace("\\", "/")
            paths.append(path)
        else:
            texts.append(entry)
    result = requests.post("http://127.0.0.1:2525", data=json.dumps({
        "files": paths,
        "texts": ",".join(texts),
        "operation": operation
    }))
    return json.loads(result.text)
def query_refund(self, transaction_id='', out_trade_no='', out_refund_no='', refund_id='', **kwargs):
    """
    Query a refund; at least one of the four identifiers must be supplied.
    :return: tuple(code, result)
    :raises WxPayError: when none of the identifiers is given
    """
    # Fixed validation: the old expression `not (a and b and c, d)` built a
    # 2-tuple, which is always truthy, so the check never fired. The error
    # message says "at least one", so the identifiers are OR-ed.
    if not (transaction_id or out_trade_no or out_refund_no or refund_id):
        raise WxPayError(u"订单查询接口中,transaction_id and out_refund_no and out_trade_no, refund_id至少填一个")
    kwargs.update(
        appid=self._appid,
        mch_id=self._mchid,
        nonce_str=Util.generate_nonce(20),
        transaction_id=transaction_id,
        out_refund_no=out_refund_no,
        out_trade_no=out_trade_no,
        refund_id=refund_id
    )
    # sign last, over the complete payload
    kwargs.update(sign=self._generate_sign(**kwargs)) #sign
    return self._post('/pay/refundquery', kwargs, need_cert=True)
def rock_spawner(self):
    """Spawn one rock at a random position with score-scaled velocity.
    Skipped when the rock limit is reached or the spawn point is within
    200px of the ship (avoids unfair instant collisions)."""
    if self.is_started:
        if len(self.rock_group) >= Rock.LIMIT:
            return
        rock_pos = [
            random.randrange(0, self.canvas.CANVAS_WIDTH),
            random.randrange(0, self.canvas.CANVAS_HEIGHT)
        ]
        # base velocity in [-.3, .3) per axis, spin in [-.1, .1)
        rock_vel = [random.random() * .6 - .3, random.random() * .6 - .3]
        rock_avel = random.random() * .2 - .1
        # difficulty scaling: rocks get faster as the score grows
        add_vel = self.score * 0.5 + 1
        rock_vel = [rock_vel[0] * add_vel, rock_vel[1] * add_vel]
        rock = Rock(rock_pos, rock_vel, 0, rock_avel, self.canvas)
        distance = Util.dist(rock.get_position(), self.ship.get_position())
        if distance < 200:
            return
        self.rock_group.add(rock)
def on_webgubun_activated(self, text):
    """Category-combo handler: repopulate the site combo for the chosen
    category ("email"/"websites") and refresh the credential widgets;
    "banks" is not implemented yet and shows an error popup.

    :param text: selected category name
    """
    # reset credential inputs before switching category
    self.web_id.setPlaceholderText("")
    self.web_pw.setPlaceholderText("")
    self.web_id_cb.clear()
    if text == "email":
        self.web_cb.clear()
        self.web_cb.addItems(self.email_lst)
        text = self.web_cb.currentText()
        self.setup_web_widgets(text)
    elif text == "websites":
        self.web_cb.clear()
        self.web_cb.addItems(self.website_lst)
        text = self.web_cb.currentText()
        self.setup_web_widgets(text)
    elif text == "banks":
        # banks support not implemented: clear the combo and tell the user
        self.web_cb.clear()
        popup = Util.Errpop()
        msg = "개발중...<br>comming soon..."
        popup.critical_pop(msg)
def refresh_proxy_pool(cls):
    '''
    Re-validate both the https and http proxy pools, dropping dead proxies.
    :return: human-readable validation report string
    '''
    all_https_proxies = cls.db.sget_all_https_item()
    all_http_proxies = cls.db.sget_all_http_item()
    all_proxies = list(all_https_proxies) + list(all_http_proxies)
    # tallies for the report: totals and how many were dropped per pool
    http_total_count, http_abandoned_count = len(all_http_proxies), 0
    https_total_count, https_abandoned_count = len(all_https_proxies), 0
    for item in all_proxies:
        proxy_item = json.loads(item)
        proxy_type = proxy_item['type']
        result = Util.valid_proxy(proxy_item['ip'], proxy_item['port'], proxy_type)
        # failed proxies are removed from their respective pool
        if not result and proxy_type.lower() == 'http':
            cls.db.sremove_http_item(item)
            http_abandoned_count += 1
        elif not result and proxy_type.lower() == 'https':
            cls.db.sremove_https_item(item)
            https_abandoned_count += 1
        else:
            pass
    validation_report = 'Validation Report: \n' \
                        '{0} proxies are validated, {1} http proxy, {2} https proxy; \n' \
                        '{3} proxies are unavailable and abandoned, {4} http proxy, {5} https proxy; \n' \
                        '{6} proxies are available and remain, {7} http proxy, {8} https proxy.'.format(
        str(http_total_count + https_total_count), str(http_total_count), str(https_total_count),
        str(http_abandoned_count + https_abandoned_count), str(http_abandoned_count), str(https_abandoned_count),
        str(len(all_proxies) - http_abandoned_count - https_abandoned_count),
        str(http_total_count - http_abandoned_count),
        str(https_total_count - https_abandoned_count)
    )
    return validation_report
def setup_iftAdapter(self):
    """Ensure the Infotech certificate adapter is set up: offer to install it
    when the DLL is missing, otherwise load the certificate name/password and
    persist them into the NTS config JSON."""
    if not os.path.isfile(
            r'C:\Infotech\Common\iftWinExAdapter.dll'):  # if the Infotech module is NOT installed
        title = "공인인증서 모듈설치"
        msg = "지금 공인인증서 모듈을 설치하시겠습니까 ??<br>나중에 설치가능 합니다!!"
        # yes/no dialog; install now or defer
        inst = Util.MsgBoxTF(title, msg)
        TF = inst.initUI()
        if TF == True:
            driverutil.setup_iftCertAdapter()
        else:
            pass
    elif os.path.isfile(
            r'C:\Infotech\Common\iftWinExAdapter.dll'):  # if the Infotech module IS installed
        # run the public-certificate module and cache credentials
        cert_nm, cert_pw = iftutil.cert_nm_pw()
        self.cert_nm = cert_nm
        self.cert_pw = cert_pw
        nts_dict['secret']['공인인증서명칭'] = cert_nm
        nts_dict['secret']['공인인증서비번'] = cert_pw
        # save the updated dict back to the NTS json file
        jsconverter.dict_to_json(nts_dict, FULLPATH_NTS_JSON)
def validate(cls):
    '''
    Pop crawled proxies one by one; each proxy that validates successfully is
    stored in the matching (http/https) valid-proxy pool. Stops when the
    crawled queue is empty.
    :return:
    '''
    while True:
        item = cls.db.rpop_crawled_item()
        if item:
            proxy_item = json.loads(item)
            proxy_type = proxy_item['type']
            result = Util.valid_proxy(proxy_item['ip'], proxy_item['port'], proxy_type)
            # route valid proxies into the pool matching their scheme;
            # invalid ones are simply dropped
            if result and proxy_type.lower() == 'http':
                cls.db.sadd_valid_http_item(item)
            elif result and proxy_type.lower() == 'https':
                cls.db.sadd_valid_https_item(item)
            else:
                pass
        else:
            break
def predict(self, **data):
    """Classify a single input with the BERT model and return the category label.

    :param data: raw fields consumed by self.data.predict_data(**data)
    :return: decoded category for the (single) prediction
    """
    x_data = self.data.predict_data(**data)
    predict_dataloader, predict_examples_len = Util.load_data(news=x_data,
                                                              category=None,
                                                              data_type='predict',
                                                              label_list=self.args.label_list,
                                                              max_length=self.args.max_seq_length,
                                                              tokenizer=self.tokenizer,
                                                              batch_size=1)
    # NOTE(review): the return inside the loop means only the FIRST batch is
    # evaluated — fine with batch_size=1 and a single example, but confirm.
    for step, batch in enumerate(predict_dataloader):
        batch = tuple(t.to(DEVICE) for t in batch)
        _, input_ids, input_mask, segment_ids = batch
        input_ids = input_ids.to(DEVICE)
        input_mask = input_mask.to(DEVICE)
        segment_ids = segment_ids.to(DEVICE)
        # inference only: no gradients needed
        with torch.no_grad():
            logits = self.model(input_ids, segment_ids, input_mask, labels=None)
        return self.data.to_categorys(torch.argmax(logits.view(-1, len(self.args.label_list))).cpu().numpy().tolist())
def save():
    """Persist the posted user form to MySQL and render a thank-you page;
    on any failure, log the error as JSON and return a generic error page."""
    # pre-bind so the finally block is safe even if connect() raises;
    # the old code hit NameError on db/cursor in that case.
    db = None
    cursor = None
    try:
        print("User %s logged in!" % request.form['fname'])
        db = mysql.connector.connect(host=host, user=user, password=password, database=database)
        cursor = db.cursor(buffered=True)
        util = Util()
        status, uid = util.saveUser(db, cursor, request)
        if status and status == 200:
            return render_template("thankyou.html", fname=request.form['fname'], uid=uid)
        raise Exception("Unable to insert data!")
    except Exception as e:
        print(json.dumps({"error": str(e)}))
        return "<h1>Oops! Something went wrong.. Could you try after sometime or reach out to the host!</h1>"
    finally:
        # release in reverse order of acquisition (cursor before connection)
        if cursor is not None:
            cursor.close()
        if db is not None:
            db.close()
def PerFreqDist(path):
    """Compute, for every (lowercased) word seen under `path`, its document
    frequency divided by the total token count across all files.

    :param path: directory whose files are listed via Util.listdir
    :return: dict word -> document_count / total_token_count
    """
    news = Util.listdir(path)
    count_word = 0
    FreqWord = {}
    # single pass per file (the original read every file twice and leaked
    # file handles by not closing on error); results are identical.
    for new in news:
        with open(new, encoding='utf-8') as reader:
            text = reader.read()
        words = text.split()
        count_word += len(words)
        # FreqDist keys are unique lowercased words, so each word counts
        # once per file: a document frequency.
        fdist = nltk.FreqDist(word.lower() for word in words)
        for word in fdist:
            if word in FreqWord.keys():
                FreqWord[word] = FreqWord[word] + 1
            else:
                FreqWord[word] = 1
    # normalize by the corpus token count (NOTE: df / token-count is unusual —
    # presumably intentional for this scoring scheme; confirm with the caller)
    for word in FreqWord.keys():
        FreqWord[word] = FreqWord[word] / count_word
    return FreqWord
def Get2gram(author_name):
    """Build a smoothed, normalized POS-bigram distribution for an author's
    corpus and collect the author's function words; both are written/returned.

    :param author_name: directory name under Preparation/data/
    :return: (tag bigram -> probability dict, deduplicated function-word list)
    """
    word_dict = {}  # NOTE(review): never used — candidate for removal
    files = Util.listdir(sys.path[0] + '/Preparation/data/' + author_name)
    tagger = StanfordPOSTagger(model_filename, path_to_jar)
    # Penn Treebank POS tags plus a few punctuation tags
    a = "CC CD DT EX FW IN JJ JJR JJS LS MD NN NNS NNP NNPS PDT POS PRP PRP$ RB RBR RBS RP SYM TO UH VB VBD VBG VBN VBP VBZ WDT WP WP$ WRB , $ :"
    Temp = [x for x in range(len(a.split()))]
    tag_dict = {}
    tag = a.split()
    func_list = []
    # initialize every possible tag bigram to 0
    for i in itertools.product(Temp, repeat=2):
        tag_dict[tag[i[0]] + " " + tag[i[1]]] = 0
    for file in files:
        with open(file, 'r', encoding='utf-8') as Reader:
            for index, line in enumerate(Reader):
                sent_real = line.split()
                sent_tag = tagger.tag(sent_real)
                # collect function words (func_tag is a module-level set/list)
                for WordTag in sent_tag:
                    if WordTag[1] in func_tag:
                        func_list.append(WordTag[0])
                # count adjacent tag pairs
                for i in range(len(sent_tag) - 1):
                    tag_dict[sent_tag[i][1] + ' ' + sent_tag[i + 1][1]] = tag_dict[sent_tag[i][1] + ' ' + sent_tag[i + 1][1]] + 1
    func_list = list(set(func_list))
    # add-0.5 smoothing for unseen bigrams, then normalize to probabilities
    # NOTE(review): the loop variable `tag` below shadows the tag list above —
    # harmless here because the list is no longer needed, but confusing.
    for tag in tag_dict.keys():
        if tag_dict[tag] == 0:
            tag_dict[tag] = 0.5
    count = sum(tag_dict.values())
    for tag in tag_dict.keys():
        tag_dict[tag] = tag_dict[tag] / count
    with open(sys.path[0] + '/Preparation/save/2gram_tag_' + author_name, 'w', encoding='utf-8') as Writer:
        for tag in tag_dict.keys():
            Writer.write(str(tag) + ':' + str(tag_dict[tag]) + '\n')
    return tag_dict, func_list