def _get_runnable_keywords(self, context, args):
    """Collect the keywords for ``args`` whose names contain no variable syntax."""
    runnable = Keywords([])
    for candidate in self._get_keywords(args):
        # Keywords whose name still contains variable syntax cannot be run directly.
        if not self._variable_syntax_in(candidate.name, context):
            runnable.add_keyword(candidate)
    return runnable
def __parse_dedup_detail(self, bidpools):
    """Extract ids, update dates and slim resume details from raw bid-pool rows.

    Returns a tuple ``(ids, updatetimes, resume_details)`` whose lists are
    aligned by index with ``bidpools``.
    """
    from datetime import date

    ids = []
    updatetimes = []
    resume_details = []
    for bid in bidpools:
        ids.append(bid["userId"])
        # Prefer the record's own update date; fall back to today when it is
        # missing OR not in YYYY-MM-DD form (the original crashed with
        # AttributeError when re.search() returned None).
        match = None
        if bid.get("updateTime", None):
            match = re.search(r'\d{4}-\d{2}-\d{2}', str(bid["updateTime"]))
        updatetimes.append(match.group() if match
                           else date.today().strftime("%Y-%m-%d"))
        detail = {
            "workyear": bid.get('jobYeay'),
            "sex": Keywords().Sex(bid.get('sex')) if bid.get("sex", None) else "",
            "latestcompany": bid.get('latelyCompName'),
            "desworklocation": bid.get('cityName'),
            "latestdegree": Keywords().Education(str(bid.get('education'))) if bid.get("education", None) else "",
            "desindustry": bid.get('jobTitle'),
        }
        # Drop empty-string entries. Build a new dict instead of popping while
        # iterating (the original raised RuntimeError on Python 3).
        resume_details.append({k: v for k, v in detail.items() if v != ""})
    return ids, updatetimes, resume_details
def __init__(self, keyword, libname):
    """Capture the parts of a parsed user keyword needed to run it."""
    self._libname = libname
    self._timeout = keyword.timeout
    self._keyword_args = keyword.args.value
    self.name = keyword.name
    self.keywords = Keywords(keyword.steps)
    self.return_value = keyword.return_.value
    # _doc keeps the raw documentation text; doc is later replaced by a
    # variable-resolved copy.
    self._doc = keyword.doc.value
    self.doc = self._doc
class RunnableTestCase(BaseTestCase):
    # Executable test case: resolves settings, runs setup/keywords/teardown
    # and records status via TestRunErrors. (Python 2 code: `except X, e`.)

    def __init__(self, tc_data, parent, defaults):
        BaseTestCase.__init__(self, tc_data.name, parent)
        self.doc = tc_data.doc.value
        # Test-level settings fall back to the suite defaults when unset.
        self.setup = defaults.get_setup(tc_data.setup)
        self.teardown = defaults.get_teardown(tc_data.teardown)
        self.tags = defaults.get_tags(tc_data.tags)
        self.timeout = defaults.get_timeout(tc_data.timeout)
        template = defaults.get_template(tc_data.template)
        self.keywords = Keywords(tc_data.steps, template)

    def run(self, context, suite_errors):
        # Entry point: initialize, then either execute or mark as not run.
        self._suite_errors = suite_errors
        self._start_run(context)
        if self.run_errors.is_allowed_to_run():
            self._run(context)
        else:
            self._not_allowed_to_run()
        self._end_run(context)

    def _start_run(self, context):
        self.run_errors = TestRunErrors(self._suite_errors)
        self.status = 'RUNNING'
        self.starttime = utils.get_timestamp()
        self.run_errors.init_err(self._init_test(context))
        context.start_test(self)

    def _init_test(self, context):
        # Resolve variables in all settings; return an error message (or
        # None) describing why the test cannot run.
        errors = []
        self.doc = context.replace_vars_from_setting('Documentation', self.doc, errors)
        self.setup.replace_variables(context.get_current_vars(), errors)
        self.teardown.replace_variables(context.get_current_vars(), errors)
        self.tags = utils.normalize_tags(context.replace_vars_from_setting('Tags', self.tags, errors))
        self.timeout.replace_variables(context.get_current_vars())
        if errors:
            return 'Test case initialization failed:\n%s' % '\n'.join(errors)
        if not self.name:
            return 'Test case name is required.'
        if not self.keywords:
            return 'Test case contains no keywords'
        return None

    def _run(self, context):
        self.timeout.start()
        self._run_setup(context)
        if not self.run_errors.setup_failed():
            try:
                self.keywords.run(context)
            except ExecutionFailed, err:
                self.run_errors.kw_err(unicode(err))
                self.keyword_failed(err)
        # Teardown runs regardless of keyword failures.
        context.set_test_status_before_teardown(*self._report_status())
        self._run_teardown(context)
        self._report_status_after_teardown()
def __init__(self):
    """Create the Foursquare client and preload the flattened category list."""
    self.FOURSQUARE_API_URL = 'https://api.foursquare.com/v2/venues/search'
    # Credentials come from the environment; raises KeyError when unset.
    client = foursquare.Foursquare(
        client_id=os.environ['FOURSQUARE_CLIENT_ID'],
        client_secret=os.environ['FOURSQUARE_CLIENT_SECRET'],
        lang='ja')
    self._foursquare_client = client
    self._keyword = Keywords()
    self._categories = self._flatten_categories(
        self._foursquare_client.venues.categories())
def __init__(self, tc_data, parent, defaults):
    """Build a runnable test case, filling unset settings from ``defaults``."""
    BaseTestCase.__init__(self, tc_data.name, parent)
    self.doc = tc_data.doc.value
    # Every test-level setting falls back to the suite-wide default.
    self.setup = defaults.get_setup(tc_data.setup)
    self.teardown = defaults.get_teardown(tc_data.teardown)
    self.tags = defaults.get_tags(tc_data.tags)
    self.timeout = defaults.get_timeout(tc_data.timeout)
    self.keywords = Keywords(tc_data.steps,
                             defaults.get_template(tc_data.template))
class RunnableTestCase(BaseTestCase):
    # Executable test case: resolves settings, runs setup/keywords/teardown
    # and records status via TestRunErrors. (Python 2 code: `except X, e`.)

    def __init__(self, tc_data, parent, defaults):
        BaseTestCase.__init__(self, tc_data.name, parent)
        self.doc = tc_data.doc.value
        # Test-level settings fall back to the suite defaults when unset.
        self.setup = defaults.get_setup(tc_data.setup)
        self.teardown = defaults.get_teardown(tc_data.teardown)
        self.tags = defaults.get_tags(tc_data.tags)
        self.timeout = defaults.get_timeout(tc_data.timeout)
        template = defaults.get_template(tc_data.template)
        self.keywords = Keywords(tc_data.steps, template)

    def run(self, context, suite_errors):
        # Entry point: initialize, then either execute or mark as not run.
        self._suite_errors = suite_errors
        self._start_run(context)
        if self.run_errors.is_allowed_to_run():
            self._run(context)
        else:
            self._not_allowed_to_run()
        self._end_run(context)

    def _start_run(self, context):
        self.run_errors = TestRunErrors(self._suite_errors)
        self.status = 'RUNNING'
        self.starttime = utils.get_timestamp()
        self.run_errors.init_err(self._init_test(context))
        context.start_test(self)

    def _init_test(self, context):
        # Resolve variables in all settings; return an error message (or
        # None) describing why the test cannot run.
        errors = []
        self.doc = context.replace_vars_from_setting('Documentation', self.doc, errors)
        self.setup.replace_variables(context.get_current_vars(), errors)
        self.teardown.replace_variables(context.get_current_vars(), errors)
        self.tags = utils.normalize_tags(
            context.replace_vars_from_setting('Tags', self.tags, errors))
        self.timeout.replace_variables(context.get_current_vars())
        if errors:
            return 'Test case initialization failed:\n%s' % '\n'.join(errors)
        if not self.name:
            return 'Test case name is required.'
        if not self.keywords:
            return 'Test case contains no keywords'
        return None

    def _run(self, context):
        self.timeout.start()
        self._run_setup(context)
        if not self.run_errors.setup_failed():
            try:
                self.keywords.run(context)
            except ExecutionFailed, err:
                self.run_errors.kw_err(unicode(err))
                self.keyword_failed(err)
        # Teardown runs regardless of keyword failures.
        context.set_test_status_before_teardown(*self._report_status())
        self._run_teardown(context)
        self._report_status_after_teardown()
def GET(self):
    """Serve keyword queries: by ad group when ``adgroupId`` is given,
    otherwise all targeted keywords for the requesting user."""
    web.header('Content-Type', 'text/html;charset=UTF-8')
    form = web.input(email="*****@*****.**", adgroupId=None)
    logger.info("Received user " + form.email + " request to add or update ad keywords")
    session = createHttpsSession(form.email)
    handler = Keywords(cf.get("apiservices", "keywords"), session, logger)
    if form.adgroupId is None:
        return handler.query_all_targeted_keywords(email=form.email)
    return handler.query_keywords(email=form.email, adgroupId=form.adgroupId)
def is_person_page(page):
    """Determine whether the given page object is a person page.

    A page qualifies when its title does not look like a list page and at
    least one of its categories matches the "people" keyword set while not
    being a list/maintenance category.
    """
    # NOTE(review): substring test also matches titles like "journalist" —
    # confirm that is acceptable upstream.
    if "list" in page.title.lower():
        return False
    # any() short-circuits instead of materializing the whole filtered list.
    return any(
        Keywords.check_match("people", c)
        and "list" not in c.lower()
        and "errors" not in c.lower()
        and "wikipedia" not in c.lower()
        for c in page.categories
    )
def __init__(self):
    """Create the Foursquare client and preload the flattened category list."""
    self.FOURSQUARE_API_URL = 'https://api.foursquare.com/v2/venues/search'
    # Credentials come from the SECRETS mapping.
    client = foursquare.Foursquare(
        client_id=SECRETS['FOURSQUARE_CLIENT_ID'],
        client_secret=SECRETS['FOURSQUARE_CLIENT_SECRET'],
        lang='ja'
    )
    self._foursquare_client = client
    self._keyword = Keywords()
    self._categories = self._flatten_categories(
        self._foursquare_client.venues.categories())
def is_person_page(page):
    """Determine whether the given page object is a person page.

    A page qualifies when its title does not look like a list page and at
    least one of its categories matches the "people" keyword set while not
    being a list/maintenance category.
    """
    if "list" in page.title.lower(
    ):  # TODO This catches "list" as in "journalist", etc.
        return False
    # any() short-circuits instead of materializing the whole filtered list.
    return any(
        Keywords.check_match("people", c)
        and "list" not in c.lower()
        and "errors" not in c.lower()
        and "wikipedia" not in c.lower()
        for c in page.categories
    )
def is_animal_page(page):
    """Determine whether the given page object is an animal page.

    Person pages are excluded first so that a page cannot be classified as
    both a person and an animal.
    """
    if "list" in page.title.lower():
        return False
    if is_person_page(page):
        return False
    # any() short-circuits instead of materializing the whole filtered list.
    return any(
        Keywords.check_match("animals", c)
        and "list" not in c.lower()
        and "errors" not in c.lower()
        and "wikipedia" not in c.lower()
        for c in page.categories
    )
def run_import(self):
    # Pull news from every configured source, fingerprint (shingles) and
    # keyword-weight each article, then persist everything.
    # (Python 2 code: uses the print statement.)
    for source in self.get_sources():
        try:
            _parser_class = Parser_Provider.get_parser(source.name)
            _parser = _parser_class(source.rss_url)
            for news in _parser.parse_news():
                if news['content']:
                    news_words = Parse_Text.parse(news['content'], lang = source.lang)
                    print news_words
                    shingles = Shingles.generate(news_words)
                    keywords_weight = Keywords.get_weights(news_words)
                    news['source_id'] = source.id
                    news_id = self.insert_news(news)
                    # Shingles and keywords are stored only when the insert
                    # produced a row id.
                    if news_id:
                        self.insert_shingles(news_id, shingles)
                        self.insert_keywords(news_id, keywords_weight)
        except ValueError:
            # NOTE(review): silently skips a whole source on ValueError —
            # confirm this best-effort behavior is intentional.
            pass
class Bot:
    """Chat bot that turns a sentence into a spot-recommendation message."""

    def __init__(self):
        self.keyword_fetcher = Keywords()
        self.spot_client = Spot()

    def fetch_spot(self, sentence):
        """Extract keywords and a location from ``sentence`` and build a reply.

        Returns a dict with a ``body`` text and, when a spot was found, an
        ``image`` URL.
        """
        extracted = self.keyword_fetcher.extract_from_sentence(sentence)
        # extracted[1] is presumably a set of location candidates — the first
        # (arbitrary-order) entry is used for the search; TODO confirm.
        location = list(extracted[1])[0]
        spot = self.spot_client.recommend_spot(location, extracted[0])
        message = {}
        if spot:
            parts = [spot['name'], 'はどうでしょうか?', 'オススメポイントは',
                     spot['reason'], ' です']
            message['body'] = ''.join(parts)
            message['image'] = spot['image']
        else:
            message['body'] = '申し訳ありません、候補が見つかりませんでした'
        return message
# NOTE(review): hard-coded verify/page tokens are a security risk — load
# them from the environment or a secrets store, and rotate the exposed values.
VERIFY_TOKEN = "pe:/4H>}]245kph"
PAGE_ACCESS_TOKEN = "EAAOTV8ZBzN5EBAEGGkXKbgl7uCzrgPlZCo2fGSHZBbTnVdixE8oxl3ROtVfZB5wT0nOZCxVz2APPpnxZAMDy48vnPFDKd0gsu41pPSuVtlLvYZASZBDZAMnCfNo5YvCpaZC6RPVDmZCTHIYm3gKCDP8vPmQwfukTJD3QVSQQDLoGQKeMwZDZD"

# Instantiate data object
# In real situations, load data from API / database
dataObj = Data()
luminaria_embutir = dataObj.luminaria_embutir
controladores = dataObj.controladores
eletrofita = dataObj.eletrofita
fita_led = dataObj.fita_led
lampada_led = dataObj.lampada_led
luminaria_led = dataObj.luminaria_led
lustres = dataObj.lustres

# Instantiate keywords
keywords = Keywords()


@app.route('/webhook', methods=['GET', 'POST'])
def index():
    # Verify webhook
    if request.method == 'GET':
        mode = request.values['hub.mode']
        token = request.values['hub.verify_token']
        challenge = request.values['hub.challenge']
        # Echo the challenge back only when the verify token matches.
        if mode is not None and token is not None:
            if mode == 'subscribe' and token == VERIFY_TOKEN:
                print("WEBHOOK_VERIFIED")
                return challenge, 200
    # NOTE(review): the handler appears truncated here — the POST branch and
    # the failed-verification paths return nothing.
class Spot:
    """Recommends a Foursquare venue near a location that matches given keywords."""

    def __init__(self):
        self.FOURSQUARE_API_URL = 'https://api.foursquare.com/v2/venues/search'
        # Credentials are read from the SECRETS mapping.
        self._foursquare_client = foursquare.Foursquare(
            client_id=SECRETS['FOURSQUARE_CLIENT_ID'],
            client_secret=SECRETS['FOURSQUARE_CLIENT_SECRET'],
            lang='ja'
        )
        categories = self._foursquare_client.venues.categories()
        self._categories = self._flatten_categories(categories)
        self._keyword = Keywords()

    def _flatten_categories(self, nested_categories):
        # Recursively flatten the category tree into (name, id) tuples.
        result = []
        for categories in nested_categories['categories']:
            result.append((categories['name'], categories['id']))
            for sub_category in self._flatten_categories(categories):
                result.append(sub_category)
        return result

    def _match_category_ids(self, keywords):
        # Keep only categories whose name appears among the keywords.
        matched_category_ids = [(category[0], category[1])
                                for category in self._categories
                                if category[0] in keywords]
        return matched_category_ids

    def recommend_spot(self, location, keywords):
        """Return a dict with name/reason/image for a recommended venue, or None."""
        target_categories = self._match_category_ids(keywords)
        target_category_names = set(category[0] for category in target_categories)
        target_category_ids = set(category[1] for category in target_categories)
        params={
            'near':location,
            'categoryId':reduce(lambda i, s:i+','+s, target_category_ids),
            'intent': 'browse',
            'limit':50,
        }
        try :
            response = self._foursquare_client.venues.search(params=params)
        except:
            # NOTE(review): bare except hides all errors (including bad
            # credentials) — consider narrowing.
            return None
        ## Reccomend comment
        candidates = []
        for venue in response['venues']:
            venue_id = venue['id']
            candidate = {}
            # Only consider reasonably popular venues with at least two tips.
            if venue['stats']['tipCount'] >= 2 and venue['stats']['checkinsCount'] >= 1500:
                venue_detail = self._foursquare_client.venues(venue_id)['venue']
                if len(venue_detail['tips']['groups']) > 1:
                    tips = venue_detail['tips']['groups'][1]['items']
                else:
                    tips = venue_detail['tips']['groups'][0]['items']
                word_counter = Counter()
                for tip in tips:
                    # Skip tips explicitly marked as non-Japanese.
                    if 'lang' in tip and tip['lang'] != 'ja':
                        continue
                    keywords, _ = self._keyword.extract_from_sentence(tip['text'])
                    for keyword in keywords:
                        # Weight each keyword by the tip's like count.
                        like_count = tip['likes']['count']
                        word_counter[keyword] += 1 * (1 if like_count == 0 else like_count)
                print(venue['id'], venue['name'], word_counter)
                tags = []
                for key, count in word_counter.most_common(10):
                    if count > 1:
                        tags.append(key)
                if len(tags) > 0:
                    candidate['name'] = venue['name']
                    candidate['tags'] = tags
                    if 'photos' in venue_detail and venue_detail['photos']['groups'] and venue_detail['photos']['groups'][0]['items']:
                        photo = venue_detail['photos']['groups'][0]['items'][0]
                        candidate['image'] = photo['prefix'] + '128x128' +photo['suffix']
                    candidates.append(candidate)
            # Stop at the first acceptable candidate.
            if len(candidates) > 0:
                break
        if len(candidates) == 0:
            return None
        # Instantly return most popular candidate
        print(candidates)
        top_candidate = candidates[0]
        ret = {
            'name': top_candidate['name'],
            'reason': reduce(lambda i, s: i+' '+s, top_candidate['tags']),  # Add recommend reason
            'image': top_candidate['image']
        }
        return ret
class Analyze(object):
    """Lazy-loading facade over segmentation (CWS), POS and keyword models."""

    def __init__(self):
        # Models are loaded lazily; ``None`` means "not loaded yet".
        self.keywords_model = None
        self.pos_model = None
        self.seg_model = None
        # BUGFIX: seg_mmseg was never initialized, so init_mmseg() raised
        # AttributeError on its very first `is None` check.
        self.seg_mmseg = None
        self.init_cws()
        self.init_pos()

    def init_cws(self):
        # Load the BiLSTM-CRF segmentation model once.
        if self.seg_model is None:
            self.seg_model = bilstm_crf.Predict(
                add_curr_dir('model/cws.model'))

    def init_pos(self):
        # Load the BiLSTM-CRF part-of-speech model once.
        if self.pos_model is None:
            self.pos_model = bilstm_crf.Predict(
                add_curr_dir('model/pos.model'))

    def init_mmseg(self):
        # Load the MMSeg segmenter once.
        if self.seg_mmseg is None:
            self.seg_mmseg = mmseg.MMSeg()

    @staticmethod
    def __lab2word(sentence, labels):
        """Rebuild words from per-character BMES-style labels.

        B/M start or continue a word, E closes it; any other label yields a
        single-character word (discarding a half-built prefix, as the
        original loop did). A trailing unterminated word is flushed.
        """
        words = []
        tmp_word = ""
        for w, label in zip(sentence, labels):
            if label == "B" or label == "M":
                tmp_word += w
            elif label == "E":
                tmp_word += w
                words.append(tmp_word)
                tmp_word = ""
            else:
                tmp_word = ""
                words.append(w)
        if tmp_word:
            words.append(tmp_word)
        return words

    def cws_text(self, sentence):
        """Segment one sentence into a list of words."""
        if sentence == '':
            return ['']
        labels = self.seg_model.predict([sentence])[0]
        return self.__lab2word(sentence, labels)

    def cws_list(self, sentences):
        """Segment a list of sentences; returns a list of word lists."""
        all_labels = self.seg_model.predict(sentences)
        return [self.__lab2word(text, labels)
                for text, labels in zip(sentences, all_labels)]

    def cws(self, sentence, input='text', model='default'):
        """Chinese word segmentation.

        :param sentence: str or list — text or list of texts per ``input``
        :param input: 'text' for a single sentence, 'batch' for a list
        :param model: 'default' for the BiLSTM-CRF model, 'mmseg' for MMSeg
        :return: list of words (or list of word lists in batch mode)
        """
        if model == 'default':
            self.init_cws()
            if input == 'batch':
                return self.cws_list(sentence)
            return self.cws_text(sentence)
        elif model == 'mmseg':
            self.init_mmseg()
            return self.seg_mmseg.cws(sentence)
        # Unknown model name: keep the original silent fallback.
        return []

    def keywords(self, text, topkey=5):
        """Return the ``topkey`` top-ranked keywords of ``text`` (lazy model)."""
        if self.keywords_model is None:  # idiom fix: was ``== None``
            self.keywords_model = Keywords(tol=0.0001, window=2)
        return self.keywords_model.keywords(text, topkey)

    def pos(self, sentence, input='words'):
        """POS-tag ``sentence`` (a word list; a list of word lists when
        ``input == 'batch'``)."""
        self.init_pos()
        if input == 'batch':
            return self.pos_model.predict(sentence)
        return self.pos_model.predict([sentence])[0]
def keywords(self, text, topkey=5):
    """Return the ``topkey`` highest-ranked keywords extracted from ``text``.

    The Keywords model is created lazily on first use and cached on the
    instance.
    """
    if self.keywords_model is None:  # idiom fix: was ``== None``
        self.keywords_model = Keywords(tol=0.0001, window=2)
    return self.keywords_model.keywords(text, topkey)
def __parse_resume_details(self, response_datas):
    # Normalize coded fields in a raw resume payload into readable values via
    # the Keywords lookup helpers. Mutates and returns the payload.
    # (Python 2 code: uses dict.iteritems().)
    resume = response_datas
    yfkeywords = Keywords()
    _resume = {}
    assert resume
    if 'resume' not in resume:
        raise Exception('No Resume Return! Maybe Over 300!')
    # Simple coded fields: translate only when present, else None.
    _resume["sex"] = yfkeywords.Sex(str(resume["resume"].get("sex"))) if resume["resume"].get("sex", None) else None
    _resume["jobState"] = yfkeywords.JobState(str(resume["resume"].get("jobState"))) if resume["resume"].get(
        "jobState") else None
    _resume["maritalStatus"] = yfkeywords.MaritalStatus(str(resume["resume"].get("maritalStatus"))) if resume[
        "resume"].get("maritalStatus") else None
    _resume["expectWorkType"] = yfkeywords.Worktype(str(resume["resume"].get("expectWorkType"))) if resume[
        "resume"].get("expectWorkType", None) else None
    _resume["education"] = yfkeywords.Education(str(resume["resume"].get("education"))) if resume["resume"].get(
        "education", None) else None
    # City-like fields may hold a comma-separated list; translate each part.
    for field in ('expectCity', 'city', 'province', 'hukouProvince', 'hukouCity'):
        if "," in str(resume["resume"].get(field)):
            citys = str(resume["resume"].get(field))
            parsed_citys = []
            for i in citys.split(","):
                parsed_citys.append(yfkeywords.Expectcity(str(i)))
            _resume[field] = ",".join(parsed_citys)
        else:
            _resume[field] = yfkeywords.Expectcity(str(resume["resume"].get(field))) if resume["resume"].get(field, None) else None
    _resume["expectSalary"] = yfkeywords.Expectsalary(str(resume["resume"].get("expectSalary"))) if resume[
        "resume"].get("expectSalary", None) else None
    # jobTitle may also be a comma-separated list.
    if "," in str(resume["resume"].get("jobTitle")):
        jobtitles = str(resume["resume"].get("jobTitle"))
        parsed_jobtitles = []
        for i in jobtitles.split(","):
            parsed_jobtitles.append(yfkeywords.Jobtitle(str(i)))
        _resume["jobTitle"] = ",".join(parsed_jobtitles)
    else:
        _resume["jobTitle"] = yfkeywords.Jobtitle(str(resume["resume"].get("jobTitle"))) if resume["resume"].get(
            "jobTitle", None) else None
    # Write the translated values back onto the payload.
    for k, v in _resume.iteritems():
        resume['resume'][k] = v
    # Translate coded fields inside nested work/education history entries.
    for field in ['work_experiences', 'educations']:
        if field in resume:
            items = []
            for item in resume[field]:
                if 'salary' in item:
                    item["salary"] = yfkeywords.Expectsalary(str(item.get("salary"))) if item.get("salary", None) else None
                if 'compSize' in item:
                    item["compSize"] = yfkeywords.CompSize(str(item.get("compSize"))) if item.get("compSize", None) else None
                if 'compIndustry' in item:
                    item["compIndustry"] = yfkeywords.Industry(str(item.get("compIndustry"))) if item.get(
                        "compIndustry", None) else None
                if 'compProperty' in item:
                    item["compProperty"] = yfkeywords.CompProperty(str(item.get("compProperty"))) if item.get(
                        "compProperty", None) else None
                if 'education' in item:
                    item["education"] = yfkeywords.Education(str(item.get("education"))) if item.get("education", None) else None
                items.append(item)
            resume[field] = items
    return resume
class UserKeywordHandler(object):
    # Executable wrapper around a user-defined keyword.
    # (Python 2 code: `except X, err`, `unicode`.)
    type = "user"

    def __init__(self, keyword, libname):
        self.name = keyword.name
        self.keywords = Keywords(keyword.steps)
        self.return_value = keyword.return_.value
        self._libname = libname
        # _doc keeps the raw documentation; doc is later replaced by a
        # variable-resolved copy in init_keyword().
        self.doc = self._doc = keyword.doc.value
        self._timeout = keyword.timeout
        self._keyword_args = keyword.args.value

    @property
    def longname(self):
        return "%s.%s" % (self._libname, self.name) if self._libname else self.name

    @property
    def shortdoc(self):
        return self.doc.splitlines()[0] if self.doc else ""

    def init_keyword(self, varz):
        # Resolve variables in documentation and timeout before a run.
        self._errors = []
        self.doc = varz.replace_meta("Documentation", self._doc, self._errors)
        self.timeout = KeywordTimeout(self._timeout.value, self._timeout.message)
        self.timeout.replace_variables(varz)

    def run(self, context, arguments):
        context.namespace.start_user_keyword(self)
        try:
            return self._run(context, arguments)
        finally:
            context.namespace.end_user_keyword()

    def _run(self, context, argument_values):
        args_spec = UserKeywordArguments(self._keyword_args, self.longname)
        variables = context.get_current_vars()
        if context.dry_run:
            return self._dry_run(context, variables, args_spec, argument_values)
        return self._variable_resolving_run(context, variables, args_spec, argument_values)

    def _dry_run(self, context, variables, args_spec, argument_values):
        # Dry run: resolve arguments without variable values, never returns a value.
        resolved_arguments = args_spec.resolve_arguments_for_dry_run(argument_values)
        self._execute(context, variables, args_spec, resolved_arguments)
        return None

    def _variable_resolving_run(self, context, variables, args_spec, argument_values):
        resolved_arguments = args_spec.resolve(argument_values, variables)
        self._execute(context, variables, args_spec, resolved_arguments)
        return self._get_return_value(variables)

    def _execute(self, context, variables, args_spec, resolved_arguments):
        args_spec.set_variables(resolved_arguments, variables, context.output)
        self._verify_keyword_is_valid()
        self.timeout.start()
        self.keywords.run(context)

    def _verify_keyword_is_valid(self):
        if self._errors:
            raise DataError("User keyword initialization failed:\n%s" % "\n".join(self._errors))
        if not (self.keywords or self.return_value):
            raise DataError("User keyword '%s' contains no keywords" % self.name)

    def _get_return_value(self, variables):
        # Resolve the keyword's return value; single scalars unwrap to one item.
        if not self.return_value:
            return None
        try:
            ret = variables.replace_list(self.return_value)
        except DataError, err:
            raise DataError("Replacing variables from keyword return value "
                            "failed: %s" % unicode(err))
        if len(ret) != 1 or is_list_var(self.return_value[0]):
            return ret
        return ret[0]
class Spot:
    """Recommends a Foursquare venue near a location that matches given keywords."""

    def __init__(self):
        self.FOURSQUARE_API_URL = 'https://api.foursquare.com/v2/venues/search'
        # Credentials come from the environment; raises KeyError when unset.
        self._foursquare_client = foursquare.Foursquare(
            client_id=os.environ['FOURSQUARE_CLIENT_ID'],
            client_secret=os.environ['FOURSQUARE_CLIENT_SECRET'],
            lang='ja')
        categories = self._foursquare_client.venues.categories()
        self._categories = self._flatten_categories(categories)
        self._keyword = Keywords()

    def _flatten_categories(self, nested_categories):
        # Recursively flatten the category tree into (name, id) tuples.
        result = []
        for categories in nested_categories['categories']:
            result.append((categories['name'], categories['id']))
            for sub_category in self._flatten_categories(categories):
                result.append(sub_category)
        return result

    def _match_category_ids(self, keywords):
        # Keep only categories whose name appears among the keywords.
        matched_category_ids = [(category[0], category[1])
                                for category in self._categories
                                if category[0] in keywords]
        return matched_category_ids

    def recommend_spot(self, location, keywords):
        """Return a dict with name/reason/image for a recommended venue, or None."""
        print(keywords)
        target_categories = self._match_category_ids(keywords)
        target_category_names = set(category[0] for category in target_categories)
        target_category_ids = set(category[1] for category in target_categories)
        params = {
            'near': location,
            'categoryId': reduce(lambda i, s: i + ',' + s, target_category_ids),
            'limit': 50,
        }
        try:
            response = self._foursquare_client.venues.search(params=params)
        except:
            # NOTE(review): bare except hides all errors (including bad
            # credentials) — consider narrowing.
            return None
        ## Reccomend comment
        candidates = []
        for venue in response['venues']:
            venue_id = venue['id']
            candidate = {}
            # Only consider reasonably popular venues with at least two tips.
            if venue['stats']['tipCount'] >= 2 and venue['stats'][
                    'checkinsCount'] >= 1500:
                venue_detail = self._foursquare_client.venues(
                    venue_id)['venue']
                if len(venue_detail['tips']['groups']) > 1:
                    tips = venue_detail['tips']['groups'][1]['items']
                else:
                    tips = venue_detail['tips']['groups'][0]['items']
                word_counter = Counter()
                for tip in tips:
                    # Skip tips explicitly marked as non-Japanese.
                    if 'lang' in tip and tip['lang'] != 'ja':
                        continue
                    keywords, _ = self._keyword.extract_from_sentence(
                        tip['text'])
                    for keyword in keywords:
                        # Weight each keyword by the tip's like count.
                        like_count = tip['likes']['count']
                        word_counter[keyword] += 1 * (1 if like_count == 0 else like_count)
                tags = []
                for key, count in word_counter.most_common(10):
                    if count > 1:
                        tags.append(key)
                if len(tags) > 0:
                    candidate['name'] = venue['name']
                    candidate['tags'] = tags
                    if 'photos' in venue_detail and venue_detail['photos'][
                            'groups'] and venue_detail['photos']['groups'][0][
                            'items']:
                        photo = venue_detail['photos']['groups'][0]['items'][0]
                        candidate['image'] = photo[
                            'prefix'] + '128x128' + photo['suffix']
                    candidates.append(candidate)
            # Stop at the first acceptable candidate.
            if len(candidates) > 0:
                break
        if len(candidates) == 0:
            return None
        # Instantly return most popular candidate
        print(candidates)
        top_candidate = candidates[0]
        ret = {
            'name': top_candidate['name'],
            'reason': reduce(lambda i, s: i + ' ' + s, top_candidate['tags']),  # Add recommend reason
            'image': top_candidate['image']
        }
        return ret
def __init__(self):
    """Set up the spot-recommendation client and the keyword extractor."""
    self.spot_client = Spot()
    self.keyword_fetcher = Keywords()
class PythonParser():
    """Translates spoken-word dictation tokens into Python code snippets.

    Each ``create_*`` method handles one leading command word (class,
    function, for, while, if, ...) and returns a list of snippet strings;
    the 'quote' and 'nl' markers in returned lists are presumably expanded
    by the caller — TODO confirm.
    """

    def __init__(self):
        self.keywords = Keywords("python")
        self.math_parser = MathParser()

    def parse(self, dictation):
        # Dispatch on the first spoken word: "class ..." -> create_class, etc.
        words = dictation.split()
        # print(str(words))
        if self.keywords.is_keyword(words[0]):
            parser = "create_" + words[0]
            return getattr(self, parser)(words[1:])
        else:
            return ["Bad command"]

    def create_class(self, words):
        # e.g. "Foo base Bar base Baz" -> "class Foo(Bar, Baz):"
        snippet = ['class ']
        start_index_v = helper.find_next_index({'base'}, words)
        if start_index_v == -1:
            snippet[0] += format_class(words) + '():'
        else:
            snippet[0] += format_class(words[0:start_index_v]) + '('
            while True:
                next_index_v = helper.find_next_index({'base'}, words, start_index_v)
                if next_index_v == -1:
                    snippet[0] += format_class(words[start_index_v+1:]) + '):'
                    break
                else:
                    snippet[0] += format_class(words[start_index_v+1:next_index_v]) + ', '
                    start_index_v = next_index_v
        return snippet

    def create_function(self, words):
        # e.g. "do thing variable x variable y" -> "def do_thing(x, y):"
        snippets = [""]
        snippets[0] = 'def '
        start_index_v = helper.find_next_index({'variable'}, words)
        if start_index_v == -1:
            for w in words:
                snippets[0] += w.lower() + '_'
            snippets[0] = snippets[0][:-1] + '():'
        else:
            snippets[0] += format_variable(words[0:start_index_v]) + '('
            while True:
                next_index_v = helper.find_next_index({'variable'}, words, start_index_v)
                if next_index_v == -1:
                    snippets[0] += format_variable(words[start_index_v+1:]) + '):'
                    break
                else:
                    snippets[0] += format_variable(words[start_index_v+1:next_index_v]) + ', '
                    start_index_v = next_index_v
        return snippets

    def create_define(self, words):
        # "define" is an alias for "function".
        return self.create_function(words)

    def create_for(self, words):
        snippets = [""]
        # X from start to end
        index_from = helper.find_next_index({'from'}, words)
        index_in = helper.find_next_index({'in'}, words)
        if index_from > 0:
            snippets[0] = 'for ' + format_variable(words[0:index_from]) + ' in range( '
            index_to = helper.find_next_index({'to'}, words)
            start = words[index_to-1]
            stop = words[index_to+1]
            snippets[0] += start + ', ' + stop + '):'
        # X in List
        elif index_in > 0:
            snippets[0] = 'for ' + format_variable(words[0:index_in]) + ' in ' + format_variable(words[index_in+1:]) + ':'
        else:
            snippets[0] = 'for '
        return snippets

    def create_while(self, words):
        snippets = [""]
        if words[0] == 'call':
            # NOTE(review): no space after 'while' here, producing e.g.
            # "whilefoo():" — confirm whether intentional.
            snippets[0] = 'while'+ self.create_call(words[1:])[0] + ':'
        elif words[0] == 'variable':
            snippets[0] = 'while ' + format_variable(words) + ':'
        else:
            # used process later
            snippets[0] = 'while ' + self.process_conditional_math(helper.convert_to_string(words)) + ':'
        return snippets

    def create_if(self, words):
        snippets = ["if "]
        processed = self.process_conditional_math(helper.convert_to_string(words))
        snippets[0] += processed + ':'
        return snippets

    def create_elif(self, words):
        snippets = ["elif "]
        processed = self.process_conditional_math(helper.convert_to_string(words))
        snippets[0] += processed + ':'
        return snippets

    def create_else(self, words):
        snippets = ["else:", "nl"]
        return snippets

    def create_return(self, words):
        # NOTE(review): the generated snippet does not start with "return" —
        # presumably prepended by the caller; confirm.
        snippets = [""]
        if words[0] == 'variable':
            snippets[0] = format_variable(words[1:])
        else:
            snippets[0] = helper.convert_to_string(words)
        return snippets

    def create_print(self, words):
        snippets = [""]
        length = len(words)
        print(str(words))
        if words[0] == 'variable' and length > 1:
            snippets[0] = 'print(str(' + format_variable(words[1:]) + '))'
        else:
            pass
            snippets[0] = 'print('
            snippets.append('quote')
            snippets.append(format(helper.convert_to_string(words)))
            snippets.append('quote')
            snippets.append(')')
        return snippets

    def create_variable(self, words):
        # e.g. "x array" -> "x = []", "x integer 5" -> "x = 5", etc.
        snippets = [""]
        index_type = helper.find_next_index({'array', 'dictionary', 'set', 'call', 'integer', 'string', 'equals'}, words)
        if index_type > 0:
            if words[index_type] == 'array':
                snippets[0] = format_variable(words[0:index_type]) + ' = []'
            elif words[index_type] == 'dictionary' or words[index_type] == 'set':
                snippets[0] = format_variable(words[0:index_type]) + ' = {}'
            elif words[index_type] == 'call':
                snippets[0] = format_variable(words[0:index_type]) + ' = ' + self.create_call(words[index_type+1:])[0]
            elif words[index_type] == 'integer':
                snippets[0] = format_variable(words[0:index_type]) + ' = ' + helper.verify_number(words[index_type+1])
            elif words[index_type] == 'string':
                snippets[0] = format_variable(words[0:index_type]) + ' = '
                snippets.append('quote')
                snippets.append(helper.convert_to_string(words[index_type+1:]))
                snippets.append('quote')
            elif words[index_type] == 'equals':
                snippets[0] = format_variable(words[0:index_type]) + ' = ' + self.process_conditional_math(helper.convert_to_string(words[index_type+1:]))
            else:
                snippets[0] = format_variable(words[0:index_type]) + ' = '
        else:
            snippets[0] = format_variable(words) + ' = '
        return snippets

    def create_call(self, words):
        # Builds a (possibly dotted) call expression such as "a.b.c(x, y)".
        snippets = [""]
        start_index_arg = -1
        start_index_sub = helper.find_next_index({'sub', 'period', 'function', 'class'}, words)
        #print(str(start_index_sub))
        if start_index_sub == -1:
            # No dotted path: just a name plus optional arguments.
            start_index_arg = helper.find_next_index({'variable', 'arg', 'argument'}, words)
            if start_index_arg == -1:
                snippets[0] += format_variable(words)
            else:
                snippets[0] += format_variable(words[0:start_index_arg])
        elif start_index_sub == 0:
            # The path starts immediately with a sub/period/class/function marker.
            while True:
                next_index_sub = helper.find_next_index({'sub', 'period', 'class', 'function'}, words, start_index_sub)
                if next_index_sub == -1:
                    start_index_arg = helper.find_next_index({'variable', 'arg', 'argument'}, words)
                    if start_index_arg == -1:
                        if words[start_index_sub] == 'class':
                            snippets[0] += format_class(words[start_index_sub+1:])
                        else:
                            snippets[0] += format_variable(words[start_index_sub+1:])
                    else:
                        if words[start_index_sub] == 'class':
                            snippets[0] += format_class(words[start_index_sub+1:start_index_arg])
                        else:
                            snippets[0] += format_variable(words[start_index_sub+1:start_index_arg])
                    break
                else:
                    if words[start_index_sub] == 'class':
                        snippets[0] += format_class(words[start_index_sub+1:next_index_sub]) + '.'
                    else:
                        snippets[0] += format_variable(words[start_index_sub+1:next_index_sub]) + '.'
                    start_index_sub = next_index_sub
        else:
            # Leading receiver name, then the dotted path.
            snippets[0] += format_variable(words[0:start_index_sub]) + '.'
            while True:
                next_index_sub = helper.find_next_index({'sub', 'period', 'class', 'function'}, words, start_index_sub)
                if next_index_sub == -1:
                    start_index_arg = helper.find_next_index({'variable', 'arg', 'argument'}, words)
                    if start_index_arg == -1:
                        if words[start_index_sub] == 'class':
                            snippets[0] += format_class(words[start_index_sub+1:])
                        else:
                            snippets[0] += format_variable(words[start_index_sub+1:])
                    else:
                        if words[start_index_sub] == 'class':
                            snippets[0] += format_class(words[start_index_sub+1:start_index_arg])
                        else:
                            snippets[0] += format_variable(words[start_index_sub+1:start_index_arg])
                    break
                else:
                    snippets[0] += format_class(words[start_index_sub+1:next_index_sub]) + '.'
                    start_index_sub = next_index_sub
        # handle any arguments
        if start_index_arg == -1:
            snippets[0] += '()'
        else:
            snippets[0] += '('
            while True:
                next_index_arg = helper.find_next_index({'variable', 'arg', 'argument'}, words, start_index_arg)
                if next_index_arg == -1:
                    snippets[0] += format_variable(words[start_index_arg+1:]) + ')'
                    break
                else:
                    snippets[0] += format_variable(words[start_index_arg+1:next_index_arg]) + ', '
                    start_index_arg = next_index_arg
        return snippets

    def process_math(self, words):
        # Replace spoken math words with their symbol equivalents.
        replaced = words
        for i in range(0,len(Keywords.math_keyword_list)):
            if Keywords.math_keyword_list[i] in replaced:
                replaced = replaced.replace(Keywords.math_keyword_list[i], Keywords.math_replacement_list[i])
        return replaced

    def process_conditional_math(self, words):
        # Replace spoken comparison words, then math words, then format the
        # operands around each symbol.
        replaced = words
        for i in range(0,len(Keywords.conditional_keyword_list)):
            if Keywords.conditional_keyword_list[i] in replaced:
                replaced = replaced.replace(Keywords.conditional_keyword_list[i], Keywords.conditional_replacement_list[i])
        replaced = self.process_math(replaced)
        replaced_array = replaced.split(' ')
        # print(str(replaced_array))
        snippet = ''
        start_index_symbol = helper.find_next_index(Keywords.symbols, replaced_array)
        snippet += format_variable(replaced_array[0:start_index_symbol]) + ' '
        while True:
            # NOTE(review): this searches ``words`` (the raw string) rather
            # than ``replaced_array`` — looks like a bug; confirm.
            next_index_symbol = helper.find_next_index(Keywords.symbols, words, start_index_symbol)
            if next_index_symbol == -1:
                snippet += replaced_array[start_index_symbol] + ' ' + format_variable(replaced_array[start_index_symbol+1:])
                break
            else:
                snippet += replaced_array[start_index_symbol] + ' ' + format_variable(replaced_array[start_index_symbol+1:next_index_symbol])
                start_index_symbol = next_index_symbol
        return snippet
# NOTE(review): this snippet begins mid-expression — the opening of the
# server-name list is not visible in this view.
    'kt1s', 'kt2s', 'kp1s', 'kp2s', 'kp3s', 'kbgs', 'kbvs', 'kbds', 'kfcs',
    'kbes', 'kbms', 'kros', 'kcas', 'kcwi'
]
# Copy all known servers, then add the kp1s..kp3s entries.
server = []
for i in allServers:
    server.append(i)
# NOTE(review): this appends 'kp1s'..'kp3s' sixteen times each; the unused
# ``j`` suggests per-slot names were intended — confirm.
for i in range(1, 4):
    for j in range(1, 17):
        server.append('kp' + str(i) + 's')
#print(server)
#print(binary_keywords)
fdata = FactoryData(process_names)
binVals = Keywords(server, binary_keywords)
histKeys = Keywords()
#print(binVals.get_keyword())
# Dash application setup and shared theme colors.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__)
app.config.suppress_callback_exceptions = True
theme = {
    'dark': False,
    'detail': '#007439',
    'primary': '#00EA64',
    'secondary': '#6E6E6E'
}
def test_keywords_merger(self):
    """Merging KEYWORDS with the add-words and pre-words yields 12 entries."""
    merger = Keywords()
    merged = merger.merge(KEYWORDS, addwords=ADDWORDS, prewords=PREWORDS)
    self.assertEqual(12, len(merged))
class UserKeywordHandler(object):
    """Runnable handler for a user-defined keyword.

    Wraps parsed keyword data (steps, return value, timeout, arguments) and
    executes it inside an execution context, resolving variables first.
    Python 2 syntax (``except X, err`` / ``unicode``) is intentional here.
    """

    # Handler type tag; distinguishes user keywords from library keywords.
    type = 'user'

    def __init__(self, keyword, libname):
        # `keyword` is the parsed keyword data object; `libname` is the name
        # of the resource/library the keyword came from (may be falsy).
        self.name = keyword.name
        self.keywords = Keywords(keyword.steps)
        self.return_value = keyword.return_.value
        self._libname = libname
        # `_doc` keeps the raw doc so init_keyword can re-resolve variables
        # in it on every run; `doc` holds the current resolved value.
        self.doc = self._doc = keyword.doc.value
        self._timeout = keyword.timeout
        self._keyword_args = keyword.args.value

    @property
    def longname(self):
        # Fully qualified name ("Lib.Name") when a library name is present.
        return '%s.%s' % (self._libname, self.name) if self._libname else self.name

    @property
    def shortdoc(self):
        # First line of the documentation, or '' when there is none.
        return self.doc.splitlines()[0] if self.doc else ''

    def init_keyword(self, varz):
        """Resolve variables in doc and timeout before a run.

        Errors from doc resolution are collected into ``self._errors`` and
        reported later by _verify_keyword_is_valid (not raised here).
        """
        self._errors = []
        self.doc = varz.replace_meta('Documentation', self._doc, self._errors)
        self.timeout = KeywordTimeout(self._timeout.value, self._timeout.message)
        self.timeout.replace_variables(varz)

    def run(self, context, arguments):
        """Execute the keyword inside its own namespace scope.

        The namespace is always popped, even when execution fails.
        """
        context.namespace.start_user_keyword(self)
        try:
            return self._run(context, arguments)
        finally:
            context.namespace.end_user_keyword()

    def _run(self, context, argument_values):
        # Dry runs skip variable resolution and produce no return value.
        args_spec = UserKeywordArguments(self._keyword_args, self.longname)
        variables = context.get_current_vars()
        if context.dry_run:
            return self._dry_run(context, variables, args_spec, argument_values)
        return self._variable_resolving_run(context, variables, args_spec,
                                            argument_values)

    def _dry_run(self, context, variables, args_spec, argument_values):
        # Arguments are only validated, not resolved, in dry-run mode.
        resolved_arguments = args_spec.resolve_arguments_for_dry_run(
            argument_values)
        self._execute(context, variables, args_spec, resolved_arguments)
        return None

    def _variable_resolving_run(self, context, variables, args_spec,
                                argument_values):
        # Normal execution path: resolve arguments, run, then compute the
        # keyword's return value from the (possibly mutated) variables.
        resolved_arguments = args_spec.resolve(argument_values, variables)
        self._execute(context, variables, args_spec, resolved_arguments)
        return self._get_return_value(variables)

    def _execute(self, context, variables, args_spec, resolved_arguments):
        # Order matters: arguments become variables first, validity is
        # checked next, and the timeout clock starts just before the steps.
        args_spec.set_variables(resolved_arguments, variables, context.output)
        self._verify_keyword_is_valid()
        self.timeout.start()
        self.keywords.run(context)

    def _verify_keyword_is_valid(self):
        # Raises DataError for init-time errors collected in init_keyword,
        # or when the keyword has neither steps nor a return value.
        if self._errors:
            raise DataError('User keyword initialization failed:\n%s'
                            % '\n'.join(self._errors))
        if not (self.keywords or self.return_value):
            raise DataError("User keyword '%s' contains no keywords" % self.name)

    def _get_return_value(self, variables):
        """Resolve [Return] values; None when no return value is set.

        A single non-list-variable return item is unwrapped from the list.
        """
        if not self.return_value:
            return None
        try:
            ret = variables.replace_list(self.return_value)
        except DataError, err:
            raise DataError('Replacing variables from keyword return value '
                            'failed: %s' % unicode(err))
        # A lone ${scalar} returns the value itself; @{list} (or multiple
        # items) returns the whole list.
        if len(ret) != 1 or is_list_var(self.return_value[0]):
            return ret
        return ret[0]
def test_keywords_merger(self):
    """Keywords.merge of KEYWORDS with ADDWORDS/PREWORDS produces 12 items."""
    result = Keywords().merge(KEYWORDS, addwords=ADDWORDS, prewords=PREWORDS)
    self.assertEqual(12, len(result))
def __init__(self):
    """Initialize with the 'python' keyword table and a math sub-parser."""
    self.keywords = Keywords("python")
    self.math_parser = MathParser()
class UserKeywordHandler(object): type = 'user' def __init__(self, keyword, libname): self.name = keyword.name self.keywords = Keywords(keyword.steps) self.return_value = keyword.return_.value self.teardown = keyword.teardown self.libname = libname self.doc = self._doc = keyword.doc.value self._timeout = keyword.timeout self._keyword_args = keyword.args.value @property def longname(self): return '%s.%s' % (self.libname, self.name) if self.libname else self.name @property def shortdoc(self): return self.doc.splitlines()[0] if self.doc else '' def init_keyword(self, varz): self._errors = [] self.doc = varz.replace_meta('Documentation', self._doc, self._errors) self.timeout = KeywordTimeout(self._timeout.value, self._timeout.message) self.timeout.replace_variables(varz) def run(self, context, arguments): context.namespace.start_user_keyword(self) try: return self._run(context, arguments) finally: context.namespace.end_user_keyword() def _run(self, context, argument_values): args_spec = UserKeywordArguments(self._keyword_args, self.longname) variables = context.get_current_vars() if context.dry_run: return self._dry_run(context, variables, args_spec, argument_values) return self._variable_resolving_run(context, variables, args_spec, argument_values) def _dry_run(self, context, variables, args_spec, argument_values): resolved_arguments = args_spec.resolve_arguments_for_dry_run(argument_values) self._execute(context, variables, args_spec, resolved_arguments) return None def _variable_resolving_run(self, context, variables, args_spec, argument_values): resolved_arguments = args_spec.resolve(argument_values, variables, context.output) self._execute(context, variables, args_spec, resolved_arguments) return self._get_return_value(variables) def _execute(self, context, variables, args_spec, resolved_arguments): args_spec.set_variables(resolved_arguments, variables, context.output) self._verify_keyword_is_valid() self.timeout.start() try: self.keywords.run(context) except 
ExecutionFailed, error: pass else:
# -*- coding: utf-8 -*- from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream from tweepy import API import json import pika import time import sys from config import config from keywords import Keywords keywordObj = Keywords(['python', 'javascript']) #Variables that contains the user credentials to access Twitter API access_token = config['access_token'] access_token_secret = config['access_token_secret'] consumer_key = config['consumer_key'] consumer_secret = config['consumer_secret'] class TweetProducer(StreamListener): def __init__(self, api): self.api = api super(StreamListener, self).__init__() #setup rabbitMQ Connection connection = pika.BlockingConnection( pika.ConnectionParameters(host='localhost')) self.channel = connection.channel()
def download_resume(self, id, headers):
    """Fetch one resume by user id and normalize its coded fields.

    POSTs to the getUserResume endpoint (retrying through the configured
    proxies), maps numeric/coded values to readable text via the Keywords
    helper, and returns the whole resume as a JSON string.
    Raises Exception on persistent proxy failure or a missing 'resume' key.
    NOTE(review): parameter `id` shadows the builtin of the same name.
    """
    logger.info('headers %s of download resume' % (headers))
    try_times = 0
    url = "http://www.yifengjianli.com/bidme/getUserResume"
    _resume = {}
    yfkeywords = Keywords()
    # Retry loop: up to 6 attempts (fails when try_times exceeds 5), with a
    # random 3-10s pre-request delay and a 30s back-off after each failure.
    while True:
        try_times += 1
        try:
            time.sleep(random.uniform(3, 10))
            response = self.session.post(url, data={
                "userId": id,
                "resumeCookie": "",
            }, headers=headers, timeout=30, proxies=self.proxies)
            # Any non-200 (or falsy) response is treated as a proxy failure.
            assert response
            assert response.status_code == 200
            response.encoding = 'utf-8'
        except Exception:
            logger.warning(
                'fetch url %s with %s fail:\n%s' % (url, self.proxies, traceback.format_exc()))
            if try_times > 5:
                raise Exception("PROXY_FAIL!")
            else:
                time.sleep(30)
        else:
            break
    resume = json.loads(response.text)
    assert resume
    if 'resume' not in resume:
        # Server returns no 'resume' key e.g. when the daily quota is hit.
        raise Exception('No Resume Return! Maybe Over 300!')
    # Decode the single-valued coded fields; missing/empty values become None.
    _resume["sex"] = yfkeywords.Sex(str(resume["resume"].get("sex"))) if resume["resume"].get("sex", None) else None
    _resume["jobState"] = yfkeywords.JobState(str(resume["resume"].get("jobState"))) if resume["resume"].get("jobState") else None
    _resume["maritalStatus"] = yfkeywords.MaritalStatus(str(resume["resume"].get("maritalStatus"))) if resume["resume"].get("maritalStatus") else None
    _resume["expectWorkType"] = yfkeywords.Worktype(str(resume["resume"].get("expectWorkType"))) if resume["resume"].get("expectWorkType", None) else None
    _resume["education"] = yfkeywords.Education(str(resume["resume"].get("education"))) if resume["resume"].get("education", None) else None
    # City-like fields may hold a comma-separated list of codes; decode each
    # element individually and re-join, otherwise decode the single value.
    for field in ('expectCity', 'city', 'province', 'hukouProvince', 'hukouCity'):
        if "," in str(resume["resume"].get(field)):
            citys = str(resume["resume"].get(field))
            parsed_citys = []
            for i in citys.split(","):
                parsed_citys.append(yfkeywords.Expectcity(str(i)))
            _resume[field] = ",".join(parsed_citys)
        else:
            _resume[field] = yfkeywords.Expectcity(str(resume["resume"].get(field))) if resume["resume"].get(field, None) else None
    _resume["expectSalary"] = yfkeywords.Expectsalary(str(resume["resume"].get("expectSalary"))) if resume["resume"].get("expectSalary", None) else None
    # jobTitle follows the same comma-separated pattern as the city fields.
    if "," in str(resume["resume"].get("jobTitle")):
        jobtitles = str(resume["resume"].get("jobTitle"))
        parsed_jobtitles = []
        for i in jobtitles.split(","):
            parsed_jobtitles.append(yfkeywords.Jobtitle(str(i)))
        _resume["jobTitle"] = ",".join(parsed_jobtitles)
    else:
        _resume["jobTitle"] = yfkeywords.Jobtitle(str(resume["resume"].get("jobTitle"))) if resume["resume"].get("jobTitle", None) else None
    # Write the decoded values back over the raw coded ones (Python 2 dict
    # iteration; _resume itself is not mutated inside this loop).
    for k, v in _resume.iteritems():
        resume['resume'][k] = v
    # Decode coded fields inside the top-level work/education history lists,
    # mutating each item in place and reassigning the list.
    for field in ['work_experiences', 'educations']:
        if field in resume:
            items = []
            for item in resume[field]:
                if 'salary' in item:
                    item["salary"] = yfkeywords.Expectsalary(str(item.get("salary"))) if item.get("salary", None) else None
                if 'compSize' in item:
                    item["compSize"] = yfkeywords.CompSize(str(item.get("compSize"))) if item.get("compSize", None) else None
                if 'compIndustry' in item:
                    item["compIndustry"] = yfkeywords.Industry(str(item.get("compIndustry"))) if item.get("compIndustry", None) else None
                if 'compProperty' in item:
                    item["compProperty"] = yfkeywords.CompProperty(str(item.get("compProperty"))) if item.get("compProperty", None) else None
                if 'education' in item:
                    item["education"] = yfkeywords.Education(str(item.get("education"))) if item.get("education", None) else None
                items.append(item)
            resume[field] = items
    # ensure_ascii=False keeps the CJK text readable in the output JSON.
    return json.dumps(resume, ensure_ascii=False)