def run(self, version):
    self.VERSION = version

    LOGGER.debug('Running setup')
    self.setup()

    with open('./lib/bot/token.0', 'r', encoding="utf-8") as tf:
        self.TOKEN = tf.read().strip()

    LOGGER.info("Running bot")
    super().run(self.TOKEN, reconnect=True)
async def list_states() -> list:
    resp = await session.get(URLS.get('GET_STATES'), headers=headers)
    if resp.status == 200:
        states_string = json.loads(await resp.text())
        return states_string.get('states')
    LOGGER.debug(f"{resp.status}: {await resp.text()}")
    return []
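# The API helpers above and below assume module-level `session`, `headers`,
# and `URLS` objects. A minimal sketch of that setup, assuming aiohttp is
# used; the endpoint paths below are placeholders, not the real routes.
import aiohttp

headers = {"User-Agent": "Mozilla/5.0"}  # placeholder headers

URLS = {
    "GET_STATES": "https://api.example.com/location/states",
    "GET_DIST": "https://api.example.com/location/districts/{state_id}",
    "CHECK_DISTRICT": "https://api.example.com/sessions/findByDistrict?district_id={dist_id}&date={date}",
    "CHECK_PIN": "https://api.example.com/sessions/findByPin?pincode={pincode}&date={date}",
}

session = aiohttp.ClientSession()  # ideally created inside a running event loop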
def test_select_query(rdbms):
    posts_sql = fetch_sql_files("posts/selects")
    parsed_posts_sql = parse_sql_batch(posts_sql)
    query_result = rdbms.execute_query(parsed_posts_sql[0], "hackers_dev")

    assert len(posts_sql) > 0
    assert isinstance(parsed_posts_sql[0], str)
    assert isinstance(query_result, LegacyCursorResult)
    LOGGER.debug(query_result.rowcount)
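# `fetch_sql_files` and `parse_sql_batch` are not shown in this section.
# Judging by the test, the first returns raw .sql file contents and the
# second splits them into individual statements. A minimal sketch under
# those assumptions (the directory layout is hypothetical):
from pathlib import Path

SQL_ROOT = Path("sql")  # hypothetical fixtures directory

def fetch_sql_files(subdir: str) -> list:
    """Read every .sql file under SQL_ROOT/subdir into a list of strings."""
    return [path.read_text(encoding="utf-8")
            for path in sorted((SQL_ROOT / subdir).glob("*.sql"))]

def parse_sql_batch(sql_files: list) -> list:
    """Split each file on ';' and return the non-empty statements."""
    return [stmt.strip()
            for sql in sql_files
            for stmt in sql.split(";")
            if stmt.strip()]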
async def list_dist(state: str) -> list:
    url = URLS.get("GET_DIST").replace("{state_id}", state)
    resp = await session.get(url=url, headers=headers)
    if resp.status == 200:
        dist_string = json.loads(await resp.text())
        return dist_string.get('districts')
    LOGGER.debug(f"{resp.status}: {await resp.text()}")
    return []
async def check_dist(dist_id: str, date: str) -> list:
    url = URLS.get("CHECK_DISTRICT").replace("{dist_id}", dist_id).replace("{date}", date)
    resp = await session.get(url=url, headers=headers)
    body = await resp.text()  # read the body once and reuse it
    if resp.status == 200:
        LOGGER.debug(body)
        dist_string = json.loads(body)
        return dist_string.get('centers')
    LOGGER.debug(f"{resp.status}: {body}")
    return []
async def check_pin(pincode: str, date: str) -> list:
    url = URLS.get("CHECK_PIN").replace("{pincode}", pincode).replace("{date}", date)
    resp = await session.get(url=url, headers=headers)
    body = await resp.text()  # read the body once and reuse it
    if resp.status == 200:
        LOGGER.debug(body)
        pin_string = json.loads(body)
        return pin_string.get('centers')
    LOGGER.debug(f"{resp.status}: {body}")
    return []
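# A usage sketch for the lookup coroutines above; the IDs, pincode, and
# date format (assumed to be DD-MM-YYYY) are illustrative values only.
import asyncio

async def demo():
    states = await list_states()
    districts = await list_dist("21")
    centers = await check_pin("110001", "31-05-2021")
    print(len(states), len(districts), len(centers))

asyncio.run(demo())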
def get_sample_results_by_naive_bayes(classifier, tags_info, words_info):
    """
    This generator reads the test samples and yields a sequence of
    predicted results.
    """
    LOGGER.debug("Creating the naive bayes classifier...")
    classifier = create_classifier(tags_info, words_info)  # replaces the classifier argument

    # Get the test samples
    LOGGER.debug("Start to process the samples")
    test_samples = get_test_samples()

    # Process the test samples
    for line in test_samples:
        LOGGER.debug("Processing sample...")

        # -- Parse the record
        # Each line of the sample is made up of three parts:
        # - id
        # - words
        # - tags
        # A typical line looks like this:
        # question_id;word_id1:count<tab>word_id2:count;tag_id1<tab>tag_id2
        segments = line[:-1].split(";")
        words = (to_ints(elem.split(":")) for elem in segments[1].split())
        words = dict(elem for elem in words if elem[0] in words_info)
        tags = to_ints(segments[2].split(), lambda t: t in tags_info)

        # -- Classify
        LOGGER.debug("Classifying sample %s...", segments[0])
        tags_with_score = classifier.classify(words)
        yield tags, tags_with_score
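# `to_ints` is not defined in this section. From its two call sites it
# appears to convert a sequence of strings to ints, with an optional
# filter applied after conversion. A hypothetical reconstruction:
def to_ints(items, predicate=None):
    """Convert an iterable of strings to a list of ints, optionally
    keeping only values that satisfy `predicate`."""
    values = [int(item) for item in items]
    if predicate is not None:
        values = [value for value in values if predicate(value)]
    return values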
async def state(self, ctx):
    states_data = await list_api.list_states()
    if not states_data:
        await ctx.send("Error fetching states data")
        return

    self.states = {state.get('state_id'): state.get('state_name') for state in states_data}

    embed = discord.Embed(title="States List", color=0xc0dfd1)
    embed.set_author(name="CowinBOT", icon_url=self.bot.guild.icon_url)

    states_data = sorted(states_data, key=lambda x: x['state_id'])
    LOGGER.debug(states_data)

    # Discord embeds allow at most 25 fields, so flush the current embed
    # and start a continuation embed after 24 entries.
    count = 0
    for state in states_data:
        if count < 24:
            count += 1
            embed.add_field(name=state.get('state_id'), value=state.get('state_name'))
        else:
            await ctx.send(embed=embed)
            embed = discord.Embed(title="States List Cont..", color=0xc0dfd1)
            embed.add_field(name=state.get('state_id'), value=state.get('state_name'))
            count = 1

    if count:
        await ctx.send(embed=embed)
def run_experiment(predicted_results, settings, limit, predicted_tag_count):
    """ Run the experiment with the given configuration. """
    tags_info = settings["tags_info"]
    sample_count = config.CLASSIFIER["sample_count"]
    # predicted_tag_count = settings["predicted_tag_count"]
    LOGGER.debug("Sample count: %d" % sample_count)
    LOGGER.debug("Max predicted tag count: %d" % predicted_tag_count)
    get_similarity = settings["get_similarity"]

    # Run the test
    for index, predict_result in enumerate(predicted_results):
        if index > limit:
            break
        try:
            LOGGER.debug("%d/%d sample" % (index, sample_count))
            original, scored_predicted = predict_result

            # TODO: HARD CODED value again.
            if settings["should_rerank"]:
                scored_predicted = rerank_tags(scored_predicted[:30], get_similarity)
            scored_predicted = scored_predicted[:predicted_tag_count]
            predicted = [t for t, s in scored_predicted]

            # TODO: SOME PROBLEM may arise here
            predicted = predicted[:predicted_tag_count]

            for name, evaluator in settings["evaluators"].items():
                evaluation = evaluator.update(original, predicted)
                log_message = ("\nOriginal Result: %s\n"
                               "Predicted Result: %s\n"
                               "Evaluator Type: %s\n"
                               "\tPrecision: %f\n"
                               "\tRecall: %f\n" % (
                                   str(to_named_tags(original, tags_info)),
                                   str(to_named_tags(predicted, tags_info)),
                                   name, evaluation[0], evaluation[1]))
                LOGGER.debug(log_message)
        except Exception as e:
            LOGGER.error("Error occurred: %s" % str(e))

    evaluations = []
    for name, evaluator in settings["evaluators"].items():
        evaluation = evaluator.get_evaluation()
        LOGGER.info("%s Precision: %f\t Recall: %f" % (name, evaluation[0], evaluation[1]))
        evaluations.append(evaluation)
    return evaluations
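# The evaluators are assumed to expose update(original, predicted) and
# get_evaluation(), each returning a (precision, recall) pair. A minimal
# micro-averaged evaluator matching that assumed interface:
class PrecisionRecallEvaluator:
    def __init__(self):
        self.hits = 0
        self.predicted_total = 0
        self.original_total = 0

    def update(self, original, predicted):
        # Per-sample precision/recall; also accumulate running totals.
        overlap = len(set(original) & set(predicted))
        self.hits += overlap
        self.predicted_total += len(predicted)
        self.original_total += len(original)
        precision = overlap / len(predicted) if predicted else 0.0
        recall = overlap / len(original) if original else 0.0
        return precision, recall

    def get_evaluation(self):
        # Micro-averaged precision/recall over all samples seen so far.
        precision = self.hits / self.predicted_total if self.predicted_total else 0.0
        recall = self.hits / self.original_total if self.original_total else 0.0
        return precision, recall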
async def on_connect(self):
    LOGGER.info(f'{self.user} has connected to Discord!')
    LOGGER.debug(self.guilds)
    for guild in self.guilds:
        members = '\n - '.join([member.name for member in guild.members])
        LOGGER.debug(f'Guild Members:\n - {members}')
def setup(self):
    for cog in COGS:
        self.load_extension(f"lib.cogs.{cog}")
        LOGGER.debug(f"{cog} cog loaded")

    LOGGER.debug("setup complete")
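# `COGS` is assumed to be a list of cog module names. One common pattern
# (an assumption here, not necessarily this project's approach) derives it
# from the files in lib/cogs:
from os import listdir

COGS = [filename[:-3] for filename in listdir("./lib/cogs")
        if filename.endswith(".py") and not filename.startswith("_")]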
def create_classifier(all_tags, all_words):
    LOGGER.debug("Creating classifier ...")
    conf = config.INPUT
    base_path = conf["base_path"]
    model_path = os.path.join(base_path, "bayes.model")

    # Create classifier from scratch or from an already persisted model
    if not config.CLASSIFIER["retrain_model"] and os.path.exists(model_path):
        LOGGER.debug("Creating classifier from file ...")
        classifier = persistence.load_model(model_path)
        LOGGER.debug("Reading completed.")
    else:
        LOGGER.debug("Creating empty classifier ...")
        classifier = make_classifier_from_config(all_tags, all_words)
        LOGGER.debug("Training completed.")

        if config.CLASSIFIER["retrain_model"]:
            LOGGER.debug("Writing the model to %s ..." % model_path)
            persistence.save_model(classifier, model_path)
            LOGGER.debug("Writing model completed.")
    return classifier
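# `persistence` is assumed to be a thin wrapper around pickle; a minimal
# sketch of the two functions used above:
import pickle

def save_model(model, path):
    """Serialize the trained classifier to disk."""
    with open(path, "wb") as fh:
        pickle.dump(model, fh)

def load_model(path):
    """Load a previously persisted classifier from disk."""
    with open(path, "rb") as fh:
        return pickle.load(fh)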
if __name__ == "__main__":
    # Read the tags info and words info from files
    LOGGER.debug("Reading tags and words...")
    tags_info, words_info = prediction.read_tags_and_words()
    LOGGER.debug("Read %d tags and %d words" % (len(tags_info), len(words_info)))

    EXPERIMENT_CONFIG = {
        "classifier": "naive_bayes",
        "evaluator_file": "../../data/stat",
        "predicted_tag_count": [3, 5, 10, 15, 20, 25],
        "tags_info": tags_info,
        "words_info": words_info,
        "should_rerank": False,
        "rounds": 5,
        "sample_count": 10,
        "NAME": "knn.100.stat",
        "is_from_classifier": False
    }
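    # As written, EXPERIMENT_CONFIG lacks the "evaluators" and "get_similarity"
    # entries that run_experiment reads, so a driver presumably fills them in
    # before running. A hypothetical wiring, reusing the sketched
    # PrecisionRecallEvaluator from above:
    EXPERIMENT_CONFIG["evaluators"] = {"precision_recall": PrecisionRecallEvaluator()}
    EXPERIMENT_CONFIG["get_similarity"] = lambda a, b: 0.0  # no-op similarity stub

    results = get_sample_results_by_naive_bayes(None, tags_info, words_info)
    run_experiment(results, EXPERIMENT_CONFIG,
                   limit=EXPERIMENT_CONFIG["sample_count"],
                   predicted_tag_count=EXPERIMENT_CONFIG["predicted_tag_count"][0])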