def check_origin(self, origin):
    """Check whether an incoming websocket connection is from a supported domain.

    :param origin (str): Origin/Domain of connection
    :return: True when allowed; in debug mode every origin is accepted.
    """
    # BUG FIX: the docstring was originally placed *after* the log_info()
    # call, making it a no-op string expression instead of a docstring.
    log_info("check_origin(): check origin, origin:" + origin)
    if not is_debug():
        return 'ioanb7.com' in origin
    return True
def addWord(self, word, definition='', percentages=False, start_date=''):
    """Look up a word, collect its frequency series, persist it, and return a summary.

    :param word: the word to add; must not contain '/'
    :param definition: optional definition (filled from the dictionary when empty)
    :param percentages: pass percentage counts instead of absolute counts
    :param start_date: optional first-occurrence date (filled from the dictionary)
    :return: summary dict, or None when the word is invalid or has no frequency data
    """
    # A '/' would break the downstream document identifier, so reject early.
    if '/' in word:
        if is_debug():
            print("Query cannot contain '/': {}".format(word))
        return None

    # Best-effort dictionary enrichment: failures are logged (in debug) and ignored.
    try:
        lookup = dictionary.searchWord(word)
        if lookup:
            oed_definition, start_date = lookup
            definition = definition or oed_definition
        elif is_debug():
            print("Could not get dictionary word: " + word)
    except Exception as e:
        if is_debug():
            print("Cannot connect to dictionary. Error message: {}".format(e))

    academia_freq = academia.get_frequency(word, percentages=percentages)
    if not academia_freq:
        return None
    sf_freq = science_fiction.get_frequency(word, percentages=percentages)
    if not sf_freq:
        return None

    # fill in missing dates between academia and sf with 0's
    academia_freq, sf_freq = self.preprocessWordFreq(academia_freq, sf_freq)

    # add to firestore
    self.db.addDoc(word, academia_freq, sf_freq,
                   description=definition, first_occurance=start_date)

    return {
        "word": word,
        "definition": definition,
        "start date": start_date,
        "academia": academia_freq,
        "science fiction": sf_freq,
    }
def _get_result_count(query):
    """Query the PubMed proxy service and return yearly result counts.

    :param query: search term to look up
    :return: dict mapping int year -> int count, or None on any failure
    """
    url = "https://med-by-year.appspot.com/search?q=" + urlEncode(query)
    response = get(url, timedelta=45, retries=1)
    if response is None:
        if is_debug():
            print('Error while getting {} from PubMed'.format(query))
        return None

    payload = json.loads(response.text)
    if 'counts' not in payload:
        # The service either reported an explicit error or replied malformed.
        if is_debug():
            if 'error' in payload:
                print('Error while getting {} from PubMed: {}'.format(
                    query, payload['error']))
            else:
                print('Error while getting {} from PubMed'.format(query))
        return None

    return {int(year): int(count) for year, count in payload['counts'].items()}
def get_curriculum() -> dict:
    """Return distinct bachelor and master curricula.

    :return: {"bachelors": [...], "masters": [...]} where each row carries the
        keys ``curriculumCode`` and ``curriculumName`` (the SQL column aliases).
        In debug mode a static fixture with the same shape is returned.
    """
    if is_debug() is False:
        db = create_connection()
        try:
            with db.cursor() as cursor:
                sql = """
                SELECT DISTINCT curriculum_code as curriculumCode,
                                curriculum_name as curriculumName
                FROM VIS_CURRICULUM_VERSION
                WHERE DEGREE_NAME={}
                GROUP BY curriculumName
                """
                bachelors_sql = sql.format("'Bachelor of Science'")
                cursor.execute(bachelors_sql)
                bachelors = cursor.fetchall()
                masters_sql = sql.format("'Master of Science'")
                cursor.execute(masters_sql)
                masters = cursor.fetchall()
                return {
                    "bachelors": bachelors,
                    "masters": masters
                }
        finally:
            db.close()
    else:
        # BUG FIX: the mock rows previously used the key "curriculum_code",
        # while the real query aliases the column to curriculumCode — the
        # debug payload now matches the production payload shape.
        return {
            "bachelors": [
                {"curriculumCode": 20, "curriculumName": "Math"},
                {"curriculumCode": 21, "curriculumName": "Physics"},
            ],
            "masters": [
                {"curriculumCode": 31, "curriculumName": "Advanced Math"},
                {"curriculumCode": 32, "curriculumName": "Biology"},
            ],
        }
def delay_between_runs():
    """Sleep between runs (60 s normally, 5 s in debug), waking early on shutdown.

    Reads the module-level ``killer`` flag each second so a termination
    request is honoured promptly instead of after the full delay.
    """
    global killer
    sleepint = 60  # usual delay, in seconds
    if is_debug():
        sleepint = 5
        log_info("Till next time.. DEBUG MODE 5 second delay only")
    else:
        # BUG FIX: this message was logged unconditionally before, producing a
        # duplicate "Till next time.." line in debug mode.
        log_info("Till next time..")
    # Sleep in 1-second slices so killer.kill_now is checked frequently.
    for _ in range(sleepint):
        if killer.kill_now:
            return
        time.sleep(1)
def get_data_for_task_3() -> dict:
    """Return Sankey-diagram data: curriculum nodes plus bachelor→master links.

    BUG FIX: the return annotation said ``list`` but both branches return a
    ``dict`` with "nodes" and "links" keys; annotation corrected.

    :return: {"nodes": [...], "links": [...]}; a static fixture in debug mode.
    """
    if is_debug() is False:
        db = create_connection()
        try:
            with db.cursor() as cursor:
                curriculum_sql = """
                SELECT node, name FROM VIS_CALC_CURRICULUM
                """
                cursor.execute(curriculum_sql)
                masters_and_bachelors = cursor.fetchall()
                # value is log-scaled so link widths stay in a plottable range.
                link_sql = """
                SELECT bachelor_curriculum_node as source,
                       master_curriculum_node as target,
                       (LOG(`count`) * 7 + 10) as `value`
                FROM VIS_CALC_TASK_3;
                """
                cursor.execute(link_sql)
                links = cursor.fetchall()
                return {
                    "nodes": masters_and_bachelors,
                    "links": links
                }
        finally:
            db.close()
    else:
        return {
            # Nodes are the curriculum items
            "nodes": [
                {"node": 0, "name": "Maths"},
                {"node": 1, "name": "Physics"},
                {"node": 2, "name": "Computer Science"},
                {"node": 3, "name": "Advanced Maths"},
                {"node": 4, "name": "Advanced Computer Science"}
            ],
            # Links are the number of students who went from one bachelor to a master
            "links": [
                {"source": 0, "target": 3, "value": 2},
                {"source": 1, "target": 3, "value": 1},
                {"source": 1, "target": 4, "value": 1},
                {"source": 2, "target": 4, "value": 2}
            ]
        }
def searchWord(word):
    """Look *word* up in the Oxford English Dictionary API.

    :param word: lemma to search for
    :return: (definition, start_date) of the first match, or None on any failure
    """
    # SECURITY NOTE(review): API credentials are hard-coded in source; they
    # should be moved to configuration or environment variables.
    app_id = 'b0691462'  # 'e8901f8f'
    app_key = 'e924ba24525f6c231fbfeba62ce965ca'  # 'b5b75721b3fa6328753ac38e0ca5ee8a'
    try:
        url = "https://oed-api.oxforddictionaries.com/oed/api/v0.1/words/?lemma={}".format(
            urllib.parse.quote(word))
        payload = get(url, headers={
            'app_id': app_id,
            'app_key': app_key
        }).json()
        first_hit = payload["data"][0]
        return first_hit["definition"], first_hit["daterange"]["start"]
    except Exception as e:
        # Any failure (network, auth, empty result, shape change) maps to None.
        if is_debug():
            print("Error searching OED for {}: {}".format(word, e))
        return None
def get_frequency(query, percentages=False):
    """Return yearly frequency pairs for *query* from Google NGram ("english fiction").

    :param query: word/phrase to look up
    :param percentages: when True, pair years with percentage counts instead of
        absolute counts
    :return: list of (year, count) with leading zero-frequency years trimmed,
        or None when the NGram lookup fails
    """
    start_date = 1800
    end_date = 2019
    result = retrieve_absolute_percentage_counts(query, "english fiction", 0,
                                                 start_date, end_date)
    if result is None:
        if is_debug():
            print("Error: Could not get word frequency from Google NGram")
        return None
    counts, counts_percentages = result
    freq = list(
        zip(range(start_date, end_date),
            counts_percentages if percentages else counts))
    # delete the beginning dates that start with 0 frequency
    # BUG FIX: bound the scan so a word with zero occurrences in every year
    # returns an empty list instead of raising IndexError.
    start_from = 0
    while start_from < len(counts) and counts[start_from] == 0:
        start_from += 1
    counts = counts[start_from:]
    freq = freq[start_from:]
    return freq
def main():
    """Start the Tornado websocket service and run the event loop until shutdown.

    Port defaults to 8200 and can be overridden by the first CLI argument.
    On POSIX, SIGINT/SIGTERM trigger a graceful shutdown via ask_exit(); on
    Windows (no add_signal_handler support) KeyboardInterrupt is used instead.
    """
    # CLEANUP: removed leftover print(1)..print(5) debug statements and a
    # commented-out dead block that toggled a module-level `debug` flag.
    port = 8200
    import sys
    if len(sys.argv) > 1:
        try:
            port = int(sys.argv[1])
        except ValueError:
            # Non-numeric argument: keep the default port.
            pass
    tornado.options.parse_command_line()
    paths = [
        (r'/socket_kinddit', WebSocketHandler),
    ]
    if is_debug():
        # Static assets are served directly only in debug mode.
        paths += [
            (r'/script.js', ScriptHandler),
            (r'/favicon.ico', FaviconHandler),
            (r'/kinddit/logo.png', LogoHandler),
            (r'/(.*)', MainHandler),
        ]
    app = tornado.web.Application(paths, debug=is_debug())
    log_info("Websocket service on %d" % port)
    if is_debug():
        log_info("Debug mode.")
    app.listen(port)
    tornado.ioloop.IOLoop.current().spawn_callback(antispam_caller)
    loop = asyncio.get_event_loop()
    # run.
    if sys.platform == 'win32':
        # loop.add_signal_handler is unavailable on Windows.
        try:
            log_info("Running. Press Ctrl+C to interrupt.")
            loop.run_forever()
        except KeyboardInterrupt:
            log_info("CTRL+C pressed")
    else:
        for signame in ('SIGINT', 'SIGTERM'):
            try:
                loop.add_signal_handler(getattr(signal, signame),
                                        functools.partial(ask_exit, signame))
            except NotImplementedError:
                log_error("NOTIMPLEMENTEDERROR SIGINT, SIGTERM")
        try:
            log_info("Running. Press Ctrl+C to interrupt.")
            loop.run_forever()
        finally:
            loop.close()
            log_info("Shutdown with grace!")
# List of banned Regular Expressions and strings regex_list = ['CC{\w+}', 'CCRU{\w+}', 'doveva annà così fratellì', 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'] service_type = 'Netcat' # Name of the service # Parameter that controls the packet dropping policy: # 0: only drop the packet; # 1: drop the packet and send a RST packet to kill the connection; # 2: drop the packet and send a ACK packet to continue the connection. dropping_policy = 1 # Checking root privileges if not utils.is_root(): print("You need root privileges to run this application!") exit(-1) # Checking debug flag status (-d or --debug) debug = utils.is_debug() # Indispensable objects instantiation log = mylog.Log(log_file) shield = my_analysis.Shield(regex_list, service_type, log) handling = packet_handling.PacketHandling(log, shield, debug, dropping_policy) # Optional objects instantiation: comment them to disable statistics = stats.Stats(log, handling) pcap_exporter = pcap.PCAP(log, handling, pcap_file) log.uplog("Starting ips-cc") # Retrieving iptables list and determining the policy of each rule iptables_list = utils.list_iptables() shield.set_rules(iptables_list, queue_number)
def process(select):
    """Build and send a Kindle digest for every user with pending subscriptions.

    Groups the incoming subscription rows by username, then for each user:
    stamps last_sent, creates a ReportModel row, renders the articles to an
    HTML file, e-mails it (unless in debug mode), and updates the report's
    processing stage/time as it goes. Errors for one user are logged and do
    not stop processing of the remaining users.

    :param select: iterable of subscription model rows (each with a .user)
    """
    # NOTE(review): `database` is never used below — presumably get_db()
    # is called for its connection side effect; confirm.
    database = get_db()
    now = datetime.now()
    todo = {}
    # Group subscriptions by username: {username: {subs, archive, user}}.
    for subscribtion in select:
        if subscribtion.user.username not in todo:
            obj = {
                'subs': [],
                'archive': subscribtion.user.archive,
                'user': subscribtion.user
            }
            todo[subscribtion.user.username] = obj
        todo[subscribtion.user.username]['subs'] += [subscribtion]
    for username, value in todo.items():
        start_time = datetime.now()
        log_info(username)
        log_info(value)
        # Back-date last_sent by 2 h so a failed run is retried sooner.
        till_tries_again = now - timedelta(hours=2)
        user = value['user']
        # Update last_sent up-front, before anything below can fail.
        q = user.update(last_sent=till_tries_again).where(
            UserModel.id == user.id)
        q.execute()
        subs_encoded = ""
        for sub in value['subs']:
            subs_encoded += sub.subreddit + " ; "
        # Create the report row at stage 0; stages are bumped as work proceeds.
        report = ReportModel(user=user, processing_stage=0,
                             processing_time=get_milliseconds_from(start_time),
                             to_email=user.kindle_email,
                             was_requested=user.has_requested,
                             was_successful=False,
                             attachment_size=0,
                             attachment_title=get_title(),
                             article_shortlinks='',
                             subs=subs_encoded,
                             is_recurrent=user.is_recurrent)
        report.save()
        try:
            thisdir = os.path.dirname(os.path.realpath(__file__))
            title = get_title()
            output_path = os.path.join(thisdir,
                                       'kinddit_output/' + title + '.html')
            articles = encode_articles_for(value['subs'], value['archive'])
            # Prepend newly seen articles to the user's archive so they are
            # not sent again next time.
            prepend_archives = ""
            for article in articles:
                if not article['archived']:
                    prepend_archives = serialise_article_for_archive(
                        article) + prepend_archives
            archive = prepend_archives + value['archive']
            q = user.update(archive=archive).where(UserModel.id == user.id)
            q.execute()
            articles_encoded = ""
            for article in articles:
                articles_encoded += article['link'] + " ; "
            # Stage 5: article list resolved; record shortlinks.
            q = report.update(processing_stage=5,
                              processing_time=get_milliseconds_from(start_time),
                              article_shortlinks=articles_encoded
                              )\
                .where(ReportModel.id == report.id)
            q.execute()
            # Generate book.html and record its size.
            size = generate_html(username, articles, output_path, title)
            # Stage 10: HTML generated.
            q = report.update(processing_stage=10,
                              processing_time=get_milliseconds_from(start_time),
                              attachment_size=size
                              )\
                .where(ReportModel.id == report.id)
            q.execute()
            # Clear a one-off request (non-recurrent user who asked for a
            # digest) before sending, so a crash cannot re-trigger it.
            hasrequested = user.is_recurrent == False and user.has_requested == True
            if hasrequested:
                hasrequested = False
                q = user.update(has_requested=hasrequested).where(
                    UserModel.id == user.id)
                q.execute()
            if not is_debug():
                send_kindler(value['user'].kindle_email, output_path)
            else:
                log_info(
                    "Didnt send kindler through e-mail. it's waiting in the folter."
                )
            # Stage 15: e-mail sent; mark success.
            q = report.update(processing_stage=15,
                              processing_time=get_milliseconds_from(start_time),
                              was_successful=True,
                              was_requested=False
                              )\
                .where(ReportModel.id == report.id)
            q.execute()
            # Keep the generated file around in debug mode for inspection.
            if not is_debug():
                os.remove(output_path)
            log_info("Sent for: " + username)
        except Exception as e:
            # One user's failure must not abort the whole batch.
            log_error("Unexpected error while sending.")
            import traceback
            log_error(
                traceback.format_exception(
                    None,  # <- type(e) by docs, but ignored
                    e, e.__traceback__),
            )
        delay_between_users()
def get_data_for_task_1(curriculum_code: str) -> list:
    """Return per-module successful-student counts per semester for a curriculum.

    BUG FIX: the return annotation said ``dict`` but both branches return a
    ``list`` of module dicts; annotation corrected. The SQL is now
    parameterized (pymysql ``%s`` placeholder) instead of string-formatted,
    closing a SQL-injection hole via *curriculum_code*.

    :param curriculum_code: curriculum to report on
    :return: list of {"moduleId", "moduleName", "type", "numberPerSemester"}
    """
    if is_debug() is False:
        db = create_connection()
        try:
            with db.cursor() as cursor:
                sql = """
                SELECT * FROM VIS_CALC_TASK_1
                WHERE curriculum_code=%s and SEMESTER_VALUE <= 6 AND NODE_TITLE!=""
                """
                cursor.execute(sql, (curriculum_code,))
                data = cursor.fetchall()
                # groupby needs the data pre-sorted by the grouping key.
                modules = []
                data_sorted = list(sorted(data, key=lambda x: x["NODE_TITLE"]))
                for k, g in groupby(data_sorted, lambda x: x["NODE_TITLE"]):
                    modules.append(list(g))

                def convert_semesters(same_semesters):
                    # One group = all semester rows of a single module.
                    is_master = (same_semesters[0]["DEGREE_NAME"]
                                 == "Master of Science")
                    module = {
                        "moduleId": same_semesters[0]["NODE_ID"],
                        "moduleName": same_semesters[0]["NODE_TITLE"],
                        "type": "master" if is_master else "bachelor",
                        # Masters span 4 semesters, bachelors 6; default 0.
                        "numberPerSemester": {
                            "Semester " + str(i): 0
                            for i in range(1, 5 if is_master else 7)
                        }
                    }
                    for m in same_semesters:
                        module["numberPerSemester"][
                            "Semester " + str(m["SEMESTER_VALUE"])
                        ] = m["SUCCESSFUL_STUDENTS"]
                    return module

                return list(map(convert_semesters, modules))
        finally:
            db.close()
    else:
        return [
            {
                "moduleId": 1,
                "moduleName": "Math",
                "type": "bachelor",
                "numberOfStudents": 60,
                "numberPerSemester": {
                    "Semester 1": 10,
                    "Semester 2": 20,
                    "Semester 3": 0,
                    "Semester 4": 5,
                    "Semester 5": 15,
                    "Semester 6": 10
                }
            },
            {
                "moduleId": 2,
                "moduleName": "Advanced Biology",
                "type": "master",
                "numberOfStudents": 25,
                "numberPerSemester": {
                    "Semester 1": 15,
                    "Semester 2": 8,
                    "Semester 3": 7,
                    "Semester 4": 0
                }
            }
        ]
def get_data_for_task_2(curriculum_code: str) -> list:
    """Return modules grouped by median semester, split into alphabet buckets.

    BUG FIXES:
    - each ``groupby`` group is a one-shot iterator; it was filtered once per
      alphabet bucket, so every bucket after the first always came out empty.
      The group is now materialized once per median.
    - the buckets ("A","K") and ("K","Z") overlap on "K"; a module is now
      assigned to the first matching bucket only, so none is duplicated.
    The SQL is parameterized (pymysql ``%s``) to close a SQL-injection hole
    via *curriculum_code*.

    :param curriculum_code: curriculum to report on
    :return: list of {"median", "children": [bucket, ...]} dicts
    """
    if is_debug() is False:
        alphabet_splitter = [("A", "K"), ("K", "Z")]
        db = create_connection()
        try:
            with db.cursor() as cursor:
                sql = """
                SELECT NODE_TITLE as moduleName,
                       MEDIAN_SEMESTER as medianSemester,
                       SEMESTER_TYPE_ID as recommendedSemester
                FROM VIS_CALC_TASK_2
                WHERE NODE_TITLE IS NOT NULL AND NODE_TITLE != ""
                  AND curriculum_code=%s
                  AND SEMESTER_TYPE_ID <= 6 AND MEDIAN_SEMESTER <= 6;
                """
                cursor.execute(sql, (curriculum_code,))
                data = cursor.fetchall()
                semesters = []
                # groupby needs the data pre-sorted by the grouping key.
                data_sorted = list(
                    sorted(data, key=lambda x: x["medianSemester"]))
                for median, group in groupby(data_sorted,
                                             lambda x: x["medianSemester"]):
                    # Materialize: the group iterator can be consumed only once.
                    remaining = list(group)
                    modules_parts = []
                    for start_spell, end_spell in alphabet_splitter:
                        modules = []
                        leftovers = []
                        for row in remaining:
                            if start_spell <= row["moduleName"][0] <= end_spell:
                                modules.append({
                                    "name": row["moduleName"],
                                    "recommendedSemester": row["recommendedSemester"],
                                    "medianForColor": median
                                })
                            else:
                                leftovers.append(row)
                        # First matching bucket wins; "K" modules land in A-K.
                        remaining = leftovers
                        if len(modules) != 0:
                            modules_parts.append({
                                "name": "{}-{}".format(start_spell, end_spell),
                                "children": modules
                            })
                    if len(modules_parts) > 0:
                        semesters.append({
                            "median": median,
                            "children": modules_parts
                        })
                return semesters
        finally:
            db.close()
    else:
        return [
            {
                "median": 1,
                "children": [
                    {
                        "name": "A-K",
                        "children": [
                            {"name": "Math", "recommendedSemester": 1},
                            {"name": "Biology", "recommendedSemester": 2}
                        ]
                    }
                ]
            },
            {
                "median": 2,
                "children": [
                    {
                        "name": "A-K",
                        "children": [
                            {"name": "Advanced Math", "recommendedSemester": 1},
                            {"name": "Advanced Biology", "recommendedSemester": 2}
                        ]
                    }
                ]
            }
        ]