def get_result_plot_js(master_iteration_id):
    """
    Get the last result plot using html/javascript
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    if not pipong_is_master():
        logger.debug("{}: This node is not a master".format(current_f_name))
        abort(404)
    master_it = db.session.query(
        models.MasterIteration).filter_by(id=master_iteration_id).first()
    if master_it is None:
        logger.error("{}: No MasterIteration found with id: {}".format(
            current_f_name, master_iteration_id))
        abort(404)
    if not master_it.json_graph:
        logger.error("{}: Empty json_graph for id: {}".format(
            current_f_name, master_iteration_id))
        abort(404)
    return render_template('master/result_plot_js.html',
                           title='Result Plot JS',
                           graph_data_json=master_it.json_graph,
                           master_iteration_id=master_iteration_id)
def collect_post_data(self, page_row_list) -> List[dict]:
    page_post_data = []
    for post in page_row_list:
        try:
            post_data = dict()
            post_data["price"] = post.find("span", {
                "class": "result-price"
            }).text
            basic_info = post.find("div", {"class": "result-info"})
            post_data["date_time"] = basic_info.find(
                "time", {"class": "result-date"})['datetime']
            post_data["post_url"] = basic_info.find(
                "a", {"class": "result-title hdrlnk"})['href']
            post_data["post_title"] = basic_info.find(
                "a", {"class": "result-title hdrlnk"}).text
            post_data["location"] = self.location
            post_data["distance"] = self.distance
            post_data["postal"] = self.postal
            post_data["auto_make_model"] = self.auto_make_model
            post_data["min_auto_year"] = self.min_auto_year
            post_data["max_auto_year"] = self.max_auto_year
            post_data["max_auto_miles"] = self.max_auto_miles
            post_data["auto_title_status"] = self.auto_title_status
            page_post_data.append(post_data)
        except Exception as error:
            print(str(error))
            logger.error(str(error))
    return page_post_data
def get_result_plot_json(master_iteration_id):
    """
    Get the last result plot data in json
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    if not pipong_is_master():
        logger.debug("{}: This node is not a master".format(current_f_name))
        abort(404)
    master_it = db.session.query(
        models.MasterIteration).filter_by(id=master_iteration_id).first()
    if master_it is None:
        logger.error("{}: No MasterIteration found with id: {}".format(
            current_f_name, master_iteration_id))
        abort(404)
    if not master_it.json_graph:
        logger.error("{}: Empty json_graph for id: {}".format(
            current_f_name, master_iteration_id))
        abort(404)
    js_graph = json.loads(master_it.json_graph)
    for e in js_graph['links']:
        e['left'] = False
        e['right'] = True
    return jsonify(js_graph)
def get_sel_page(url: str, page_start: int = None) -> Union[BeautifulSoup, None]:
    options = get_chrome_options()
    driver = webdriver.Chrome(web_driver_file, options=options)
    url = url + '&s={page_start}'.format(
        page_start=page_start) if page_start is not None else url
    driver.get(url)
    page_soup = None
    try:
        delay = 5
        wait = WebDriverWait(driver, delay)
        wait.until(ec.presence_of_element_located((By.ID, "searchform")))
        print("Page is ready")
        logger.info("Page loaded successfully")
        page = driver.page_source
        page_soup = BeautifulSoup(page, 'html.parser')
    except TimeoutException:
        print("Loading took too much time")
        logger.error("Loading took too much time")
    driver.stop_client()
    driver.close()
    driver.quit()
    return page_soup
def create_user(login: str, user_password: str, role: str = 'default'):
    from main import logger
    session = Session()
    new_user = Users(login=login,
                     user_password=generate_password_hash(
                         user_password, "sha256", salt_length=8))
    try:
        session.add(new_user)
        session.commit()
    except exc.IntegrityError:
        session.rollback()
        logger.error(f"{current_user.login} got IntegrityError because "
                     f"a user with such parameters already exists:\n"
                     f"login = {login}\n"
                     f"password = {user_password}\n"
                     f"role = {role}")
        return False
    new_user_role = RolesOfUsers(login=new_user.login, user_role=role)
    try:
        session.add(new_user_role)
        session.commit()
    except exc.IntegrityError:
        session.rollback()
        logger.error(f"{current_user.login} got IntegrityError because "
                     f"user {login} already has the {role} role")
        return False
    session.close()
    logger.info(f'User {login} successfully created')
    return True
def get_slack_conversations():
    try:
        slack_token = config["SLACK"]["oauth_token"]
        client = WebClient(token=slack_token)
        response = client.conversations_list()
        conversations = response["channels"]
        return conversations
    except Exception as error:
        logger.error(str(error))
def add_user_profile(self, userID: int, alert: int) -> None:
    try:
        # Use ? placeholders so the parameters are actually bound.
        self.cursor.execute(
            "INSERT INTO user_profile VALUES (?, ?);",
            (userID, alert))
        self.connection.commit()
    except sqlite3.IntegrityError as ex:
        logger.error("An error has occurred while adding user info! %s", ex)
def get_result_plot(master_iteration_id):
    """
    Get the last result plot using matplotlib
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    if not pipong_is_master():
        logger.debug("{}: This node is not a master".format(current_f_name))
        abort(404)
    master_it = db.session.query(
        models.MasterIteration).filter_by(id=master_iteration_id).first()
    if master_it is None:
        logger.error("{}: No MasterIteration found with id: {}".format(
            current_f_name, master_iteration_id))
        abort(404)
    if not master_it.json_graph:
        logger.error("{}: Empty json_graph for id: {}".format(
            current_f_name, master_iteration_id))
        abort(404)
    js_graph = json.loads(master_it.json_graph)
    G = json_graph.node_link_graph(js_graph)
    pos = nx.drawing.nx_agraph.graphviz_layout(G, prog='dot')
    node_labels = {}
    for k, v in G.nodes(data=True):
        node_labels[k] = '{}\n{:.2f}%'.format(k, v['mean'])
    nx.draw_networkx_labels(G, pos, labels=node_labels, font_size=5)
    # G.node was removed in networkx 2.4; G.nodes is the supported accessor
    node_colors = [G.nodes[n]['mean'] for n in G.nodes()]
    nx.draw_networkx_nodes(G,
                           pos,
                           node_color=node_colors,
                           node_size=600,
                           node_shape='o',
                           cmap=plt.cm.OrRd,
                           vmin=0.,
                           vmax=100.)
    nx.draw_networkx_edges(G,
                           pos,
                           arrowstyle='-|>',
                           arrowsize=20,
                           edge_color='black',
                           width=1)
    fig = plt.gcf()
    fig.set_size_inches(30, 20)
    plt.savefig('/tmp/last_generated_result.png', dpi=250)
    output = io.BytesIO()
    FigureCanvas(fig).print_png(output)
    return Response(output.getvalue(), mimetype='image/png')
def insert_data(cursor, table_name, source):
    for item in source:
        column_names = ", ".join(item.keys())
        # NOTE: values are interpolated directly into the SQL string; prefer
        # parameterized queries when the data is not trusted.
        values = ', '.join("'{0}'".format(i) for i in item.values())
        try:
            cursor.execute(
                DatabaseUtils.QUERY_INSERT_INTO.format(
                    table_name, column_names, values))
        except Exception as e:
            logger.error("Insert data to database failed, {}".format(e))
def add_user(self, userID: int, first_name: str, last_name: str,
             username: str, number: int, language_code: str) -> None:
    try:
        self.cursor.execute(
            "INSERT INTO users VALUES (?, ?, ?, ?, ?, ?);",
            (userID, first_name, last_name, username, number, language_code))
        self.connection.commit()
    except sqlite3.IntegrityError as ex:
        logger.error("An error has occurred while adding a user! %s", ex)
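# Hedged usage sketch for the two sqlite helpers above. The enclosing class
# name (`Database`) and the sample values are assumptions for illustration;
# only add_user/add_user_profile come from the original code.
if __name__ == "__main__":
    db = Database()  # hypothetical class wrapping self.connection/self.cursor
    db.add_user(42, "Ada", "Lovelace", "ada", 5550100, "en")
    db.add_user_profile(42, alert=1)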
def create_iperf_server(port):
    """
    Create iperf servers at random ports
    The max number of servers is defined by parameters
    :param port: the port used to start the server
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    try:
        # kill any program already using this port
        cmd = ['lsof', '-t', '-i:{}'.format(port)]
        popen_obj = subprocess.Popen(cmd,
                                     close_fds=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
        out, err = popen_obj.communicate()
        # communicate() returns bytes; decode so the pid logs and formats cleanly
        pid_port = out.strip().decode()
        if pid_port:
            logger.info("{}: iperf server already running with pid: {}".format(
                current_f_name, pid_port))
            cmd = ['kill', '-9', pid_port]
            popen_obj = subprocess.Popen(cmd,
                                         close_fds=True,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
            out, err = popen_obj.communicate()
        cmd = [
            'timeout', '6000', app.config['IPERF3_SERVER_SCRIPT_LOCATION'],
            str(port)
        ]
        popen_obj = subprocess.Popen(cmd, close_fds=True)
        time.sleep(2)
        # poll() returns None when the process is still running
        # https://docs.python.org/3/library/subprocess.html#popen-objects
        poll_msg = popen_obj.poll()
        if poll_msg is None:
            return True
        logger.info("{}: Error opening iperf server: {}. Command: {}".format(
            current_f_name, poll_msg, ' '.join(str(e) for e in cmd)))
        logger.info("{}: Command: {}".format(current_f_name, cmd))
        return False
    except SoftTimeLimitExceeded:
        return False
    except Exception as e:
        logger.error("{}: Error creating iperf server: {} {}".format(
            current_f_name, str(e), traceback.format_exc()))
        return False
def run(self):
    base_folder = self.base_config.tmp_folder
    database = self.sql_config.database
    for table in self.sql_config.tables:
        previous_dump_name = MonitorJob.construct_filename_with_path(
            base_folder, database, table, "previous")
        current_dump_name = MonitorJob.construct_filename_with_path(
            base_folder, database, table, "current")
        try:
            # dump sql to file
            # mysqldump --skip-comments --skip-extended-insert -u root -p db table > file.sql
            sql_dump_command = "mysqldump --skip-comments --skip-extended-insert -h{} -u{} -p{} {} {} > {}".format(
                self.sql_config.host, self.sql_config.username,
                self.sql_config.password, database, table,
                current_dump_name
            ) if self.sql_config.password else "mysqldump --skip-comments --skip-extended-insert -h{} -u{} {} {} > {}".format(
                self.sql_config.host, self.sql_config.username, database,
                table, current_dump_name)
            command = sql_dump_command
            logger.debug("running {}".format(command))
            c = delegator.run(command)
            if c.return_code != 0:
                raise RuntimeError(c.std_err)

            # Compare and send; diff exits 0 when the files match,
            # 1 when they differ, and >1 on trouble.
            command = "diff {} {}".format(previous_dump_name,
                                          current_dump_name)
            logger.debug("running {}".format(command))
            c = delegator.run(command)
            if c.return_code == 1:
                msg = {
                    "title": "Diff on {}-{}".format(database, table),
                    "text": self.reconstruct_msg(c.out)
                }
                logger.debug(msg)
                self.webhook.send_msg(msg)
            elif c.return_code != 0:
                logger.error("Diff failed {}".format(c.err))

            # move file
            os.rename(current_dump_name, previous_dump_name)
            MonitorJob.safe_delete(current_dump_name)
        except (RuntimeError, AssertionError) as e:
            # log
            logger.error(e)
def connect_to_database(db_name):
    try:
        connection = connect(user=DatabaseUtils.DB_USERNAME,
                             password=DatabaseUtils.DB_PASSWORD,
                             host=DatabaseUtils.DB_HOSTNAME,
                             database=db_name)
        connection.autocommit = True
        logger.info("Connect with database succeeded")
        return connection
    except Exception as e:
        logger.error("Connect with database failed, {}".format(e))
async def restart(self, ctx):
    lang = main.get_lang(ctx.guild.id) if ctx.guild else main.lang
    await ctx.send(embed=discord.Embed(title=lang["core_restart"],
                                       color=self.embed_color))
    try:
        # close inherited file descriptors before re-executing the process
        process = psutil.Process(os.getpid())
        for handler in process.open_files() + process.connections():
            os.close(handler.fd)
    except Exception as e:
        logger.error(e)
    os.execl(sys.executable, sys.executable, *sys.argv)
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
    """ Utility function for optimize_on_cpu and 16-bits training.
        Copy the parameters optimized on CPU/RAM back to the model on GPU
    """
    for (name_opti, param_opti), (name_model, param_model) in zip(
            named_params_optimizer, named_params_model):
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(
                name_opti, name_model))
            raise ValueError
        param_model.data.copy_(param_opti.data)
def create_slack_channel(self):
    try:
        slack_token = config["SLACK"]["oauth_token"]
        client = WebClient(token=slack_token)
        response = client.conversations_create(
            name=self.channel,
            is_private=False
        )
        channel_id = response["channel"]["id"]
        logger.info("channel created with id: {channel_id}".format(
            channel_id=channel_id))
        return channel_id
    except SlackApiError as e:
        logger.error(e.response["error"])
def execute_file(cursor, sql_file):
    result = []
    with open(sql_file, 'r') as file:
        sql_text = file.read()
    try:
        for command in sql_text.split(';'):
            # strip surrounding whitespace so empty fragments are skipped
            command = command.strip()
            if command == '' or command == '--':
                continue
            cursor.execute(command)
            result.append({"query": command, "result": cursor.fetchall()})
    except Exception as e:
        logger.error("Execute file to database failed, {}".format(e))
    return result
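# Hedged usage sketch tying connect_to_database, execute_file and insert_data
# together. The database name, script path, table name, and row values are
# assumptions for illustration only.
connection = connect_to_database("inventory")
if connection is not None:
    cursor = connection.cursor()
    for entry in execute_file(cursor, "schema.sql"):
        logger.info("{} -> {}".format(entry["query"], entry["result"]))
    insert_data(cursor, "products", [{"name": "widget", "qty": "3"}])
    cursor.close()
    connection.close()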
def send_slack_msg(self, msg_text):
    channel_id = self.get_slack_channel_id()
    if channel_id is None:
        channel_id = self.create_slack_channel()
    if channel_id is not None:
        try:
            slack_token = config["SLACK"]["oauth_token"]
            client = WebClient(token=slack_token)
            response = client.chat_postMessage(
                channel=channel_id,
                text=msg_text
            )
            logger.info("message sending status code: {status_code}".format(
                status_code=response.status_code))
        except SlackApiError as e:
            logger.error(e.response["error"])
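# Hedged usage sketch for the Slack helpers above. `SlackNotifier` is a
# hypothetical name for the class holding create_slack_channel/send_slack_msg,
# and the channel name is an assumption; get_slack_conversations returns None
# on failure, hence the `or []`.
notifier = SlackNotifier(channel="backup-alerts")
notifier.send_slack_msg("nightly backup finished")
for conversation in get_slack_conversations() or []:
    logger.info("visible channel: {}".format(conversation["name"]))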
def __init__(self):
    database_path = os.path.join(self.dir_path, "pg4030_database.db")
    if not os.path.exists(database_path):
        logger.warning("File '" + database_path +
                       "' does not exist! Trying to create one.")
        try:
            self.create_database(database_path)
        except Exception as ex:
            logger.error(
                "An error has occurred while creating the database! %s", ex)
    self.connection = sqlite3.connect(database_path)
    self.connection.text_factory = lambda x: str(x, 'utf-8', "ignore")
    self.cursor = self.connection.cursor()
def run(self):
    tmp_file_name, tmp_file_name_with_path = BackupJob.construct_filename(
        self.sql_config.prefix,
        # need an array
        [self.sql_config.database],
        self.sql_config.suffix,
        self.base_config.tmp_folder)
    try:
        # dump sql to file
        # mysqldump -h xxx -u root xxx | gzip --best | openssl des -salt -k xxxxxx
        sql_dump_command = "mysqldump -h{} -u{} -p{} {}".format(
            self.sql_config.host, self.sql_config.username,
            self.sql_config.password, self.sql_config.database
        ) if self.sql_config.password else "mysqldump -h {} -u {} {}".format(
            self.sql_config.host, self.sql_config.username,
            self.sql_config.database)
        command = "{} | gzip --best | openssl des -salt -k {} > {}".format(
            sql_dump_command, self.base_config.passphrase,
            tmp_file_name_with_path)
        logger.debug("running {}".format(command))
        c = delegator.run(command)
        logger.warning("dumped backup file {}".format(tmp_file_name))
        if c.return_code != 0:
            raise RuntimeError(c.std_err)
        # upload if not dry_run
        if not self.base_config.dry_run:
            self.uploader.upload(tmp_file_name, tmp_file_name_with_path,
                                 self.sql_config.expired)
    except (RuntimeError, AssertionError) as e:
        # log
        logger.error(e)
    finally:
        # delete
        if self.base_config.delete_tmp_file:
            BackupJob.safe_delete(tmp_file_name_with_path)
        else:
            logger.warning("tmp file deletion is off!")
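# Hedged note on restoring the artifact produced by run() above: the dump is
# piped through gzip and then symmetrically encrypted with openssl des, so the
# inverse pipeline would be roughly the following (passphrase, file name, and
# connection details are assumptions):
#
#   openssl des -d -salt -k <passphrase> -in backup.sql.gz.enc \
#       | gunzip | mysql -h <host> -u <user> <database>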
def _run(self, ds):
    tmp_file_name, tmp_file_name_with_path = BackupJob.construct_filename(
        self.mongo_config.prefix,
        # need an array
        [self.mongo_config.database],
        self.mongo_config.suffix,
        self.base_config.tmp_folder,
        self.construct_dt(ds))
    try:
        # dump mongo collection to file
        # mongodump --username xxx --password xxx --host xxx --db xxx --out xxx
        mongo_dump_command = "mongodump --host {} --username {} --password {} --db {} ".format(
            self.mongo_config.host, self.mongo_config.username,
            self.mongo_config.password, self.mongo_config.database,
        ) if self.mongo_config.password else "mongodump --host {} --db {}".format(
            self.mongo_config.host, self.mongo_config.database)
        command = "{} --gzip --archive={}".format(mongo_dump_command,
                                                  tmp_file_name_with_path)
        logger.debug("running {}".format(command))
        c = delegator.run(command)
        logger.warning("dumped backup file {}".format(tmp_file_name))
        if c.return_code != 0:
            raise RuntimeError(c.std_err)
        # upload if not dry_run
        if not self.base_config.dry_run:
            self.uploader.upload(tmp_file_name, tmp_file_name_with_path)
    except (RuntimeError, AssertionError) as e:
        # log
        logger.error(e)
    finally:
        # delete
        if self.base_config.delete_tmp_file:
            BackupJob.safe_delete(tmp_file_name_with_path)
        else:
            logger.warning("tmp file deletion is off!")
def check_master_iteration_done(master_iteration_id):
    """
    Check if for the specific iteration all the pingers have sent their
    results
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    # logger.info("{}: check_master_iteration_done called".format(
    #     current_f_name))
    is_finished = False
    master_it = db.session.query(
        models.MasterIteration).filter_by(id=master_iteration_id).first()
    if master_it is None:
        logger.error("{}: No MasterIteration found with id: {}".format(
            current_f_name, master_iteration_id))
        return {'is_finished': is_finished, 'percentage': 0.0}
    count = 0
    pinger_size = len(master_it.master_iteration_pinger)
    for master_pinger_it in master_it.master_iteration_pinger:
        if master_pinger_it.status == "FINISHED":
            count += 1
    if count >= pinger_size:
        s = db.session()
        is_finished = True
        master_it.status = 'FINISHED'
        s.commit()
    if count > pinger_size:
        logger.warning("{}: count > pinger_size {}>{}".format(
            current_f_name, count, pinger_size))
        count = pinger_size
    percent = 0
    if pinger_size > 0:
        percent = (count / float(pinger_size)) * 100
    return {
        'is_finished': is_finished,
        'percentage': percent,
        'count': count,
        'total': pinger_size
    }
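# Hedged polling sketch for check_master_iteration_done. The iteration id and
# the poll interval are assumptions; .get() is used because 'count'/'total'
# are absent from the returned dict when the iteration id is unknown.
status = check_master_iteration_done(master_iteration_id=1)
while not status['is_finished']:
    time.sleep(10)
    status = check_master_iteration_done(master_iteration_id=1)
logger.info("{}/{} pingers finished ({:.1f}%)".format(
    status.get('count', 0), status.get('total', 0), status['percentage']))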
def run(self):
    tmp_file_name, tmp_file_name_with_path = BackupJob.construct_filename(
        self.redis_config.prefix,
        # need an array
        ["redis"],
        self.redis_config.suffix,
        self.base_config.tmp_folder)
    rdb_tmp_file_name, rdb_tmp_file_name_with_path = BackupJob.construct_filename(
        self.redis_config.prefix,
        # need an array
        ["redis", "rdbdump"],
        "rdb",
        self.base_config.tmp_folder)
    try:
        command = "cp {} {}".format(self.redis_config.rdb_path,
                                    rdb_tmp_file_name_with_path)
        logger.debug("running {}".format(command))
        c = delegator.run(command)
        if c.return_code != 0:
            raise RuntimeError(c.std_err)
        command = "gzip -9c {} | openssl des -salt -k {} > {}".format(
            rdb_tmp_file_name_with_path, self.base_config.passphrase,
            tmp_file_name_with_path)
        logger.debug("running {}".format(command))
        c = delegator.run(command)
        # check the compress/encrypt step too, matching the cp step above
        if c.return_code != 0:
            raise RuntimeError(c.std_err)
        if not self.base_config.dry_run:
            self.uploader.upload(tmp_file_name, tmp_file_name_with_path,
                                 self.redis_config.expired)
    except (RuntimeError, AssertionError) as e:
        # log
        logger.error(e)
    finally:
        # delete
        if self.base_config.delete_tmp_file:
            BackupJob.safe_delete(rdb_tmp_file_name_with_path)
            BackupJob.safe_delete(tmp_file_name_with_path)
        else:
            logger.warning("tmp file deletion is off!")
def get_next_page_start(page_soup: BeautifulSoup) -> Union[int, None]:
    next_start = None
    try:
        buttons = page_soup.find("div", {
            "class": "search-legend"
        }).find("div", {
            "class": "paginator"
        }).find("span", {"class": "buttons"})
        total_count = int(buttons.find("span", {"class": "totalcount"}).text)
        range_to = int(
            buttons.find("span", {
                "class": "range"
            }).find("span", {
                "class": "rangeTo"
            }).text)
        if total_count > range_to:
            next_start = range_to
        return next_start
    except Exception as error:
        logger.error(str(error))
        return next_start
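# Hedged sketch of the paging loop these scraper helpers imply: fetch a page,
# collect its posts, then follow the next offset until none remains. The
# search URL, the result-row selector, and `scraper` (an instance providing
# collect_post_data) are assumptions for illustration.
search_url = "https://sfbay.craigslist.org/search/cta?auto_make_model=honda"
page_start = None
all_posts = []
while True:
    soup = get_sel_page(search_url, page_start)
    if soup is None:
        break
    rows = soup.find_all("li", {"class": "result-row"})
    all_posts.extend(scraper.collect_post_data(rows))
    page_start = get_next_page_start(soup)
    if page_start is None:
        break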
def set_optimizer_params_grad(named_params_optimizer, named_params_model,
                              test_nan=False):
    """ Utility function for optimize_on_cpu and 16-bits training.
        Copy the gradient of the GPU parameters to the CPU/RAM copy of the model
    """
    is_nan = False
    for (name_opti, param_opti), (name_model, param_model) in zip(
            named_params_optimizer, named_params_model):
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(
                name_opti, name_model))
            raise ValueError
        if param_model.grad is not None:
            if test_nan and torch.isnan(param_model.grad).sum() > 0:
                is_nan = True
            if param_opti.grad is None:
                param_opti.grad = torch.nn.Parameter(
                    param_opti.data.new().resize_(*param_opti.data.size()))
            param_opti.grad.data.copy_(param_model.grad.data)
        else:
            param_opti.grad = None
    return is_nan
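# Hedged sketch of how the two utilities above are typically paired for
# optimize_on_cpu training (as in early BERT fine-tuning scripts): keep an
# optimizer-side copy of the parameters on CPU, move gradients over, step,
# then copy the updated weights back. `model`, `batch`, and the forward pass
# returning the loss are assumptions for illustration.
import torch

param_optimizer = [(n, p.clone().detach().to('cpu').float().requires_grad_())
                   for n, p in model.named_parameters()]
optimizer = torch.optim.SGD([p for n, p in param_optimizer], lr=1e-3)

loss = model(batch)  # hypothetical forward pass returning the loss
loss.backward()
is_nan = set_optimizer_params_grad(param_optimizer,
                                   model.named_parameters(),
                                   test_nan=True)
if not is_nan:
    optimizer.step()  # steps the CPU copies
    copy_optimizer_params_to_model(model.named_parameters(), param_optimizer)
optimizer.zero_grad()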
def worker_msg_sender() -> None:
    """Send messages with a delay; runs in the background over existing
    dialogs. Time delta: 5 min."""
    from main import logger
    time_delta = 3600
    # send delay, in hours
    sent_delay = 1
    start_time = time()
    # while True:
    #     code
    # get accounts from DB
    accounts = db_get_rows_2(
        [Profiles.profile_id, Profiles.profile_password],
        [Profiles.profile_password])
    for account in accounts:
        chat_id_query = db_get_rows_2(
            [ChatSessions.chat_id],
            [ChatSessions.profile_id == account[0]],
            return_query=True)
        # get profiles which are available and have a chat with this account
        profiles = db_get_rows_2(
            [ChatSessions.profile_id, ChatSessions.chat_id], [
                ChatSessions.profile_id == Profiles.profile_id,
                ChatSessions.chat_id.in_(chat_id_query),
                Profiles.profile_password == None,
                Profiles.available
            ])
        logger.error(f"worker_sender starts processing account {account[0]} "
                     f"with {len(profiles)} profiles")
        if len(profiles) != 0:
            # the account has dialogs with active profiles
            account_session, account_id = site_login(
                profile_login=account[0], password=account[1])
            # pull the message templates, ordered by template number
            text_templates = db_get_rows_2(
                [MessageTemplates.text_number, Texts.text], [
                    MessageTemplates.profile_id == account[0],
                    MessageTemplates.text_id == Texts.text_id
                ],
                order_by=[MessageTemplates.text_number])
            if len(text_templates) == 0:
                # we can't find any template
                logger.error(f"Account {account[0]} has no templates, skipped")
                continue
            for profile in profiles:
                prepare_answer(account=account,
                               profile=profile,
                               account_session=account_session,
                               sent_delay=sent_delay,
                               text_templates=text_templates)
    logger.error("Worker_sender finished preparing answers; going to sleep")
    logger.stopwatch(f"Worker_msg_sender time spent: {time() - start_time} "
                     f"sec")
def display(self):
    logger.error("adsf")
def perform_pipong_iteration_1(pinger_iteration_id):
    """
    First iteration of the discovery and monitor
    Create all tracert configurations on the DB
    perform the tasks asynchronously
    When all tasks are finished a callback is performed to the second
    iteration step
    :param pinger_iteration_id: the iteration id from the db
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    logger.info("{}: Perform_pipong_iteration_1".format(current_f_name))
    iter_t = db.session.query(
        models.PingerIteration).filter_by(id=pinger_iteration_id).first()
    if iter_t is None:
        logger.error("{}: Iteration not found with ID: {}".format(
            current_f_name, pinger_iteration_id))
        return
    if iter_t.status != "CREATED":
        logger.error(
            "{}: Iteration ID:{} is not in CREATED status: {}".format(
                current_f_name, pinger_iteration_id, iter_t.status))
        return
    s = db.session()
    iter_t.status = "RUNNING"
    s.flush()
    s.commit()
    src_port_start = 40000
    for ponger in iter_t.ponger:
        api_port = ""
        if ponger.api_port != "":
            api_port = ":" + str(ponger.api_port)
        post_url = "{}{}{}/api/v1.0/iperf/server".format(
            ponger.api_protocol, ponger.address, api_port)
        logger.debug("{}: post_url: {}".format(current_f_name, post_url))
        try:
            req_res = requests.post(post_url,
                                    auth=requestHTTPAuth(
                                        app.config['HTTP_AUTH_USER'],
                                        app.config['HTTP_AUTH_PASS']),
                                    timeout=10)
        except Exception as e:
            logger.error("{}: Error requesting servers: {}".format(
                current_f_name, str(e)))
            continue
        if req_res.status_code != 200:
            logger.error(
                "{}: Error creating servers: {} returned status: {}".format(
                    current_f_name, post_url, req_res.status_code))
            continue
        json_data = req_res.json()
        if 'port' not in json_data or 'result' not in json_data or \
                json_data['result'] != 'success':
            logger.error("{}: Json data invalid: {}".format(
                current_f_name, json_data))
            continue
        logger.debug("{}: Host:{}{}{} Json data: {}".format(
            current_f_name, ponger.api_protocol, ponger.address, api_port,
            json_data))
        dst_port = json_data['port']
        # register the tracerts
        src_port_end = src_port_start + iter_t.tracert_qty
        ponger_port_t = models.PongerPort(ponger_id=ponger.id,
                                          dst_port=dst_port,
                                          src_port_max=src_port_end,
                                          src_port_min=src_port_start)
        s.add(ponger_port_t)
        s.flush()
        src_port_start = src_port_end + 1
        logger.debug(
            "{}: Creating tracert pinger_iteration_id:{} ponger_port_id:{} ".
            format(current_f_name, pinger_iteration_id, ponger_port_t.id))
        tracert_t = models.Tracert(pinger_iteration_id=pinger_iteration_id,
                                   status='PENDING',
                                   ponger_port_id=ponger_port_t.id)
        s.add(tracert_t)
        s.flush()
    task_list = []
    tracert_qt = db.session.query(models.Tracert).filter_by(
        pinger_iteration_id=pinger_iteration_id, status='PENDING')
    for row in tracert_qt:
        logger.debug("{}: Task creating tracert tasks tracert_id:{}".format(
            current_f_name, row.id))
        task_list.append(do_dublin_tracert.s(row.id))
    iter_t.status = "RUNNING_TRACEROUTE"
    s.flush()
    s.commit()
    # run async tasks with callback
    chord(task_list)(perform_pipong_iteration_2.s(pinger_iteration_id))
def perform_pipong_iteration_2(result, pinger_iteration_id):
    """
    Second iteration of the discovery and monitor
    With the tracert information we find the unique paths and use those
    ports to create multiple iperf sessions
    :param result: previous result
    :param pinger_iteration_id: the iteration id from the db
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    logger.info("{}: Perform_pipong_iteration_2".format(current_f_name))
    logger.info("{}: Input:{} pinger_iteration_id:{}".format(
        current_f_name, result, pinger_iteration_id))
    s = db.session()
    ponger_t = db.session.query(
        models.Ponger).filter_by(pinger_iteration_id=pinger_iteration_id)
    for pong in ponger_t:
        logger.info("{}: Iterating for pong id:{} {}".format(
            current_f_name, pong.id, pong.address))
        tracert_t = []
        for pport in pong.ponger_port:
            tracert_t.append(
                db.session.query(models.Tracert).filter_by(
                    pinger_iteration_id=pinger_iteration_id,
                    ponger_port_id=pport.id).first())
        paths_port = []
        for row in tracert_t:
            try:
                logger.debug("{}: Task tracert id:{} status:{}".format(
                    current_f_name, row.id, row.status))
                json_res = json.loads(row.result)
                for src_port in json_res['flows']:
                    local_path = []
                    for flow in json_res['flows'][src_port]:
                        flow_name = flow['name']
                        if flow_name != "":
                            ip_addr_node = flow['received']['ip']['src']
                            local_path.append(ip_addr_node)
                        if flow['is_last'] or (
                                len(local_path) > 0
                                and local_path[-1] == pong.address):
                            if len(local_path) > 0:
                                if local_path[-1] == pong.address:
                                    # delete the last element that contains
                                    # the target
                                    del local_path[-1]
                                paths_port.append({
                                    'ponger_port_id': row.ponger_port.id,
                                    'src_port': flow['sent']['udp']['sport'],
                                    'dst_port': flow['sent']['udp']['dport'],
                                    'path': local_path,
                                })
            except Exception as e:
                logger.error("{}: Error loading data from json result. "
                             "Tracert id: {} result:{} error:{}".format(
                                 current_f_name, row.id, row.result, str(e)))
                continue
        unique_paths = [
            list(x) for x in set(tuple(x['path']) for x in paths_port)
        ]
        unique_path_port = []
        for path in unique_paths:
            for data in paths_port:
                data_path = data['path']
                if path == data_path:
                    unique_path_port.append(data)
                    break
        logger.info("{}: Unique_path_port for ponger: {} {}".format(
            current_f_name, pong.address, unique_path_port))
        for path_port in unique_path_port:
            logger.debug(
                "{}: Creating iperf pinger_iteration_id:{} ponger_port_id:{} ".
                format(current_f_name, pinger_iteration_id,
                       path_port['ponger_port_id']))
            iperf_n_t = models.Iperf(
                pinger_iteration_id=pinger_iteration_id,
                status='PENDING',
                ponger_port_id=path_port['ponger_port_id'],
                src_port=path_port['src_port'])
            s.add(iperf_n_t)
            s.flush()
    s.commit()
    task_list = []
    ponger_t = db.session.query(
        models.Ponger).filter_by(pinger_iteration_id=pinger_iteration_id)
    for pong in ponger_t:
        logger.debug("{}: Task creating iperf tasks ponger_id:{}".format(
            current_f_name, pong.id))
        task_list.append(do_iperf3_client.s(pong.id))
    iter_t = db.session.query(
        models.PingerIteration).filter_by(id=pinger_iteration_id).first()
    if iter_t:
        iter_t.status = "RUNNING_IPERF"
        s.commit()
    chord(task_list)(perform_pipong_iteration_3.s(pinger_iteration_id))
def perform_pipong_iteration_3(result, pinger_iteration_id):
    """
    Third iteration of the discovery and monitor
    Get the results, compile them into a JSON string and send them to the
    master node
    :param result: previous result
    :param pinger_iteration_id: the iteration id from the db
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    logger.info("{}: Perform_pipong_iteration_3".format(current_f_name))
    logger.info("{}: Input:{} pinger_iteration_id:{}".format(
        current_f_name, result, pinger_iteration_id))
    iter_t = db.session.query(
        models.PingerIteration).filter_by(id=pinger_iteration_id).first()
    if iter_t is None:
        logger.error("{}: Iteration not found with ID: {}".format(
            current_f_name, pinger_iteration_id))
        return
    master_remote_id = iter_t.remote_id
    s = db.session()
    iter_t.status = "RUNNING_FINISHING"
    s.commit()
    iteration_result = []
    iperf_t = db.session.query(models.Iperf).filter_by(
        pinger_iteration_id=pinger_iteration_id, status='SUCCESS')
    for iperf in iperf_t:
        tracert_t = db.session.query(models.Tracert).filter_by(
            pinger_iteration_id=pinger_iteration_id,
            ponger_port_id=iperf.ponger_port_id,
            status='SUCCESS').first()
        iperf_res = json.loads(iperf.result)
        # only dereference tracert_t after confirming the query found a row
        if tracert_t:
            logger.info(
                "{}: tracert_t.ponger_port_id:{} iperf.ponger_port_id:{}".
                format(current_f_name, tracert_t.ponger_port_id,
                       iperf.ponger_port_id))
            result_dict = {
                "ponger_address": str(iperf.ponger_port.ponger.address),
                "src_port": int(iperf.src_port),
                "dst_port": int(iperf.ponger_port.dst_port)
            }
            try:
                tracert_res = json.loads(tracert_t.result)
                tracert_path = []
                src_ip = tracert_res['flows'][str(
                    iperf.src_port)][0]['sent']['ip']['src']
                for flow in tracert_res['flows'][str(iperf.src_port)]:
                    flow_name = flow['name']
                    if flow_name != "":
                        ip_addr_node = flow['received']['ip']['src']
                        if ip_addr_node != src_ip and ip_addr_node != str(
                                iperf.ponger_port.ponger.address):
                            tracert_path.append(ip_addr_node)
                result_dict["path"] = tracert_path
                result_dict["seconds"] = iperf_res['end']['sum']['seconds']
                result_dict["bytes"] = iperf_res['end']['sum']['bytes']
                result_dict["bits_per_second"] = iperf_res['end']['sum'][
                    'bits_per_second']
                result_dict["lost_percent"] = iperf_res['end']['sum'][
                    'lost_percent']
                iteration_result.append(result_dict)
            except Exception as e:
                logger.error(
                    "{}: Error obtaining data from iperf iteration:{} "
                    "lost_percent for this host:{} result_dict:{}".format(
                        current_f_name, str(e),
                        str(iperf.ponger_port.ponger.address),
                        str(result_dict)))
                # result_dict["path"] = [str(iperf.ponger_port.ponger.address)]
                # result_dict["lost_percent"] = 999
                pass
    master_host = app.config['MASTER_SERVER']
    master_port = app.config['MASTER_PORT']
    http_user = app.config['HTTP_AUTH_USER']
    http_pass = app.config['HTTP_AUTH_PASS']
    try:
        post_url = ("http://{}:{}/api/v1.0/master/"
                    "register_pinger_result".format(master_host, master_port))
        iter_t.status = "FINISHED"
        s.commit()
        try:
            post_data = {
                "master_remote_id": master_remote_id,
                "local_port": app.config['API_PORT'],
                "result": iteration_result
            }
            req = requests.post(post_url,
                                auth=requestHTTPAuth(http_user, http_pass),
                                json=post_data,
                                timeout=10)
            logger.info("{}: Sent pinger result response:{} data:{}".format(
                current_f_name, req.text, post_data))
            return True
        except Exception as e:
            logger.error("{}: Error registering pinger in master: {}".format(
                current_f_name, str(e)))
            return None
    except SoftTimeLimitExceeded:
        logger.error("{}: Error SoftTimeLimitExceeded".format(current_f_name))
        return None
def do_iperf3_client(ponger_id):
    """
    Perform an iperf3 client connection to a remote host
    The data for the connection is obtained from the db
    :param ponger_id: the ID of the ponger
    :return:
    """
    current_f_name = inspect.currentframe().f_code.co_name
    logger.info("{}: Do_iperf3_client ponger_id:{}".format(
        current_f_name, ponger_id))
    iperf_query = db.session.query(
        models.Iperf, models.PongerPort, models.Ponger).filter(
            models.Iperf.ponger_port_id == models.PongerPort.id).filter(
                models.PongerPort.ponger_id == models.Ponger.id).filter(
                    models.Ponger.id == ponger_id).all()
    # .all() returns a list, so test for emptiness rather than None
    if not iperf_query:
        logger.error("{}: No Iperf rows found with ponger_id: {}".format(
            current_f_name, ponger_id))
        return None
    try:
        for join_result in iperf_query:
            iperf_t = join_result.Iperf
            ponger_t = join_result.Ponger
            logger.debug(
                "{}: Performing iperf id:{} address:{} srcport:{} dstport:{}".
                format(current_f_name, iperf_t.id,
                       str(iperf_t.ponger_port.ponger.address),
                       str(iperf_t.src_port),
                       str(iperf_t.ponger_port.dst_port)))
            max_tries = 20
            client_tries = 0
            success = False
            cmd_result_str = ""
            while client_tries < max_tries and success is False:
                iperf_t.status = "STARTED"
                db.session.commit()
                api_port = ""
                if ponger_t.api_port != "":
                    api_port = ":" + str(ponger_t.api_port)
                post_url = "{}{}{}/api/v1.0/iperf/server".format(
                    ponger_t.api_protocol, ponger_t.address, api_port)
                logger.debug(
                    "{}: Asking for new iperf server post_url: {}".format(
                        current_f_name, post_url))
                try:
                    req_res = requests.post(post_url,
                                            auth=requestHTTPAuth(
                                                app.config['HTTP_AUTH_USER'],
                                                app.config['HTTP_AUTH_PASS']),
                                            timeout=10)
                except Exception as e:
                    logger.error("{}: Error requesting servers: {}".format(
                        current_f_name, str(e)))
                    client_tries += 1
                    time.sleep(5)
                    continue
                if req_res.status_code != 200:
                    logger.error(
                        "{}: Error creating servers: {} returned status: {}".
                        format(current_f_name, post_url, req_res.status_code))
                    client_tries += 1
                    time.sleep(5)
                    continue
                json_data = req_res.json()
                if 'port' not in json_data or 'result' not in json_data or \
                        json_data['result'] != 'success':
                    logger.error("{}: Json data invalid: {}".format(
                        current_f_name, json_data))
                    client_tries += 1
                    time.sleep(5)
                    continue
                logger.debug("{}: Host:{}{}{} Json data: {}".format(
                    current_f_name, ponger_t.api_protocol, ponger_t.address,
                    api_port, json_data))
                cmd = [
                    app.config['IPERF3_CLIENT_SCRIPT_LOCATION'],
                    str(iperf_t.ponger_port.ponger.address),
                    str(iperf_t.ponger_port.dst_port),
                    str(iperf_t.src_port),
                ]
                try:
                    cmd_result_str = subprocess.check_output(cmd).decode(
                        "utf-8")
                    if "lost_percent" in cmd_result_str:
                        success = True
                        iperf_res = json.loads(cmd_result_str)
                        result_dict = {
                            "seconds": iperf_res['end']['sum']['seconds'],
                            "bytes": iperf_res['end']['sum']['bytes'],
                            "bits_per_second":
                            iperf_res['end']['sum']['bits_per_second'],
                            "lost_percent":
                            iperf_res['end']['sum']['lost_percent'],
                            "cpu_utilization_percent":
                            iperf_res['end']['cpu_utilization_percent']
                        }
                        logger.debug(
                            "{}: Iperf client results summarized: "
                            "host:{} src_port:{} dst_port:{} res:{}".format(
                                current_f_name,
                                str(iperf_t.ponger_port.ponger.address),
                                str(iperf_t.src_port),
                                str(iperf_t.ponger_port.dst_port),
                                result_dict))
                    else:
                        logger.debug(
                            "{}: Iperf client error: no lost_percent found on "
                            "cmd_result_str: {}/{} host:{} src_port:{} "
                            "dst_port:{} {}".format(
                                current_f_name, client_tries, max_tries,
                                str(iperf_t.ponger_port.ponger.address),
                                str(iperf_t.src_port),
                                str(iperf_t.ponger_port.dst_port),
                                cmd_result_str))
                except subprocess.CalledProcessError as e:
                    output = e.output.decode()
                    logger.debug("{}: Iperf client error: {}".format(
                        current_f_name, output))
                client_tries += 1
                time.sleep(5)
            if not success:
                iperf_t.status = "FAILURE"
                db.session.commit()
                logger.error("{}: Iperf client cannot communicate with "
                             "iperf server at: {}".format(
                                 current_f_name,
                                 iperf_t.ponger_port.ponger.address))
            else:
                iperf_t.status = "SUCCESS"
                iperf_t.result = cmd_result_str
                db.session.commit()
    except SoftTimeLimitExceeded:
        # todo: add failure to all iperf that exceeded time limit
        return None