def finished(self):
    CONF.train_conf["l_data_path"] = self.l_data_path_te.text()
    CONF.train_conf["l_save_path"] = self.l_save_path_te.text()
    if self.model_sel_h.isChecked():
        CONF.train_conf["model_sel"] = 1
    elif self.model_sel_m.isChecked():
        CONF.train_conf["model_sel"] = 2
    else:
        CONF.train_conf["model_sel"] = 3
    if self.dev_sel_c.isChecked():
        CONF.train_conf["gpu"] = False
    else:
        CONF.train_conf["gpu"] = True
        CONF.train_conf["gpu_dev"] = [int(dev) for dev in self.dev_value.text().split(',')]
    CONF.train_conf["remote"] = self.remote_sel.isChecked()
    CONF.train_conf["r_ip"] = self.ip_value.text()
    CONF.train_conf["r_port"] = int(self.port_value.text())
    CONF.train_conf["r_data_path"] = self.r_data_path.text()
    CONF.train_conf["r_pro_path"] = self.r_pro_path.text()
    CONF.train_conf["senior"] = self.senior_sel.isChecked()
    CONF.train_conf["s_lr"] = float(self.lr_value.text())
    CONF.train_conf["s_iter"] = int(self.iter_value.text())
    CONF.train_conf["s_batch"] = int(self.batch_size.text())
    CONF.train_conf["s_refresh"] = int(self.refresh_time.currentText())
    CONF.save()
    self.accept()

def __init__(self, update_handler=_update_handler):
    host = CONF.get('DEFAULT', 'redis_host')
    port = CONF.getint('DEFAULT', 'redis_port')
    self.cli = redis.StrictRedis(host=host, port=port, db=0)
    self.pubsub = self.cli.pubsub()
    self.update_handler = update_handler

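# Illustrative sketch (not in the original source): how the pubsub handle set
# up above is typically consumed with redis-py. The channel name 'updates'
# and the enclosing-object variable 'listener' are assumptions.
#
#     listener.pubsub.subscribe('updates')
#     for msg in listener.pubsub.listen():
#         # redis-py yields dicts; only 'message' entries carry payloads
#         if msg['type'] == 'message':
#             listener.update_handler(msg['data'])
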
def POST(self): originParams = web.input() options = (("dbname", "string", "1-50"), ) if not os.path.exists("log"): os.mkdir("log") if not os.path.exists(os.path.join("static", "attachment")): os.mkdir(os.path.join("static", "attachment")) if not os.path.exists(os.path.join("static", "tmp")): os.mkdir(os.path.join("static", "tmp")) if not os.path.exists("data"): os.mkdir("data") if not os.path.exists(os.path.join("data", "database")): os.mkdir(os.path.join("data", "database")) try: params = formatParam(originParams, options) except ParamError as error: raise web.internalerror("Parameter error, {0}.".format(error)) try: CONF.db.name = str(params.dbname) except WIPError as error: raise web.internalerror("Configure file parse error.") try: Database.create() except DBError as error: raise web.internalerror("Databae creating error," + str(error)) CONF.isinstall = True CONF.save() return jsonSuccess()
def tperf_cmd_exec(client_floatingip, tperf_command, transmit_time, tperf_output, thread_index):
    # retry up to 10 times when the remote tperf command fails
    for i in range(10):
        tperf_output[thread_index] = SshCommand.ssh_tperf_exec(
            CONF.openstack()['key_file'],
            CONF.openstack()['tperf_vm_username'],
            client_floatingip,
            tperf_command,
            timeout=int(transmit_time) + 5)
        if tperf_output[thread_index] == 'fail':
            time.sleep(2)
        else:
            break

def setup(self):
    prefix = 'window/'
    width, height = CONF.get('main', prefix + 'size')
    self.resize(QSize(width, height))
    posx, posy = CONF.get('main', prefix + 'position')
    self.move(QPoint(posx, posy))
    # Is maximized?
    if CONF.get('main', prefix + 'is_maximized'):
        self.setWindowState(Qt.WindowMaximized)
    # Is fullscreen?
    if CONF.get('main', prefix + 'is_fullscreen'):
        self.setWindowState(Qt.WindowFullScreen)

def server_vm_check(server_ip, client_floatingip):
    for i in range(20):
        check_result = SshCommand.ssh_tperf_exec(
            CONF.openstack()['key_file'],
            CONF.openstack()['tperf_vm_username'],
            client_floatingip,
            'ping -c 1 ' + server_ip + ' | grep transmitted',
            timeout=2)
        if ' 0% packet loss' in check_result.split(','):
            return True
        else:
            LOG.error('[Server Network Check] Fail and Retry %d', i)
            time.sleep(1)
    return False

def POST(self): originParams = web.input() options = (("nmappath", "string", "1-200"), ) try: params = formatParam(originParams, options) except ParamError as error: raise web.internalerror("Parameter error, {0}.".format(error)) CONF.nmap = None if str(params.nmappath) == "nmap" else str( params.nmappath) CONF.save() return jsonSuccess()
def __init__(self, file_name):
    if not os.path.exists(DEFAULT_LOG_PATH):
        os.makedirs(DEFAULT_LOG_PATH)
    log_file_name = DEFAULT_LOG_PATH + file_name
    # Ref) formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(message)s',
                                  datefmt='%H:%M:%S')
    handler = TimedRotatingFileHandler(log_file_name,
                                       when=CONF.base()['log_rotate_time'],
                                       backupCount=CONF.base()['log_backup_count'])
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)
    self.logger.setLevel(logging.DEBUG)

def POST(self): originParams = web.input() options = ( ("nmappath","string","1-200"), ) try: params = formatParam(originParams, options) except ParamError as error: raise web.internalerror("Parameter error, {0}.".format(error)) CONF.nmap = None if str(params.nmappath)=="nmap" else str(params.nmappath) CONF.save() return jsonSuccess()
def onsway_trace(sona_topology, trace_conditions):
    retry_flag = True
    up_down_result = []
    is_success = False
    while retry_flag:
        ssh_result = SshCommand.ssh_exec(
            CONF.openstack()['account'].split(':')[0],
            trace_conditions.cur_target_ip,
            make_command(trace_conditions))
        LOG.info('target_node = ' + trace_conditions.cur_target_ip)
        LOG.info('TRACE RESULT = ' + str(ssh_result))
        node_trace = dict()
        node_trace['trace_node_name'] = trace_conditions.cur_target_hostname
        process_result, retry_flag, is_success = process_trace(
            ssh_result, sona_topology, trace_conditions)
        node_trace['flow_rules'] = process_result
        trace_conditions.cond_dict['in_port'] = ''
        trace_conditions.cond_dict['dl_src'] = ''
        trace_conditions.cond_dict['dl_dst'] = ''
        trace_conditions.cond_dict['eth_dst'] = ''
        trace_conditions.cond_dict['eth_src'] = ''
        up_down_result.append(node_trace)
    return up_down_result, is_success

def closing(self):
    prefix = 'window'
    CONF.set('main', prefix + '/is_maximized', self.isMaximized())
    CONF.set('main', prefix + '/is_fullscreen', self.isFullScreen())
    if not self.isMaximized() and not self.isFullScreen():
        size = self.size()
        CONF.set('main', prefix + '/size', (size.width(), size.height()))
        pos = self.pos()
        CONF.set('main', prefix + '/position', (pos.x(), pos.y()))

def ssh_pexpect(cls, username, node, onos_ip, command):
    cmd = 'ssh %s %s@%s' % (cls.ssh_options(), username, node)
    try:
        LOG.info('ssh_pexpect cmd = ' + cmd)
        ssh_conn = pexpect.spawn(cmd)
        rt1 = ssh_conn.expect(['#', '\$', pexpect.EOF],
                              timeout=CONF.ssh_conn()['ssh_req_timeout'])
        if rt1 == 0:
            cmd = 'ssh -p 8101 karaf@' + onos_ip + ' ' + command
            LOG.info('ssh_pexpect cmd = ' + cmd)
            ssh_conn.sendline(cmd)
            # NOTE: the expect list below was redacted in the source; it is
            # reconstructed on the assumption that index 0 matches the karaf
            # password prompt, in line with the surrounding expect() calls.
            rt2 = ssh_conn.expect(['Password:', pexpect.EOF],
                                  timeout=CONF.ssh_conn()['ssh_req_timeout'])
            if rt2 == 0:
                ssh_conn.sendline('karaf')
                ssh_conn.expect(['#', '\$', pexpect.EOF],
                                timeout=CONF.ssh_conn()['ssh_req_timeout'])
                str_output = str(ssh_conn.before)
                ret = ''
                for line in str_output.splitlines():
                    if (line.strip() == '') or ('#' in line) or ('$' in line) \
                            or ('~' in line) or ('@' in line):
                        continue
                    ret = ret + line + '\n'
                return ret
            else:
                return "fail"
        elif rt1 == 1:
            LOG.error('%s', ssh_conn.before)
        elif rt1 == 2:
            LOG.error("[ssh_pexpect] connection timeout")
        return "fail"
    except:
        LOG.exception()
        return "fail"

def connection():
    try:
        conn = sqlite3.connect(CONF.base()['db_file'])
        conn.isolation_level = None  # autocommit mode
        return conn
    except:
        LOG.exception()
        sys.exit(1)

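# Illustrative usage sketch (assumption, not in the original source): callers
# open a connection, run a query, and close it themselves. The table name
# 'NODE_INFO' is a placeholder, not a name confirmed by the source.
def fetch_all_nodes():
    conn = connection()
    cur = conn.cursor()
    cur.execute('SELECT * FROM NODE_INFO')  # hypothetical table name
    rows = cur.fetchall()
    conn.close()
    return rows
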
def delete_test_instance(server_vm, client_vm, client_floatingip):
    try:
        nova_credentials = client.Client(CONF.openstack()['version'],
                                         CONF.openstack()['username'],
                                         CONF.openstack()['api_key'],
                                         CONF.openstack()['project_id'],
                                         CONF.openstack()['auth_url'])
        nova_credentials.floating_ips.delete(client_floatingip)
        LOG.info('[Tperf Test] Client floatingip Deleted --- ')
        for vm in [server_vm, client_vm]:
            if vm:
                nova_credentials.servers.delete(vm)
        LOG.info('[Tperf Test] Server and Client instance Deleted --- ')
    except:
        LOG.exception()

def db_initiation(cls):
    LOG.info("--- Initiating SONA DB ---")
    init_sql = [
        'CREATE TABLE ' + cls.NODE_INFO_TBL +
        '(nodename text primary key, ip_addr, username)',
        'CREATE TABLE ' + cls.STATUS_TBL +
        '(nodename text primary key, ping, app, cpu, memory, disk, time)',
        'CREATE TABLE ' + cls.RESOURCE_TBL +
        '(nodename text primary key, cpu real, memory real, disk real)',
        'CREATE TABLE ' + cls.REGI_SYS_TBL + '(url text primary key, auth)',
        'CREATE TABLE ' + cls.EVENT_TBL +
        '(nodename, item, grade, desc, time, PRIMARY KEY (nodename, item))'
    ]
    for sql in init_sql:
        sql_rt = cls.sql_execute(sql)
        if "already exist" in sql_rt:
            table_name = sql_rt.split()[1]
            LOG.info("\'%s\' table already exists. Deleting all tuples of this table...",
                     table_name)
            sql = 'DELETE FROM ' + table_name
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                LOG.info("DB %s table initiation fail\n%s", table_name, sql_rt)
                sys.exit(1)
        elif sql_rt != 'SUCCESS':
            LOG.info("DB initiation fail\n%s", sql_rt)
            sys.exit(1)
    LOG.info('Insert nodes information ...')
    for node in CONF.watchdog()['check_system']:
        if str(node).lower() == 'onos':
            cls.sql_insert_nodes(CONF.onos()['list'],
                                 str(CONF.onos()['account']).split(':')[0])
        elif str(node).lower() == 'xos':
            cls.sql_insert_nodes(CONF.xos()['list'],
                                 str(CONF.xos()['account']).split(':')[0])
        elif str(node).lower() == 'swarm':
            cls.sql_insert_nodes(CONF.swarm()['list'],
                                 str(CONF.swarm()['account']).split(':')[0])
        elif str(node).lower() == 'openstack':
            cls.sql_insert_nodes(CONF.openstack()['list'],
                                 str(CONF.openstack()['account']).split(':')[0])

def POST(self): web.header('Content-Type', 'application/json') originParams = web.input() options = (("database", "string", "1-50"), ) try: params = formatParam(originParams, options) except ParamError as error: raise web.internalerror("Parameter error, {0}.".format(error)) oldDB = CONF.db.name CONF.db.name = str(params.database) dblist = os.listdir(os.path.join("data", "database")) if params.database not in dblist: try: Database.create() except DBError as error: CONF.db.name = oldDB raise web.internalerror("Databae creating error," + str(error)) CONF.save() return jsonSuccess()
def flush_pending_alarm():
    global alarm_count, alarm_subject, alarm_body
    if alarm_count <= 0:
        return  # no alarm pending
    conf = CONF.alarm()
    # copy to local variables and clear global variables
    count = alarm_count
    subject = '[%s] %s' % (conf['site_name'], alarm_subject)
    if count > 1:
        subject += ' (+ %d events)' % (count - 1)
    body = alarm_body
    alarm_count = 0
    alarm_subject = ''
    alarm_body = ''
    if conf['mail_alarm']:
        mail_from = conf['mail_user'] + '@' + conf['mail_server'].split(':')[0]
        # send to each mail_list entry separately, since gmail smtp seems not
        # to handle multiple To: addresses
        for mail_to in conf['mail_list']:
            msg = MIMEText(body)
            msg['Subject'] = subject
            msg['From'] = mail_from
            msg['To'] = mail_to
            LOG.info('Send Email Alarm: subject=%s to=%s body=%s',
                     subject, mail_to, body)
            try:
                ms = smtplib.SMTP(conf['mail_server'])
                if conf['mail_tls']:
                    ms.starttls()
                ms.login(conf['mail_user'], conf['mail_password'])
                ms.sendmail(mail_from, mail_to, msg.as_string())
                ms.quit()
            except:
                LOG.exception()
    if conf['slack_alarm']:
        ch = conf['slack_channel'].strip()
        if ch[0] != '#':
            ch = '#' + ch
        LOG.info('Send Slack Alarm: channel=%s text=%s', ch, body)
        sc = SlackClient(conf['slack_token'])
        try:
            sc.api_call("chat.postMessage", channel=ch, text=body)
        except:
            LOG.exception()

def db_initiation(cls, db_log): try: db_path = CONF.base()['db_file'] if os.path.isfile(db_path): os.remove(db_path) db_log.write_log("--- Initiating SONA DB ---") init_sql = [ 'CREATE TABLE ' + cls.NODE_INFO_TBL + '(nodename text primary key, ip_addr, username, type, sub_type)', 'CREATE TABLE ' + cls.STATUS_TBL + '(nodename text primary key, ' + cls.item_list + ', time)', 'CREATE TABLE ' + cls.REGI_SYS_TBL + '(url text primary key, auth)', 'CREATE TABLE ' + cls.EVENT_TBL + '(nodename, item, grade, pre_grade, reason, time, PRIMARY KEY (nodename, item))', 'CREATE TABLE ' + cls.ONOS_TBL + '(nodename text primary key, cluster, device, link, app)' ] for sql in init_sql: sql_rt = cls.sql_execute(sql) if sql_rt != 'SUCCESS': db_log.write_log("DB initiation fail\n%s", sql_rt) sys.exit(1) db_log.write_log('Insert nodes information ...') for node_type in CONF.watchdog()['check_system']: cls.sql_insert_nodes( db_log, (CONF_MAP[node_type.upper()]())['list'], str((CONF_MAP[node_type.upper()]() )['account']).split(':')[0], node_type) except: LOG.exception()
def POST(self): originParams = web.input() options = ( ("dbname","string","1-50"), ) if not os.path.exists("log"): os.mkdir("log") if not os.path.exists(os.path.join("static","attachment")): os.mkdir(os.path.join("static","attachment")) if not os.path.exists(os.path.join("static","tmp")): os.mkdir(os.path.join("static","tmp")) if not os.path.exists("data"): os.mkdir("data") if not os.path.exists(os.path.join("data","database")): os.mkdir(os.path.join("data","database")) try: params = formatParam(originParams, options) except ParamError as error: raise web.internalerror("Parameter error, {0}.".format(error)) try: CONF.db.name = str(params.dbname) except WIPError as error: raise web.internalerror("Configure file parse error.") try: Database.create() except DBError as error: raise web.internalerror("Databae creating error,"+str(error)) CONF.isinstall = True CONF.save() return jsonSuccess()
def POST(self): web.header('Content-Type', 'application/json') originParams = web.input() options = ( ("database","string","1-50"), ) try: params = formatParam(originParams, options) except ParamError as error: raise web.internalerror("Parameter error, {0}.".format(error)) oldDB = CONF.db.name CONF.db.name = str(params.database) dblist = os.listdir(os.path.join("data","database")) if params.database not in dblist: try: Database.create() except DBError as error: CONF.db.name = oldDB raise web.internalerror("Databae creating error,"+str(error)) CONF.save() return jsonSuccess()
def sql_insert_nodes(cls, node_list, username):
    for node in node_list:
        name, ip = str(node).split(':')
        LOG.info('Insert node [%s %s %s]', name, ip, username)
        sql = 'INSERT INTO ' + cls.NODE_INFO_TBL + \
              ' VALUES (\'' + name + '\',\'' + ip + '\',\'' + username + '\')'
        LOG.info('%s', sql)
        sql_rt = cls.sql_execute(sql)
        if sql_rt != 'SUCCESS':
            LOG.info(" [NODE TABLE] Node data insert fail \n%s", sql_rt)
            sys.exit(1)
        # set status tbl
        sql = 'INSERT INTO ' + cls.STATUS_TBL + \
              ' VALUES (\'' + name + '\', \'none\', \'none\', \'none\', \'none\', \'none\', \'none\')'
        LOG.info('%s', sql)
        sql_rt = cls.sql_execute(sql)
        if sql_rt != 'SUCCESS':
            LOG.info(" [STATUS TABLE] Node data insert fail \n%s", sql_rt)
            sys.exit(1)
        # set resource tbl
        sql = 'INSERT INTO ' + cls.RESOURCE_TBL + ' VALUES (\'' + name + '\', -1, -1, -1)'
        LOG.info('%s', sql)
        sql_rt = cls.sql_execute(sql)
        if sql_rt != 'SUCCESS':
            LOG.info(" [RESOURCE TABLE] Node data insert fail \n%s", sql_rt)
            sys.exit(1)
        # add Alarm Items
        for item in CONF.alarm()['item_list']:
            LOG.info('Insert item [%s %s]', name, item)
            sql = 'INSERT INTO ' + cls.EVENT_TBL + \
                  ' VALUES (\'' + name + '\',\'' + item + '\',\'none\', \'none\', \'none\')'
            LOG.info('%s', sql)
            sql_rt = cls.sql_execute(sql)
            if sql_rt != 'SUCCESS':
                LOG.info(" [ITEM TABLE] Item data insert fail \n%s", sql_rt)
                sys.exit(1)

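# For illustration (values made up, not from the source): given a node entry
# 'compute-01:10.10.0.3' and username 'sona', the first statement built above
# expands to
#
#     INSERT INTO <NODE_INFO_TBL> VALUES ('compute-01','10.10.0.3','sona')
#
# where <NODE_INFO_TBL> stands for whatever cls.NODE_INFO_TBL resolves to.
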
class SshCommand:
    ssh_options = '-o StrictHostKeyChecking=no ' \
                  '-o ConnectTimeout=' + str(CONF.ssh_conn()['ssh_req_timeout'])

    @classmethod
    def ssh_exec(cls, username, node, command):
        cmd = 'ssh %s %s@%s %s' % (cls.ssh_options, username, node, command)
        try:
            result = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
            output, error = result.communicate()
            if result.returncode != 0:
                LOG.error("\'%s\' SSH_Cmd Fail, cause => %s", node, error)
                return
            else:
                # LOG.info("ssh command execute successful \n%s", output)
                return output
        except:
            LOG.exception()

    @classmethod
    def onos_ssh_exec(cls, node, command):
        local_ssh_options = cls.ssh_options + " -p 8101"
        cmd = 'ssh %s %s %s' % (local_ssh_options, node, command)
        try:
            result = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
            output, error = result.communicate()
            if result.returncode != 0:
                LOG.error("ONOS(%s) SSH_Cmd Fail, cause => %s", node, error)
                return
            else:
                # LOG.info("ONOS ssh command execute successful \n%s", output)
                return output
        except:
            LOG.exception()

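# Illustrative usage sketch (assumption, not in the original source): the
# account name, node addresses, and commands below are placeholders.
def sample_ssh_calls():
    # plain SSH to a managed node; returns None on failure
    uptime = SshCommand.ssh_exec('sona', '10.10.0.5', 'uptime')
    if uptime is not None:
        print(uptime)
    # ONOS CLI listens on port 8101, which onos_ssh_exec appends itself
    apps = SshCommand.onos_ssh_exec('10.10.0.10', 'apps -s -a')
    if apps is not None:
        print(apps)
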
def get_onos_ip(self):
    # entries have the form 'name:ip'; take the last node and return its ip
    return str(list(CONF.onos()['list']).pop()).split(':')[-1]

def get_gateway_ip(self):
    # entries have the form 'name:ip'; take the last gateway and return its ip
    return str(list(CONF.openstack()['gateway_list']).pop()).split(':')[-1]

def ssh_options(cls):
    return '-o StrictHostKeyChecking=no ' \
           '-o ConnectTimeout=' + str(CONF.ssh_conn()['ssh_req_timeout'])

def run_test(sona_topology, test_json, timeout_arr, index, total_timeout):
    try:
        node = test_json['node']
        ins_id = test_json['instance_id']
        user = test_json['vm_user_id']
        pw = test_json['vm_user_password']
        command = test_json['traffic_test_command']
        ip = sona_topology.get_openstack_info(node, 'ip')
        if ip == '':
            str_output = node + ' node does not exist'
        else:
            node_id = CONF.openstack()['account'].split(':')[0]
            ssh_options = '-o StrictHostKeyChecking=no ' \
                          '-o ConnectTimeout=' + str(CONF.ssh_conn()['ssh_req_timeout'])
            cmd = 'ssh %s %s@%s' % (ssh_options, node_id, ip)
            try:
                LOG.info('ssh_pexpect cmd = ' + cmd)
                ssh_conn = pexpect.spawn(cmd)
                rt1 = ssh_conn.expect(PROMPT,
                                      timeout=CONF.ssh_conn()['ssh_req_timeout'])
                if rt1 == 0:
                    cmd = 'virsh console ' + ins_id
                    LOG.info('ssh_pexpect cmd = ' + cmd)
                    ssh_conn.sendline(cmd)
                    rt2 = ssh_conn.expect(
                        [pexpect.TIMEOUT, 'Escape character is', 'error:', pexpect.EOF],
                        timeout=CONF.ssh_conn()['ssh_req_timeout'])
                    if rt2 == 0:
                        str_output = cmd + ' timeout'
                    elif rt2 == 1:
                        ssh_conn.sendline('\n')
                        try:
                            # NOTE: the expect list below was redacted in the
                            # source; it is reconstructed on the assumption that
                            # index 2 corresponds to the 'Permission denied' case.
                            rt3 = ssh_conn.expect(
                                ['login: ', pexpect.TIMEOUT, pexpect.EOF],
                                timeout=CONF.ssh_conn()['ssh_req_timeout'])
                            LOG.info('rt3 = ' + str(rt3))
                            if rt3 == 2:
                                str_output = 'Permission denied'
                            else:
                                ssh_conn.sendline(user)
                                rt_pw = ssh_conn.expect(
                                    [pexpect.TIMEOUT, '[P|p]assword:', pexpect.EOF],
                                    timeout=CONF.ssh_conn()['ssh_req_timeout'])
                                if rt_pw == 1:
                                    ssh_conn.sendline(pw)
                                    rt4 = ssh_conn.expect(
                                        [pexpect.TIMEOUT, 'Login incorrect', '~# ',
                                         'onos> ', '\$ ', '\# ', ':~$ '],
                                        timeout=CONF.ssh_conn()['ssh_req_timeout'])
                                    LOG.info('rt4 = ' + str(rt4))
                                    if rt4 == 0 or rt4 == 1:
                                        str_output = 'auth fail'
                                    else:
                                        ssh_conn.sendline(command)
                                        rt5 = ssh_conn.expect(
                                            [pexpect.TIMEOUT, '~# ', 'onos> ',
                                             '\$ ', '\# ', ':~$ '],
                                            timeout=total_timeout)
                                        if rt5 == 0:
                                            str_output = 'timeout'
                                            ssh_conn.sendline('exit')
                                            ssh_conn.close()
                                        else:
                                            str_output = ssh_conn.before
                                            ssh_conn.sendline('exit')
                                            ssh_conn.close()
                                else:
                                    str_output = 'auth fail'
                        except:
                            str_output = 'exception'
                            ssh_conn.sendline('exit')
                            ssh_conn.close()
                    elif rt2 == 2:
                        result = {'command_result': 'virsh console error'}
                        timeout_arr[index] = result
                        return
                else:
                    str_output = 'connection fail'
            except:
                LOG.exception()
                str_output = 'exception 1'
    except:
        LOG.exception()
        str_output = 'exception 2'
    result = {
        'command_result': str_output.replace('\r\n', '\n'),
        'node': node,
        'instance_id': ins_id
    }
    timeout_arr[index] = result

def create_instance(server_options, client_options):
    server_instance = client_instance = None
    image_name = CONF.openstack()['image']
    flavor_name = CONF.openstack()['flavor']
    securitygroups = CONF.openstack()['security_groups']
    keypair = CONF.openstack()['keypair_name']
    # TODO add exception for connection
    nova_credentials = client.Client(CONF.openstack()['version'],
                                     CONF.openstack()['username'],
                                     CONF.openstack()['api_key'],
                                     CONF.openstack()['project_id'],
                                     CONF.openstack()['auth_url'])
    image = nova_credentials.images.find(name=image_name)
    flavor = nova_credentials.flavors.find(name=flavor_name)
    hypervisors = nova_credentials.hypervisors.list()
    onos_ip = CONF.onos()['list'].pop().split(':')[-1]
    dpid2ip = {c[2]: c[3]
               for c in [(" ".join(l.split()).split(" "))
                         for l in SshCommand.onos_ssh_exec(
                             onos_ip, 'openstack-nodes | grep COMPUTE').splitlines()]}

    def get_zone(dpid):
        if dpid:
            for h in hypervisors:
                if h.host_ip == dpid2ip[dpid]:
                    return 'nova:' + h.service['host']
        else:
            return "nova"

    # TODO: when no network_id, choose a network other than the external network
    # network_id = server_options['network_id'] if server_options['network_id'] \
    #     else random.choice(nova.networks.list()).id
    # network_list = nova_credentials.networks.list()
    # target_network.append(str(network_list[-1]).split(':')[1][:-1].strip())

    # Create server VM info
    vm_name = 'tperf_server_vm_' + str(random.randrange(10000, 20000))
    LOG.info('[server] - vmname = %s', vm_name)
    LOG.info(' | image = %s', image)
    LOG.info(' | flavor = %s', flavor)
    LOG.info(' | availability_zone = %s', get_zone(server_options['vm_location']))
    LOG.info(' | nics = %s', server_options['network_id'])
    LOG.info(' | security_groups = %s', securitygroups)
    LOG.info(' | key_pair = %s', keypair)
    nova_credentials.servers.create(
        name=vm_name,
        image=image,
        flavor=flavor,
        availability_zone=get_zone(server_options['vm_location']),
        nics=[{'net-id': server_options['network_id']}],
        security_groups=securitygroups,
        key_name=keypair)
    for i in range(20):
        time.sleep(1)
        server_instance = nova_credentials.servers.list(
            search_opts={'name': vm_name})[0]
        # server_instance = nova_credentials.servers.list(
        #     search_opts={'name': 'tperf_server_vm_17693'})[0]
        if server_instance.__dict__['addresses']:
            LOG.info("[Server VM created and ACTIVE] - %s", server_instance)
            break

    # Create client VM info
    vm_name = 'tperf_client_vm_' + str(random.randrange(10000, 20000))
    LOG.info('[client] - vmname = %s', vm_name)
    LOG.info(' | image = %s', image)
    LOG.info(' | flavor = %s', flavor)
    LOG.info(' | availability_zone = %s', get_zone(client_options['vm_location']))
    LOG.info(' | nics = %s', client_options['network_id'])
    LOG.info(' | security_groups = %s', securitygroups)
    LOG.info(' | key_pair = %s', keypair)
    nova_credentials.servers.create(
        name=vm_name,
        image=image,
        flavor=flavor,
        availability_zone=get_zone(client_options['vm_location']),
        nics=[{'net-id': client_options['network_id']}],
        security_groups=securitygroups,
        key_name=keypair)
    client_floatingip = get_floatingip(nova_credentials)
    # client_floatingip = '172.27.0.179'
    for i in range(20):
        time.sleep(1)
        client_instance = nova_credentials.servers.list(
            search_opts={'name': vm_name})[0]
        # client_instance = nova_credentials.servers.list(
        #     search_opts={'name': 'tperf_client_vm_15442'})[0]
        if client_instance.__dict__['addresses']:
            LOG.info("[Client VM created and ACTIVE] - %s", client_instance)
            nova_credentials.servers.add_floating_ip(client_instance,
                                                     client_floatingip.ip)
            LOG.info("[Floating_IP Assignment] to Client ---")
            break
    return server_instance, client_instance, client_floatingip

def finished(self):
    CONF.anno_conf["img_path"] = self.img_path_te.text()
    CONF.anno_conf["save_path"] = self.save_path_te.text()
    CONF.anno_conf["labels"] = self.label_list_te.toPlainText()
    CONF.save()
    self.accept()

@classmethod
def info(cls, message, *args):
    # NOTE: the decorator and signature of this method were cut off in the
    # source; they are reconstructed by symmetry with error() below.
    message = '[m:' + traceback.extract_stack(None, 2)[0][2] + '] ' + message
    cls.logger.info(message % args)

@classmethod
def error(cls, message, *args):
    message = '[m:' + traceback.extract_stack(None, 2)[0][2] + '] ' + message
    cls.logger.error(message % args)

@classmethod
def exception(cls):
    exc_type, exc_value, exc_traceback = sys.exc_info()
    lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
    method = '[m:' + traceback.extract_stack(None, 2)[0][2] + ']'
    cls.error("Exception Error %s\n%s", method,
              ''.join(' | ' + line for line in lines))

LOG = _Log(CONF.base()['log_file_name'])

class USER_LOG():
    LOG = None

    def set_log(self, file_name, rotate, backup):
        self.LOG = logging.getLogger(file_name)
        if not os.path.exists(DEFAULT_LOG_PATH):
            os.makedirs(DEFAULT_LOG_PATH)
        log_formatter = logging.Formatter('[%(asctime)s] %(message)s')
        file_name = DEFAULT_LOG_PATH + file_name
        # NOTE: this call was truncated in the source; the when/backupCount
        # arguments are an assumption based on the set_log() parameters.
        file_handler = logging.handlers.TimedRotatingFileHandler(
            file_name, when=rotate, backupCount=backup)

def main():
    parser = ArgumentParser(description='', add_help=False)
    subparsers = parser.add_subparsers(dest='subparser_name')

    # Fuzzing
    parser_fuzz = subparsers.add_parser('fuzz')
    parser_fuzz.add_argument("-s", "--instruction-set", type=str, required=True)
    parser_fuzz.add_argument("-c", "--config", type=str, required=False)
    parser_fuzz.add_argument(
        "-n", "--num-test-cases", type=int, default=1,
        help="Number of test cases.")
    parser_fuzz.add_argument(
        "-i", "--num-inputs", type=int, default=100,
        help="Number of inputs per test case.")
    parser_fuzz.add_argument('-w', '--working-directory', type=str, default='')
    parser_fuzz.add_argument('-t', '--testcase', type=str, default=None,
                             help="Use an existing test case")
    parser_fuzz.add_argument(
        '--timeout', type=int, default=0,
        help="Run fuzzing with a time limit [seconds]. No timeout when set to zero.")
    parser_fuzz.add_argument(
        '--nonstop', action='store_true',
        help="Don't stop after detecting an unexpected result")

    # Minimization
    parser_mini = subparsers.add_parser('minimize')
    parser_mini.add_argument('--infile', '-i', type=str, required=True)
    parser_mini.add_argument('--outfile', '-o', type=str, required=True)
    parser_mini.add_argument("-c", "--config", type=str, required=False)
    parser_mini.add_argument(
        "-n", "--num-inputs", type=int, default=100,
        help="Number of inputs per test case.")
    parser_mini.add_argument(
        "-f", "--add-fences", action='store_true', default=False,
        help="Add as many LFENCEs as possible, while preserving the violation.")
    parser_mini.add_argument("-s", "--instruction-set", type=str, required=True)

    args = parser.parse_args()

    # Update configuration
    if args.config:
        CONF.config_path = args.config
        with open(args.config, "r") as f:
            config_update: Dict = yaml.safe_load(f)
        for var, value in config_update.items():
            CONF.set(var, value)
    CONF.sanity_check()
    LOGGER.set_logging_modes()

    # Fuzzing
    if args.subparser_name == 'fuzz':
        # Make sure we're ready for fuzzing
        if args.working_directory and not os.path.isdir(args.working_directory):
            raise SystemExit("The working directory does not exist")

        # Normal fuzzing mode
        fuzzer = Fuzzer(args.instruction_set, args.working_directory, args.testcase)
        fuzzer.start(args.num_test_cases, args.num_inputs, args.timeout, args.nonstop)
        return

    # Test case minimisation
    if args.subparser_name == "minimize":
        CONF.coverage_type = 'none'
        postprocessor = Postprocessor(args.instruction_set)
        postprocessor.minimize(args.infile, args.outfile, args.num_inputs,
                               args.add_fences)
        return

    raise Exception("Unreachable")

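# Example invocations of the CLI above (the script and file names are
# hypothetical, shown only to illustrate the argument structure):
#
#     python cli.py fuzz -s instruction_set.json -n 10 -i 50 -w ./results
#     python cli.py minimize -i violation.asm -o minimized.asm \
#         -s instruction_set.json -n 100 --add-fences
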
import time, random

from kafka import KafkaProducer

from config import CONF

host = CONF.get('DEFAULT', 'kafka_host')
port = CONF.get('DEFAULT', 'kafka_port')
broker = host + ':' + port
topic = CONF.get('DEFAULT', 'events_topic')

producer = KafkaProducer(bootstrap_servers=broker)

keys = ['foo', 'bar', 'buz']
i = 0
while True:
    msg = random.choice(keys)
    producer.send(topic, msg).get(timeout=60)
    i += 1
    if i % 1000 == 0:
        print '>>>', i, 'messages sent'

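# A matching consumer-side sketch (assumption, not in the original source),
# reading the same topic with kafka-python:
#
#     from kafka import KafkaConsumer
#     consumer = KafkaConsumer(topic, bootstrap_servers=broker)
#     for record in consumer:
#         print record.value
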
class MenuHandler(tornado.web.RequestHandler):
    svr = SVR()
    conf = CONF()

    def get_current_user(self):
        return self.get_secure_cookie('user')

    def write_error(self, status_code, **kwargs):
        if 'exc_info' in kwargs:
            # in debug mode, try to send a traceback
            if self.settings.get('debug'):
                for line in traceback.format_exception(*kwargs['exc_info']):
                    self.write(line + '<br />')
                self.finish()
            else:
                self.finish('Bad guy!!!!')

    def get(self):
        self._debug()
        if ('signature' not in self.request.arguments
                or 'timestamp' not in self.request.arguments
                or 'nonce' not in self.request.arguments
                or 'echostr' not in self.request.arguments):
            return

        # check signature
        signature = self.get_argument('signature')
        timestamp = self.get_argument('timestamp')
        nonce = self.get_argument('nonce')
        echostr = self.get_argument('echostr')
        check_ok = self.svr.check_signature(signature, timestamp, nonce,
                                            self.conf.token)
        if not check_ok:
            print 'check signature failed'
            return
        print 'check signature ok'
        self.write(echostr)

    def post(self):
        try:
            self._debug()
            reply = self.svr.do_request(self.request.arguments, self.request.body)
            if reply is None:
                print 'reply is none'
                return ''
            # print reply.encode('utf-8')
            self.write(reply)
        except IException as e:
            e.detail()
        # except Exception as base_e:
        #     print 'wx exception'
        #     print(base_e)
        #     traceback.print_stack()
        #     return

    def _debug(self):
        print 'request_method=%s' % self.request.method
        print 'request_url=%s' % self.request.uri
        print 'request_headers=%s' % self.request.headers
        print 'request_body=%s' % self.request.body
        print 'request_arguments=%s' % self.request.arguments

def db_initiation(cls, db_log): try: db_path = CONF.base()['db_file'] if os.path.isfile(db_path): os.remove(db_path) db_log.write_log("--- Initiating SONA DB ---") init_sql = [ 'CREATE TABLE ' + cls.NODE_INFO_TBL + '(nodename text primary key, ip_addr, username, type, sub_type)', 'CREATE TABLE ' + cls.STATUS_TBL + '(nodename text primary key, ' + cls.item_list + ', time)', 'CREATE TABLE ' + cls.RESOURCE_TBL + '(nodename text primary key, cpu real, memory real, disk real)', 'CREATE TABLE ' + cls.REGI_SYS_TBL + '(url text primary key, auth)', 'CREATE TABLE ' + cls.ONOS_TBL + '(nodename text primary key, applist, weblist, nodelist, port, openflow, cluster, traffic_stat)', 'CREATE TABLE ' + cls.SWARM_TBL + '(nodename text primary key, node, service, ps)', 'CREATE TABLE ' + cls.XOS_TBL + '(nodename text primary key, xos_status, synchronizer)', 'CREATE TABLE ' + cls.OPENSTACK_TBL + '(nodename text primary key, sub_type, data_ip, of_id, hostname, docker, onosApp, routingTable, gw_ratio, vxlan_traffic, internal_traffic)', 'CREATE TABLE ' + cls.HA_TBL + '(ha_key text primary key, stats)', 'CREATE TABLE ' + cls.EVENT_TBL + '(nodename, item, grade, pre_grade, reason, time, PRIMARY KEY (nodename, item))' ] for sql in init_sql: sql_rt = cls.sql_execute(sql) if sql_rt != 'SUCCESS': db_log.write_log("DB initiation fail\n%s", sql_rt) sys.exit(1) db_log.write_log('Insert nodes information ...') for node_type in CONF.watchdog()['check_system']: if node_type == 'OPENSTACK': cls.sql_insert_nodes( db_log, CONF_MAP[node_type.upper()]()['compute_list'], str((CONF_MAP[node_type.upper()]() )['account']).split(':')[0], node_type, 'COMPUTE') cls.sql_insert_nodes( db_log, CONF_MAP[node_type.upper()]()['gateway_list'], str((CONF_MAP[node_type.upper()]() )['account']).split(':')[0], node_type, 'GATEWAY') else: cls.sql_insert_nodes( db_log, CONF_MAP[node_type.upper()]()['list'], str((CONF_MAP[node_type.upper()]() )['account']).split(':')[0], node_type) # set ha proxy tbl sql = 'INSERT INTO ' + cls.HA_TBL + ' VALUES (\'' + 'HA' + '\', \'none\')' sql_rt = cls.sql_execute(sql) if sql_rt != 'SUCCESS': db_log.write_log( " [HA PROXY TABLE] Node data insert fail \n%s", sql_rt) sys.exit(1) except: LOG.exception()