def get_mon_hosts():
    """Return a sorted list of monitor endpoints ('<ip>:6789').

    The list contains this host itself plus one entry per unit on every
    'mon' relation.
    """
    endpoints = ['{}:6789'.format(get_host_ip())]
    for rel_id in relation_ids('mon'):
        for member in related_units(rel_id):
            peer_ip = get_host_ip(
                relation_get('private-address', member, rel_id))
            endpoints.append('{}:6789'.format(peer_ip))
    return sorted(endpoints)
def add_node_to_topology(self, topology):
    """Add this node to *topology* and record the created CORE node.

    Physical nodes are added as rj45 nodes; when an IP address is already
    known, the CORE node id is derived from the last octet of that address
    so CORE does not assign an identical IP address to another virtual
    node.  Virtual nodes get an address allocated from the topology's
    prefixes.  The node registers itself in ``topology.interNodeObjects``
    exactly once.
    """
    if self.is_physical:
        # Renamed from ``id`` to avoid shadowing the builtin.
        node_id = None
        if self.ip_address:
            # node_id should equal the host identifier from the IP address.
            node_id = self.ip_address.split('.')[-1]
        core_node = topology.session.add_node(
            _type=self.type,
            _id=node_id,
            node_options=NodeOptions(name=self.vethInName, model="rj45"))
        if self.ip_address is None:
            self.ip_address = utils.get_host_ip()
        self.CORE_node = core_node
    else:
        # All other (virtual) nodes.
        core_node = topology.session.add_node(
            _type=self.type,
            node_options=NodeOptions(name=self.name, model=self.type_name))
        self.ip_address = topology.prefixes.ip4_address(core_node)
        self.CORE_node = core_node
    if self not in topology.interNodeObjects:
        topology.interNodeObjects.append(self)
def buildFeatureIndex(self):
    """Load the paper/patent/project feature files and return the index.

    Returns a dict with keys 'paper', 'patent', 'project', each mapped to
    the result of ``self.loadFeature`` for the corresponding file.

    Fix: the original opened six file objects and never closed them; the
    files are now opened with ``with`` so they are always released.
    """
    logger.info(u'开始导入特征文件...')
    # The data lives in a different directory depending on which machine
    # this runs on (10.1.13.49 uses the /home/tdlab layout).
    if utils.get_host_ip() == '10.1.13.49':
        base_dir = "/home/tdlab/recommender/data180526/feature"
    else:
        base_dir = "/data/Recommender/data_filter/feature"
    featureIndex = {}
    for kind in ('paper', 'patent', 'project'):
        path = "{}/{}_feature180526.txt".format(base_dir, kind)
        with open(path, 'r') as feature_file:
            featureIndex[kind] = self.loadFeature(feature_file, kind)
    logger.info(u'导入特征文件完成')
    return featureIndex
def basic_param_collector():
    """Interactively collect basic configuration parameters.

    Reads defaults from ``env.json``, lets the user override each value,
    then asks for the debug API address.  Returns the resulting dict.

    Bug fix: previously, when the user typed a *valid* replacement IP, it
    was discarded and the auto-detected ``host_addr`` was stored instead
    (the prompt explicitly invites a correction).  Now a valid input wins,
    an empty input keeps the detected address, and invalid input falls
    back to 127.0.0.1.
    """
    print(
        'This section is use to define the basic parameter.\n >>>Unexpected input will be ignore.<<<'
    )
    with open('env.json') as file_obj:
        pre_defined_config = json.load(file_obj)
    for key, value in pre_defined_config.items():
        template_value = input(
            'Please input the argument [%s], default is [%s]:' % (key, value))
        if isinstance(value, int) and template_value.isnumeric():
            pre_defined_config[key] = int(template_value)
        elif template_value:
            pre_defined_config[key] = template_value.strip()
    host_addr = utils.get_host_ip()
    debug_api_addr = input(
        'This computer has IP as [%s], input correct IP if it wrong: '
        % host_addr)
    # Dotted-quad IPv4 validation (each octet 0-255).
    re_string = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
    if not debug_api_addr:
        pre_defined_config['debug_api_addr'] = host_addr
    elif re.match(re_string, debug_api_addr):
        pre_defined_config['debug_api_addr'] = debug_api_addr
    else:
        pre_defined_config['debug_api_addr'] = '127.0.0.1'
    for k, v in pre_defined_config.items():
        print(k.ljust(25), str(v).ljust(20))
    return pre_defined_config
def post(self):
    """Handle a login POST: render the index page for the hard-coded
    account, otherwise report that login is required."""
    username = request.form['username']
    password = request.form['password']
    if username == 'quanbing' and password == '123':
        return render_template('index_old.html',
                               ip=get_host_ip(),
                               user='******')
    return "need login"
def _handle_physical_links(self):
    '''
    Set up the routing tables of the nodes that are reachable from each
    physical node, so that traffic between the host and those nodes can
    flow.
    '''
    # Loop-invariant: our own address does not change per link.
    host_ip = utils.get_host_ip()
    # physical_links maps the physical InterNode to the InterNode it is
    # wired to.
    for physicalNode, secondNode in self.physical_links.items():
        nodes_connected_to_physical = []
        if secondNode.type == NodeTypes.SWITCH:
            switch = secondNode
            for link in switch.links:  # links are of type InterLinkObj
                # Collect every node reachable from the physical node
                # through the switch, skipping the switch itself, the
                # physical node being handled, and rj45 nodes.
                # Bug fix: the original tested
                # ``first_node is not physicalNode`` for BOTH endpoints
                # (copy-paste error), so the physical node itself could be
                # added to the list via ``link.second_node``.
                for candidate in (link.first_node, link.second_node):
                    if (candidate is not switch
                            and candidate is not physicalNode
                            and candidate.type != "rj45"
                            and candidate not in nodes_connected_to_physical):
                        nodes_connected_to_physical.append(candidate)
            for node in nodes_connected_to_physical:
                # Route traffic destined for the host out of the interface
                # that faces the switch.
                interface = node.interfaces[switch]
                command = utils.get_cmd_command(
                    "sudo ip route replace {0}/32 dev eth{1}".format(
                        host_ip, interface.id))
                node.CORE_node.cmd(command)
def get_worker_ssh(self):
    """Populate ``self.list_worker_ssh`` with one entry per worker:
    ``None`` for the local host, otherwise an SSH connection."""
    my_ip = utils.get_host_ip()
    for node in self.conf_file.get_worker_ssh_data():
        is_local = (node[0] == my_ip)
        conn = None if is_local else utils.SSHConn(node[0], node[1],
                                                   node[2], node[3])
        self.list_worker_ssh.append(conn)
def __init__(self):
    """Choose the redis host based on which machine we run on, then open
    a connection pool.  (10.1.13.49 reaches redis via 10.1.18.26.)"""
    running_on_internal_host = utils.get_host_ip() == '10.1.13.49'
    self.HOST = '10.1.18.26' if running_on_internal_host else '202.107.204.66'
    self.pool = redis.ConnectionPool(host=self.HOST,
                                     port=7070,
                                     password='******',
                                     db=0)
def __init__(self):
    """Connect to the techpooldata MySQL database (host depends on which
    machine we run on) and prepare per-type table/column lookups plus a
    redis helper."""
    running_on_internal_host = utils.get_host_ip() == '10.1.13.49'
    self.HOST = '10.1.13.29' if running_on_internal_host else '202.107.204.50'
    self.conn = MySQLdb.connect(host=self.HOST,
                                user='******',
                                passwd='tdlabDatabase',
                                db='techpooldata',
                                port=3306,
                                charset='utf8')
    # Join table and id column for each supported item type.
    self.tables = {
        'paper': 'expert_paper_join',
        'patent': 'expert_patent_join',
        'project': 'expert_project_join',
    }
    self.columns = {
        'paper': 'PAPER_ID',
        'patent': 'PATENT_ID',
        'project': 'PROJECT_ID',
    }
    self.redis = RedisUtil()
def get_mon_hosts():
    """Return a sorted list of monitor endpoints ('<ip>:6789'), one per
    unit on every 'mon' relation."""
    endpoints = []
    for rel_id in utils.relation_ids('mon'):
        endpoints.extend(
            '{}:6789'.format(utils.get_host_ip(
                utils.relation_get('private-address', member, rel_id)))
            for member in utils.relation_list(rel_id))
    endpoints.sort()
    return endpoints
def get_ssh_conn(self):
    """Populate ``self.list_ssh`` with one entry per cluster node:
    ``None`` for the local host, otherwise a root SSH connection."""
    ssh_factory = SSHAuthorizeNoMGN()
    my_ip = utils.get_host_ip()
    for node in self.cluster['node']:
        if node['public_ip'] == my_ip:
            conn = None
        else:
            conn = ssh_factory.make_connect(node['public_ip'],
                                            node['port'],
                                            'root',
                                            node['root_password'])
        self.list_ssh.append(conn)
def get_mon_hosts():
    """Return a sorted list of monitor endpoints ('<ip>:6789'), one per
    unit on every 'mon' relation."""
    hosts = [
        '{}:6789'.format(utils.get_host_ip(
            utils.relation_get('private-address', unit, relid)))
        for relid in utils.relation_ids('mon')
        for unit in utils.relation_list(relid)
    ]
    hosts.sort()
    return hosts
def get_mon_hosts():
    """Return a sorted list of monitor endpoints ('<addr>:6789').

    Prefers each unit's 'ceph-public-address'; falls back to resolving
    its 'private-address'.  IPv6 addresses are formatted (bracketed) via
    ``format_ipv6_addr``.
    """
    hosts = []
    for relid in relation_ids('mon'):
        for unit in related_units(relid):
            addr = relation_get('ceph-public-address', unit, relid)
            if not addr:
                addr = get_host_ip(
                    relation_get('private-address', unit, relid))
            if addr:
                hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr))
    hosts.sort()
    return hosts
def test_add_node_to_topology(self):
    """Adding a node must register it in the CORE session and give it the
    expected IP address (host IP for physical nodes, a prefix-allocated
    address otherwise)."""
    topology = Topology()
    node_under_test = InterNode("TestAddToTopology", "host")
    node_under_test.add_node_to_topology(topology)
    self.assertIsNotNone(node_under_test.CORE_node)
    self.assertIn(node_under_test.CORE_node.id, topology.session.nodes)
    if node_under_test.is_physical:
        expected_ip = utils.get_host_ip()
    else:
        expected_ip = topology.prefixes.ip4_address(node_under_test.CORE_node)
    self.assertEqual(node_under_test.ip_address, expected_ip)
    topology.shutdown(hard=True)
def get_ssh_conn(self):
    """Open SSH connections to every vplx host and record them both in
    ``self.list_vplx_ssh`` and the global dict; ``None`` marks the local
    host.

    NOTE: as in the original, a ``username`` taken from one config entry
    carries over to later entries that do not specify their own.
    """
    my_ip = utils.get_host_ip()
    username = "******"
    for cfg in self.config.get_vplx_configs():
        # dict.get returns None both when the key is absent and when it is
        # explicitly None, matching the original two-step check.
        if cfg.get('username') is not None:
            username = cfg['username']
        if my_ip == cfg['public_ip']:
            self.list_vplx_ssh.append(None)
            utils.set_global_dict_value(None, cfg['public_ip'])
        else:
            conn = utils.SSHConn(cfg['public_ip'], cfg['port'],
                                 username, cfg['password'])
            self.list_vplx_ssh.append(conn)
            utils.set_global_dict_value(conn, cfg['public_ip'])
def get_mon_hosts():
    """Return a sorted list of monitor endpoints ('<addr>:6789').

    Each unit's 'ceph-public-address' wins; otherwise its
    'private-address' is resolved through ``get_host_ip``.  IPv6
    addresses are run through ``format_ipv6_addr``.
    """
    hosts = []
    for relid in relation_ids('mon'):
        for unit in related_units(relid):
            public_addr = relation_get('ceph-public-address', unit, relid)
            addr = public_addr or get_host_ip(
                relation_get('private-address', unit, relid))
            if addr:
                hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr))
    return sorted(hosts)
def _send_error_email(self, exception):
    """Best-effort: mail the admins a 500 report with package version and
    host IP.  Never raises — failures are only logged, so error reporting
    cannot itself take the server down."""
    try:
        local_ip = get_host_ip()
        version_str = InvokeCommand()._runSysCmd("rpm -qa jetty-manager")
        logging.info("version_str :" + str(version_str))
        subject = "[%s]Internal Server Error " % options.sitename
        body = self.render_string("errors/500_email.html",
                                  exception=exception)
        body += "\n" + version_str[0] + "\nip:" + local_ip
        if options.send_email_switch:
            send_email(options.admins, subject, body)
    except Exception:
        logging.error(traceback.format_exc())
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
import tornado.httpserver
import logging.config
import utils
import json
import time

# Logging is configured before ``similarity`` is imported below —
# presumably because that module obtains its logger at import time; confirm
# before reordering these statements.
logging.config.fileConfig('logger.conf')
logger = logging.getLogger('recommServerLog')
logger.info('系统启动...')

import similarity

# The HNSW model/index files live in different directories depending on
# which machine this runs on (10.1.13.49 uses the /home/tdlab layout).
if utils.get_host_ip() == '10.1.13.49':
    recmder = similarity.Recommander(
        '/home/tdlab/recommender/data_hnsw/wm.bin',
        '/home/tdlab/recommender/data_hnsw/ind/paper.ind',
        '/home/tdlab/recommender/data_hnsw/ind/patent.ind',
        '/home/tdlab/recommender/data_hnsw/ind/project.ind')
else:
    recmder = similarity.Recommander(
        '/data/Recommender/data_hnsw/wm.bin',
        '/data/Recommender/data_hnsw/ind/paper.ind',
        '/data/Recommender/data_hnsw/ind/patent.ind',
        '/data/Recommender/data_hnsw/ind/project.ind')

# Fetch a large candidate pool first and filter on it afterwards; TOPN is
# NOT the number of results returned.
TOPN = 10000
TOPN_project = 1000
ef_paper = TOPN
ef_patent = TOPN
def zk():
    """Register this host's IP as an ephemeral znode under
    /registration_number on the ZooKeeper ensemble."""
    client = KazooClient(hosts="192.168.118.111:2181")
    client.start()
    client.ensure_path("/registration_number")
    client.create("/registration_number/%s" % get_host_ip(), ephemeral=True)
def index():
    """Render the landing page, passing this host's IP to the template."""
    host_ip = get_host_ip()
    return render_template('index_old.html', ip=host_ip)
def enable_https(port_maps, namespace, cert, key, ca_cert=None):
    ''' For a given number of port mappings, configures apache2
    HTTPs local reverse proxying using certficates and keys provided in
    either configuration data (preferred) or relation data.  Assumes ports
    are not in use (calling charm should ensure that).

    port_maps: dict: external to internal port mappings
    namespace: str: name of charm

    Returns False when no certificate data is available, True otherwise.
    '''
    def _write_if_changed(path, new_content):
        # Write new_content only when it differs from what is on disk;
        # returns True when a write happened (used to decide whether
        # apache needs a restart).
        content = None
        if os.path.exists(path):
            with open(path, 'r') as f:
                content = f.read().strip()
        if content != new_content:
            with open(path, 'w') as f:
                f.write(new_content)
            return True
        else:
            return False

    juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
    http_restart = False

    # Certificate material arrives base64-encoded.
    if cert:
        cert = b64decode(cert)
    if key:
        key = b64decode(key)
    if ca_cert:
        ca_cert = b64decode(ca_cert)

    if not cert and not key:
        juju_log('ERROR',
                 "Expected but could not find SSL certificate data, not "
                 "configuring HTTPS!")
        return False

    install('apache2')
    if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl', 'proxy',
                                                'proxy_http']):
        http_restart = True

    ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
    if not os.path.exists(ssl_dir):
        os.makedirs(ssl_dir)

    if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
        http_restart = True
    if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
        http_restart = True
    # NOTE: 0600 is a Python 2 octal literal (this module is Python 2);
    # the private key must not be group/world-readable.
    os.chmod(os.path.join(ssl_dir, 'key'), 0600)

    install_ca_cert(ca_cert)

    # One apache vhost per external->internal port mapping.
    sites_dir = '/etc/apache2/sites-available'
    for ext_port, int_port in port_maps.items():
        juju_log('INFO',
                 'Creating apache2 reverse proxy vhost'
                 ' for {}:{}'.format(ext_port, int_port))
        site = "{}_{}".format(namespace, ext_port)
        site_path = os.path.join(sites_dir, site)
        with open(site_path, 'w') as fsite:
            context = {
                "ext": ext_port,
                "int": int_port,
                "namespace": namespace,
                "private_address": get_host_ip()
            }
            fsite.write(render_template(SITE_TEMPLATE, context))

        if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
            http_restart = True

    if http_restart:
        restart('apache2')

    return True
import socket
import utils
import sys
import yaml

# NOTE(review): yaml.load without an explicit Loader is deprecated and
# unsafe on untrusted input; config.yaml is presumably trusted here, but
# consider yaml.safe_load.
with open("config.yaml", "rb") as fh:
    config = yaml.load(fh.read())

self_ip = utils.get_host_ip()
host_list = config["hosts"]
print(self_ip)
print(host_list)

# Mode comes from argv[1]; default to server mode ("s") when absent.
try:
    MODE = sys.argv[1]
except Exception:
    MODE = "s"

if MODE == "s":
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((socket.gethostname(), 1234))
    s.listen(5)
    # Accept loop: one client at a time, then greet it forever.
    while True:
        # now our endpoint knows about the OTHER endpoint.
        clientsocket, address = s.accept()
        print(f"Connection from {address} has been established.")
        while True:
            clientsocket.send(bytes("Hey there!!!", "utf-8"))
            import time
            time.sleep(3)
def filter(self, record):
    """Logging filter hook: stamp every record with this host's IP
    (available to formatters as %(ip)s) and always let the record pass."""
    setattr(record, 'ip', utils.get_host_ip())
    return True
parser = argparse.ArgumentParser()
parser.add_argument(
    '--port',
    type=int,
    default=0,
    help="The port to listen on. Default is a random available port.")
args = parser.parse_args()

# start auto load function when modules was updated
ploader = Process(target=load_modify)
ploader.start()

# start logger server
plog = Process(target=loggingserver.start_server, args=(utils.get_host_ip(), ))
plog.start()

# start cmodel testing server
ptest = Process(target=process_testing, args=(
    utils.get_host_ip(),
    args.port,
))
ptest.start()

# Block until all three child processes exit.
plog.join()
ptest.join()
ploader.join()
action1 = item[0] if str(action1) not in act_reward.keys(): act_reward[str(action1)] = [] act_reward[str(action1)].append( (item[2], item[1], item[3], item[4], 'advantage:' + str(act_advantage_dic[index][2]), 'actor_loss:' + str(act_advantage_dic[index][3]), item[5], item[6], item[7])) value_loss_list.append(value_loss) actor_loss_list.append(actor_loss) print('Ep: %i' % i_episode, "|Ep_r: %i" % ep_rwd) reward_list.append(ep_rwd) np.save('reward_list_d1m4', reward_list) np.save('reward_list_r_d1m4', reward_list_r) np.save('act_reward_d1m4', act_reward) np.save('max_reward_d1m4', max_reward) np.save('suceed_action_d1m4', suceed_action) np.save('fail_action_d1m4', fail_action) np.save('value_loss_d1m4', value_loss_list) np.save('actor_loss_d1m4', actor_loss_list) break if __name__ == '__main__': np.set_printoptions(threshold=np.inf) ip = get_host_ip() server_dic = eval(CONFIG.get('server', 'server')) notebook_root = server_dic[ip]['npath'] dataset_root = server_dic[ip]['dpath'] train(notebook_root, dataset_root, ip)
def _set_up_physical_link(self, interLinkObj, update=False):
    # Handle phyiscal links at the ends, because we want to make sure that
    # all other links have already been created (This saves us trouble)
    #
    # Creates the CORE link for a physical (rj45) connection and installs
    # the host-side and node-side routes needed for traffic to flow.
    if (interLinkObj.first_node.is_physical == True):
        physical_inter_node = interLinkObj.first_node
        second_inter_node = interLinkObj.second_node
        physicalNode = physical_inter_node.CORE_node
        secondNode = second_inter_node.CORE_node
    else:
        # Inter_nodes are the nodes we create (coreNode), not the ones
        # returned by CORE. They contain some useful info for creating links
        physical_inter_node = interLinkObj.second_node
        # The naming is confusing for now and seems redundant. This will
        # all be refractored and cleaned up later.
        second_inter_node = interLinkObj.first_node
        # NOTE(review): these two assignments look swapped relative to the
        # branch above (physicalNode receives the NON-physical node's
        # CORE_node) — confirm whether this is intentional.
        physicalNode = second_inter_node.CORE_node
        secondNode = physical_inter_node.CORE_node
    # handle case where second node is a switch (which has no IP address)
    if (second_inter_node.type == NodeTypes.SWITCH):
        physical_inter_node.handle_switch_case()
        switchConnections = second_inter_node.neighbours
        # add static route entries for all other nodes connected to the
        # switch from our host
        for node in switchConnections:
            node_ip = node.ip_address
            # add route from physical host to node
            subprocess.call(
                utils.get_cmd_command(
                    "sudo ip route add {0}/32 dev {1}".format(
                        node_ip, physical_inter_node.vethOutName)))
    # else:
    # Add route from host to virtual node -- The other way around can only
    # be added once the session has been instantiated
    # in case of a switch, link to switch will be added here
    subprocess.call(
        utils.get_cmd_command(
            "sudo ip route add {0}/32 dev {1}".format(
                second_inter_node.ip_address,
                physical_inter_node.vethOutName)))
    self.physical_links[physical_inter_node] = second_inter_node
    first_interface = physical_inter_node.interfaces[second_inter_node]
    # NOTE(review): second_interface is computed but never used below (the
    # add_link argument is commented out).
    second_interface = second_inter_node.interfaces[
        physical_inter_node]
    # Avoid double adding a link when this method is called later.
    if (interLinkObj.is_in_CORE == False):
        self.session.add_link(physical_inter_node.CORE_node.id,
                              second_inter_node.CORE_node.id,
                              first_interface)  # , second_interface)
        logging.debug(interLinkObj)
        interLinkObj.is_in_CORE = True
    # this part sets up the routing tables of the different nodes that are
    # reachable from physical node.
    # This makes sure communication can happen.
    host_ip = utils.get_host_ip()
    nodes_connected_to_physical = []
    if (second_inter_node.type == NodeTypes.SWITCH):
        switch = second_inter_node
        switchConnections = switch.links
        for link in switchConnections:  # of type interLinkObj
            first_node = link.first_node
            second_node = link.second_node
            # Make sure the node isn't the switch itself, and isn't the
            # actual phyiscal node that we are trying to handle.
            # In other words, get all nodes that can be reached from the
            # physical node through the switch.
            # NOTE(review): ``physicalNode`` is a CORE node while
            # ``first_node``/``second_node`` are InterNode wrappers, so the
            # identity checks against it can never match; additionally the
            # second branch tests ``first_node`` where ``second_node`` was
            # probably intended (copy-paste) — confirm and fix.
            if first_node is not switch and first_node is not physicalNode and first_node.type != NodeTypes.RJ45:
                if first_node not in nodes_connected_to_physical:
                    nodes_connected_to_physical.append(first_node)
            if second_node is not switch and first_node is not physicalNode and second_node.type != NodeTypes.RJ45:
                if second_node not in nodes_connected_to_physical:
                    nodes_connected_to_physical.append(second_node)
        for node in nodes_connected_to_physical:
            # Route traffic destined for the host out of the interface
            # facing the switch.
            node.handle_switch_case()
            interface = node.interfaces[switch]
            command = utils.get_cmd_command(
                "sudo ip route replace {0}/32 dev eth{1}".format(
                    host_ip, interface.id))
            node.CORE_node.client.cmd(command)