def __init__(self, env, cpus=0, cpus_per_node=0, qname='standby', walltime='1:00:00', modules='', pack=1, qlimit=200): Host.__init__(self) if cpus <= 0: print "You must specify cpus when creating a PBSHost object." raise ValueError if cpus_per_node <= 0: print "You must specify cpus_per_node when creating a PBSHost object." raise ValueError try: fd = open(env, 'r') except IOError as e: print print "Trying to read %s" % env print "I/O error(%s): %s" % (e.errno, e.strerror) print sys.exit(1) fd.close() self.env = env self.cpus = cpus self.cpus_per_node = cpus_per_node self.qname = qname self.walltime = walltime self.jobs = [] self.wqueue = [] self.wlist = [] self.modules = modules self.pack = pack self.scaling = False self.jnum = 0 self.qlimit = qlimit # checkjob on Carter is frequently broken #self.has_checkjob = (os.system("/bin/bash -c 'checkjob --version 2> /dev/null'") >> 8) == 0 self.has_checkjob = False self.has_torque = os.path.isdir('/var/spool/torque')
def addHost(self):
    """Create a new Host, attach it under the tree root, and refresh the selection panel."""
    new_host = Host(self, "New Host")
    item = self.tree.AppendItem(self.root, new_host.getName())
    self.tree.SetPyData(item, new_host)
    # Folder icons: closed for the normal state, open when expanded.
    self.tree.SetItemImage(item, self.fldridx, wx.TreeItemIcon_Normal)
    self.tree.SetItemImage(item, self.fldropenidx, wx.TreeItemIcon_Expanded)
    self.selectionPanel()
def checkjob(d): quiet = False cmd = "checkjob -A %s" % d['jobid'] print 'cmd=%s' % cmd so = file('/dev/null') st = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=so).stdout so.close() str = st.read().rstrip().rstrip(';') if not str: return d = dict(x.split('=') for x in str.split(';')) if not quiet: etime = int(d['STARTTIME']) if etime: etime = int(time.time()) - int(etime) etime = Host.secs_to_walltime(etime) wtime = Host.secs_to_walltime(d['WCLIMIT']) print "%s %8s %10s %10s %10s" % (d['NAME'], d['RCLASS'], d['STATE'], wtime, etime) # Idle, Started, Running, Completed, or Removed _state = d['STATE'] if _state == 'Idle' or _state == 'Starting': state = 'Q' elif _state == 'Running': state = 'R' elif _state == 'Completed': state = 'F' else: state = 'O' d['job_state'] = state st.close()
def select_host_from_cluster(self, cluster):
    """Return the first host belonging to *cluster*, provisioning it first if needed."""
    for candidate in self.api.hosts.list():
        # Skip hosts that belong to other clusters.
        if candidate.get_cluster().get_id() != cluster:
            continue
        secret = self.collect_params(candidate.get_name(), 'hypervisors')['password']
        remote = Host(candidate.get_address(), secret)
        # Only push the helper script if it is not already present.
        if not remote.has_file('/give_mac_return_ip'):
            self.ini_host(candidate)
        return candidate
def ini_host(self, host): password = self.collect_params(host.get_name(), 'hypervisors')['password'] remote_host = Host(host.get_address(), password) src_1 = "%s/give_mac_return_ip" % (self.path) dest_1 = "/give_mac_return_ip" try: remote_host.put_file(src_1, dest_1) remote_host.run_bash_command('chmod +x %s' % dest_1) except Exception as e: print e
def main():
    """Build an Ansible dynamic-inventory dict from device/VM/IP/service data and print it as JSON."""
    devices = inventory.get_device_data()
    vms = inventory.get_vm_data()
    ips = inventory.get_ip_data()
    services = inventory.get_service_data()

    ansible_inventory = {}
    ansible_inventory["_meta"] = {}
    hosts = {}
    groups = {}

    def _group(name):
        # Fetch or lazily create the named group with the standard empty layout.
        return groups.setdefault(name, {"hosts": [], "vars": {}, "children": []})

    for d in devices + vms:
        host = Host(d, ips)
        hosts[host.name] = host
        # Group by role.
        _group(host.hostrole)["hosts"].append(host.name)
        # Group by each tag, prefixed to avoid clashing with role names.
        for tag in host.tags:
            _group("tags_" + str(tag))["hosts"].append(host.name)

    ansible_inventory.update(groups)

    # Attach services to their hosts, creating each host's list lazily.
    for s in services:
        serv = Service(s)
        host = hosts[serv.host]
        if host.services is None:
            host._data["services"] = []
        host._data["services"].append(serv)

    ansible_inventory["_meta"]["hostvars"] = hosts
    print(json.dumps(ansible_inventory, default=serialize))
def generate_accounts(self) -> None:
    """Populate self.accounts / hosts / limits / routers with random fixtures.

    Note: the sequence of random.* calls matches the original exactly, so
    seeded runs produce identical data.
    """
    shorts: Set[str] = set()
    fulls: Set[str] = set()
    limit_sets: Set[LimitSet] = set()
    every_host: Set[str] = set()
    router_names: Set[str] = set()
    alphabet = string.ascii_lowercase
    # Sets de-duplicate, so loop until each pool reaches its target size.
    while len(shorts) < 8:
        shorts.add(''.join(random.choice(alphabet) for _ in range(2)))
    while len(fulls) < 8:
        fulls.add(''.join(random.choice(alphabet) for _ in range(random.randint(5, 9))))
    while len(router_names) < 3:
        router_names.add(''.join(random.choice(alphabet) for _ in range(random.randint(5, 9))))
    while len(limit_sets) < 3:
        limit_sets.add(self.generate_limitset())
    accounts = []
    for short, full in zip(shorts, fulls):
        fresh: Set[str] = set()
        # Host names must be unique across *all* accounts, not just this one.
        while len(fresh) < random.randint(2, 7):
            candidate = ''.join(random.choice(alphabet) for _ in range(random.randint(3, 8)))
            if candidate in every_host:
                continue
            fresh.add(candidate)
            every_host.add(candidate)
        hosts: Tuple[Host, ...] = tuple(Host(x) for x in fresh)
        mark = random.choice(list(Mark))
        limit = random.choice(list(limit_sets))
        accounts.append(Account(short, full, hosts, limit, mark))
    self.accounts = {o.short: o for o in accounts}
    self.hosts = tuple(every_host)
    self.limits = tuple(limit_sets)
    self.routers = tuple(router_names)
    self.config['test_host'] = random.choice(self.hosts)
    self.config['test_router'] = random.choice(self.routers)
def test_list(self):
    """Two distinctly named hosts can be added; re-adding a name raises ExceptHostNameExist."""
    host_type = Type.create_type(Type.MYSQL_TCP_IP)
    host_names = ["teste", "teste2"]
    host = Host(host_names[0], host_type)
    host2 = Host(host_names[1], host_type)
    host_list = List()
    host_list.add(host)
    host_list.add(host2)
    # Iteration order must match insertion order.
    for i, v in enumerate(host_list.iteritems()):
        self.assertEqual(host_names[i], v.name)
    # A duplicate name must be rejected.
    with self.assertRaises(hosts.ExceptHostNameExist):
        host_list.add(host2)
def clear_hosts():
    """Rewrite ~/.ssh/known_hosts, dropping entries that Host classifies as local."""
    hosts_file = os.path.expanduser("~/.ssh/known_hosts")
    hosts = []
    # Read every known-hosts entry.  (The redundant explicit close()
    # calls inside the with-blocks were removed — `with` handles it.)
    with open(hosts_file, 'r') as h:
        for line in h:
            hosts.append(Host(line))
    # Write back only the non-local entries.
    with open(hosts_file, 'w') as h:
        for host in hosts:
            if not host.is_local():
                h.write(str(host))
    print("New hosts written")
    return
def current_host():
    """Return the Host bound to the currently active tmux window, or None."""
    listing = tmux_cmd("list-windows -F", "#{window_name} #{window_active}")
    for row in listing.splitlines():
        window, is_active = row.strip().split()
        if is_active == "1":
            return Host.get_by_name(window.decode("utf-8"))
def new_conn(name):
    """Open a connection to the host identified by *name*."""
    target = Host.get_by_name(name.decode("utf-8"))
    target.connect()
#!/usr/bin/env python
import os.path

from tmux.selection import list_selection
from tmux.helper import tmux_cmd, tmux_send_keys
from hosts import Host

# Thin wrappers around tmux commands for host-based window management.
search_hosts = lambda *argv: [h.name for h in Host.all_hosts(*argv)]
create_conn = lambda name: tmux_cmd("new-window -n %s" % name,
                                    "%s new_conn '%s'" % (__file__, name))
list_conns = lambda: [l.decode("utf-8")
                      for l in tmux_cmd("list-windows -F", "#I:#W").splitlines()]
select_conn = lambda name: tmux_cmd("select-window -t %s", name.split(":")[0])


def current_host():
    """Return the Host bound to the currently active tmux window, or None."""
    for row in tmux_cmd("list-windows -F", "#{window_name} #{window_active}").splitlines():
        window, is_active = row.strip().split()
        if is_active == "1":
            return Host.get_by_name(window.decode("utf-8"))


def new_conn(name):
    """Connect to the host identified by *name*."""
    Host.get_by_name(name.decode("utf-8")).connect()


def search_and_list(*argv):
    # NOTE(review): this definition appears truncated in the source view;
    # only the prefix-command assignment is visible here.
    prefix_cmd = "%s %s" % (__file__, "search_and_list")
def __init__(self, venue=None, cpus=1, cpus_per_node=1, walltime=60):
    """Initialise a host handle for *venue* with the given CPU layout.

    NOTE(review): the *walltime* parameter is accepted but never stored —
    confirm whether that is intentional.
    """
    Host.__init__(self)
    self.cpus = cpus                    # total CPUs requested
    self.cpus_per_node = cpus_per_node  # CPUs per allocated node
    self.hostname = venue               # remote venue name (may be None)
    self.jobs = []                      # jobs submitted through this host
#!/usr/bin/env python
import os.path

from tmux.selection import list_selection
from tmux.helper import tmux_cmd, tmux_send_keys
from hosts import Host

# Thin wrappers around tmux commands for host-based window management.
search_hosts = lambda *argv: [h.name for h in Host.all_hosts(*argv)]
create_conn = lambda name: tmux_cmd("new-window -n %s" % name,
                                    "%s new_conn '%s'" % (__file__, name))
list_conns = lambda: [l.decode("utf-8")
                      for l in tmux_cmd("list-windows -F", "#I:#W").splitlines()]
select_conn = lambda name: tmux_cmd("select-window -t %s", name.split(":")[0])


def current_host():
    """Return the Host bound to the currently active tmux window, or None."""
    for row in tmux_cmd("list-windows -F", "#{window_name} #{window_active}").splitlines():
        window, is_active = row.strip().split()
        if is_active == "1":
            return Host.get_by_name(window.decode("utf-8"))


def new_conn(name):
    """Connect to the host identified by *name*."""
    Host.get_by_name(name.decode("utf-8")).connect()


def search_and_list(*argv):
    """Show an interactive host selection; Enter opens a new connection window."""
    prefix_cmd = "%s %s" % (__file__, "search_and_list")
    list_selection(prefix_cmd, argv, search_hosts, {"Enter": create_conn})


def clone_conn():
    """Open another connection to the host of the active window."""
    current_host().connect()


def extern_ftp():
    """Start an FTP session to the host of the active window."""
    current_host().ftp()
def get_config(env, filename, debug):
    """Parse a JSON network description into simulation objects.

    Parameters
    ----------
    env : simulation environment passed through to every created object
    filename : str, path to the JSON test description
    debug : debug flag passed through to every created object

    Returns
    -------
    (hosts, links, flows, routers) tuple, or None if *filename* does not exist.
    """
    try:
        with open(filename) as f:
            test_data = json.load(f)
    except FileNotFoundError:
        print("File not found")
        return

    hosts = []
    routers = []
    links = []
    flows = []

    # Create link objects.
    for link in test_data["links"]:
        link_info = test_data['links'][link]
        l = Link(env,
                 link_info['link_id'],
                 link_info['link_delay'],
                 link_info['link_buffer'],
                 link_info['link_rate'],
                 link_info['link_source'],
                 link_info['link_destination'], debug)
        links.append(l)

    # Create hosts, wiring each to the link that originates at it.
    for host in test_data['hosts']:
        host_info = test_data['hosts'][host]
        link = next((l for l in links
                     if l.id == host_info['link_id']
                     and l.source == host_info['host_id']), None)
        assert link is not None
        hosts.append(Host(env, host_info['host_id'], link, debug))

    # Create flow objects.
    # TODO: Start of flows changes
    for flow in test_data['flows']:
        flow_info = test_data['flows'][flow]
        flow_id = flow_info['flow_id']
        source = next((h for h in hosts if h.id == flow_info['flow_src']), None)
        f = Tahoe(flow_id, env, source, flow_info['flow_dest'],
                  flow_info['data_amt'], flow_info['flow_start'])
        flows.append(f)

    # Create routers, each owning the links that originate at it.
    for router in test_data['routers']:
        router_info = test_data['routers'][router]
        router_links = [l for l in links
                        if l.id in router_info['links']
                        and l.source == router_info['router_id']]
        assert len(router_links) == len(router_info['links'])
        routers.append(Router(env, router_info['router_id'], router_links, debug))

    # Replace string endpoint IDs on each link with the actual node objects.
    nodes = routers + hosts
    for link in links:
        link.source = next((n for n in nodes if n.id == link.source), None)
        link.destination = next((n for n in nodes if n.id == link.destination), None)

    return (hosts, links, flows, routers)