def run(self): SIGINTWatcher(self.cleanup_after_kill) t_start = time.time() self.parse_options() if len(self.args) != 2: print "You must specify an instance id." print "For example: %s [options] gpi-37a8bf17" % self.name exit(1) inst_id = self.args[1] api = API(self.opt.dir) (status_code, message, topology_json) = api.instance(inst_id) if status_code != API.STATUS_SUCCESS: self._print_error("Could not access instance.", message) exit(1) else: t = Topology.from_json_string(topology_json) if not t.domains.has_key(self.opt.domain): self._print_error("Could not add user", "Domain '%s' does not exist" % self.opt.domain) exit(1) domain = t.domains[self.opt.domain] user = User() user.set_property("id", self.opt.login) user.set_property("password_hash", self.opt.passwd) user.set_property("ssh_pkey", self.opt.ssh) if self.opt.admin != None: user.set_property("admin", self.opt.admin) else: user.set_property("admin", False) user.set_property("certificate", self.opt.certificate) domain.add_to_array("users", user) topology_json = t.to_json_string() print "Adding new user to", print Fore.WHITE + Style.BRIGHT + inst_id + Fore.RESET + Style.RESET_ALL + "...", status_code, message = api.instance_update(inst_id, topology_json, [], []) if status_code == API.STATUS_SUCCESS: print Fore.GREEN + Style.BRIGHT + "done!" t_end = time.time() delta = t_end - t_start minutes = int(delta / 60) seconds = int(delta - (minutes * 60)) print "Added user in " + Fore.WHITE + Style.BRIGHT + "%i minutes and %s seconds" % (minutes, seconds) elif status_code == API.STATUS_FAIL: self._print_error("Could not update topology.", message) exit(1)
def to_topology(self):
    """Generate a Topology object from this simple-topology configuration.

    Builds one Domain per name in the "domains" option, populates it with
    users (from a users file, or generated/listed user names), gridmap
    entries, and the nodes implied by the configuration flags (NFS/NIS
    server, login, MyProxy, GridFTP, Galaxy, Condor cluster, and an
    optional Globus Online endpoint).

    Returns the fully populated Topology.
    """
    # The invoking user's SSH public key is the default key for every
    # generated user.  'with' guarantees the handle is closed even if
    # the read fails.
    with open(os.path.expanduser(self.get("ssh-pubkey"))) as ssh_pubkeyf:
        ssh_pubkey = ssh_pubkeyf.read().strip()

    topology = Topology()

    if self.get("deploy") == "dummy":
        # No default deploy data
        pass
    elif self.get("deploy") == "ec2":
        deploy_data = DeployData()
        ec2_deploy_data = EC2DeployData()
        ec2_deploy_data.set_property("ami", self.get("ec2-ami"))
        ec2_deploy_data.set_property("instance_type", self.get("ec2-instance-type"))
        deploy_data.set_property("ec2", ec2_deploy_data)
        topology.set_property("default_deploy_data", deploy_data)

    domains = self.get("domains").split()

    for domain_name in domains:
        domain = Domain()
        domain.set_property("id", domain_name)
        topology.add_to_array("domains", domain)

        # The invoking user is always an admin with a generated certificate.
        user = User()
        user.set_property("id", getpass.getuser())
        user.set_property("password_hash", "!")
        user.set_property("certificate", "generated")
        user.set_property("admin", True)
        user.set_property("ssh_pkey", ssh_pubkey)
        domain.add_user(user)

        usersfile = self.get((domain_name, "users-file"))
        if usersfile is not None:
            # Each line: <C|N> <username> [ssh public key ...]
            # "C" requests a generated certificate; a missing key falls
            # back to the invoking user's key.
            with open(usersfile, "r") as uf:
                for line in uf:
                    fields = line.split()
                    cert_flag = fields[0]
                    username = fields[1]
                    if len(fields) >= 3:
                        user_ssh_pubkey = " ".join(fields[2:])
                    else:
                        user_ssh_pubkey = ssh_pubkey
                    user = User()
                    user.set_property("id", username)
                    user.set_property("password_hash", "!")
                    user.set_property("ssh_pkey", user_ssh_pubkey)
                    if cert_flag == "C":
                        user.set_property("certificate", "generated")
                    else:
                        user.set_property("certificate", "none")
                    domain.add_user(user)
        else:
            # "users"/"users-no-cert" are either a count (generate
            # numbered names) or an explicit whitespace-separated list.
            users = self.get((domain_name, "users"))
            users_nocert = self.get((domain_name, "users-no-cert"))

            if users.isdigit():
                num_users = int(users)
                usernames = [("%s-user%i" % (domain_name, i), True) for i in range(1, num_users + 1)]
            else:
                num_users = 0
                usernames = [(u, True) for u in users.split() if u != getpass.getuser()]

            if users_nocert.isdigit():
                usernames += [("%s-user%i" % (domain_name, i), False)
                              for i in range(num_users + 1, num_users + int(users_nocert) + 1)]
            else:
                usernames += [(u, False) for u in users_nocert.split() if u != getpass.getuser()]

            for username, cert in usernames:
                user = User()
                user.set_property("id", username)
                user.set_property("password_hash", "!")
                user.set_property("ssh_pkey", ssh_pubkey)
                if cert:
                    user.set_property("certificate", "generated")
                else:
                    user.set_property("certificate", "none")
                domain.add_user(user)

        # Every user gets a gridmap entry; Globus Online auth adds a
        # second entry with the GO Connect DN.
        for user in domain.users.values():
            gme = GridMapEntry()
            gme.set_property("dn", "/O=Grid/OU=Globus Provision (generated)/CN=%s" % user.id)
            gme.set_property("login", user.id)
            domain.add_to_array("gridmap", gme)
            if self.get((domain_name, "go-auth")) == "go":
                gme = GridMapEntry()
                gme.set_property("dn", "/C=US/O=Globus Consortium/OU=Globus Connect User/CN=%s" % user.id)
                gme.set_property("login", user.id)
                domain.add_to_array("gridmap", gme)

        if self.get((domain_name, "nfs-nis")):
            server_node = Node()
            server_name = "%s-server" % domain_name
            server_node.set_property("id", server_name)
            server_node.add_to_array("run_list", "role[domain-nfsnis]")
            if not self.get((domain_name, "login")):
                # If there is no login node, the NFS/NIS server will
                # effectively act as one.
                server_node.add_to_array("run_list", "role[globus]")
            if self.get((domain_name, "galaxy")):
                # If there is a Galaxy server in the domain, the "common"
                # recipe has to be installed on the NFS/NIS server
                server_node.add_to_array("run_list", "recipe[galaxy::galaxy-globus-common]")
            domain.add_node(server_node)

        if self.get((domain_name, "login")):
            login_node = Node()
            login_node.set_property("id", "%s-login" % domain_name)
            if self.get((domain_name, "nfs-nis")):
                login_node.set_property("depends", "node:%s" % server_name)
                login_node.add_to_array("run_list", "role[domain-nfsnis-client]")
            else:
                login_node.add_to_array("run_list", "recipe[provision::domain_users]")
            login_node.add_to_array("run_list", "role[globus]")
            domain.add_node(login_node)

        if self.get((domain_name, "myproxy")):
            myproxy_node = Node()
            myproxy_node.set_property("id", "%s-myproxy" % domain_name)
            if self.get((domain_name, "nfs-nis")):
                myproxy_node.set_property("depends", "node:%s" % server_name)
                myproxy_node.add_to_array("run_list", "role[domain-nfsnis-client]")
            else:
                myproxy_node.add_to_array("run_list", "recipe[provision::domain_users]")
            myproxy_node.add_to_array("run_list", "role[domain-myproxy]")
            domain.add_node(myproxy_node)

        if self.get((domain_name, "gridftp")):
            gridftp_node = Node()
            gridftp_node.set_property("id", "%s-gridftp" % domain_name)
            if self.get((domain_name, "nfs-nis")):
                gridftp_node.set_property("depends", "node:%s" % server_name)
                gridftp_node.add_to_array("run_list", "role[domain-nfsnis-client]")
            else:
                gridftp_node.add_to_array("run_list", "recipe[provision::domain_users]")
            if self.get((domain_name, "go-endpoint")) is not None:
                gridftp_node.add_to_array("run_list", "recipe[globus::go_cert]")
            gridftp_node.add_to_array("run_list", "role[domain-gridftp]")
            domain.add_node(gridftp_node)

        if self.get((domain_name, "galaxy")):
            galaxy_node = Node()
            galaxy_node.set_property("id", "%s-galaxy" % domain_name)
            if self.get((domain_name, "nfs-nis")):
                galaxy_node.set_property("depends", "node:%s" % server_name)
                galaxy_node.add_to_array("run_list", "role[domain-nfsnis-client]")
            else:
                galaxy_node.add_to_array("run_list", "recipe[provision::domain_users]")
                # Without an NFS/NIS server the "common" recipe lives on
                # the Galaxy node itself.
                galaxy_node.add_to_array("run_list", "recipe[galaxy::galaxy-globus-common]")
            if self.get((domain_name, "go-endpoint")) is not None:
                galaxy_node.add_to_array("run_list", "recipe[globus::go_cert]")
            galaxy_node.add_to_array("run_list", "recipe[galaxy::galaxy-globus]")
            domain.add_node(galaxy_node)

        lrm = self.get((domain_name, "lrm"))
        if lrm != "none":
            gram = self.get((domain_name, "gram"))
            if lrm == "condor":
                if gram:
                    node_name = "%s-gram-condor" % domain_name
                    role = "role[domain-gram-condor]"
                else:
                    node_name = "%s-condor" % domain_name
                    role = "role[domain-condor]"
                workernode_role = "role[domain-clusternode-condor]"

            lrm_node = Node()
            lrm_node.set_property("id", node_name)
            if self.get((domain_name, "nfs-nis")):
                lrm_node.set_property("depends", "node:%s" % server_name)
                lrm_node.add_to_array("run_list", "role[domain-nfsnis-client]")
            else:
                lrm_node.add_to_array("run_list", "recipe[provision::domain_users]")
            lrm_node.add_to_array("run_list", role)
            domain.add_node(lrm_node)

            clusternode_host = 1
            for i in range(self.get((domain_name, "cluster-nodes"))):
                wn_name = "%s-condor-wn%i" % (domain_name, i + 1)
                wn_node = Node()
                wn_node.set_property("id", wn_name)
                # Worker nodes depend on the LRM head node.
                wn_node.set_property("depends", "node:%s" % node_name)
                if self.get((domain_name, "nfs-nis")):
                    wn_node.add_to_array("run_list", "role[domain-nfsnis-client]")
                else:
                    wn_node.add_to_array("run_list", "recipe[provision::domain_users]")
                wn_node.add_to_array("run_list", workernode_role)
                domain.add_node(wn_node)
                clusternode_host += 1

        if self.get((domain_name, "go-endpoint")) is not None:
            goep = GOEndpoint()
            # Endpoint option has the form "user#name".
            gouser, goname = self.get((domain_name, "go-endpoint")).split("#")
            goep.set_property("user", gouser)
            goep.set_property("name", goname)
            goep.set_property("gridftp", "node:%s-gridftp" % domain_name)
            if self.get((domain_name, "go-auth")) == "myproxy":
                goep.set_property("myproxy", "node:%s-myproxy" % domain_name)
            else:
                goep.set_property("myproxy", "myproxy.globusonline.org")
            domain.add_to_array("go_endpoints", goep)

    return topology
def to_topology(self):
    """Generate a Topology object from this configuration (filesystem-aware).

    Extended generator: resolves "latest-*" EC2 AMI aliases, supports
    per-user "login:password" specs, NIS and NFS/GlusterFS/local-only
    filesystems, barebones/login/simpleca nodes, Globus Connect (go-gc)
    MyProxy/GridFTP layouts, Condor and Hadoop clusters (via
    __gen_cluster), and an optional Globus Online endpoint.

    Returns the fully populated Topology.
    """
    # The invoking user's SSH public key is the default key for every
    # generated user.  'with' guarantees the handle is closed even if
    # the read fails.
    with open(os.path.expanduser(self.get("ssh-pubkey"))) as ssh_pubkeyf:
        ssh_pubkey = ssh_pubkeyf.read().strip()

    topology = Topology()

    if self.get("deploy") == "dummy":
        # No default deploy data
        pass
    elif self.get("deploy") == "ec2":
        deploy_data = DeployData()
        ec2_deploy_data = EC2DeployData()
        # Resolve symbolic AMI names to the current us-east-1 images.
        ami = self.get("ec2-ami")
        if ami == "latest-32bit":
            ami = AMI["us-east-1"]["32-bit"]
        elif ami == "latest-64bit":
            ami = AMI["us-east-1"]["64-bit"]
        elif ami == "latest-hvm":
            ami = AMI["us-east-1"]["hvm"]
        ec2_deploy_data.set_property("ami", ami)
        ec2_deploy_data.set_property("instance_type", self.get("ec2-instance-type"))
        deploy_data.set_property("ec2", ec2_deploy_data)
        topology.set_property("default_deploy_data", deploy_data)

    domains = self.get("domains").split()

    for domain_name in domains:
        domain = Domain()
        domain.set_property("id", domain_name)
        topology.add_to_array("domains", domain)

        has_go_ep = self.get((domain_name, "go-endpoint")) is not None

        # The invoking user is always an admin with a generated certificate.
        user = User()
        user.set_property("id", getpass.getuser())
        user.set_property("password_hash", "!")
        user.set_property("certificate", "generated")
        user.set_property("admin", True)
        user.set_property("ssh_pkey", ssh_pubkey)
        domain.add_user(user)

        usersfile = self.get((domain_name, "users-file"))
        if usersfile is not None:
            # Each line: <C|N> <username> [ssh public key ...]
            # "C" requests a generated certificate; a missing key falls
            # back to the invoking user's key.
            with open(usersfile, "r") as uf:
                for line in uf:
                    fields = line.split()
                    cert_flag = fields[0]
                    username = fields[1]
                    if len(fields) >= 3:
                        user_ssh_pubkey = " ".join(fields[2:])
                    else:
                        user_ssh_pubkey = ssh_pubkey
                    user = User()
                    user.set_property("id", username)
                    user.set_property("password_hash", "!")
                    user.set_property("ssh_pkey", user_ssh_pubkey)
                    if cert_flag == "C":
                        user.set_property("certificate", "generated")
                    else:
                        user.set_property("certificate", "none")
                    domain.add_user(user)
        else:
            # "users"/"users-no-cert" are either a count (generate
            # numbered names) or an explicit whitespace-separated list;
            # listed entries may be "login" or "login:password".
            users = self.get((domain_name, "users"))
            users_nocert = self.get((domain_name, "users-no-cert"))

            if users.isdigit():
                num_users = int(users)
                usernames = [("%s-user%i" % (domain_name, i), True) for i in range(1, num_users + 1)]
            else:
                num_users = 0
                usernames = [(u, True) for u in users.split() if u != getpass.getuser()]

            if users_nocert.isdigit():
                usernames += [("%s-user%i" % (domain_name, i), False)
                              for i in range(num_users + 1, num_users + int(users_nocert) + 1)]
            else:
                usernames += [(u, False) for u in users_nocert.split() if u != getpass.getuser()]

            for username, cert in usernames:
                userpass = username.split(":")
                login = userpass[0]
                if len(userpass) == 1:
                    # No password supplied: placeholder hash (account
                    # is not password-accessible).
                    password = "******"
                else:
                    password = gen_sha512(userpass[1])
                user = User()
                user.set_property("id", login)
                user.set_property("password_hash", password)
                user.set_property("ssh_pkey", ssh_pubkey)
                if cert:
                    user.set_property("certificate", "generated")
                else:
                    user.set_property("certificate", "none")
                domain.add_user(user)

        if self.get((domain_name, "gridmap")):
            for user in domain.users.values():
                gme = GridMapEntry()
                gme.set_property("dn", "/O=Grid/OU=Globus Provision (generated)/CN=%s" % user.id)
                gme.set_property("login", user.id)
                domain.add_to_array("gridmap", gme)

        nis_server = None

        fs = FileSystem()
        domain.set_property("filesystem", fs)
        fs_type = self.get((domain_name, "filesystem"))
        fs_headnode = None

        if self.get((domain_name, "nis")) or fs_type == "nfs":
            # We need a server node
            server_node = Node()
            server_name = "%s-server" % domain_name
            server_node.set_property("id", server_name)
            server_node.add_to_array("run_list", "recipe[provision::gp_node]")
            server_node.add_to_array("run_list", "recipe[provision::nis_server]")
            if not self.get((domain_name, "login")):
                # If there is no login node, the NFS/NIS server will
                # effectively act as one.
                server_node.add_to_array("run_list", "role[globus]")
            fs_headnode = server_node
            if self.get((domain_name, "nis")):
                nis_server = server_node
            domain.add_node(server_node)

        if fs_type == "local-only":
            fs.set_property("dir_homes", "/home")
            fs.set_property("dir_software", "/usr/local")
            fs.set_property("dir_scratch", "/var/tmp")

        if fs_type == "nfs":
            fs_headnode.add_to_array("run_list", "recipe[provision::nfs_server]")
            fs.set_property("dir_homes", "/nfs/home")
            fs.set_property("dir_software", "/nfs/software")
            fs.set_property("dir_scratch", "/nfs/scratch")
            # (export path, mode, mount point) triples served by the head node.
            mounts = [("/nfs/home", "0755", "/nfs/home"),
                      ("/nfs/software/", "0755", "/nfs/software"),
                      ("/ephemeral/0/scratch", "1777", "/nfs/scratch")]
            for path, mode, mountpoint in mounts:
                mount = NFSMount()
                mount.set_property("server", "node:%s" % fs_headnode.id)
                mount.set_property("owner", "root")
                mount.set_property("mode", mode)
                mount.set_property("path", path)
                mount.set_property("mountpoint", mountpoint)
                fs.add_to_array("nfs_mounts", mount)

        if fs_type == "glusterfs":
            glusterfs_servers_num = self.get((domain_name, "glusterfs-servers"))
            glusterfs_type = self.get((domain_name, "glusterfs-type"))
            glusterfs_setsize = self.get((domain_name, "glusterfs-setsize"))

            # Kludge until we add a general Filesystem object to the topology
            domain.set_property("glusterfs_type", glusterfs_type)
            domain.set_property("glusterfs_setsize", glusterfs_setsize)

            glusterfs_servers = []

            # The first server is arbitrarily the one where we will set up GlusterFS
            name = "glusterfsd-1"
            head_node = self.__create_node(domain, name, nis_server)
            head_node.add_to_array("run_list", "recipe[glusterfs::glusterfs-server-head]")
            glusterfs_servers.append("%s-%s" % (domain_name, name))
            fs_headnode = head_node

            for i in range(1, glusterfs_servers_num):
                name = "glusterfsd-%i" % (i + 1)
                node = self.__create_node(domain, name, nis_server)
                node.add_to_array("run_list", "recipe[glusterfs::glusterfs-server]")
                node_name = "%s-%s" % (domain_name, name)
                glusterfs_servers.append(node_name)
                # The head node waits for every other server to be up.
                head_node.add_to_array("depends", "node:%s" % node_name)

            # TODO: Add GlusterFSVols to FileSystem

        if self.get((domain_name, "nis")):
            nis_server.add_to_array("run_list", "recipe[provision::domain_users]")

        if fs_headnode is not None:
            fs_headnode.add_to_array("run_list", "recipe[provision::software_path-common]")
            if self.get((domain_name, "R")):
                # If R is installed in the domain, we need to setup the global
                # Rlibs directory
                fs_headnode.add_to_array("run_list", "recipe[R::Rlibs-dir-common]")
            if self.get((domain_name, "galaxy")):
                # If there is a Galaxy server in the domain, the "common"
                # recipe has to be installed on the NFS/NIS server
                fs_headnode.add_to_array("run_list", "recipe[galaxy::galaxy-globus-common]")
                fs_headnode.add_to_array("run_list", "recipe[galaxy::blast-nfsserver]")
            if self.get((domain_name, "hadoop")):
                # If there is a Hadoop cluster in the domain, the "common"
                # recipe has to be installed on the NFS/NIS server
                fs_headnode.add_to_array("run_list", "recipe[hadoop::hadoop-common]")
                if self.get((domain_name, "R")):
                    # If R is installed on the cluster, we'll want RHadoop
                    fs_headnode.add_to_array("run_list", "recipe[hadoop::rhadoop-common]")

        for i in range(self.get((domain_name, "barebones-nodes"))):
            node = self.__create_node(domain, "blank-%i" % (i + 1), nis_server)
            node.add_to_array("run_list", "role[globus]")

        if self.get((domain_name, "login")):
            node = self.__create_node(domain, "login", nis_server)
            node.add_to_array("run_list", "role[globus]")
            if self.get((domain_name, "R")):
                node.add_to_array("run_list", "recipe[R]")
                node.add_to_array("run_list", "recipe[R::Rlibs-dir]")

        if self.get((domain_name, "simpleca")):
            node = self.__create_node(domain, "simpleca", nis_server)
            node.add_to_array("run_list", "role[globus]")
            node.add_to_array("run_list", "recipe[provision::simpleca]")

        gridftp_node = None
        myproxy_node = None
        if (self.get((domain_name, "myproxy")) and self.get((domain_name, "gridftp"))
                and has_go_ep and self.get((domain_name, "go-gc"))):
            # Globus Connect: MyProxy and GridFTP share a single node.
            node = self.__create_node(domain, "myproxy-gridftp", nis_server)
            node.add_to_array("run_list", "role[domain-myproxy-gc]")
            node.add_to_array("run_list", "role[domain-gridftp-gc]")
            gridftp_node = myproxy_node = node
        else:
            if self.get((domain_name, "myproxy")):
                myproxy_node = self.__create_node(domain, "myproxy", nis_server)
                if has_go_ep:
                    if self.get((domain_name, "go-gc")):
                        myproxy_node.add_to_array("run_list", "role[domain-myproxy-gc]")
                    else:
                        myproxy_node.add_to_array("run_list", "recipe[globus::go_cert]")
                        myproxy_node.add_to_array("run_list", "role[domain-myproxy-default]")
                else:
                    myproxy_node.add_to_array("run_list", "role[domain-myproxy-default]")
            if self.get((domain_name, "gridftp")):
                gridftp_node = self.__create_node(domain, "gridftp", nis_server)
                if has_go_ep:
                    if self.get((domain_name, "go-gc")):
                        gridftp_node.add_to_array("run_list", "role[domain-gridftp-gc]")
                    else:
                        gridftp_node.add_to_array("run_list", "recipe[globus::go_cert]")
                        gridftp_node.add_to_array("run_list", "role[domain-gridftp-default]")
                else:
                    gridftp_node.add_to_array("run_list", "role[domain-gridftp-default]")

        if self.get((domain_name, "galaxy")) and not self.get((domain_name, "condor")):
            node = self.__create_node(domain, "galaxy", nis_server)
            if fs_type == "local-only":
                # No shared filesystem head node to host the common recipe.
                node.add_to_array("run_list", "recipe[galaxy::galaxy-globus-common]")
            if has_go_ep:
                node.add_to_array("run_list", "recipe[globus::go_cert]")
            node.add_to_array("run_list", "recipe[galaxy::galaxy-globus]")

        if self.get((domain_name, "condor")):
            galaxy = self.get((domain_name, "galaxy"))
            if self.get((domain_name, "gram")):
                head_name = "gram-condor"
                head_role = "role[domain-gram-condor]"
            else:
                if galaxy:
                    head_name = "galaxy-condor"
                else:
                    head_name = "condor"
                head_role = "role[domain-condor]"
            worker_name = "condor-wn"
            worker_role = "role[domain-clusternode-condor]"
            num_workers = self.get((domain.id, "condor-nodes"))

            head_node, workers = self.__gen_cluster(domain, nis_server, None,
                                                    head_name, head_role,
                                                    worker_name, worker_role,
                                                    num_workers)

            if galaxy:
                # Galaxy rides on the Condor head node.
                if fs_type == "local-only":
                    head_node.add_to_array("run_list", "recipe[galaxy::galaxy-globus-common]")
                if has_go_ep:
                    head_node.add_to_array("run_list", "recipe[globus::go_cert]")
                head_node.add_to_array("run_list", "recipe[galaxy::galaxy-globus]")

        if self.get((domain_name, "hadoop")):
            head_name = "hadoop-master"
            head_role = "role[domain-hadoop-master]"
            worker_name = "hadoop-slave"
            worker_role = "role[domain-hadoop-slave]"
            num_workers = self.get((domain.id, "hadoop-nodes"))
            if fs_type == "local-only":
                common_recipe = "recipe[hadoop::hadoop-common]"
            else:
                common_recipe = None

            head_node, workers = self.__gen_cluster(domain, nis_server, common_recipe,
                                                    head_name, head_role,
                                                    worker_name, worker_role,
                                                    num_workers,
                                                    head_depends_on_workers=True)

            if self.get((domain_name, "R")):
                # If R is installed in the domain, we need to make sure the worker
                # nodes have R, and that they are aware of the global Rlibs directory
                for n in [head_node] + workers:
                    n.add_to_array("run_list", "recipe[R]")
                    n.add_to_array("run_list", "recipe[R::Rlibs-dir]")

        if has_go_ep:
            goep = GOEndpoint()
            # Endpoint option has the form "user#name".
            gouser, goname = self.get((domain_name, "go-endpoint")).split("#")
            goep.set_property("user", gouser)
            goep.set_property("name", goname)
            goep.set_property("public", False)
            # NOTE(review): gridftp_node/myproxy_node are None if an endpoint
            # is configured without the corresponding node option, which would
            # raise AttributeError here — presumably rejected earlier by config
            # validation; confirm.
            goep.set_property("gridftp", "node:%s" % gridftp_node.id)
            if self.get((domain_name, "go-auth")) == "myproxy":
                goep.set_property("myproxy", "node:%s" % myproxy_node.id)
            else:
                goep.set_property("myproxy", "myproxy.globusonline.org")
            goep.set_property("globus_connect_cert", self.get((domain_name, "go-gc")))
            domain.add_to_array("go_endpoints", goep)

            # GO-authenticated endpoints map every user's Globus Connect DN.
            # (The go-auth check is loop-invariant, so test it once.)
            if self.get((domain_name, "go-auth")) == "go":
                for user in domain.users.values():
                    gme = GridMapEntry()
                    gme.set_property("dn", "/C=US/O=Globus Consortium/OU=Globus Connect User/CN=%s" % user.id)
                    gme.set_property("login", user.id)
                    domain.add_to_array("gridmap", gme)

    return topology