def test_multi_step_deployment(self):
    """A MultiStepDeployment starts empty, grows one step per add(),
    and run() hands the node back after executing the steps."""
    deployment = MultiStepDeployment()
    self.assertEqual(len(deployment.steps), 0)

    deployment.add(MockDeployment())
    self.assertEqual(len(deployment.steps), 1)

    result = deployment.run(node=self.node, client=None)
    self.assertEqual(self.node, result)
def deploy_node(self, **kwargs):
    """
    Create a new node, and start deployment.

    @keyword    enable_root: If true, root password will be set to
                             vnc_password (this will enable SSH access)
                             and default 'toor' account will be deleted.
    @type       enable_root: C{bool}

    For detailed description and keywords args, see
    L{NodeDriver.deploy_node}.
    """
    image = kwargs['image']
    vnc_password = kwargs.get('vnc_password', None)
    enable_root = kwargs.get('enable_root', False)

    # The VNC password doubles as the root password set by the script
    # below, so deployment cannot proceed without it.
    if not vnc_password:
        raise ValueError('You need to provide vnc_password argument '
                         'if you want to use deployment')

    # BUG FIX: the original condition was inverted — it raised when the
    # standard drive *did* support deployment. Reject only drives that
    # are known NOT to support it.
    if (image in STANDARD_DRIVES
            and not STANDARD_DRIVES[image]['supports_deployment']):
        raise ValueError('Image %s does not support deployment'
                         % (image.id))

    if enable_root:
        # Set root's password to the VNC password, delete the default
        # 'toor' account and scrub shell history.
        script = ("unset HISTFILE;"
                  "echo root:%s | chpasswd;"
                  "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;"
                  "history -c") % vnc_password
        root_enable_script = ScriptDeployment(script=script, delete=True)
        deploy = kwargs.get('deploy', None)
        if deploy:
            # Chain the root-enable step after the caller's deployment.
            if (isinstance(deploy, ScriptDeployment)
                    or isinstance(deploy, SSHKeyDeployment)):
                deployment = MultiStepDeployment([deploy,
                                                  root_enable_script])
            elif isinstance(deploy, MultiStepDeployment):
                deployment = deploy
                deployment.add(root_enable_script)
        else:
            deployment = root_enable_script
        kwargs['deploy'] = deployment

    if not kwargs.get('ssh_username', None):
        kwargs['ssh_username'] = '******'

    return super(ElasticHostsBaseNodeDriver, self).deploy_node(**kwargs)
def deploy_node(self, **kwargs):
    """
    Create a new node, and start deployment.

    @keyword    enable_root: If true, root password will be set to
                             vnc_password (this will enable SSH access)
                             and default 'toor' account will be deleted.
    @type       enable_root: C{bool}

    For detailed description and keywords args, see
    L{NodeDriver.deploy_node}.
    """
    image = kwargs['image']
    vnc_password = kwargs.get('vnc_password', None)
    enable_root = kwargs.get('enable_root', False)

    if not vnc_password:
        raise ValueError('You need to provide vnc_password argument '
                         'if you want to use deployment')

    if (image in self._standard_drives
            and not self._standard_drives[image]['supports_deployment']):
        raise ValueError('Image %s does not support deployment'
                         % (image.id))

    if enable_root:
        # Commands: set root password to the VNC password, drop the
        # stock 'toor' account and clear shell history.
        commands = ("unset HISTFILE;"
                    "echo root:%s | chpasswd;"
                    "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;"
                    "history -c") % vnc_password
        root_enable_script = ScriptDeployment(script=commands, delete=True)
        existing = kwargs.get('deploy', None)
        if existing:
            if isinstance(existing, (ScriptDeployment, SSHKeyDeployment)):
                # Wrap both steps so the caller's step runs first.
                deployment = MultiStepDeployment([existing,
                                                  root_enable_script])
            elif isinstance(existing, MultiStepDeployment):
                deployment = existing
                deployment.add(root_enable_script)
        else:
            deployment = root_enable_script
        kwargs['deploy'] = deployment

    if not kwargs.get('ssh_username', None):
        kwargs['ssh_username'] = '******'

    return super(ElasticStackBaseNodeDriver, self).deploy_node(**kwargs)
def deploy_node(self, **kwargs):
    """
    Create a new node, and start deployment.

    @inherits: :class:`NodeDriver.deploy_node`

    :keyword    enable_root: If true, root password will be set to
                             vnc_password (this will enable SSH access)
                             and default 'toor' account will be deleted.
    :type       enable_root: ``bool``
    """
    image = kwargs["image"]
    vnc_password = kwargs.get("vnc_password", None)
    enable_root = kwargs.get("enable_root", False)

    if not vnc_password:
        raise ValueError("You need to provide vnc_password argument "
                         "if you want to use deployment")

    known_drive = image in self._standard_drives
    if known_drive and not self._standard_drives[image]["supports_deployment"]:
        raise ValueError("Image %s does not support deployment"
                         % (image.id))

    if enable_root:
        # Set root's password to the VNC password, remove the default
        # 'toor' account and wipe shell history afterwards.
        root_script = (
            "unset HISTFILE;"
            "echo root:%s | chpasswd;"
            "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;"
            "history -c"
        ) % vnc_password
        enable_step = ScriptDeployment(script=root_script, delete=True)
        user_deploy = kwargs.get("deploy", None)
        if user_deploy:
            if isinstance(user_deploy, (ScriptDeployment, SSHKeyDeployment)):
                deployment = MultiStepDeployment([user_deploy, enable_step])
            elif isinstance(user_deploy, MultiStepDeployment):
                deployment = user_deploy
                deployment.add(enable_step)
        else:
            deployment = enable_step
        kwargs["deploy"] = deployment

    if not kwargs.get("ssh_username", None):
        kwargs["ssh_username"] = "******"

    return super(ElasticStackBaseNodeDriver, self).deploy_node(**kwargs)
def create_node(self):
    """Connect to a known OVH node over SSH.

    NOTE(review): despite its name, this method does not create a node —
    it looks up an existing node by a hard-coded id and opens an SSH
    connection to it.

    SECURITY: API credentials and the node id are hard-coded below; they
    should be rotated and loaded from configuration. (A commented-out
    line with a second, older credential set was removed from this body.)
    """
    Ovh = get_driver('ovh')
    # SECURITY: hard-coded OVH API credentials — move to config/env.
    driver = Ovh('xUEdjyPkmNCJyRhl', 'YNexUap0BWHo0aWk5G3N8rA8QqMPocVy',
                 '407fc2f957624f9f8374cfb70b8fcfc9',
                 'JpSb8OESQDkifwmnC2rWJPtX85XKE2eH')
    node = driver.ex_get_node('cccfddd1-59e5-4729-a29c-b919f02d04cc')
    ip_address = node.public_ips

    # Shell script intended for the remote server.
    SCRIPT = '''#!/usr/bin/env bash
apt-get -y update && apt-get -y install curl
'''
    step = ScriptDeployment(SCRIPT)
    # NOTE(review): `msd` is built but never executed below — presumably
    # it was meant to be run through the deployment machinery; confirm.
    msd = MultiStepDeployment([step])
    logger.info('+vimage %s ' % (driver))

    SSH_CONNECT_TIMEOUT = 1 * 60
    ssh_timeout = 10
    timeout = SSH_CONNECT_TIMEOUT  # currently unused
    max_tries = 3  # currently unused

    ssh_client = SSHClient(hostname=ip_address[0],
                           port=22,
                           username='******',
                           password='',
                           key_files='/var/lib/odoo/.ssh/id_rsa',
                           timeout=ssh_timeout)
    logger.info(ssh_client)
    ssh_client.connect()
    logger.info(ssh_client)
def shine_node(self, node, settings, container):
    """
    Rubs a node

    Runs the "rubs" (post-boot polishing steps) computed for this node
    and appends a per-node status entry to ``self.report``.

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`

    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :param container: the container of this node
    :type container: :class:`plumbery.PlumberyInfrastructure`
    """
    rubs = self._get_rubs(node, settings)
    # Nothing scheduled for this node: record it and bail out early.
    if len(rubs) < 1:
        logging.info('- nothing to do')
        self.report.append({node.name: {
            'status': 'skipped - nothing to do'
        }})
        return

    # hack because the driver does not report public ipv4 accurately:
    # recover the external IP from the domain's NAT rules that map to
    # this node's private address.
    if len(node.public_ips) < 1:
        domain = container.get_network_domain(
            container.blueprint['domain']['name'])
        for rule in container.region.ex_list_nat_rules(domain):
            if rule.internal_ip == node.private_ips[0]:
                node.public_ips.append(rule.external_ip)
                break

    if len(node.public_ips) > 0:
        logging.info("- node is reachable at '{}'".format(
            node.public_ips[0]))

    # No public IP and no beachhead to relay through: give up on this
    # node. NOTE(review): when self.beachheading is truthy we fall
    # through and attempt the rubs anyway — presumably via the
    # beachhead; confirm against _apply_rubs.
    elif not self.beachheading:
        logging.info('- node is unreachable')
        self.report.append({node.name: {
            'status': 'unreachable'
        }})
        return

    # Collect human-readable descriptions alongside the deployment
    # steps ('genius') so the report can list what was attempted.
    descriptions = []
    steps = []
    for item in rubs:
        descriptions.append(item['description'])
        steps.append(item['genius'])

    # Execute all steps as one libcloud MultiStepDeployment.
    if self._apply_rubs(node, MultiStepDeployment(steps)):
        logging.info('- done')
        self.report.append({node.name: {
            'status': 'completed',
            'rubs': descriptions
        }})
    else:
        self.report.append({node.name: {
            'status': 'failed',
            'rubs': descriptions
        }})
def deploy_service(self, fournisseur, host, service):
    """Deploy a service stack and run the host's deploy scripts over SSH.

    Creates a stack named ``<type>-<partner>-<id>`` for the service,
    then executes every script attached to the host on its node via
    libcloud's deployment machinery.

    :param fournisseur: provider record (unused here; kept for API parity)
    :param host: ``bibind.host`` record whose node receives the scripts
    :param service: service record supplying the stack name/template
    :return: the host's node record (``host.nodeid``)
    """
    scripts = service.template_id.script
    hostid = self.env['bibind.host'].browse(host.id)  # NOTE(review): unused
    bibindnode = host.nodeid
    driver = self.run_driver()
    _logger.info('+vscript host %s ' % (driver))
    # BUG FIX: Odoo record ids are integers; concatenating one with
    # strings raised TypeError. str() is a no-op for string ids.
    name = service.type + '-' + service.partner_id.name + '-' + str(service.id)
    stack = driver.ex_deploy_stack(name, description=None,
                                   docker_compose=None, environment=None,
                                   external_id=None, rancher_compose=None,
                                   start=True)
    node = driver.ex_get_node(str(bibindnode.idnode))
    ip_address = node.public_ips
    _logger.info('+vscript host %s ' % (node))
    _logger.info('+vscript ip_address %s ' % (ip_address))
    _logger.info('+vscript ip_address[0] %s ' % (ip_address[0]))

    step = []
    # Shell script to run on the remote server
    for script in host.deploy_scripts_ids:
        _logger.info('+vscript host %s ' % (script))
        myscript = self.env['launch.script'].browse(script.id)
        _logger.info('+vscript host %s ' % (myscript.script_code))
        step.append(ScriptDeployment(str(myscript.script_code)))
    _logger.info('+vscript host %s ' % (step))
    msd = MultiStepDeployment(step)
    _logger.info('+vimage %s ' % (driver))

    SSH_CONNECT_TIMEOUT = 1 * 60
    ssh_timeout = 10
    timeout = SSH_CONNECT_TIMEOUT
    max_tries = 3
    params = {}
    params['deploy'] = msd
    params['ssh_username'] = '******'
    params['ssh_key'] = '/var/lib/odoo/.ssh/id_rsa'
    # deploy_node takes the same base keyword arguments as create_node.
    node = driver._connect_and_run_deployment_script(
        task=msd, node=node,
        ssh_hostname=ip_address[0], ssh_port=22,
        ssh_username='******', ssh_password='',
        ssh_key_file='/var/lib/odoo/.ssh/id_rsa',
        ssh_timeout=ssh_timeout, timeout=timeout, max_tries=max_tries)
    return bibindnode
def _eval_deploy_template(self, node_temp, kwargs):
    """Copy deploy_node() settings from a node template into *kwargs*.

    Scalar SSH/timeout properties are copied (with type coercion) via
    ``get_property`` / ``get_property_list``; the template's deployment
    steps are converted to libcloud deployment objects and stored under
    ``kwargs['deploy']`` as a MultiStepDeployment.
    """
    identity = lambda x: x

    # deploy_node params
    kwargs = get_property(self, node_temp.getSshUsername(),
                          'ssh_username', kwargs, identity)
    kwargs = get_property_list(self, node_temp.getSshAlternateUsernames(),
                               'ssh_alternate_usernames', kwargs, identity)
    # Remaining scalar properties: (value, kwargs key, coercion).
    for value, key, conv in (
            (node_temp.getSshPort(), 'ssh_port', lambda x: int(x)),
            (node_temp.getSshTimeout(), 'ssh_timeout', lambda x: float(x)),
            (node_temp.getSshKey(), 'ssh_key', identity),
            (node_temp.getTimeout(), 'timeout', lambda x: int(x)),
            (node_temp.getMaxTries(), 'max_tries', lambda x: int(x)),
            (node_temp.getSshInterface(), 'ssh_interface', identity)):
        kwargs = get_property(self, value, key, kwargs, conv)

    # get the deployment classes
    deployments = node_temp.getDeploy()
    if deployments:
        msd = MultiStepDeployment()
        for deploy in deployments:
            if isinstance(deploy, SshKeyDeployment):
                msd.add(SSHKeyDeployment(str(deploy.getKey())))
            elif isinstance(deploy, FileDeployment):
                msd.add(FileDeployment(deploy.getSource(),
                                       deploy.getTarget()))
            elif isinstance(deploy, ScriptDeployment):
                arg_lst = self._parse_arg_list(deploy.getArgs())
                msd.add(ScriptDeployment(deploy.getScriptContent(),
                                         arg_lst,
                                         deploy.getName(),
                                         deploy.getDelete()))
            elif isinstance(deploy, ScriptFileDeployment):
                arg_lst = self._parse_arg_list(deploy.getArgs())
                msd.add(ScriptFileDeployment(deploy.getScriptPath(),
                                             arg_lst,
                                             deploy.getName(),
                                             deploy.getDelete()))
        kwargs['deploy'] = msd
    return kwargs
def init(instance, username, password=None, token=None, redeploy=False,
         *args, **kwargs):
    """
    Creates a multi script deployment to prepare and call
    the latest init script

    :param instance: instance being initialized (required)
    :param username: owner of the instance (required)
    :param password: forwarded to the init script
    :param token: auth token; falls back to kwargs['token'], then
                  to the instance id
    :param redeploy: when True, build the shorter redeploy script list
    :raises MissingArgsException: if instance or username is falsy
    :return: a MultiStepDeployment of the assembled scripts
    """
    if not instance:
        raise MissingArgsException("Missing instance argument.")
    if not username:
        # BUG FIX: this guard previously raised "Missing instance
        # argument." — the wrong message for a missing username.
        raise MissingArgsException("Missing username argument.")
    # BUG FIX: the `token` parameter was unconditionally overwritten by
    # kwargs.get('token', ''); honor the explicit parameter first.
    token = token or kwargs.get('token', '')
    if not token:
        token = instance.id

    atmo_init = "/usr/sbin/atmo_init_full.py"
    server_atmo_init = "/api/v1/init_files/v2/atmo_init_full.py"
    logfile = "/var/log/atmo/deploy.log"
    url = "%s%s" % (settings.DEPLOY_SERVER_URL, server_atmo_init)

    # Assemble the individual deployment steps.
    script_init = init_log()
    script_deps = package_deps(logfile, username)
    script_wget = wget_file(atmo_init, url, logfile=logfile, attempts=3)
    script_chmod = chmod_ax_file(atmo_init, logfile)
    script_atmo_init = init_script(atmo_init, username, token, instance,
                                   password, redeploy, logfile)
    if redeploy:
        # Redeploy the instance: skip dependency installation.
        script_atmo_init = redeploy_script(atmo_init, username, instance,
                                           logfile)
        script_list = [script_init,
                       script_wget,
                       script_chmod,
                       script_atmo_init]
    else:
        # Standard install
        script_list = [script_init,
                       script_deps,
                       script_wget,
                       script_chmod,
                       script_atmo_init]

    if not settings.DEBUG:
        # Clean up the uploaded scripts afterwards outside of DEBUG mode.
        script_rm_scripts = rm_scripts(logfile=logfile)
        script_list.append(script_rm_scripts)

    return MultiStepDeployment(script_list)
def deploy_host(self, fournisseur, host):
    """Provision a cloud node for *host* and record it.

    Resolves the host's image/size/location against the provider, runs
    every attached deploy script during node deployment, then stores
    the result as a ``cloud.service.node`` record.
    """
    name = host.name
    bibind_image = host.image
    bibind_size = host.size
    bibind_location = host.location
    _logger.info('+ idimage %s ' % (bibind_image.id_image))
    _logger.info('+ size %s ' % (bibind_size))
    _logger.info('+v location extr %s ' % (bibind_location))

    driver = self.run_bibind_driver()
    img = driver.get_image(bibind_image.id_image)
    _logger.info('+ idimage gggg%s ' % (img))

    # Match provider-side objects against the configured identifiers.
    location = [l for l in driver.list_locations()
                if l.id == bibind_location.id_location][0]
    image = [i for i in driver.list_images()
             if bibind_image.id_image == i.id][0]
    size = [s for s in driver.list_sizes()
            if s.id == bibind_size.id_size][0]

    step = []
    # Shell script to run on the remote server
    for script in host.deploy_scripts_ids:
        _logger.info('+vscript host %s ' % (script))
        myscript = self.env['launch.script'].browse(script.id)
        _logger.info('+vscript host %s ' % (myscript.script_code))
        step.append(ScriptDeployment(str(myscript.script_code)))
    _logger.info('+vscript host %s ' % (step))
    msd = MultiStepDeployment(step)
    _logger.info('+vimage %s ' % (driver))

    params = {
        'deploy': msd,
        'ssh_username': '******',
        'ssh_key': '/var/lib/odoo/.ssh/id_rsa',
    }
    # deploy_node takes the same base keyword arguments as create_node.
    node = driver.deploy_node(name=name, image=image, size=size,
                              location=location, ex_keyname='bibind',
                              **params)
    _logger.info('+node %s ' % (node))

    nodemodel = self.env['cloud.service.node']
    val = self.converti_nodelibcloud_to_nodebibind(node, size)
    bibindnode = nodemodel.create(val)
    return bibindnode
def deploy_config_depot_script(self, host, service, env, param):
    """Run the config-depot bootstrap script on the host's node via SSH.

    :param host: ``bibind.host`` record whose node runs the script
    :param service: service record (currently unused in this body)
    :param env: environment data (currently unused in this body)
    :param param: extra parameters (currently unused in this body)
    :return: the libcloud node the script ran on
    """
    # Shell script to run on the remote server.
    SCRIPT = '''#!/usr/bin/env bash
cd /home && sudo mkdir
apt-get -y update && apt-get -y install puppet
'''
    hostid = self.env['bibind.host'].browse(host.id)  # NOTE(review): unused
    bibindnode = host.nodeid
    driver = self.run_driver()
    _logger.info('+vscript host %s ' % (driver))
    node = driver.ex_get_node(str(bibindnode.idnode))
    ip_address = node.public_ips
    _logger.info('+vscript node %s ' % (node))
    _logger.info('+vscript ip_address %s ' % (ip_address))
    _logger.info('+vscript ip_address[0] %s ' % (ip_address[0]))

    step = []
    # BUG FIX: the original appended ScriptDeployment(str(script.script_code))
    # where `script` was never defined (NameError) and the SCRIPT constant
    # above was never used. Deploy the bootstrap SCRIPT instead.
    step.append(ScriptDeployment(SCRIPT))
    msd = MultiStepDeployment(step)
    _logger.info('+driver %s ' % (step))
    _logger.info('+vscript ip_address[0] %s ' % (msd))

    SSH_CONNECT_TIMEOUT = 1 * 60
    ssh_timeout = 10
    timeout = SSH_CONNECT_TIMEOUT
    max_tries = 3
    params = {}
    params['deploy'] = msd
    params['ssh_username'] = '******'
    params['ssh_key'] = '/var/lib/odoo/.ssh/id_rsa'
    # deploy_node takes the same base keyword arguments as create_node.
    node = driver._connect_and_run_deployment_script(
        task=msd, node=node,
        ssh_hostname=ip_address[0], ssh_port=22,
        ssh_username='******', ssh_password='',
        ssh_key_file='/var/lib/odoo/.ssh/id_rsa',
        ssh_timeout=ssh_timeout, timeout=timeout, max_tries=max_tries)
    return node
def create_target_environment(self):
    """Boot the benchmark VM and remember its public IP.

    Installs the local public SSH key on the node via an
    SSHKeyDeployment step so later benchmark stages can log in.
    Side effects: sets ``self.__instance_name``, ``self.__node`` and
    ``self.__target_env_ip``.
    """
    size = NodeSize(id=self.size_id, name="", ram=None, disk=None,
                    bandwidth=None, price=None, driver="")
    image = NodeImage(id=self.image_id, name="", driver="")

    # FIX: close the key file instead of leaking the handle
    # (previously open(...).read() was never closed).
    with open(os.path.expanduser(self.key_path)) as key_file:
        install_key = SSHKeyDeployment(key_file.read())
    msd = MultiStepDeployment([install_key])

    # Random 6-character suffix keeps concurrent benchmark runs apart.
    self.__instance_name = "Benchmark_" + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(6))
    self.__node = self.__conn.deploy_node(name=self.__instance_name,
                                          image=image,
                                          size=size,
                                          deploy=msd,
                                          ssh_username=self.vm_user,
                                          ssh_key=self.key_path,
                                          ex_keyname=self.key_name)
    logger.info("Instance {0} created with ip {1}".format(
        self.__instance_name, self.__node.public_ips[0]))
    self.__target_env_ip = self.__node.public_ips[0]
def deploy_instance(self, *args, **kwargs):
    """
    Deploy an AWS node.

    Builds a MultiStepDeployment that installs basic packages, fetches
    atmo_init_full.py from the server, marks it executable and runs it,
    then delegates to the parent driver's deploy_instance with SSH/key
    settings injected into kwargs.
    """
    username = self.identity.user.username
    atmo_init = "/usr/sbin/atmo_init_full.py"
    server_atmo_init = "/init_files/30/atmo_init_full.py"
    script_deps = ScriptDeployment(
        "sudo apt-get install -y emacs vim wget")
    # Download the init script onto the node from the configured server.
    script_wget = ScriptDeployment(
        "sudo wget -O %s %s%s" %
        (atmo_init, settings.SERVER_URL, server_atmo_init))
    script_chmod = ScriptDeployment("sudo chmod a+x %s" % atmo_init)
    instance_token = kwargs.get('token', '')
    # Assemble the atmo_init invocation; stderr goes to a log file.
    awesome_atmo_call = "sudo %s --service_type=%s --service_url=%s"
    awesome_atmo_call += " --server=%s --user_id=%s --token=%s"
    awesome_atmo_call += " --name=%s &> %s"
    awesome_atmo_call %= (atmo_init,
                          "instance_service_v1",
                          settings.INSTANCE_SERVICE_URL,
                          settings.SERVER_URL,
                          username,
                          instance_token,
                          kwargs.get('name', ''),
                          '/var/log/atmo_init_full.err')
    logger.debug(awesome_atmo_call)
    str_awesome_atmo_call = str(awesome_atmo_call)
    # kludge: weirdness without the str cast...
    script_atmo_init = ScriptDeployment(str_awesome_atmo_call)
    private_key = ("/opt/dev/atmosphere/extras/ssh/id_rsa")
    scripts = [script_deps,
               script_wget,
               script_chmod,
               script_atmo_init]
    # Script artifacts default to /root; relocate them for the ubuntu
    # user the deployment logs in as.
    for s in scripts:
        logger.debug(s.name)
        s.name = s.name.replace('/root', '/home/ubuntu')
        logger.debug(s.name)
    msd = MultiStepDeployment(scripts)
    # Inject deployment/SSH settings for the parent driver.
    kwargs.update({'ex_keyname': 'dalloway-key'})
    kwargs.update({'ssh_username': '******'})
    kwargs.update({'ssh_key': private_key})
    kwargs.update({'deploy': msd})
    kwargs.update({'timeout': 400})
    instance = super(AWSDriver, self).deploy_instance(*args, **kwargs)
    # NOTE(review): `created` is computed but unused since the email
    # hook below was removed — presumably kept for the rtwo port.
    created = datetime.strptime(instance.extra['created'],
                                "%Y-%m-%dT%H:%M:%SZ")
    # NOTE: Removed for rtwo port. Moved to service tasks.
    # send_instance_email(username, instance.id, instance.ip,
    #                     created, username)
    return instance
def deploy(self, image_id, size_idx=0, location_idx=0, name='test'):
    """Linode supports libcloud's `libcloud.compute.deployment`.
    Pass an `SSHKeyDeployment` to `self.driver.deploy_node`.

    :param image_id: provider image id to boot
    :param size_idx: index into the driver's size list
    :param location_idx: index into the driver's location list
    :param name: node label
    :return: the deployed node converted via ``node2dict``
    """
    # FIX: close the public-key file instead of leaking the handle.
    with open(self.ssh_public_key) as f:
        sd = SSHKeyDeployment(f.read())
    script = ScriptDeployment("/bin/true")  # NOP
    msd = MultiStepDeployment([sd, script])

    # Minimal stand-in object: deploy_node only needs ``image.id``.
    class Image:
        id = image_id

    size = self.driver.list_sizes()[size_idx]
    location = self.driver.list_locations()[location_idx]
    return node2dict(
        self.driver.deploy_node(name=name, image=Image, size=size,
                                location=location, deploy=msd))
def water_machines(seed_profile, uuids):
    """ Bootstrap with salt """
    matching = []
    if seed_profile.driver == 'aws':
        cloud = obtain_driver(seed_profile)
        matching = [n for n in cloud.list_nodes()
                    if n.name == seed_profile.name]

    for node in matching:
        log = logging.getLogger('*'.join([__name__, node.name]))
        # Block until the node is running; wait_until_running returns
        # (node, ip_list) pairs — take the first (only) one.
        node, private_ips = node.driver.wait_until_running(
            nodes=[node], ssh_interface="private_ips")[0]

        # Upload each init script into the AMI user's home directory.
        steps = []
        for script in seed_profile.init_scripts:
            log.warn("SCRIPT: %s" % script)
            steps.append(FileDeployment(
                find_script(script),
                target="/home/%s/%s" % (seed_profile.ami_user, script),
            ))

        deploy_msd_to_node(node, MultiStepDeployment(steps),
                           seed_profile.keypair['local_path'])
def deploynode(self, plan, imageid, name):
    """Deploy a node, pushing the local SSH key and the plan's apache
    entry-point script to it.

    :param plan: plan dict with 'Scripts' and 'Files' sections
    :param imageid: provider image id to boot
    :param name: name for the new node

    (Removed commented-out dead code that duplicated the script steps.)
    """
    ssh_keypath = os.path.expanduser('~/.ssh/id_rsa')
    with open(ssh_keypath + ".pub") as f:
        public_key = f.read()
    key = SSHKeyDeployment(public_key)

    images = NodeImage(id=imageid, name=None, driver=self.driver)
    sizes = self.driver.list_sizes()

    # Run the plan's apache entry-point script after installing the key.
    entry = plan['Scripts']['apacheDeploy']['EntryPoint']
    script = ScriptDeployment(plan['Files'][entry]['Body'])
    msd = MultiStepDeployment([key, script])

    try:
        self.driver.deploy_node(name=name, image=images, size=sizes[0],
                                ssh_key=ssh_keypath,
                                ssh_username='******',
                                deploy=msd, timeout=1800,
                                ex_keyname="avni_key")
    except NotImplementedError:
        print("Deploy Node is not implemented for this driver")
def upload(self):
    """ Registers the image in each EC2 region.

    Flow: boot a utility instance in the origin region, stream the
    .raw.xz image onto a secondary EBS volume, snapshot the volume,
    register the snapshot as an AMI, boot and smoke-test the AMI, then
    (on success) copy it to the remaining regions and make all copies
    public. Returns 0 on success, 1 on any handled failure.
    """
    log.info('EC2 upload process started')

    # Get a starting utility AMI in some region to use as an origin
    ami = self.util_amis[0]  # Select the starting AMI to begin
    self.destination = 'EC2 ({region})'.format(region=ami['region'])
    fedimg.messenger.message('image.upload', self.build_name,
                             self.destination, 'started')

    try:
        # Connect to the region through the appropriate libcloud driver
        cls = ami['driver']
        driver = cls(fedimg.AWS_ACCESS_ID, fedimg.AWS_SECRET_KEY)

        # select the desired node attributes
        sizes = driver.list_sizes()
        reg_size_id = 'm1.xlarge'
        # check to make sure we have access to that size node
        # TODO: Add try/except if for some reason the size isn't
        # available?
        size = [s for s in sizes if s.id == reg_size_id][0]
        base_image = NodeImage(id=ami['ami'], name=None, driver=driver)

        # Name the utility node
        name = 'Fedimg AMI builder'

        # Block device mapping for the utility node
        # (Requires this second volume to write the image to for
        # future registration.)
        mappings = [{'VirtualName': None,  # cannot specify with Ebs
                     'Ebs': {'VolumeSize': fedimg.AWS_UTIL_VOL_SIZE,
                             'VolumeType': self.vol_type,
                             'DeleteOnTermination': 'false'},
                     'DeviceName': '/dev/sdb'}]

        # Read in the SSH key
        with open(fedimg.AWS_PUBKEYPATH, 'rb') as f:
            key_content = f.read()

        # Add key to authorized keys for root user
        step_1 = SSHKeyDeployment(key_content)

        # Add script for deployment
        # Device becomes /dev/xvdb on instance
        script = "touch test"  # this isn't so important for the util inst.
        step_2 = ScriptDeployment(script)

        # Create deployment object (will set up SSH key and run script)
        msd = MultiStepDeployment([step_1, step_2])

        log.info('Deploying utility instance')

        # Retry loop: fix missing keypair / security group and retry.
        while True:
            try:
                self.util_node = driver.deploy_node(
                    name=name, image=base_image, size=size,
                    ssh_username=fedimg.AWS_UTIL_USER,
                    ssh_alternate_usernames=[''],
                    ssh_key=fedimg.AWS_KEYPATH,
                    deploy=msd,
                    kernel_id=ami['aki'],
                    ex_metadata={'build': self.build_name},
                    ex_keyname=fedimg.AWS_KEYNAME,
                    ex_security_groups=['ssh'],
                    ex_ebs_optimized=True,
                    ex_blockdevicemappings=mappings)
            except KeyPairDoesNotExistError:
                # The keypair is missing from the current region.
                # Let's install it and try again.
                log.exception('Adding missing keypair to region')
                driver.ex_import_keypair(fedimg.AWS_KEYNAME,
                                         fedimg.AWS_PUBKEYPATH)
                continue
            except Exception as e:
                # We might have an invalid security group, aka the 'ssh'
                # security group doesn't exist in the current region. The
                # reason this is caught here is because the related
                # exception that prints InvalidGroup.NotFound is, for
                # some reason, a base exception.
                if 'InvalidGroup.NotFound' in e.message:
                    log.exception('Adding missing security'
                                  'group to region')
                    # Create the ssh security group
                    driver.ex_create_security_group('ssh', 'ssh only')
                    driver.ex_authorize_security_group('ssh', '22', '22',
                                                       '0.0.0.0/0')
                    continue
                else:
                    raise
            break

        # Wait until the utility node has SSH running
        while not ssh_connection_works(fedimg.AWS_UTIL_USER,
                                       self.util_node.public_ips[0],
                                       fedimg.AWS_KEYPATH):
            sleep(10)

        log.info('Utility node started with SSH running')

        # Connect to the utility node via SSH
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(self.util_node.public_ips[0],
                       username=fedimg.AWS_UTIL_USER,
                       key_filename=fedimg.AWS_KEYPATH)

        # Curl the .raw.xz file down from the web, decompressing it
        # and writing it to the secondary volume defined earlier by
        # the block device mapping.
        # curl with -L option, so we follow redirects
        cmd = "sudo sh -c 'curl -L {0} | xzcat > /dev/xvdb'".format(
            self.raw_url)
        chan = client.get_transport().open_session()
        chan.get_pty()  # Request a pseudo-term to get around requiretty

        log.info('Executing utility script')

        # Run the above command and wait for its exit status
        chan.exec_command(cmd)
        status = chan.recv_exit_status()
        if status != 0:
            # There was a problem with the SSH command
            log.error('Problem writing volume with utility instance')

            data = "(no data)"
            if chan.recv_ready():
                data = chan.recv(1024 * 32)
            fedimg.messenger.message('image.upload', self.build_name,
                                     self.destination, 'failed',
                                     extra={'data': data})

            raise EC2UtilityException(
                "Problem writing image to utility instance volume. "
                "Command exited with status {0}.\n"
                "command: {1}\n"
                "output: {2}".format(status, cmd, data))

        client.close()

        # Get volume name that image was written to
        vol_id = [x['ebs']['volume_id']
                  for x in self.util_node.extra['block_device_mapping']
                  if x['device_name'] == '/dev/sdb'][0]

        log.info('Destroying utility node')

        # Terminate the utility instance
        driver.destroy_node(self.util_node)

        # Wait for utility node to be terminated
        while ssh_connection_works(fedimg.AWS_UTIL_USER,
                                   self.util_node.public_ips[0],
                                   fedimg.AWS_KEYPATH):
            sleep(10)

        # Wait a little longer since loss of SSH connectivity doesn't mean
        # that the node's destroyed
        # TODO: Check instance state rather than this lame sleep thing
        sleep(45)

        # Take a snapshot of the volume the image was written to
        self.util_volume = [v for v in driver.list_volumes()
                            if v.id == vol_id][0]
        snap_name = 'fedimg-snap-{0}'.format(self.build_name)

        log.info('Taking a snapshot of the written volume')

        self.snapshot = driver.create_volume_snapshot(self.util_volume,
                                                      name=snap_name)
        snap_id = str(self.snapshot.id)

        while self.snapshot.extra['state'] != 'completed':
            # Re-obtain snapshot object to get updates on its state
            self.snapshot = [s for s in driver.list_snapshots()
                             if s.id == snap_id][0]
            sleep(10)

        log.info('Snapshot taken')

        # Delete the volume now that we've got the snapshot
        driver.destroy_volume(self.util_volume)
        # make sure Fedimg knows that the vol is gone
        self.util_volume = None

        log.info('Destroyed volume')

        # Actually register image
        log.info('Registering image as an AMI')

        if self.virt_type == 'paravirtual':
            image_name = "{0}-{1}-PV-{2}-0".format(self.build_name,
                                                   ami['region'],
                                                   self.vol_type)
            test_size_id = 'm1.xlarge'
            # test_amis will include AKIs of the appropriate arch
            registration_aki = [a['aki'] for a in self.test_amis
                                if a['region'] == ami['region']][0]
            reg_root_device_name = '/dev/sda'
        else:  # HVM
            image_name = "{0}-{1}-HVM-{2}-0".format(self.build_name,
                                                    ami['region'],
                                                    self.vol_type)
            test_size_id = 'm3.2xlarge'
            # Can't supply a kernel image with HVM
            registration_aki = None
            reg_root_device_name = '/dev/sda1'

        # For this block device mapping, we have our volume be
        # based on the snapshot's ID
        mapping = [{'DeviceName': reg_root_device_name,
                    'Ebs': {'SnapshotId': snap_id,
                            'VolumeSize': fedimg.AWS_TEST_VOL_SIZE,
                            'VolumeType': self.vol_type,
                            'DeleteOnTermination': 'true'}}]

        # Avoid duplicate image name by incrementing the number at the
        # end of the image name if there is already an AMI with that name.
        # TODO: This process could be written nicer.
        while True:
            try:
                if self.dup_count > 0:
                    # Remove trailing '-0' or '-1' or '-2' or...
                    image_name = '-'.join(image_name.split('-')[:-1])
                    # Re-add trailing dup number with new count
                    image_name += '-{0}'.format(self.dup_count)
                # Try to register with that name
                self.images.append(driver.ex_register_image(
                    image_name,
                    description=self.image_desc,
                    root_device_name=reg_root_device_name,
                    block_device_mapping=mapping,
                    virtualization_type=self.virt_type,
                    kernel_id=registration_aki,
                    architecture=self.image_arch))
            except Exception as e:
                # Check if the problem was a duplicate name
                if 'InvalidAMIName.Duplicate' in e.message:
                    # Keep trying until an unused name is found
                    self.dup_count += 1
                    continue
                else:
                    raise
            break

        log.info('Completed image registration')

        # Emit success fedmsg
        # TODO: Can probably move this into the above try/except,
        # to avoid just dumping all the messages at once.
        for image in self.images:
            fedimg.messenger.message('image.upload', self.build_name,
                                     self.destination, 'completed',
                                     extra={'id': image.id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type})

        # Now, we'll spin up a node of the AMI to test:

        # Add script for deployment
        # Device becomes /dev/xvdb on instance
        script = "touch test"
        step_2 = ScriptDeployment(script)

        # Create deployment object (reuses step_1's SSH key step)
        msd = MultiStepDeployment([step_1, step_2])

        log.info('Deploying test node')

        # Pick a name for the test instance
        name = 'Fedimg AMI tester'

        # Select the appropriate size for the instance
        size = [s for s in sizes if s.id == test_size_id][0]

        # Alert the fedmsg bus that an image test is starting
        fedimg.messenger.message('image.test', self.build_name,
                                 self.destination, 'started',
                                 extra={'id': self.images[0].id,
                                        'virt_type': self.virt_type,
                                        'vol_type': self.vol_type})

        # Actually deploy the test instance
        try:
            self.test_node = driver.deploy_node(
                name=name, image=self.images[0], size=size,
                ssh_username=fedimg.AWS_TEST_USER,
                ssh_alternate_usernames=['root'],
                ssh_key=fedimg.AWS_KEYPATH,
                deploy=msd,
                kernel_id=registration_aki,
                ex_metadata={'build': self.build_name},
                ex_keyname=fedimg.AWS_KEYNAME,
                ex_security_groups=['ssh'],
            )
        except Exception as e:
            fedimg.messenger.message('image.test', self.build_name,
                                     self.destination, 'failed',
                                     extra={'id': self.images[0].id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type})
            raise EC2AMITestException("Failed to boot test node %r." % e)

        # Wait until the test node has SSH running
        while not ssh_connection_works(fedimg.AWS_TEST_USER,
                                       self.test_node.public_ips[0],
                                       fedimg.AWS_KEYPATH):
            sleep(10)

        log.info('Starting AMI tests')

        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(self.test_node.public_ips[0],
                       username=fedimg.AWS_TEST_USER,
                       key_filename=fedimg.AWS_KEYPATH)

        # Run /bin/true on the test instance as a simple "does it
        # work" test
        cmd = "/bin/true"
        chan = client.get_transport().open_session()
        chan.get_pty()  # Request a pseudo-term to get around requiretty

        log.info('Running AMI test script')

        chan.exec_command(cmd)

        # Again, wait for the test command's exit status
        if chan.recv_exit_status() != 0:
            # There was a problem with the SSH command
            log.error('Problem testing new AMI')

            data = "(no data)"
            if chan.recv_ready():
                data = chan.recv(1024 * 32)
            fedimg.messenger.message('image.test', self.build_name,
                                     self.destination, 'failed',
                                     extra={'id': self.images[0].id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type,
                                            'data': data})

            raise EC2AMITestException("Tests on AMI failed.\n"
                                      "output: %s" % data)

        client.close()

        log.info('AMI test completed')
        fedimg.messenger.message('image.test', self.build_name,
                                 self.destination, 'completed',
                                 extra={'id': self.images[0].id,
                                        'virt_type': self.virt_type,
                                        'vol_type': self.vol_type})

        # Let this EC2Service know that the AMI test passed, so
        # it knows how to proceed.
        self.test_success = True

        log.info('Destroying test node')

        # Destroy the test node
        driver.destroy_node(self.test_node)

        # Make AMIs public
        for image in self.images:
            driver.ex_modify_image_attribute(
                image, {'LaunchPermission.Add.1.Group': 'all'})

    except EC2UtilityException as e:
        log.exception("Failure")
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    except EC2AMITestException as e:
        log.exception("Failure")
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    except DeploymentException as e:
        log.exception("Problem deploying node: {0}".format(e.value))
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    except Exception as e:
        # Just give a general failure message.
        log.exception("Unexpected exception")
        if fedimg.CLEAN_UP_ON_FAILURE:
            self._clean_up(driver,
                           delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
        return 1

    else:
        self._clean_up(driver)

    if self.test_success:
        # Copy the AMI to every other region if tests passed
        copied_images = list()  # completed image copies (ami: image)

        # Use the AMI list as a way to cycle through the regions
        for ami in self.test_amis[1:]:  # we don't need the origin region
            # Choose an appropriate destination name for the copy
            alt_dest = 'EC2 ({region})'.format(region=ami['region'])

            fedimg.messenger.message('image.upload', self.build_name,
                                     alt_dest, 'started')

            # Connect to the libcloud EC2 driver for the region we
            # want to copy into
            alt_cls = ami['driver']
            alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                 fedimg.AWS_SECRET_KEY)

            # Construct the full name for the image copy
            if self.virt_type == 'paravirtual':
                image_name = "{0}-{1}-PV-{2}-0".format(self.build_name,
                                                       ami['region'],
                                                       self.vol_type)
            else:  # HVM
                image_name = "{0}-{1}-HVM-{2}-0".format(self.build_name,
                                                        ami['region'],
                                                        self.vol_type)

            log.info('AMI copy to {0} started'.format(ami['region']))

            # Avoid duplicate image name by incrementing the number at
            # the end of the image name if there is already an AMI with
            # that name.
            # TODO: Again, this could be written better
            while True:
                try:
                    if self.dup_count > 0:
                        # Remove trailing '-0' or '-1' or '-2' or...
                        image_name = '-'.join(image_name.split('-')[:-1])
                        # Re-add trailing dup number with new count
                        image_name += '-{0}'.format(self.dup_count)

                    # Actually run the image copy from the origin region
                    # to the current region.
                    for image in self.images:
                        image_copy = alt_driver.copy_image(
                            image,
                            self.test_amis[0]['region'],
                            name=image_name,
                            description=self.image_desc)
                        # Add the image copy to a list so we can work
                        # with it later.
                        copied_images.append(image_copy)

                        log.info('AMI {0} copied to AMI {1}'.format(
                            image, image_name))

                except Exception as e:
                    # Check if the problem was a duplicate name
                    if 'InvalidAMIName.Duplicate' in e.message:
                        # Keep trying until an unused name is found.
                        # This probably won't trigger, since it seems
                        # like EC2 doesn't mind duplicate AMI names
                        # when they are being copied, only registered.
                        # Strange, but apprently true.
                        self.dup_count += 1
                        continue
                    else:
                        # TODO: Catch a more specific exception
                        log.exception('Image copy to {0} failed'.format(
                            ami['region']))
                        fedimg.messenger.message('image.upload',
                                                 self.build_name,
                                                 alt_dest, 'failed')
                break

        # Now cycle through and make all of the copied AMIs public
        # once the copy process has completed. Again, use the test
        # AMI list as a way to have region and arch data:
        # We don't need the origin region, since the AMI was made there:
        self.test_amis = self.test_amis[1:]
        for image in copied_images:
            ami = self.test_amis[copied_images.index(image)]
            alt_cls = ami['driver']
            alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                 fedimg.AWS_SECRET_KEY)

            # Get an appropriate name for the region in question
            alt_dest = 'EC2 ({region})'.format(region=ami['region'])

            # Need to wait until the copy finishes in order to make
            # the AMI public.
            while True:
                try:
                    # Make the image public
                    alt_driver.ex_modify_image_attribute(
                        image, {'LaunchPermission.Add.1.Group': 'all'})
                except Exception as e:
                    if 'InvalidAMIID.Unavailable' in e.message:
                        # The copy isn't done, so wait 20 seconds
                        # and try again.
                        sleep(20)
                        continue
                break

            log.info('Made {0} public ({1}, {2}, {3})'.format(
                image.id, self.build_name, self.virt_type,
                self.vol_type))
            fedimg.messenger.message('image.upload', self.build_name,
                                     alt_dest, 'completed',
                                     extra={'id': image.id,
                                            'virt_type': self.virt_type,
                                            'vol_type': self.vol_type})

    return 0
def main(
    distro,
    test_type,
    from_version,
    to_version,
    python_package,
    installer_script_url,
    additional_packages=None,
    destroy_node=False,
    verbose=False,
):
    # type: (str, str, str, str, str, str, str, bool, bool) -> None
    """Provision an EC2 node and run the agent install/upgrade test on it.

    Renders the per-distro test script template, wraps it (plus any config /
    package file uploads) in a libcloud MultiStepDeployment, deploys a node
    and reports the script's stdout/stderr and exit status.

    :param distro: Key into EC2_DISTRO_DETAILS_MAP selecting AMI, size and
        SSH user.
    :param test_type: "install" or "upgrade".
    :param from_version: Package installed first for upgrade tests.
    :param to_version: Package installed (install) or upgraded to (upgrade).
    :param python_package: OS Python package name used by the test script.
    :param installer_script_url: Local file path or URL of installer script.
    :param additional_packages: Extra OS packages to install, or None.
    :param verbose: Pass the verbose flag through to the rendered script.
    :param destroy_node: When True, destroy the node when the run finishes
        (and register an at-exit cleanup hook).

    Exits the process with status 1 when the deployment script fails.
    """
    # deployment objects for package files will be stored here.
    file_upload_steps = []

    # We always upload all the mock test configs from tests/ami/configs/ directory to a remote
    # server.
    # Those configs are used during various checks and tests. Uploading the configs is much less
    # messy compared to manipulating the configs using sed on the server.
    file_names = os.listdir(MOCK_CONFIGS_DIRECTORY)
    for file_name in file_names:
        config_file_path = os.path.join(MOCK_CONFIGS_DIRECTORY, file_name)
        file_upload_step = _create_config_file_deployment_step(config_file_path)
        file_upload_steps.append(file_upload_step)

    # Upload auxiliary files from tests/ami/files/
    file_names = os.listdir(TEST_FILES_DIRECTORY)
    for file_name in file_names:
        file_path = os.path.join(TEST_FILES_DIRECTORY, file_name)
        file_upload_step = _create_file_deployment_step(file_path, "ca_certs")
        file_upload_steps.append(file_upload_step)

    if test_type == "install":
        install_package_source = to_version
    else:
        # install package is specified in from-version in case of upgrade
        install_package_source = from_version

    # prepare data for install_package
    install_package_source_type = _get_source_type(install_package_source)

    if install_package_source_type == "file":
        # create install package file deployment object.
        file_upload_steps.append(
            _create_file_deployment_step(install_package_source, "install_package"))

    install_package_info = {
        "type": install_package_source_type,
        "source": install_package_source,
    }

    upgrade_package_info = None
    # prepare data for upgrade_package if it is specified.
    if test_type == "upgrade":
        upgrade_package_source = to_version
        upgrade_package_source_type = _get_source_type(upgrade_package_source)

        if upgrade_package_source_type == "file":
            # create upgrade package file deployment object.
            file_upload_steps.append(
                _create_file_deployment_step(to_version, "upgrade_package"))

        upgrade_package_info = {
            "type": upgrade_package_source_type,
            "source": upgrade_package_source,
        }

    distro_details = EC2_DISTRO_DETAILS_MAP[distro]

    if distro.lower().startswith("windows"):
        package_type = "windows"
        script_extension = "ps1"
    else:
        package_type = ("deb" if distro.startswith("ubuntu")
                        or distro.startswith("debian") else "rpm")
        script_extension = "sh"

    script_filename = "test_%s.%s.j2" % (package_type, script_extension)
    script_file_path = os.path.join(SCRIPTS_DIR, script_filename)

    with open(script_file_path, "r") as fp:
        script_content = fp.read()

    cat_logs_script_file_path = os.path.join(
        SCRIPTS_DIR, "cat_logs.%s" % (script_extension))

    with open(cat_logs_script_file_path, "r") as fp:
        cat_logs_script_content = fp.read()

    installer_script_info = {
        "source": installer_script_url or DEFAULT_INSTALLER_SCRIPT_URL
    }
    if os.path.exists(installer_script_url):
        installer_script_info["type"] = "file"
        file_upload_steps.append(
            _create_file_deployment_step(installer_script_url,
                                         "install-scalyr-agent-2"))
    else:
        if not _verify_url_exists(installer_script_url):
            raise ValueError(
                'Failed to retrieve installer script from "%s". Ensure that the URL is correct.'
                % (installer_script_url))
        installer_script_info["type"] = "url"

    rendered_template = render_script_template(
        script_template=script_content,
        distro_name=distro,
        distro_details=distro_details,
        python_package=python_package,
        test_type=test_type,
        install_package=install_package_info,
        upgrade_package=upgrade_package_info,
        installer_script_url=installer_script_info,
        additional_packages=additional_packages,
        verbose=verbose,
    )

    # TODO: Lower those timeouts when upstream yum related issues or similar start to stabilize.
    # All AMI tests should take less than 5 minutes, but in the last days (dec 1, 2020), they
    # started to take 10 minutes with multiple timeouts.
    if "windows" in distro.lower():
        deploy_step_timeout = 440  # 320
        deploy_overall_timeout = 460  # 320
        cat_step_timeout = 10
        max_tries = 3
    else:
        deploy_step_timeout = 320  # 260
        deploy_overall_timeout = 340  # 280
        max_tries = 3
        cat_step_timeout = 5

    remote_script_name = "deploy.{0}".format(script_extension)

    test_package_step = ScriptDeployment(rendered_template,
                                         name=remote_script_name,
                                         timeout=deploy_step_timeout)

    if file_upload_steps:
        # Package files must be uploaded to the instance directly.
        file_upload_steps.append(test_package_step)  # type: ignore
        deployment = MultiStepDeployment(add=file_upload_steps)  # type: ignore
    else:
        deployment = MultiStepDeployment(add=test_package_step)  # type: ignore

    # Add a step which always cats agent.log file at the end. This helps us troubleshoot failures.
    if "windows" not in distro.lower():
        # NOTE: We don't add it on Windows since it tends to time out often
        cat_logs_step = ScriptDeployment(cat_logs_script_content,
                                         timeout=cat_step_timeout)
        deployment.add(cat_logs_step)
    else:
        cat_logs_step = None  # type: ignore

    driver = get_libcloud_driver()

    size = NodeSize(
        distro_details["size_id"],
        distro_details["size_id"],
        0,
        0,
        0,
        0,
        driver,
    )
    image = NodeImage(distro_details["image_id"],
                      distro_details["image_name"], driver, None)

    # Sanitize the branch name so it is safe to embed in an EC2 node name.
    circle_branch_name = compat.os_environ_unicode.get("CIRCLE_BRANCH", "unknown")
    circle_branch_name = circle_branch_name.replace("/", "_").replace("-", "_")
    circle_build_num = compat.os_environ_unicode.get("CIRCLE_BUILD_NUM",
                                                     random.randint(0, 1000))

    name = "%s-automated-agent-tests-%s-branch-%s-build-%s" % (
        distro,
        test_type,
        circle_branch_name,
        circle_build_num,
    )

    print("Starting node provisioning and tests...")

    start_time = int(time.time())

    kwargs = {}
    if destroy_node:
        kwargs["at_exit_func"] = destroy_node_and_cleanup

    try:
        node = driver.deploy_node(name=name,
                                  image=image,
                                  size=size,
                                  ssh_key=PRIVATE_KEY_PATH,
                                  ex_keyname=KEY_NAME,
                                  ex_security_groups=SECURITY_GROUPS,
                                  ssh_username=distro_details["ssh_username"],
                                  ssh_timeout=20,
                                  max_tries=max_tries,
                                  wait_period=15,
                                  timeout=deploy_overall_timeout,
                                  deploy=deployment,
                                  **kwargs)
    except DeploymentError as e:
        print("Deployment failed: %s" % (str(e)))
        node = e.node
        success = False
        test_package_step.exit_status = 1
        # On failure the step may not have run at all, so these can be None.
        stdout = getattr(e.original_error, "stdout", None)
        stderr = getattr(e.original_error, "stderr", None)
    else:
        success = test_package_step.exit_status == 0
        stdout = test_package_step.stdout
        stderr = test_package_step.stderr

    if cat_logs_step and cat_logs_step.stdout:
        # Guard against None stdout from the DeploymentError path above.
        stdout = (stdout or "") + "\n" + cat_logs_step.stdout

    if cat_logs_step and cat_logs_step.stderr:
        # Fix: the cat-logs step's stderr was previously appended to stdout,
        # which hid it from the "stderr:" output below.
        stderr = (stderr or "") + "\n" + cat_logs_step.stderr

    duration = int(time.time()) - start_time

    if success:
        print("Script successfully completed.")
    else:
        print("Script failed.")

    print("stdout: %s" % (stdout))
    print("stderr: %s" % (stderr))
    print("exit_code: %s" % (test_package_step.exit_status))
    print("succeeded: %s" % (str(success)))
    print("duration: %s seconds" % (duration))

    # We don't destroy node if destroy_node is False (e.g. to aid with troubleshooting on failure
    # and similar)
    if node and destroy_node:
        destroy_node_and_cleanup(driver=driver, node=node)

    if not success:
        sys.exit(1)
def cmd_deploy_node(self, options, arguments):
    """
    Create, deploy and bootstrap a new node and print its details.

    If a node with the same name already exists, print its details
    instead and stop.

    Usage: %prog [options] deploy-node --name <fqdn>
    """
    # --name is mandatory; the fqdn is also used to derive the DNS record.
    if options.name is None:
        self.fail("deploy-node requires the name of the node to create")
    # NOTE(review): bare attribute access below is a no-op, presumably left
    # over from an earlier edit — confirm and remove.
    options.name
    # Short-circuit when a node with this name already exists.
    node = self.find_node(options.name)
    if (node):
        self.succeed(message="Node \"%s\" already exists!" % options.name,
                     data=node)
    # Resolve image and flavor ids to driver objects; fail fast when invalid.
    image = self.find_image(options.image)
    if (not image):
        print options.image
        self.fail("Missing or invalid image id provided.")
    flavorId = self.find_flavor(options.flavorId)
    if (not flavorId):
        print options.flavorId
        self.fail("Missing or invalid flavor id provided.")
    # Resolve every requested network id; any failure aborts the command.
    network_objects = []
    if (not self.options.networks):
        self.fail("Missing networks.")
    else:
        for networkId in self.options.networks:
            try:
                network = self.find_network(networkId)
                if (not network):
                    print networkId
                    self.fail("Missing or invalid network id provided.")
                network_objects.append(network)
            except Exception as e:
                self.fail("Failed to retrieve networks")
    # read your public key in
    # Note: This key will be added to root's authorized_keys
    # (/root/.ssh/authorized_keys)
    sd = SSHKeyDeployment(
        open(os.path.expanduser(options.public_key)).read())
    # a simple script to install puppet post boot, can be much more
    # complicated.
    script = ScriptDeployment(options.script)
    # a task that first installs the ssh key, and then runs the script
    msd = MultiStepDeployment([sd, script])
    try:
        # deploy our node using multistep deployment strategy
        node = self.connection_compute.deploy_node(
            name=options.name,
            image=image,
            size=flavorId,
            deploy=msd,
            networks=network_objects)
        print "deploy success"
        # gets the hostname and domainname from fqdn
        hostname, domainname = options.name.split('.', 1)
        # see if zone already exists
        zone = self.find_zone(domainname)
        # if zone instance does not exist, create it
        if (not zone):
            zone = self.connection_dns.create_zone(domain=domainname)
        # create an A record type wth the public ip of the created node for
        # our zone
        record = zone.create_record(name=hostname,
                                    type=RecordType.A,
                                    data=node.public_ips[0])
    except Exception as e:
        self.fail("Exception: %s" % e)
    # decide if we wanted to wait for a reference of the running node
    if self.options.wait:
        running_node = self.wait_for_running_node(
            node.id, timeout=self.options.wait)
    else:
        running_node = None
    # if the node was created
    if (node):
        # if the running node exists set the node state to running
        if (running_node):
            node.state = running_node.state
        self.succeed(message="Node \"%s\" deployed!" % options.name,
                     data=node,
                     data_type='node')
existing_lb.destroy() except Exception: print " Does not exist" def matches(self, tag): return True """ Standard defaults that should be used in the absence of specific node settings. Can be overriden in environment.py. """ user_public_ssh_key = os.path.expanduser("~/.ssh/id_rsa.pub") if not os.path.exists(user_public_ssh_key): raise Exception( "A public SSH key is required for SSH access to nodes, but could not be found at: %s. Please create a public/private keypair and try again." % user_public_ssh_key) defaults = { "os": "Ubuntu 11.10", "size": Size(ram=256, disk=10), "deployment": MultiStepDeployment([ # Note: This key will be added to the authorized keys for the root user # (/root/.ssh/authorized_keys) SSHKeyDeployment(open(user_public_ssh_key).read()), LoggedScriptDeployment("apt-get update") ]) }
def shine_node(self, node, settings, container):
    """
    Polishes one node so that it matches the fittings plan.

    :param node: the node to be polished
    :type node: :class:`libcloud.compute.base.Node`

    :param settings: the fittings plan for this node
    :type settings: ``dict``

    :param container: the container of this node
    :type container: :class:`plumbery.PlumberyInfrastructure`
    """
    plogging.info("Preparing node '{}'".format(settings['name']))

    if node is None:
        plogging.error("- not found")
        return

    # Poll until a pending start action clears, for roughly 300 seconds.
    budget = 300
    interval = 6
    while node.extra['status'].action == 'START_SERVER':
        time.sleep(interval)
        node = self.nodes.get_node(node.name)
        budget -= interval
        if budget < 0:
            break

    if node.state != NodeState.RUNNING:
        plogging.error("- skipped - node is not running")
        return

    self.upgrade_vmware_tools(node)

    prepares = self._get_prepares(node, settings, container)
    if not prepares:
        plogging.info('- nothing to do')
        self.report.append(
            {node.name: {
                'status': 'skipped - nothing to do'
            }})
        return

    if node.public_ips:
        plogging.info("- node is reachable at '{}'".format(
            node.public_ips[0]))
    elif not self.beachheading:
        plogging.error('- node is unreachable')
        self.report.append({node.name: {'status': 'unreachable'}})
        return

    # Split the prepare records into human-readable labels and the
    # deployment steps that will actually run on the node.
    descriptions = [prepare['description'] for prepare in prepares]
    steps = [prepare['genius'] for prepare in prepares]

    applied = self._apply_prepares(node, MultiStepDeployment(steps))
    if applied:
        plogging.info('- rebooting')
    self.report.append(
        {node.name: {
            'status': 'completed' if applied else 'failed',
            'prepares': descriptions
        }})
# Declarative node definitions for the app and db tiers.
# NOTE(review): the two keyword arguments below belong to a Node(...) call
# whose opening line lives outside this chunk.
tags=["web", "peakload"],
size=Size(ram=512))

""" ==== APP TIER ==== """

Node(
    name="app1",
    os="Ubuntu 11.04",
    tags=["app"])

# also tagged 'peakload' for scaling control
Node(
    name="app2",
    os="Ubuntu 11.04",
    tags=["app", "peakload"])

""" ==== DB TIER ==== """

Node(
    name="db1",
    os="Ubuntu 11.04",
    tags=["db"],
    # We can customize the deployment steps, although the values shown here are
    # simply the defaults, repeated for visibility.
    # Normally a provisioning tool such as puppet, chef, cfengine or similar should
    # be used for detailed provisioning - these deployment steps may be used to
    # bootstrap the provisioning tool, though.
    deployment=MultiStepDeployment([
        # Install the user's public SSH key for root, then refresh packages
        # and install puppet so it can take over provisioning.
        SSHKeyDeployment(open(os.path.expanduser("~/.ssh/id_rsa.pub")).read()),
        ScriptDeployment("apt-get update"),
        ScriptDeployment("apt-get -y install puppet")
    ]))
# Minimal libcloud example: deploy an Exoscale node and run a script on it
# over SSH during provisioning.
from pprint import pprint

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

# Import the deployment specific modules
from libcloud.compute.deployment import ScriptDeployment
from libcloud.compute.deployment import MultiStepDeployment

cls = get_driver(Provider.EXOSCALE)
# NOTE(review): placeholder credentials — replace with real API keys.
driver = cls("api key", "api secret key")

# Picking the first image and size is arbitrary; fine for a demo.
image = driver.list_images()[0]
size = driver.list_sizes()[0]

# Define the scripts that you want to run during deployment
script = ScriptDeployment("/bin/date")
msd = MultiStepDeployment([script])

# deploy_node provisions the node, waits for SSH, then runs the steps.
node = driver.deploy_node(
    name="test",
    image=image,
    size=size,
    ssh_key="~/.ssh/id_rsa_test",
    ex_keyname="test-keypair",
    deploy=msd,
)

# The stdout of the deployment can be checked on the `script` object
pprint(script.stdout)
def deploy_init_to(self, *args, **kwargs):
    """
    Creates a multi script deployment to prepare and call
    the latest init script.

    Builds six ordered deployment steps (log setup, editor/shell deps,
    download of atmo_init_full.py, chmod, the init call itself, cleanup)
    and hands them to deploy_to() as a MultiStepDeployment.

    TODO: Add versioning for 30+
    """
    # The instance is passed positionally; it is the only required argument.
    if args:
        instance = args[0]
    else:
        raise MissingArgsException("Missing instance argument.")
    if isinstance(self.identity.user, basestring):
        username = self.identity.user
    else:
        # Django.contrib.auth.models.User
        username = self.identity.user.username
    atmo_init = "/usr/sbin/atmo_init_full.py"
    server_atmo_init = "/init_files/30/atmo_init_full.py"
    # Step 1: ensure /var/log/atmo and the deploy log file exist so the
    # later LoggedScriptDeployment steps have somewhere to append output.
    script_init = ScriptDeployment(
        'if [ ! -d "/var/log/atmo" ];then\n'
        'mkdir -p /var/log/atmo\n'
        'fi\n'
        'if [ ! -f "/var/log/atmo/deploy.log" ]; then\n'
        'touch /var/log/atmo/deploy.log\n'
        'fi',
        name="./deploy_init_log.sh")
    #These requirements are for Editors, Shell-in-a-box
    do_ubuntu = "apt-get update;apt-get install -y emacs vim wget "\
        + "language-pack-en make gcc g++ gettext texinfo "\
        + "autoconf automake"
    do_centos = "yum install -y emacs vim-enhanced wget make "\
        + "gcc gettext texinfo autoconf automake python-simplejson"
    # Step 2: install distro-specific dependencies, branching on the
    # contents of /etc/*-release at runtime.
    script_deps = LoggedScriptDeployment(
        "distro_cat=`cat /etc/*-release`\n" +
        "if [[ $distro_cat == *Ubuntu* ]]; then\n" +
        do_ubuntu +
        "\nelse if [[ $distro_cat == *CentOS* ]];then\n" +
        do_centos +
        "\nfi\nfi",
        name="./deploy_deps.sh",
        logfile="/var/log/atmo/deploy.log")
    # Step 3: fetch the init script from the atmosphere server.
    script_wget = LoggedScriptDeployment(
        "wget -O %s %s%s" %
        (atmo_init, settings.SERVER_URL, server_atmo_init),
        name='./deploy_wget_atmoinit.sh',
        logfile='/var/log/atmo/deploy.log')
    # Step 4: make the downloaded init script executable.
    script_chmod = LoggedScriptDeployment(
        "chmod a+x %s" % atmo_init,
        name='./deploy_chmod_atmoinit.sh',
        logfile='/var/log/atmo/deploy.log')
    # Fall back to the instance id when no explicit token was supplied.
    instance_token = kwargs.get('token', '')
    if not instance_token:
        instance_token = instance.id
    # Step 5: build and run the actual atmo_init_full.py invocation.
    awesome_atmo_call = "%s --service_type=%s --service_url=%s"
    awesome_atmo_call += " --server=%s --user_id=%s"
    awesome_atmo_call += " --token=%s --name=\"%s\""
    awesome_atmo_call += " --vnc_license=%s"
    awesome_atmo_call %= (
        atmo_init,
        "instance_service_v1",
        settings.INSTANCE_SERVICE_URL,
        settings.SERVER_URL,
        username,
        instance_token,
        instance.name,
        settings.ATMOSPHERE_VNC_LICENSE)
    #kludge: weirdness without the str cast...
    str_awesome_atmo_call = str(awesome_atmo_call)
    #logger.debug(isinstance(str_awesome_atmo_call, basestring))
    script_atmo_init = LoggedScriptDeployment(
        str_awesome_atmo_call,
        name='./deploy_call_atmoinit.sh',
        logfile='/var/log/atmo/deploy.log')
    # Step 6: remove the uploaded deploy scripts from the home directory.
    script_rm_scripts = LoggedScriptDeployment(
        "rm -rf ~/deploy_*",
        name='./deploy_remove_scripts.sh',
        logfile='/var/log/atmo/deploy.log')
    msd = MultiStepDeployment([
        script_init, script_deps, script_wget, script_chmod,
        script_atmo_init, script_rm_scripts
    ])
    kwargs.update({'deploy': msd})
    # NOTE(review): hard-coded private key path — presumably the shared
    # deploy key of the atmosphere host; confirm before changing.
    private_key = "/opt/dev/atmosphere/extras/ssh/id_rsa"
    kwargs.update({'ssh_key': private_key})
    kwargs.update({'timeout': 120})
    return self.deploy_to(instance, *args, **kwargs)
def launch(execution_items, executor_params, launch_params):
    """Deploy a GCE supervisor node for the given execution items.

    Builds a bash bootstrap script (pip packages, credentials, SSH keys),
    wraps it in a libcloud MultiStepDeployment, and deploys an Ubuntu
    e2-micro instance on Google Compute Engine.

    Returns a ``(node, deploy)`` tuple so the caller can inspect the node
    and the deployment step output.
    """
    # NOTE: Maybe make this include the experiment name.
    instance_name = f"del8-{uuidlib.uuid4().hex}"

    if not executor_params.base_exit_logger_params:
        executor_params = executor_params.copy(
            base_exit_logger_params=_create_base_exit_logger_params(
                launch_params))

    # Bootstrap commands executed on the fresh instance, joined below into
    # a single shell script.
    script = [
        "#!/bin/bash",
        # I think we need the sudo apt-get update twice for whatever reason.
        "sudo apt-get update",
        "sudo apt-get update",
        "sudo apt-get -y install python3-pip",
        project_util.pip_packages_to_bash_command(
            SUPERVISOR_PIP_PACKAGES, pip=launch_params.pip_binary),
        # project_util.file_to_bash_command(launch_params.public_ssh_key_path, dst_directory="~/.ssh"),
        project_util.file_to_bash_command(
            launch_params.credentials_path,
            dst_directory=os.path.dirname(launch_params.credentials_path),
        ),
        project_util.file_to_bash_command(launch_params.private_ssh_key_path,
                                          dst_directory="~/.ssh"),
        SSH_PERMISSIONS_SET_UP_CMD,
        "eval `ssh-agent -s`",
        f"ssh-add ~/.ssh/{os.path.basename(launch_params.private_ssh_key_path)}",
        project_util.file_to_bash_command(launch_params.vast_api_key_file),
        # project_util.python_project_to_bash_command(project_util.DEL8_PROJECT),
        # _add_start_supervisor_script(execution_items, executor_params, launch_params, instance_name=instance_name),
    ]

    deploy = MultiStepDeployment([
        # Raise the stack size limit before running the bootstrap script.
        ScriptDeployment("ulimit -s 65536"),
        ScriptDeployment("\n".join(script)),
    ], )

    ComputeEngine = get_driver(Provider.GCE)
    driver = ComputeEngine(
        launch_params.service_account,
        launch_params.credentials_path,
        datacenter=launch_params.datacenter,
        project=launch_params.project_id,
    )

    # Pick the newest supported Ubuntu LTS image available, preferring
    # focal, then bionic, then xenial.
    images = driver.list_images()
    image = None
    for name in ["focal", "bionic", "xenial"]:
        valid_images = [
            im for im in images if "ubuntu" in im.name and name in im.name
        ]
        if valid_images:
            image = valid_images[0]
            logging.info(f"Using image {image.name} for GCE supervisor.")
            break
    if not image:
        raise ValueError(
            f"Unable to find valid GCE image out of options: {[im.name for im in images]}"
        )

    # f1-micro
    # NOTE(review): comment above says f1-micro but the lookup selects
    # e2-micro — confirm which machine type is intended.
    size = [s for s in driver.list_sizes() if s.name == "e2-micro"][0]

    node = driver.deploy_node(
        name=instance_name,
        image=image,
        size=size,
        deploy=deploy,
        ssh_username="******",
        ex_metadata=launch_params.create_ex_metadata(),
        ssh_key=launch_params.private_ssh_key_path,
        ex_service_accounts=launch_params.create_ex_service_accounts(),
    )
    return node, deploy
# blitzem environment config: default deployment steps plus the web tier.
import os

from blitzem.model import Node, LoadBalancer, Size, defaults, user_public_ssh_key
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
from blitzem.deployment import LoggedScriptDeployment

# Default deployment applied to every node unless overridden per-node.
defaults["deployment"] = MultiStepDeployment([
    # Note: This key will be added to the authorized keys for the root user
    # (/root/.ssh/authorized_keys)
    SSHKeyDeployment(open(user_public_ssh_key).read()),
    # Serve a simple text file on each node to demonstrate load balancing effect
    LoggedScriptDeployment("apt-get update; apt-get install dtach"),
    LoggedScriptDeployment("mkdir web; cd web; hostname > hostname.txt; dtach -n /tmp/simple_http.worker python -m SimpleHTTPServer 8080")
])

""" ==== WEB TIER ==== """

# Balance HTTP traffic on port 8080 across all nodes tagged 'web'.
LoadBalancer(
    name="web_lb1",
    applies_to_tag="web",
    port=8080,
    protocol="http")

# A simple pair of nodes in the 'web' tier
Node(
    name="web1",
    tags=["web"])

# NOTE(review): this Node(...) call continues beyond this chunk.
Node(
    name="web2",
def deploy_instance(args):
    """Deploy a short-lived canary instance on Exoscale, then destroy it.

    Provisions a node in the requested zone, runs an echo command over SSH
    via a ScriptDeployment to prove end-to-end provisioning works, then
    tears the node down. Raises if a canary with the same name exists.

    ``args`` is a dict with keys: acskey, acssecret, zonename, endpoint,
    template, offering.
    """
    api_key = args['acskey']
    secret_key = args['acssecret']
    zonename = args['zonename']
    endpoint = args['endpoint']
    template = args['template']
    offering = args['offering']

    # Log everything to both the logfile and stderr.
    # NOTE(review): `logfile` is a module-level name defined outside this
    # chunk — confirm it is set before this function runs.
    logging.basicConfig(
        format='%(asctime)s %(pathname)s %(levelname)s:%(message)s',
        level=logging.DEBUG,
        filename=logfile)
    logging.getLogger().addHandler(logging.StreamHandler())

    cls = get_driver(Provider.EXOSCALE)
    driver = cls(api_key, secret_key, host=endpoint)

    # Resolve zone, offering and template to driver objects.
    location = [location for location in driver.list_locations()
                if location.name.lower() == zonename.lower()][0]
    size = [size for size in driver.list_sizes() if size.name == offering][0]
    images = [i for i in driver.list_images()
              if template.lower() in i.extra['displaytext'].lower()]
    # Sort descending so the newest matching template comes first.
    images = sorted(images, key=lambda i: i.extra['displaytext'], reverse=True)
    image = NodeImage(id=images[0].id, name=images[0].name, driver=driver)

    name = 'canary-check-' + location.name.lower()
    # Suffix '-pp' distinguishes non-production endpoints.
    if endpoint != 'api.exoscale.ch':
        name += '-pp'

    # check if a previous canary exists
    nodes = driver.list_nodes()
    for n in nodes:
        if name in n.name:
            raise Exception('Instance with same name already exists !')

    # Run a trivial command during deployment to verify SSH access works.
    script = ScriptDeployment('echo Iam alive !')
    msd = MultiStepDeployment([script])

    logging.info('Deploying instance %s', name)
    node = driver.deploy_node(name=name,
                              image=image,
                              size=size,
                              timeout=300,
                              location=location,
                              ssh_username='******',
                              deploy=msd)
    logging.debug(pprint.pformat(node))

    nodename = str(node.name)
    nodeid = str(node.uuid)
    nodeip = str(node.public_ips)
    logging.info('Instance successfully deployed : %s, %s, %s',
                 nodename, nodeid, nodeip)

    # The stdout of the deployment can be checked on the `script` object
    logging.debug(pprint.pformat(script.stdout))
    logging.info('Successfully executed echo command thru SSH')

    logging.info('Destroying the instance now')
    # destroy our canary node
    driver.destroy_node(node)
    logging.info('Successfully destroyed the instance %s', name)
    logging.info('Script completed')
# libcloud example: deploy a Rackspace node, installing an SSH key and
# running a puppet bootstrap script during provisioning.
SCRIPT = """#!/usr/bin/env bash
apt-get -y update && apt-get -y install puppet
"""

# NOTE(review): placeholder credentials — replace with real values.
RACKSPACE_USER = "******"
RACKSPACE_KEY = "your key"

Driver = get_driver(Provider.RACKSPACE)
conn = Driver(RACKSPACE_USER, RACKSPACE_KEY)

# NOTE(review): KEY_PATH is defined outside this chunk — presumably the
# path to the user's public SSH key; confirm.
with open(KEY_PATH) as fp:
    content = fp.read()

# Note: This key will be added to the authorized keys for the root user
# (/root/.ssh/authorized_keys)
step_1 = SSHKeyDeployment(content)

# A simple script to install puppet post boot, can be much more complicated.
step_2 = ScriptDeployment(SCRIPT)

# Run the key install first, then the bootstrap script.
msd = MultiStepDeployment([step_1, step_2])

images = conn.list_images()
sizes = conn.list_sizes()

# deploy_node takes the same base keyword arguments as create_node.
node = conn.deploy_node(name="test", image=images[0], size=sizes[0],
                        deploy=msd)
def _attach_node_to_keypair(request, keypair, backend_id, node):
    """Record the (backend, node) pair on the keypair and persist it.

    Extracted helper: this bookkeeping was previously duplicated in the
    Rackspace, EC2 and Linode branches of create_machine().
    """
    machines = keypair.get('machines', None)
    if machines and len(machines):
        keypair['machines'].append([backend_id, node.id])
    else:
        keypair['machines'] = [[backend_id, node.id], ]
    save_keypairs(request, keypair)


def create_machine(request):
    """Creates a new virtual machine on the specified backend.

    If the backend is Rackspace it attempts to deploy the node with an ssh
    key provided in config. the method used is the only one working in the
    old Rackspace backend. create_node(), from libcloud.compute.base, with
    'auth' kwarg doesn't do the trick. Didn't test if you can upload some
    ssh related files using the 'ex_files' kwarg from openstack 1.0 driver.

    In Linode creation is a bit different. There you can pass the key file
    directly during creation. The Linode API also requires to set a disk
    size and doesn't get it from size.id. So, send size.disk from the
    client and use it in all cases just to avoid provider checking.

    Finally, Linode API does not support association between a machine and
    the image it came from. We could set this, at least for machines
    created through mist.io in ex_comment, lroot or lconfig. lroot seems
    more appropriate. However, liblcoud doesn't support linode.config.list
    at the moment, so no way to get them. Also, it will create
    inconsistencies for machines created through mist.io and those from
    the Linode interface.
    """
    try:
        conn = connect(request)
    except:
        return Response('Backend not found', 404)

    backend_id = request.matchdict['backend']

    # Pick an SSH keypair: the named one when given, otherwise the default.
    try:
        key_name = request.json_body['key']
    except:
        key_name = None

    try:
        keypairs = request.environ['beaker.session']['keypairs']
    except:
        keypairs = request.registry.settings.get('keypairs', {})

    if key_name:
        keypair = get_keypair_by_name(keypairs, key_name)
    else:
        keypair = get_keypair(keypairs)

    if keypair:
        private_key = keypair['private']
        public_key = keypair['public']
    else:
        private_key = public_key = None

    try:
        machine_name = request.json_body['name']
        location_id = request.json_body['location']
        image_id = request.json_body['image']
        size_id = request.json_body['size']
        # deploy_script received as unicode, but ScriptDeployment wants str
        script = str(request.json_body.get('script', ''))
        # these are required only for Linode, passing them anyway
        image_extra = request.json_body['image_extra']
        disk = request.json_body['disk']
    except Exception as e:
        return Response('Invalid payload', 400)

    size = NodeSize(size_id, name='', ram='', disk=disk,
                    bandwidth='', price='', driver=conn)
    image = NodeImage(image_id, name='', extra=image_extra, driver=conn)

    if conn.type in EC2_PROVIDERS:
        locations = conn.list_locations()
        for loc in locations:
            if loc.id == location_id:
                location = loc
                break
        else:
            # Fix: previously `location` stayed unbound (NameError) when no
            # EC2 location matched; fall back to a bare NodeLocation.
            location = NodeLocation(location_id, name='', country='',
                                    driver=conn)
    else:
        location = NodeLocation(location_id, name='', country='', driver=conn)

    if conn.type in [Provider.RACKSPACE_FIRST_GEN, Provider.RACKSPACE] and\
            public_key:
        # Rackspace: install the key and run the script via deploy_node.
        key = SSHKeyDeployment(str(public_key))
        deploy_script = ScriptDeployment(script)
        msd = MultiStepDeployment([key, deploy_script])
        try:
            node = conn.deploy_node(name=machine_name, image=image,
                                    size=size, location=location, deploy=msd)
            if keypair:
                _attach_node_to_keypair(request, keypair, backend_id, node)
        except Exception as e:
            return Response(
                'Something went wrong with node creation in RackSpace: %s'
                % e, 500)
    elif conn.type in EC2_PROVIDERS and public_key:
        # EC2: import the key, ensure the security group, and hand
        # deploy_node the private key via a temp file on disk.
        imported_key = import_key(conn, public_key, key_name)
        created_security_group = create_security_group(conn,
                                                       EC2_SECURITYGROUP)
        deploy_script = ScriptDeployment(script)

        (tmp_key, tmp_key_path) = tempfile.mkstemp()
        key_fd = os.fdopen(tmp_key, 'w+b')
        key_fd.write(private_key)
        key_fd.close()
        # deploy_node wants path for ssh private key
        if imported_key and created_security_group:
            try:
                node = conn.deploy_node(
                    name=machine_name,
                    image=image,
                    size=size,
                    deploy=deploy_script,
                    location=location,
                    ssh_key=tmp_key_path,
                    ex_keyname=key_name,
                    ex_securitygroup=EC2_SECURITYGROUP['name'])
                if keypair:
                    _attach_node_to_keypair(request, keypair, backend_id,
                                            node)
            except Exception as e:
                return Response(
                    'Something went wrong with node creation in EC2: %s'
                    % e, 500)
        # remove temp file with private key (best effort)
        try:
            os.remove(tmp_key_path)
        except:
            pass
    # Fix: compare with == rather than `is` — identity comparison against a
    # provider constant only works by accident.
    elif conn.type == Provider.LINODE and public_key:
        auth = NodeAuthSSHKey(public_key)
        deploy_script = ScriptDeployment(script)
        try:
            # NOTE(review): create_node is given a `deploy` kwarg here even
            # though only deploy_node documents it — confirm the Linode
            # driver actually honors it.
            node = conn.create_node(name=machine_name, image=image,
                                    size=size, deploy=deploy_script,
                                    location=location, auth=auth)
            if keypair:
                _attach_node_to_keypair(request, keypair, backend_id, node)
        except:
            return Response('Something went wrong with Linode creation', 500)
    else:
        try:
            node = conn.create_node(name=machine_name, image=image,
                                    size=size, location=location)
        except Exception as e:
            return Response(
                'Something went wrong with generic node creation: %s'
                % e, 500)

    return {
        'id': node.id,
        'name': node.name,
        'extra': node.extra,
        'public_ips': node.public_ips,
        'private_ips': node.private_ips,
    }