Exemplo n.º 1
0
    def test_script_deployment_and_sshkey_deployment_argument_types(self):
        """Both deployment classes accept str, unicode, and file-like
        objects, and reject other types with TypeError."""

        class FileObject(object):
            def __init__(self, name):
                self.name = name

            def read(self):
                return "bar"

        # Accepted argument types: plain str, unicode, and file-like.
        for script_value in ("foobar", u("foobar"), FileObject("test")):
            ScriptDeployment(script=script_value)

        for key_value in ("foobar", u("foobar"), FileObject("test")):
            SSHKeyDeployment(key=key_value)

        # Rejected argument types must raise TypeError.
        for factory, kwargs in ((ScriptDeployment, {"script": []}),
                                (SSHKeyDeployment, {"key": {}})):
            try:
                factory(**kwargs)
            except TypeError:
                pass
            else:
                self.fail("TypeError was not thrown")
Exemplo n.º 2
0
    def test_script_deployment_and_sshkey_deployment_argument_types(self):
        """Deployment constructors take str/unicode/file-like values and
        raise TypeError for anything else."""

        class FileObject(object):
            def __init__(self, name):
                self.name = name

            def read(self):
                return 'bar'

        def assert_rejected(factory, **kwargs):
            # Expect a TypeError; anything else is a test failure.
            try:
                factory(**kwargs)
            except TypeError:
                pass
            else:
                self.fail('TypeError was not thrown')

        ScriptDeployment(script='foobar')
        ScriptDeployment(script=u('foobar'))
        ScriptDeployment(script=FileObject('test'))

        SSHKeyDeployment(key='foobar')
        SSHKeyDeployment(key=u('foobar'))
        SSHKeyDeployment(key=FileObject('test'))

        assert_rejected(ScriptDeployment, script=[])
        assert_rejected(SSHKeyDeployment, key={})
Exemplo n.º 3
0
def get_public_key_from_file(public_key_path=None):
    """Build an SSHKeyDeployment from a public key file.

    :param public_key_path: path to the public key; defaults to
        ``~/.ssh/id_rsa.pub`` when falsy.
    :return: an ``SSHKeyDeployment`` wrapping the file contents, or
        ``None`` if the file does not exist.
    """
    public_key_path = public_key_path or os.path.expanduser(
        "~/.ssh/id_rsa.pub")
    # Parenthesized print: the original used a Python 2 print statement,
    # which is a SyntaxError on Python 3.  This form works on both.
    print("public_key_path: %s" % public_key_path)
    if not os.path.exists(public_key_path):
        # Lazy %-style args so formatting happens only if the record is
        # actually emitted.
        logger.error("File does not exist. %s", public_key_path)
        return None

    with open(public_key_path) as stream:
        return SSHKeyDeployment(stream.read())
Exemplo n.º 4
0
def ssh_pub(vm_):
    '''
    Deploy the primary ssh authentication key.

    Returns an SSHKeyDeployment for the configured ``ssh_auth`` public
    key file, or None when no key is configured or the file is missing.
    '''
    ssh = config.get_cloud_config_value('ssh_auth', vm_, __opts__)
    if not ssh:
        return None

    ssh = os.path.expanduser(ssh)
    # BUG FIX: the original bailed out when the file *did* exist
    # (``if os.path.isfile(ssh): return None``) and then tried to open a
    # nonexistent file, raising IOError.  Bail out only when it is missing.
    if not os.path.isfile(ssh):
        return None

    # Context manager so the key file handle is closed promptly.
    with open(ssh) as key_file:
        return SSHKeyDeployment(key_file.read())
Exemplo n.º 5
0
def ssh_pub(vm_):
    '''
    Deploy the primary ssh authentication key.

    Prefers the per-VM ``ssh_auth`` path, falling back to the global
    ``__opts__['ssh_auth']``.  Returns an SSHKeyDeployment, or None when
    the selected key file does not exist.
    '''
    ssh = ''
    if 'ssh_auth' in vm_:
        # Expand '~' before the existence check; the original checked the
        # raw path but opened the expanded one, so '~/...' paths always
        # returned None here even when the file existed.
        if not os.path.isfile(os.path.expanduser(vm_['ssh_auth'])):
            return None
        ssh = vm_['ssh_auth']
    if not ssh:
        if not os.path.isfile(os.path.expanduser(__opts__['ssh_auth'])):
            return None
        ssh = __opts__['ssh_auth']

    # Context manager so the key file handle is closed promptly.
    with open(os.path.expanduser(ssh)) as key_file:
        return SSHKeyDeployment(key_file.read())
Exemplo n.º 6
0
 def create_target_environment(self):
     """Provision the benchmark VM via deploy_node.

     Builds placeholder NodeSize/NodeImage objects from the configured
     ids, installs the public key through an SSHKeyDeployment, and
     records the new node's first public IP in self.__target_env_ip.
     """
     size = NodeSize(id=self.size_id, name="", ram=None, disk=None,
                     bandwidth=None, price=None, driver="")
     image = NodeImage(id=self.image_id, name="", driver="")
     # Read the public key with a context manager; the original leaked
     # the file handle from a bare open().read().
     with open(os.path.expanduser(self.key_path)) as key_file:
         install_key = SSHKeyDeployment(key_file.read())
     msd = MultiStepDeployment([install_key])
     # Random 6-char suffix keeps concurrent benchmark instances distinct.
     self.__instance_name = "Benchmark_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
     self.__node = self.__conn.deploy_node(name=self.__instance_name,
                         image=image,
                         size=size,
                         deploy=msd,
                         ssh_username=self.vm_user,
                         ssh_key=self.key_path,
                         ex_keyname=self.key_name)
     logger.info("Instance {0} created with ip {1}".format(self.__instance_name,
                                                           self.__node.public_ips[0]))
     self.__target_env_ip = self.__node.public_ips[0]
Exemplo n.º 7
0
    def deploy(self, image_id, size_idx=0, location_idx=0, name='test'):
        """Linode supports libcloud's `libcloud.compute.deployment`.

        Pass an `SSHKeyDeployment` to `self.driver.deploy_node`.

        :param image_id: provider image id used for the throwaway Image class.
        :param size_idx: index into the driver's size list.
        :param location_idx: index into the driver's location list.
        :param name: node name.
        :return: the deployed node converted via ``node2dict``.
        """
        # Context manager so the public key file handle is closed; the
        # original leaked it via a bare open().read().
        with open(self.ssh_public_key) as key_file:
            sd = SSHKeyDeployment(key_file.read())
        script = ScriptDeployment("/bin/true")  # NOP
        msd = MultiStepDeployment([sd, script])

        class Image:
            id = image_id

        size = self.driver.list_sizes()[size_idx]
        location = self.driver.list_locations()[location_idx]

        return node2dict(
            self.driver.deploy_node(name=name,
                                    image=Image,
                                    size=size,
                                    location=location,
                                    deploy=msd))
Exemplo n.º 8
0
 def deploynode(self, plan, imageid, name):
     """Deploy a node that installs the caller's SSH key and runs the
     apacheDeploy entry-point script taken from *plan*."""
     private_key = os.path.expanduser('~/.ssh/id_rsa')
     with open(private_key + ".pub") as pub_file:
         public_key = pub_file.read()
     key_step = SSHKeyDeployment(public_key)
     image = NodeImage(id=imageid, name=None, driver=self.driver)
     available_sizes = self.driver.list_sizes()
     # The script body lives in the plan, keyed by its entry-point name.
     entry_point = plan['Scripts']['apacheDeploy']['EntryPoint']
     script_step = ScriptDeployment(plan['Files'][entry_point]['Body'])
     steps = MultiStepDeployment([key_step, script_step])
     try:
         self.driver.deploy_node(name=name,
                                 image=image,
                                 size=available_sizes[0],
                                 ssh_key=private_key,
                                 ssh_username='******',
                                 deploy=steps,
                                 timeout=1800,
                                 ex_keyname="avni_key")
     except NotImplementedError:
         print("Deploy Node is not implemented for this driver")
Exemplo n.º 9
0
    def upload(self):
        """Register the image in each EC2 region.

        Boots a utility instance in the origin region, writes the raw
        image onto a secondary EBS volume, snapshots it, registers the
        snapshot as an AMI, boots and smoke-tests that AMI, then (on
        success) copies the AMI to every other region and makes all
        copies public.  Returns 1 on any handled failure (after
        attempting cleanup), 0 on success.
        """

        log.info('EC2 upload process started')

        # Get a starting utility AMI in some region to use as an origin
        ami = self.util_amis[0]  # Select the starting AMI to begin
        self.destination = 'EC2 ({region})'.format(region=ami['region'])

        fedimg.messenger.message('image.upload', self.build_name,
                                 self.destination, 'started')

        try:
            # Connect to the region through the appropriate libcloud driver
            cls = ami['driver']
            driver = cls(fedimg.AWS_ACCESS_ID, fedimg.AWS_SECRET_KEY)

            # select the desired node attributes
            sizes = driver.list_sizes()
            reg_size_id = 'm1.xlarge'

            # check to make sure we have access to that size node
            # TODO: Add try/except if for some reason the size isn't
            # available?
            size = [s for s in sizes if s.id == reg_size_id][0]
            base_image = NodeImage(id=ami['ami'], name=None, driver=driver)

            # Name the utility node
            name = 'Fedimg AMI builder'

            # Block device mapping for the utility node
            # (Requires this second volume to write the image to for
            # future registration.)
            mappings = [{
                'VirtualName': None,  # cannot specify with Ebs
                'Ebs': {
                    'VolumeSize': fedimg.AWS_UTIL_VOL_SIZE,
                    'VolumeType': self.vol_type,
                    'DeleteOnTermination': 'false'
                },
                'DeviceName': '/dev/sdb'
            }]

            # Read in the SSH key
            with open(fedimg.AWS_PUBKEYPATH, 'rb') as f:
                key_content = f.read()

            # Add key to authorized keys for root user
            step_1 = SSHKeyDeployment(key_content)

            # Add script for deployment
            # Device becomes /dev/xvdb on instance
            script = "touch test"  # this isn't so important for the util inst.
            step_2 = ScriptDeployment(script)

            # Create deployment object (will set up SSH key and run script)
            msd = MultiStepDeployment([step_1, step_2])

            log.info('Deploying utility instance')

            # Retry loop: each failure case below either repairs the region
            # (keypair / security group) and continues, or re-raises.
            while True:
                try:
                    self.util_node = driver.deploy_node(
                        name=name,
                        image=base_image,
                        size=size,
                        ssh_username=fedimg.AWS_UTIL_USER,
                        ssh_alternate_usernames=[''],
                        ssh_key=fedimg.AWS_KEYPATH,
                        deploy=msd,
                        kernel_id=ami['aki'],
                        ex_metadata={'build': self.build_name},
                        ex_keyname=fedimg.AWS_KEYNAME,
                        ex_security_groups=['ssh'],
                        ex_ebs_optimized=True,
                        ex_blockdevicemappings=mappings)

                except KeyPairDoesNotExistError:
                    # The keypair is missing from the current region.
                    # Let's install it and try again.
                    log.exception('Adding missing keypair to region')
                    driver.ex_import_keypair(fedimg.AWS_KEYNAME,
                                             fedimg.AWS_PUBKEYPATH)
                    continue

                except Exception as e:
                    # We might have an invalid security group, aka the 'ssh'
                    # security group doesn't exist in the current region. The
                    # reason this is caught here is because the related
                    # exception that prints`InvalidGroup.NotFound is, for
                    # some reason, a base exception.
                    # NOTE(review): e.message is Python 2-only; use str(e)
                    # if this ever moves to Python 3.
                    if 'InvalidGroup.NotFound' in e.message:
                        # NOTE(review): adjacent strings below concatenate
                        # without a space ("securitygroup") in the log text.
                        log.exception('Adding missing security'
                                      'group to region')
                        # Create the ssh security group
                        driver.ex_create_security_group('ssh', 'ssh only')
                        driver.ex_authorize_security_group(
                            'ssh', '22', '22', '0.0.0.0/0')
                        continue
                    else:
                        raise
                break

            # Wait until the utility node has SSH running
            while not ssh_connection_works(fedimg.AWS_UTIL_USER,
                                           self.util_node.public_ips[0],
                                           fedimg.AWS_KEYPATH):
                sleep(10)

            log.info('Utility node started with SSH running')

            # Connect to the utility node via SSH
            client = paramiko.SSHClient()
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            client.connect(self.util_node.public_ips[0],
                           username=fedimg.AWS_UTIL_USER,
                           key_filename=fedimg.AWS_KEYPATH)

            # Curl the .raw.xz file down from the web, decompressing it
            # and writing it to the secondary volume defined earlier by
            # the block device mapping.
            # curl with -L option, so we follow redirects
            cmd = "sudo sh -c 'curl -L {0} | xzcat > /dev/xvdb'".format(
                self.raw_url)
            chan = client.get_transport().open_session()
            chan.get_pty()  # Request a pseudo-term to get around requiretty

            log.info('Executing utility script')

            # Run the above command and wait for its exit status
            chan.exec_command(cmd)
            status = chan.recv_exit_status()
            if status != 0:
                # There was a problem with the SSH command
                log.error('Problem writing volume with utility instance')

                data = "(no data)"
                if chan.recv_ready():
                    data = chan.recv(1024 * 32)

                fedimg.messenger.message('image.upload',
                                         self.build_name,
                                         self.destination,
                                         'failed',
                                         extra={'data': data})

                raise EC2UtilityException(
                    "Problem writing image to utility instance volume. "
                    "Command exited with status {0}.\n"
                    "command: {1}\n"
                    "output: {2}".format(status, cmd, data))

            client.close()

            # Get volume name that image was written to
            vol_id = [
                x['ebs']['volume_id']
                for x in self.util_node.extra['block_device_mapping']
                if x['device_name'] == '/dev/sdb'
            ][0]

            log.info('Destroying utility node')

            # Terminate the utility instance
            driver.destroy_node(self.util_node)

            # Wait for utility node to be terminated
            while ssh_connection_works(fedimg.AWS_UTIL_USER,
                                       self.util_node.public_ips[0],
                                       fedimg.AWS_KEYPATH):
                sleep(10)

            # Wait a little longer since loss of SSH connectivity doesn't mean
            # that the node's destroyed
            # TODO: Check instance state rather than this lame sleep thing
            sleep(45)

            # Take a snapshot of the volume the image was written to
            self.util_volume = [
                v for v in driver.list_volumes() if v.id == vol_id
            ][0]
            snap_name = 'fedimg-snap-{0}'.format(self.build_name)

            log.info('Taking a snapshot of the written volume')

            self.snapshot = driver.create_volume_snapshot(self.util_volume,
                                                          name=snap_name)
            snap_id = str(self.snapshot.id)

            # Poll until the snapshot reaches the 'completed' state.
            while self.snapshot.extra['state'] != 'completed':
                # Re-obtain snapshot object to get updates on its state
                self.snapshot = [
                    s for s in driver.list_snapshots() if s.id == snap_id
                ][0]
                sleep(10)

            log.info('Snapshot taken')

            # Delete the volume now that we've got the snapshot
            driver.destroy_volume(self.util_volume)
            # make sure Fedimg knows that the vol is gone
            self.util_volume = None

            log.info('Destroyed volume')

            # Actually register image
            log.info('Registering image as an AMI')

            if self.virt_type == 'paravirtual':
                image_name = "{0}-{1}-PV-{2}-0".format(self.build_name,
                                                       ami['region'],
                                                       self.vol_type)
                test_size_id = 'm1.xlarge'
                # test_amis will include AKIs of the appropriate arch
                registration_aki = [
                    a['aki'] for a in self.test_amis
                    if a['region'] == ami['region']
                ][0]
                reg_root_device_name = '/dev/sda'
            else:  # HVM
                image_name = "{0}-{1}-HVM-{2}-0".format(
                    self.build_name, ami['region'], self.vol_type)
                test_size_id = 'm3.2xlarge'
                # Can't supply a kernel image with HVM
                registration_aki = None
                reg_root_device_name = '/dev/sda1'

            # For this block device mapping, we have our volume be
            # based on the snapshot's ID
            mapping = [{
                'DeviceName': reg_root_device_name,
                'Ebs': {
                    'SnapshotId': snap_id,
                    'VolumeSize': fedimg.AWS_TEST_VOL_SIZE,
                    'VolumeType': self.vol_type,
                    'DeleteOnTermination': 'true'
                }
            }]

            # Avoid duplicate image name by incrementing the number at the
            # end of the image name if there is already an AMI with that name.
            # TODO: This process could be written nicer.
            while True:
                try:
                    if self.dup_count > 0:
                        # Remove trailing '-0' or '-1' or '-2' or...
                        image_name = '-'.join(image_name.split('-')[:-1])
                        # Re-add trailing dup number with new count
                        image_name += '-{0}'.format(self.dup_count)
                    # Try to register with that name
                    self.images.append(
                        driver.ex_register_image(
                            image_name,
                            description=self.image_desc,
                            root_device_name=reg_root_device_name,
                            block_device_mapping=mapping,
                            virtualization_type=self.virt_type,
                            kernel_id=registration_aki,
                            architecture=self.image_arch))
                except Exception as e:
                    # Check if the problem was a duplicate name
                    # NOTE(review): e.message is Python 2-only.
                    if 'InvalidAMIName.Duplicate' in e.message:
                        # Keep trying until an unused name is found
                        self.dup_count += 1
                        continue
                    else:
                        raise
                break

            log.info('Completed image registration')

            # Emit success fedmsg
            # TODO: Can probably move this into the above try/except,
            # to avoid just dumping all the messages at once.
            for image in self.images:
                fedimg.messenger.message('image.upload',
                                         self.build_name,
                                         self.destination,
                                         'completed',
                                         extra={
                                             'id': image.id,
                                             'virt_type': self.virt_type,
                                             'vol_type': self.vol_type
                                         })

            # Now, we'll spin up a node of the AMI to test:

            # Add script for deployment
            # Device becomes /dev/xvdb on instance
            script = "touch test"
            step_2 = ScriptDeployment(script)

            # Create deployment object
            # (step_1 is the SSHKeyDeployment built for the utility node,
            # reused here for the test node.)
            msd = MultiStepDeployment([step_1, step_2])

            log.info('Deploying test node')

            # Pick a name for the test instance
            name = 'Fedimg AMI tester'

            # Select the appropriate size for the instance
            size = [s for s in sizes if s.id == test_size_id][0]

            # Alert the fedmsg bus that an image test is starting
            fedimg.messenger.message('image.test',
                                     self.build_name,
                                     self.destination,
                                     'started',
                                     extra={
                                         'id': self.images[0].id,
                                         'virt_type': self.virt_type,
                                         'vol_type': self.vol_type
                                     })

            # Actually deploy the test instance
            try:
                self.test_node = driver.deploy_node(
                    name=name,
                    image=self.images[0],
                    size=size,
                    ssh_username=fedimg.AWS_TEST_USER,
                    ssh_alternate_usernames=['root'],
                    ssh_key=fedimg.AWS_KEYPATH,
                    deploy=msd,
                    kernel_id=registration_aki,
                    ex_metadata={'build': self.build_name},
                    ex_keyname=fedimg.AWS_KEYNAME,
                    ex_security_groups=['ssh'],
                )
            except Exception as e:
                fedimg.messenger.message('image.test',
                                         self.build_name,
                                         self.destination,
                                         'failed',
                                         extra={
                                             'id': self.images[0].id,
                                             'virt_type': self.virt_type,
                                             'vol_type': self.vol_type
                                         })

                raise EC2AMITestException("Failed to boot test node %r." % e)

            # Wait until the test node has SSH running
            while not ssh_connection_works(fedimg.AWS_TEST_USER,
                                           self.test_node.public_ips[0],
                                           fedimg.AWS_KEYPATH):
                sleep(10)

            log.info('Starting AMI tests')

            client = paramiko.SSHClient()
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            client.connect(self.test_node.public_ips[0],
                           username=fedimg.AWS_TEST_USER,
                           key_filename=fedimg.AWS_KEYPATH)

            # Run /bin/true on the test instance as a simple "does it
            # work" test
            cmd = "/bin/true"
            chan = client.get_transport().open_session()
            chan.get_pty()  # Request a pseudo-term to get around requiretty

            log.info('Running AMI test script')

            chan.exec_command(cmd)

            # Again, wait for the test command's exit status
            if chan.recv_exit_status() != 0:
                # There was a problem with the SSH command
                log.error('Problem testing new AMI')

                data = "(no data)"
                if chan.recv_ready():
                    data = chan.recv(1024 * 32)

                fedimg.messenger.message('image.test',
                                         self.build_name,
                                         self.destination,
                                         'failed',
                                         extra={
                                             'id': self.images[0].id,
                                             'virt_type': self.virt_type,
                                             'vol_type': self.vol_type,
                                             'data': data
                                         })

                raise EC2AMITestException("Tests on AMI failed.\n"
                                          "output: %s" % data)

            client.close()

            log.info('AMI test completed')
            fedimg.messenger.message('image.test',
                                     self.build_name,
                                     self.destination,
                                     'completed',
                                     extra={
                                         'id': self.images[0].id,
                                         'virt_type': self.virt_type,
                                         'vol_type': self.vol_type
                                     })

            # Let this EC2Service know that the AMI test passed, so
            # it knows how to proceed.
            self.test_success = True

            log.info('Destroying test node')

            # Destroy the test node
            driver.destroy_node(self.test_node)

            # Make AMIs public
            for image in self.images:
                driver.ex_modify_image_attribute(
                    image, {'LaunchPermission.Add.1.Group': 'all'})

        except EC2UtilityException as e:
            log.exception("Failure")
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        except EC2AMITestException as e:
            log.exception("Failure")
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        except DeploymentException as e:
            log.exception("Problem deploying node: {0}".format(e.value))
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        except Exception as e:
            # Just give a general failure message.
            log.exception("Unexpected exception")
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        else:
            self._clean_up(driver)

        if self.test_success:
            # Copy the AMI to every other region if tests passed
            copied_images = list()  # completed image copies (ami: image)

            # Use the AMI list as a way to cycle through the regions
            for ami in self.test_amis[1:]:  # we don't need the origin region

                # Choose an appropriate destination name for the copy
                alt_dest = 'EC2 ({region})'.format(region=ami['region'])

                fedimg.messenger.message('image.upload', self.build_name,
                                         alt_dest, 'started')

                # Connect to the libcloud EC2 driver for the region we
                # want to copy into
                alt_cls = ami['driver']
                alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                     fedimg.AWS_SECRET_KEY)

                # Construct the full name for the image copy
                if self.virt_type == 'paravirtual':
                    image_name = "{0}-{1}-PV-{2}-0".format(
                        self.build_name, ami['region'], self.vol_type)
                else:  # HVM
                    image_name = "{0}-{1}-HVM-{2}-0".format(
                        self.build_name, ami['region'], self.vol_type)

                log.info('AMI copy to {0} started'.format(ami['region']))

                # Avoid duplicate image name by incrementing the number at the
                # end of the image name if there is already an AMI with
                # that name.
                # TODO: Again, this could be written better
                while True:
                    try:
                        if self.dup_count > 0:
                            # Remove trailing '-0' or '-1' or '-2' or...
                            image_name = '-'.join(image_name.split('-')[:-1])
                            # Re-add trailing dup number with new count
                            image_name += '-{0}'.format(self.dup_count)

                        # Actually run the image copy from the origin region
                        # to the current region.
                        for image in self.images:
                            image_copy = alt_driver.copy_image(
                                image,
                                self.test_amis[0]['region'],
                                name=image_name,
                                description=self.image_desc)
                            # Add the image copy to a list so we can work with
                            # it later.
                            copied_images.append(image_copy)

                            log.info('AMI {0} copied to AMI {1}'.format(
                                image, image_name))

                    except Exception as e:
                        # Check if the problem was a duplicate name
                        # NOTE(review): e.message is Python 2-only.
                        if 'InvalidAMIName.Duplicate' in e.message:
                            # Keep trying until an unused name is found.
                            # This probably won't trigger, since it seems
                            # like EC2 doesn't mind duplicate AMI names
                            # when they are being copied, only registered.
                            # Strange, but apprently true.
                            self.dup_count += 1
                            continue
                        else:
                            # TODO: Catch a more specific exception
                            log.exception('Image copy to {0} failed'.format(
                                ami['region']))
                            fedimg.messenger.message('image.upload',
                                                     self.build_name, alt_dest,
                                                     'failed')
                    break

            # Now cycle through and make all of the copied AMIs public
            # once the copy process has completed. Again, use the test
            # AMI list as a way to have region and arch data:

            # We don't need the origin region, since the AMI was made there:
            self.test_amis = self.test_amis[1:]

            for image in copied_images:
                ami = self.test_amis[copied_images.index(image)]
                alt_cls = ami['driver']
                alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                     fedimg.AWS_SECRET_KEY)

                # Get an appropriate name for the region in question
                alt_dest = 'EC2 ({region})'.format(region=ami['region'])

                # Need to wait until the copy finishes in order to make
                # the AMI public.
                while True:
                    try:
                        # Make the image public
                        alt_driver.ex_modify_image_attribute(
                            image, {'LaunchPermission.Add.1.Group': 'all'})
                    except Exception as e:
                        # NOTE(review): e.message is Python 2-only.
                        if 'InvalidAMIID.Unavailable' in e.message:
                            # The copy isn't done, so wait 20 seconds
                            # and try again.
                            sleep(20)
                            continue
                    break

                log.info('Made {0} public ({1}, {2}, {3})'.format(
                    image.id, self.build_name, self.virt_type, self.vol_type))

                fedimg.messenger.message('image.upload',
                                         self.build_name,
                                         alt_dest,
                                         'completed',
                                         extra={
                                             'id': image.id,
                                             'virt_type': self.virt_type,
                                             'vol_type': self.vol_type
                                         })

            return 0
Exemplo n.º 10
0
    def create_node(self, form):
        """Create a libcloud node from a validated form, choosing the
        provider's best-supported key/password deployment strategy.

        The strategy is selected from ``self.conn.features['create_node']``:
        ``ssh_key`` passes the configured public key directly,
        ``generates_password`` / ``password`` use ``deploy_node`` to push the
        key, and otherwise a plain ``create_node`` is issued with any extra
        plugin form fields forwarded as keyword arguments.

        :param form: a validated form whose ``cleaned_data`` contains at
            least ``name``, and optionally ``image``, ``size``, ``location``
            plus provider-specific extras.
        :return: ``(exception, None)`` on failure; the success path has no
            explicit return here. NOTE(review): confirm callers expect this
            asymmetric contract (success appears to fall through).
        """
        name = form.cleaned_data['name']
        # Wrap bare ids from the form into thin libcloud objects; only the
        # id matters to the driver, hence the empty placeholder fields.
        image = form.cleaned_data.get('image')
        if image:
            image = NodeImage(image.image_id, '', self.conn)
        size = form.cleaned_data.get('size')
        if size:
            size = NodeSize(size.size_id,
                            '',
                            '',
                            '',
                            None,
                            None,
                            driver=self.conn)
        location = form.cleaned_data.get('location')
        if location:
            location = NodeLocation(location.location_id, '', '', self.conn)

        # Choose node creation strategy
        features = self.conn.features.get('create_node', [])
        try:
            if "ssh_key" in features:
                # Pass on public key and we are done
                logging.debug("Provider feature: ssh_key. Pass on key")
                node = self.conn.create_node(name=name,
                                             image=image,
                                             size=size,
                                             location=location,
                                             auth=NodeAuthSSHKey(
                                                 settings.PUBLIC_KEY))
            elif 'generates_password' in features:
                # Use deploy_node to deploy public key
                logging.debug(
                    "Provider feature: generates_password. Use deploy_node")
                pubkey = SSHKeyDeployment(settings.PUBLIC_KEY)
                node = self.conn.deploy_node(name=name,
                                             image=image,
                                             size=size,
                                             location=location,
                                             deploy=pubkey)
            elif 'password' in features:
                # Pass on password and use deploy_node to deploy public key
                pubkey = SSHKeyDeployment(settings.PUBLIC_KEY)
                rpassword = generate_random_password(15)
                # NOTE(review): the generated password is written to the
                # debug log in clear text — confirm this is acceptable.
                logging.debug(
                    "Provider feature: password. Pass on password=%s to deploy_node"
                    % rpassword)
                node = self.conn.deploy_node(name=name,
                                             image=image,
                                             size=size,
                                             location=location,
                                             auth=NodeAuthPassword(rpassword),
                                             deploy=pubkey)
            else:
                # Create node without any extra steps nor parameters
                logging.debug("Provider feature: none. Call create_node")
                # Include all plugin form fields in the argument dict
                args = copy.deepcopy(form.cleaned_data)
                # Remove unneeded fields
                for field in ['name', 'image', 'size', 'location', 'provider']:
                    if field in args:
                        del args[field]  # Avoid collisions with default args
                args[str(self.extra_param_name)] = str(self.extra_param_value)
                node = self.conn.create_node(name=name,
                                             image=image,
                                             size=size,
                                             location=location,
                                             **args)
        except Exception, e:
            logging.error('while creating node. %s: %s' % (type(e), e))
            return e, None
Exemplo n.º 11
0
    def test_ssh_key_deployment(self):
        """Running an SSHKeyDeployment returns the node it was applied to."""
        deployment = SSHKeyDeployment(key='1234')
        result = deployment.run(node=self.node,
                                client=MockClient(hostname='localhost'))
        self.assertEqual(self.node, result)
Exemplo n.º 12
0
    def _get_prepares(self, node, settings, container):
        """
        Defines the set of actions to be done on a node

        :param node: the node to be polished
        :type node: :class:`libcloud.compute.base.Node`

        :param settings: the fittings plan for this node
        :type settings: ``dict``

        :param container: the container of this node
        :type container: :class:`plumbery.PlumberyInfrastructure`

        :return: a list of actions to be performed, and related descriptions
        :rtype: a ``list`` of `{ 'description': ..., 'genius': ... }``

        """

        if not isinstance(settings, dict):
            return []

        # Context used to expand {{ ... }} placeholders in scripts and files.
        environment = PlumberyNodeContext(node=node,
                                          container=container,
                                          context=self.facility)

        prepares = []

        # 1) deploy every readable local SSH public key to the node
        for key_file in self.key_files:
            try:
                path = os.path.expanduser(key_file)

                with open(path) as stream:
                    key = stream.read()
                    stream.close()  # NOTE(review): redundant inside 'with'

                prepares.append({
                    'description': 'deploy SSH public key',
                    'genius': SSHKeyDeployment(key=key)
                })

            except IOError:
                plogging.warning("no ssh key in {}".format(key_file))

        # 2) translate each 'prepare' directive into a deployment step
        if ('prepare' in settings and isinstance(settings['prepare'], list)
                and len(settings['prepare']) > 0):

            plogging.info('- using prepare commands')

            for script in settings['prepare']:

                # directives look like '<verb> <arg> ...'; a bare word is
                # treated as 'run <word>'
                tokens = script.split(' ')
                if len(tokens) == 1:
                    tokens.insert(0, 'run')

                if tokens[0] in ['run', 'run_raw']:  # send and run a script

                    script = tokens[1]
                    if len(tokens) > 2:
                        args = tokens[2:]
                    else:
                        args = []

                    plogging.debug("- {} {} {}".format(tokens[0], script,
                                                       ' '.join(args)))

                    try:
                        with open(script) as stream:
                            text = stream.read()

                            # only plain 'run' gets template expansion;
                            # 'run_raw' ships the script untouched
                            if (tokens[0] == 'run'
                                    and PlumberyText.could_expand(text)):

                                plogging.debug(
                                    "- expanding script '{}'".format(script))
                                text = PlumberyText.expand_string(
                                    text, environment)

                            if len(text) > 0:

                                plogging.info("- running '{}'".format(script))

                                prepares.append({
                                    'description':
                                    ' '.join(tokens),
                                    'genius':
                                    ScriptDeployment(script=text,
                                                     args=args,
                                                     name=script)
                                })

                            else:
                                plogging.error(
                                    "- script '{}' is empty".format(script))

                    except IOError:
                        plogging.error(
                            "- unable to read script '{}'".format(script))

                elif tokens[0] in ['put', 'put_raw']:  # send a file

                    # NOTE(review): 'file' shadows the builtin of that name
                    file = tokens[1]
                    if len(tokens) > 2:
                        destination = tokens[2]
                    else:
                        destination = './' + file

                    plogging.debug("- {} {} {}".format(tokens[0], file,
                                                       destination))

                    try:
                        with open(file) as stream:
                            content = stream.read()

                            # only plain 'put' gets template expansion
                            if (tokens[0] == 'put'
                                    and PlumberyText.could_expand(content)):

                                plogging.debug(
                                    "- expanding file '{}'".format(file))
                                content = PlumberyText.expand_string(
                                    content, environment)

                            plogging.info("- putting file '{}'".format(file))
                            prepares.append({
                                'description':
                                ' '.join(tokens),
                                'genius':
                                FileContentDeployment(content=content,
                                                      target=destination)
                            })

                    except IOError:
                        plogging.error(
                            "- unable to read file '{}'".format(file))

                else:  # echo a sensible message eventually

                    if tokens[0] == 'echo':
                        tokens.pop(0)
                    message = ' '.join(tokens)
                    message = PlumberyText.expand_string(message, environment)
                    plogging.info("- {}".format(message))

        # 3) seed cloud-init (NoCloud datasource) when 'cloud-config' is set
        if ('cloud-config' in settings
                and isinstance(settings['cloud-config'], dict)
                and len(settings['cloud-config']) > 0):

            plogging.info('- using cloud-config')

            # mandatory, else cloud-init will not consider user-data
            plogging.debug('- preparing meta-data')
            meta_data = 'instance_id: dummy\n'

            destination = '/var/lib/cloud/seed/nocloud-net/meta-data'
            prepares.append({
                'description':
                'put meta-data',
                'genius':
                FileContentDeployment(content=meta_data, target=destination)
            })

            plogging.debug('- preparing user-data')

            expanded = PlumberyText.expand_string(settings['cloud-config'],
                                                  environment)

            user_data = '#cloud-config\n' + expanded
            plogging.debug(user_data)

            destination = '/var/lib/cloud/seed/nocloud-net/user-data'
            prepares.append({
                'description':
                'put user-data',
                'genius':
                FileContentDeployment(content=user_data, target=destination)
            })

            plogging.debug('- preparing remote install of cloud-init')

            # helper script shipped alongside this module
            script = 'prepare.cloud-init.sh'
            try:
                path = os.path.dirname(__file__) + '/' + script
                with open(path) as stream:
                    text = stream.read()
                    if text:
                        prepares.append({
                            'description':
                            'run ' + script,
                            'genius':
                            ScriptDeployment(script=text, name=script)
                        })

            except IOError:
                raise PlumberyException(
                    "Error: cannot read '{}'".format(script))

            plogging.debug('- preparing reboot to trigger cloud-init')

            prepares.append({
                'description': 'reboot node',
                'genius': RebootDeployment(container=container)
            })

        return prepares
Exemplo n.º 13
0
import os

from blitzem.model import Node, LoadBalancer, Size, defaults, user_public_ssh_key
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
from blitzem.deployment import LoggedScriptDeployment

# Default deployment applied to every node: install the local public SSH key,
# then start a trivial HTTP server so load balancing can be demonstrated.
defaults["deployment"] = MultiStepDeployment([
		# Note: This key will be added to the authorized keys for the root user
		# (/root/.ssh/authorized_keys)
		SSHKeyDeployment(open(user_public_ssh_key).read()),
		# Serve a simple text file on each node to demonstrate load balancing effect
		LoggedScriptDeployment("apt-get update; apt-get install dtach"),
		LoggedScriptDeployment("mkdir web; cd web; hostname > hostname.txt; dtach -n /tmp/simple_http.worker python -m SimpleHTTPServer 8080")
	])



"""
==== WEB TIER ====
"""

# HTTP load balancer fronting every node tagged 'web'
LoadBalancer(	name="web_lb1",
				applies_to_tag="web",
				port=8080,
				protocol="http")

# A simple pair of nodes in the 'web' tier
Node(	name="web1",
		tags=["web"])
Node(	name="web2",
Exemplo n.º 14
0
    def test_ssh_key_deployment(self):
        """run() on an SSHKeyDeployment hands back the target node."""
        step = SSHKeyDeployment(key='1234')
        client = MockClient(hostname='localhost')
        self.assertEqual(step.run(node=self.node, client=client), self.node)
Exemplo n.º 15
0
def main(argv):
    """Provision (or reuse) the docbuilder node and write salt-ssh config.

    Credentials are read from the environment variables named by
    SKLEARN_RACKSPACE_NAME / SKLEARN_RACKSPACE_KEY. An optional positional
    argument selects the node RAM size (must match an available flavor).
    When no node named NODE_NAME exists, one is deployed with the local
    public SSH key; afterwards etc/salt/roster and etc/salt/master are
    (re)written and connection instructions are printed.

    :param argv: command-line arguments after the program name
    :raises RuntimeError: if credentials are missing or the builder image
        cannot be found
    """
    # Make a connection through the rackspace driver to the sklearn space

    name = os.environ.get(SKLEARN_RACKSPACE_NAME)
    key = os.environ.get(SKLEARN_RACKSPACE_KEY)
    if name is None or key is None:
        raise RuntimeError(
            "Please set credentials as enviroment variables "
            " {} and {}".format(SKLEARN_RACKSPACE_NAME, SKLEARN_RACKSPACE_KEY))
    conn_sklearn = get_driver(RACKSPACE_DRIVER)(name, key, region=REGION)

    # Obtain list of nodes
    existing_nodes = conn_sklearn.list_nodes()
    node_list = "\n".join("  - " + n.name for n in existing_nodes)
    print("Found %d existing node(s) with names:\n%s" % (
        len(existing_nodes), node_list))

    # Obtain list of machine sizes
    machine_sizes = [n.ram for n in conn_sklearn.list_sizes()]
    selected_ram = None
    server_status = 3  # assume busy

    # Parse '-h' and the optional RAM-size positional argument.
    try:
        opts, args = getopt.getopt(argv, "h")
        for opt, arg in opts:
            if opt == '-h':
                print_usage(machine_sizes)
                sys.exit()
        if args:
            if int(args[0]) not in machine_sizes:
                print_usage(machine_sizes)
                sys.exit()
            else:
                selected_ram = int(args[0])
    except getopt.GetoptError:
        print_usage(machine_sizes)
        sys.exit(2)

    # Check if our desired node already exists
    if not any(n.name == NODE_NAME for n in existing_nodes):
        print('The docbuilder node does not exist yet - creating node...')
        print('  -  Configuring node size')
        if selected_ram is None:
            print('    --   No node size provided: using default size of 2GB')
            size = [i for i in conn_sklearn.list_sizes()
                      if i.ram == DEFAULT_NODE_SIZE][0]
        else:
            print('    --   Node size set to: ', selected_ram)
            size = [i for i in conn_sklearn.list_sizes()
                      if i.ram >= selected_ram][0]

        print('  -   Configuring the builder image to ', IMAGE_NAME)
        images = conn_sklearn.list_images()
        matching_images = [i for i in images if i.name == IMAGE_NAME]
        if len(matching_images) == 0:
            image_names = "\n".join(sorted(i.name for i in images))
            raise RuntimeError("Could not find image with name %s,"
                               " available images:\n%s"
                               % (IMAGE_NAME, image_names))
        s_node_image = matching_images[0]

        # Create a new node if non exists
        with open(PUBLIC_KEY_PATH) as fp:
            pub_key_content = fp.read()
        step = SSHKeyDeployment(pub_key_content)
        print("Starting node deployment - This may take a few minutes")
        print("WARNING: Please do not interrupt the process")
        node = conn_sklearn.deploy_node(name=NODE_NAME, image=s_node_image,
                                          size=size, deploy=step,
                                          timeout=TIMEOUT, ssh_timeout=TIMEOUT)
        print('Node successfully provisioned: ', NODE_NAME)
    else:
        node = [n for n in existing_nodes if n.name == NODE_NAME][0]
        print("Node '%s' found" % NODE_NAME)
        print('Gathering connection information')

    if not os.path.exists('etc/salt'):
        os.makedirs('etc/salt')

    print("Storing connection information to etc/salt/roster")
    ip = gen_salt_roster(host_ips=node.public_ips)

    print("Configuring etc/salt/master")
    here = os.getcwd()
    # Use a context manager so the file is flushed and closed before any
    # later step relies on it (the previous version leaked the handle and
    # never flushed the write).
    with open("etc/salt/master", "w") as salt_master:
        salt_master.write(MASTER_TEMPLATE.format(root_dir=here))

    print('Checking if the server is active:')
    server_status = wait_for_active_status(server_status, conn_sklearn)

    # Making sure the private key has the right permissions to be useable by
    # paramiko
    os.chmod('docbuilder_rsa', 0o600)

    print("SSH connection command:")
    print("  ssh -i %s root@%s" % (PRIVATE_KEY_PATH, ip))

    # TODO: find a way to launch the state.highstate command via salt-ssh
    print("You can now configure the server with:")
    print("  salt-ssh -c ./etc/salt docbuilder state.highstate")
Exemplo n.º 16
0
		tags=["web", "peakload"],
		size=Size(ram=512))

"""
==== APP TIER ====
"""
Node(	name="app1",
		os="Ubuntu 11.04",
		tags=["app"])

# also tagged 'peakload' for scaling control
Node(	name="app2",
		os="Ubuntu 11.04",
		tags=["app", "peakload"])

"""
==== DB TIER ====
"""
Node(	name="db1",
		os="Ubuntu 11.04",
		tags=["db"],
		# We can customize the deployment steps, although the values shown here are
		#  simply the defaults, repeated for visibility.
		# Normally a provisioning tool such as puppet, chef, cfengine or similar should
		#  be used for detailed provisioning - these deployment steps may be used to 
		#  bootstrap the provisioning tool, though.
		deployment=MultiStepDeployment([
			SSHKeyDeployment(open(os.path.expanduser("~/.ssh/id_rsa.pub")).read()),
			ScriptDeployment("apt-get update"),
			ScriptDeployment("apt-get -y install puppet")
		]))
Exemplo n.º 17
0
    def cmd_deploy_node(self, options, arguments):
        """
        Creates, deploys and bootstraps a new node and prints its details. If a node with the same name already existed, prints its details instead.

        Usage: %prog [options] deploy-node --name <fqdn>
        """
        if options.name is None:
            self.fail("deploy-node requires the name of the node to create")

        options.name
        node = self.find_node(options.name)
        if (node):
            self.succeed(message="Node \"%s\" already exists!" % options.name,
                         data=node)

        image = self.find_image(options.image)
        if (not image):
            print options.image
            self.fail("Missing or invalid image id provided.")

        flavorId = self.find_flavor(options.flavorId)
        if (not flavorId):
            print options.flavorId
            self.fail("Missing or invalid flavor id provided.")

        network_objects = []
        if (not self.options.networks):
            self.fail("Missing networks.")
        else:
            for networkId in self.options.networks:
                try:
                    network = self.find_network(networkId)
                    if (not network):
                        print networkId
                        self.fail("Missing or invalid network id provided.")
                    network_objects.append(network)
                except Exception as e:
                    self.fail("Failed to retrieve networks")

        # read your public key in
        # Note: This key will be added to root's authorized_keys
        # (/root/.ssh/authorized_keys)
        sd = SSHKeyDeployment(
            open(os.path.expanduser(options.public_key)).read())

        # a simple script to install puppet post boot, can be much more
        # complicated.
        script = ScriptDeployment(options.script)

        # a task that first installs the ssh key, and then runs the script
        msd = MultiStepDeployment([sd, script])

        try:
            # deploy our node using multistep deployment strategy
            node = self.connection_compute.deploy_node(
                name=options.name,
                image=image,
                size=flavorId,
                deploy=msd,
                networks=network_objects)
            print "deploy success"
            # gets the hostname and domainname from fqdn
            hostname, domainname = options.name.split('.', 1)

            # see if zone already exists
            zone = self.find_zone(domainname)

            # if zone instance does not exist, create it
            if (not zone):
                zone = self.connection_dns.create_zone(domain=domainname)

            # create an A record type wth the public ip of the created node for
            # our zone
            record = zone.create_record(name=hostname,
                                        type=RecordType.A,
                                        data=node.public_ips[0])
        except Exception as e:
            self.fail("Exception: %s" % e)

        # decide if we wanted to wait for a reference of the running node
        if self.options.wait:
            running_node = self.wait_for_running_node(
                node.id, timeout=self.options.wait)
        else:
            running_node = None

        # if the node was created
        if (node):
            # if the running node exists set the node state to running
            if (running_node):
                node.state = running_node.state
            self.succeed(message="Node \"%s\" deployed!" % options.name,
                         data=node,
                         data_type='node')
Exemplo n.º 18
0
def create_machine(request):
    """Creates a new virtual machine on the specified backend.

    If the backend is Rackspace it attempts to deploy the node with an ssh key
    provided in config. the method used is the only one working in the old
    Rackspace backend. create_node(), from libcloud.compute.base, with 'auth'
    kwarg doesn't do the trick. Didn't test if you can upload some ssh related
    files using the 'ex_files' kwarg from openstack 1.0 driver.

    In Linode creation is a bit different. There you can pass the key file
    directly during creation. The Linode API also requires to set a disk size
    and doesn't get it from size.id. So, send size.disk from the client and
    use it in all cases just to avoid provider checking. Finally, Linode API
    does not support association between a machine and the image it came from.
    We could set this, at least for machines created through mist.io in
    ex_comment, lroot or lconfig. lroot seems more appropriate. However,
    liblcoud doesn't support linode.config.list at the moment, so no way to
    get them. Also, it will create inconsistencies for machines created
    through mist.io and those from the Linode interface.
    """

    # NOTE(review): bare excepts below are deliberate best-effort fallbacks,
    # but they also hide unexpected errors.
    try:
        conn = connect(request)
    except:
        return Response('Backend not found', 404)

    backend_id = request.matchdict['backend']

    # Optional key name from the request body; fall back to any keypair.
    try:
        key_name = request.json_body['key']
    except:
        key_name = None

    # Keypairs come from the session if present, else from app settings.
    try:
        keypairs = request.environ['beaker.session']['keypairs']
    except:
        keypairs = request.registry.settings.get('keypairs', {})

    if key_name:
        keypair = get_keypair_by_name(keypairs, key_name)
    else:
        keypair = get_keypair(keypairs)

    if keypair:
        private_key = keypair['private']
        public_key = keypair['public']
    else:
        private_key = public_key = None

    # Required machine parameters from the request body.
    try:
        machine_name = request.json_body['name']
        location_id = request.json_body['location']
        image_id = request.json_body['image']
        size_id = request.json_body['size']
        #deploy_script received as unicode, but ScriptDeployment wants str
        script = str(request.json_body.get('script', ''))
        # these are required only for Linode, passing them anyway
        image_extra = request.json_body['image_extra']
        disk = request.json_body['disk']
    except Exception as e:
        return Response('Invalid payload', 400)

    # Thin libcloud wrappers: only the ids (and disk for Linode) matter.
    size = NodeSize(size_id,
                    name='',
                    ram='',
                    disk=disk,
                    bandwidth='',
                    price='',
                    driver=conn)
    image = NodeImage(image_id, name='', extra=image_extra, driver=conn)

    # EC2 needs a real location object from the driver; others accept a stub.
    if conn.type in EC2_PROVIDERS:
        locations = conn.list_locations()
        for loc in locations:
            if loc.id == location_id:
                location = loc
                break
    else:
        location = NodeLocation(location_id, name='', country='', driver=conn)

    # Rackspace: deploy key + script via MultiStepDeployment.
    if conn.type in [Provider.RACKSPACE_FIRST_GEN, Provider.RACKSPACE] and\
    public_key:
        key = SSHKeyDeployment(str(public_key))
        deploy_script = ScriptDeployment(script)
        msd = MultiStepDeployment([key, deploy_script])
        try:
            node = conn.deploy_node(name=machine_name,
                                    image=image,
                                    size=size,
                                    location=location,
                                    deploy=msd)
            # Remember which backend/node this keypair was used for.
            if keypair:
                machines = keypair.get('machines', None)
                if machines and len(machines):
                    keypair['machines'].append([backend_id, node.id])
                else:
                    keypair['machines'] = [
                        [backend_id, node.id],
                    ]
                save_keypairs(request, keypair)
        except Exception as e:
            return Response(
                'Something went wrong with node creation in RackSpace: %s' % e,
                500)
    # EC2: import the key, ensure the security group, deploy with a temp
    # private-key file on disk (deploy_node wants a path).
    elif conn.type in EC2_PROVIDERS and public_key:
        imported_key = import_key(conn, public_key, key_name)
        created_security_group = create_security_group(conn, EC2_SECURITYGROUP)
        deploy_script = ScriptDeployment(script)

        (tmp_key, tmp_key_path) = tempfile.mkstemp()
        key_fd = os.fdopen(tmp_key, 'w+b')
        key_fd.write(private_key)
        key_fd.close()
        #deploy_node wants path for ssh private key
        if imported_key and created_security_group:
            try:
                node = conn.deploy_node(
                    name=machine_name,
                    image=image,
                    size=size,
                    deploy=deploy_script,
                    location=location,
                    ssh_key=tmp_key_path,
                    ex_keyname=key_name,
                    ex_securitygroup=EC2_SECURITYGROUP['name'])

                if keypair:
                    machines = keypair.get('machines', None)
                    if machines and len(machines):
                        keypair['machines'].append([backend_id, node.id])
                    else:
                        keypair['machines'] = [
                            [backend_id, node.id],
                        ]
                    save_keypairs(request, keypair)
            except Exception as e:
                return Response(
                    'Something went wrong with node creation in EC2: %s' % e,
                    500)
        #remove temp file with private key
        try:
            os.remove(tmp_key_path)
        except:
            pass
    # Linode: pass the key as auth directly on create_node.
    # NOTE(review): 'is' comparison against Provider.LINODE and the 'deploy='
    # kwarg on create_node both look suspicious — confirm against the
    # libcloud version in use.
    elif conn.type is Provider.LINODE and public_key:
        auth = NodeAuthSSHKey(public_key)
        deploy_script = ScriptDeployment(script)
        try:
            node = conn.create_node(name=machine_name,
                                    image=image,
                                    size=size,
                                    deploy=deploy_script,
                                    location=location,
                                    auth=auth)
            if keypair:
                machines = keypair.get('machines', None)
                if machines and len(machines):
                    keypair['machines'].append([backend_id, node.id])
                else:
                    keypair['machines'] = [
                        [backend_id, node.id],
                    ]
                save_keypairs(request, keypair)
        except:
            return Response('Something went wrong with Linode creation', 500)

    # Fallback: plain node creation without any key deployment.
    else:
        try:
            node = conn.create_node(name=machine_name,
                                    image=image,
                                    size=size,
                                    location=location)
        except Exception as e:
            return Response(
                'Something went wrong with generic node creation: %s' % e, 500)

    return {
        'id': node.id,
        'name': node.name,
        'extra': node.extra,
        'public_ips': node.public_ips,
        'private_ips': node.private_ips,
    }
Exemplo n.º 19
0
-----BEGIN RSA PRIVATE KEY-----
????
-----END RSA PRIVATE KEY-----
"""

# Read the local DSA public key that will be installed on the new node.
mykey = j.system.fs.fileGetContents("/root/.ssh/id_dsa.pub")

# Shell script to run on the remote server
SCRIPT = '''#!/usr/bin/env bash
apt-get -y update && apt-get -y install mc
'''
from libcloud.compute.deployment import MultiStepDeployment
from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment
# Note: This key will be added to the authorized keys for the root user
# (/root/.ssh/authorized_keys)
step_1 = SSHKeyDeployment(mykey)

# A simple post-boot script (here it installs 'mc'); can be much more
# complicated.
step_2 = ScriptDeployment(SCRIPT)

# Run both steps in order: install the key, then run the script.
msd = MultiStepDeployment([step_1, step_2])


def ffind():
    """Print every image from the global ``images`` list that looks like
    an Ubuntu 14.04 image.

    Relies on the module-level globals ``sizes``, ``images`` and
    ``SIZE_ID``. Raises IndexError when no size matches ``SIZE_ID``.
    """
    # NOTE(review): `size` is computed but never used — it only has the
    # side effect of failing fast when SIZE_ID is unknown; confirm intent
    # before removing.
    size = [s for s in sizes if s.id == SIZE_ID][0]
    for image in images:
        # Some providers return images without a name; skip those.
        if image.name is not None:
            name = image.name.lower()
            # Match both the distro name and the release number.
            if name.find("ubuntu") != -1 and name.find("14.04") != -1:
                print(image)
# Shell script run on the new server right after boot: install puppet.
SCRIPT = """#!/usr/bin/env bash
apt-get -y update && apt-get -y install puppet
"""

# Credentials scrubbed from the original example — fill in before use.
RACKSPACE_USER = "******"
RACKSPACE_KEY = "your key"

# Build a Rackspace connection from the generic driver factory.
Driver = get_driver(Provider.RACKSPACE)
conn = Driver(RACKSPACE_USER, RACKSPACE_KEY)

# NOTE(review): KEY_PATH is defined elsewhere in the original example —
# presumably the path to an SSH public key file; confirm before running.
with open(KEY_PATH) as fp:
    content = fp.read()

# Note: This key will be added to the authorized keys for the root user
# (/root/.ssh/authorized_keys)
step_1 = SSHKeyDeployment(content)

# A simple script to install puppet post boot, can be much more complicated.
step_2 = ScriptDeployment(SCRIPT)

# Key deployment first, then the provisioning script.
msd = MultiStepDeployment([step_1, step_2])

images = conn.list_images()
sizes = conn.list_sizes()

# deploy_node takes the same base keyword arguments as create_node.
node = conn.deploy_node(name="test",
                        image=images[0],
                        size=sizes[0],
                        deploy=msd)
Exemplo n.º 21
0
    def _get_rubs(self, node, settings):
        """
        Defines the set of actions to be done on a node.

        Builds the list of libcloud deployment steps ("rubs") for a node:
        the fleet SSH public key (if any), followed by one step per
        directive found under ``settings['rub']``. Recognised directives
        are ``run <script> [args...]`` (default when no keyword is given)
        and ``put <file> [destination]``.

        :param node: the node to be polished
        :type node: :class:`libcloud.compute.base.Node`

        :param settings: the fittings plan for this node
        :type settings: ``dict``

        :return: a list of actions to be performed, and related descriptions
        :rtype: a ``list`` of ``{'description': ..., 'genius': ...}``

        :raises PlumberyException: when a referenced file cannot be read,
            or when a directive keyword is unknown

        """

        # Nothing to do when the plan has no 'rub' section at all.
        if not isinstance(settings, dict) or 'rub' not in settings:
            return []

        rubs = []

        # Always push the fleet's public key first so later scripts can
        # rely on password-less SSH access.
        if self.key is not None:
            rubs.append({
                'description': 'deploy SSH public key',
                'genius': SSHKeyDeployment(self.key)})

        if settings['rub'] is not None:
            for directive in settings['rub']:

                tokens = directive.split(' ')
                # A bare script name defaults to the 'run' directive.
                if len(tokens) == 1:
                    tokens.insert(0, 'run')

                if tokens[0] == 'run':

                    script_name = tokens[1]
                    # Anything after the script name is passed as arguments.
                    if len(tokens) > 2:
                        args = tokens[2:]
                    else:
                        args = None

                    try:
                        # Scripts are resolved relative to this module's
                        # directory.
                        with open(os.path.dirname(__file__)+'/'+script_name) as stream:
                            text = stream.read()
                            # Empty scripts are silently skipped.
                            if text:
                                rubs.append({
                                    'description': ' '.join(tokens),
                                    'genius': ScriptDeployment(script=text,
                                                            args=args,
                                                            name=script_name)})

                    except IOError:
                        raise PlumberyException("Error: cannot read '{}'"
                                                            .format(script_name))

                elif tokens[0] == 'put':

                    # Renamed from `file` to avoid shadowing the builtin.
                    filename = tokens[1]
                    # Default destination: same name in the remote cwd.
                    if len(tokens) > 2:
                        destination = tokens[2]
                    else:
                        destination = './'+filename

                    try:
                        source = os.path.dirname(__file__)+'/'+filename
                        with open(source) as stream:
                            text = stream.read()
                            # Empty files are silently skipped.
                            if text:
                                rubs.append({
                                    'description': ' '.join(tokens),
                                    'genius': FileDeployment(source=source,
                                                         target=destination)})

                    except IOError:
                        raise PlumberyException("Error: cannot read '{}'"
                                                            .format(filename))

                else:
                    raise PlumberyException("Error: unknown directive '{}'"
                                                    .format(' '.join(tokens)))

        return rubs