Example #1
0
    def do_update_connectivity(self, token, instance_id):
        oss = self.get_oss()
        pbclient = PBClient(token,
                            self.config['INTERNAL_API_BASE_URL'],
                            ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        security_group_id = instance_data['security_group_id']

        blueprint_config = pbclient.get_blueprint_description(
            instance['blueprint_id'])
        config = blueprint_config['config']

        # Delete all existing rules and add the rules using the input port string
        oss.clear_security_group_rules(security_group_id)

        ports_str = config['exposed_ports']
        if not ports_str:
            ports_str = '22'  # If the input port string is empty then use 22 as the default port
        ports_list = parse_ports_string(ports_str)
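        # parse_ports_string() yields (from_port, to_port) entries; one TCP rule is
        # created per entry, restricted to the client's current IP.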

        for ports in ports_list:
            from_port = ports[0]
            to_port = ports[1]

            oss.create_security_group_rule(security_group_id,
                                           from_port=from_port,
                                           to_port=to_port,
                                           cidr="%s/32" %
                                           instance['client_ip'],
                                           ip_protocol='tcp',
                                           group_id=None)
Example #2
0
def periodic_update():
    token = get_token()
    pbclient = PBClient(token, local_config['INTERNAL_API_BASE_URL'], ssl_verify=False)
    instances = pbclient.get_instances()

    deprovision_list = []
    update_list = []
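    # Running instances whose maximum lifetime has run out are queued for
    # deprovisioning; instances in any other non-failed state get a regular update.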
    for instance in instances:
        logger.debug('checking instance for actions %s' % instance['name'])
        deprovision_required = False
        if instance.get('state') in [Instance.STATE_RUNNING]:
            if not instance.get('lifetime_left') and instance.get('maximum_lifetime'):
                deprovision_required = True

            if deprovision_required:
                deprovision_list.append(instance)

        elif instance.get('state') not in [Instance.STATE_FAILED]:
            update_list.append(instance)

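    # Cap the work done in one periodic run to at most 10 instances per list.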
    if len(deprovision_list) > 10:
        deprovision_list = random.sample(deprovision_list, 10)
    for instance in deprovision_list:
        logger.info('deprovisioning triggered for %s (reason: maximum lifetime exceeded)' % instance.get('id'))
        pbclient.do_instance_patch(instance['id'], {'to_be_deleted': True})
        run_update.delay(instance.get('id'))

    if len(update_list) > 10:
        update_list = random.sample(update_list, 10)
    for instance in update_list:
        run_update.delay(instance.get('id'))
Example #3
0
    def do_update_connectivity(self, token, instance_id):
        oss = self.get_oss()
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        security_group_id = instance_data['security_group_id']

        blueprint_config = pbclient.get_blueprint_description(instance['blueprint_id'])
        config = blueprint_config['config']

        # Delete all existing rules and add the rules using the input port string
        oss.clear_security_group_rules(security_group_id)

        ports_str = config['exposed_ports']
        if not ports_str:
            ports_str = '22'  # If the input port string is empty then use 22 as the default port
        ports_list = parse_ports_string(ports_str)

        for ports in ports_list:
            from_port = ports[0]
            to_port = ports[1]

            oss.create_security_group_rule(
                security_group_id,
                from_port=from_port,
                to_port=to_port,
                cidr="%s/32" % instance['client_ip'],
                ip_protocol='tcp',
                group_id=None
            )
Example #4
0
def get_config():
    """
    Retrieve dynamic config over ReST API. Config object from Flask is unable to resolve variables from
    database if containers are used. In order to use the ReST API some configuration items
    (Variable.filtered_variables) are required. These are read from Flask config object, as these values
    cannot be modified during the runtime.
    """
    token = get_token()
    pbclient = PBClient(token, local_config['INTERNAL_API_BASE_URL'], ssl_verify=False)

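    # Flatten the variables endpoint response into a plain key/value dict.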
    return dict([(x['key'], x['value']) for x in pbclient.do_get('variables').json()])
Example #5
0
    def update(self, token, instance_id):
        self.logger.debug("update('%s')" % instance_id)

        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        instance = pbclient.get_instance(instance_id)
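        # Queued instances that are not flagged for deletion get provisioned; instances
        # flagged for deletion that are not yet deleted get deprovisioned.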
        if not instance['to_be_deleted'] and instance['state'] in [Instance.STATE_QUEUEING]:
            self.provision(token, instance_id)
        elif instance['to_be_deleted'] and instance['state'] not in [Instance.STATE_DELETED]:
            self.deprovision(token, instance_id)
        else:
            self.logger.debug("update('%s') - nothing to do for %s" % (instance_id, instance))
Example #6
0
    def do_update_connectivity(self, token, instance_id):
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)

        instance = pbclient.get_instance_description(instance_id)
        cluster_name = instance['name']

        instance_dir = '%s/%s' % (self.config['INSTANCE_DATA_DIR'], cluster_name)

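        # Regenerate the firewall configuration with a single rule allowing SSH (tcp/22)
        # from the client's current IP.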
        with open('%s/%s' % (instance_dir, 'firewall.conf'), 'w') as fw_file:
            fw_file.write('# Firewall file generated by pouta blueprints\n')
            fw_file.write('tcp 22 22 %s/32\n' % instance['client_ip'])

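        # Apply the new rules on the cluster via poutacluster.py, logging the output.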
        uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py update_firewall firewall.conf'
        self.run_logged_process(cmd=cmd, cwd=instance_dir, env=self.create_openstack_env(), log_uploader=uploader)
Example #7
0
    def do_deprovision(self, token, instance_id):
        log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='deprovisioning')
        log_uploader.info("Deprovisioning instance %s\n" % instance_id)
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        oss = self.get_oss()
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        if 'server_id' not in instance_data:
            log_uploader.info("Skipping, no server id in instance data")
            return

        server_id = instance_data['server_id']

        log_uploader.info("Destroying server instance . . ")
        oss.deprovision_instance(server_id)
        log_uploader.info("Deprovisioning ready\n")
Example #8
0
    def deprovision(self, token, instance_id):
        self.logger.debug('starting deprovisioning')
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)

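        # Mark the instance as being deleted, run the driver-specific deprovisioning,
        # then record completion; any error flags the instance as failed and is re-raised.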
        try:
            pbclient.do_instance_patch(instance_id, {'state': Instance.STATE_DELETING})
            self.logger.debug('calling subclass do_deprovision')
            self.do_deprovision(token, instance_id)

            self.logger.debug('finishing deprovisioning')
            pbclient.do_instance_patch(instance_id, {'deprovisioned_at': datetime.datetime.utcnow()})
            pbclient.do_instance_patch(instance_id, {'state': Instance.STATE_DELETED})
        except Exception as e:
            self.logger.exception('do_deprovision raised %s' % e)
            pbclient.do_instance_patch(instance_id, {'state': Instance.STATE_FAILED})
            raise e
Example #9
0
    def do_update_connectivity(self, token, instance_id):
        oss = self.get_oss()
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        security_group_id = instance_data['security_group_id']

        # As currently only single firewall rule can be added by the user,
        # first delete all existing rules and add the new one
        oss.clear_security_group_rules(security_group_id)
        oss.create_security_group_rule(
            security_group_id,
            from_port=22,
            to_port=22,
            cidr="%s/32" % instance['client_ip'],
            ip_protocol='tcp',
            group_id=None
        )
Example #10
0
    def do_update_connectivity(self, token, instance_id):
        oss = self.get_oss()
        pbclient = PBClient(token,
                            self.config['INTERNAL_API_BASE_URL'],
                            ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        security_group_id = instance_data['security_group_id']

        # As currently only single firewall rule can be added by the user,
        # first delete all existing rules and add the new one
        oss.clear_security_group_rules(security_group_id)
        oss.create_security_group_rule(security_group_id,
                                       from_port=22,
                                       to_port=22,
                                       cidr="%s/32" % instance['client_ip'],
                                       ip_protocol='tcp',
                                       group_id=None)
Example #11
0
    def do_provision(self, token, instance_id):
        self.logger.debug("do_provision %s" % instance_id)

        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)

        instance_name = instance['name']
        instance_user = instance['user_id']

        # fetch config
        blueprint_config = pbclient.get_blueprint_description(instance['blueprint_id'])
        config = blueprint_config['config']

        log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')
        log_uploader.info("Provisioning OpenStack instance (%s)\n" % instance_id)

        # fetch user public key
        key_data = pbclient.get_user_key_data(instance_user).json()
        if not key_data:
            error = 'user\'s public key is missing'
            error_body = {'state': Instance.STATE_FAILED, 'error_msg': error}
            pbclient.do_instance_patch(instance_id, error_body)
            self.logger.debug(error)
            raise RuntimeError(error)

        oss = self.get_oss()

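        # Ask the OpenStack service wrapper to create the server; userdata is optional.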
        result = oss.provision_instance(
            instance_name,
            config['image'],
            config['flavor'],
            public_key=key_data[0]['public_key'],
            userdata=config.get('userdata'))

        if 'error' in result:
            log_uploader.warn('Provisioning failed %s' % result['error'])
            return

        ip = result['address_data']['public_ip']
        instance_data = {
            'server_id': result['server_id'],
            'floating_ip': ip,
            'allocated_from_pool': result['address_data']['allocated_from_pool'],
            'security_group_id': result['security_group'],
            'endpoints': [
                {'name': 'SSH', 'access': 'ssh cloud-user@%s' % ip},
            ]
        }
        log_uploader.info("Publishing server data\n")
        pbclient.do_instance_patch(
            instance_id,
            {'instance_data': json.dumps(instance_data), 'public_ip': ip})
        log_uploader.info("Provisioning complete\n")
Example #12
0
    def do_deprovision(self, token, instance_id):
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)

        instance = pbclient.get_instance_description(instance_id)
        cluster_name = instance['name']

        instance_dir = '%s/%s' % (self.config['INSTANCE_DATA_DIR'], cluster_name)

        uploader = self.create_prov_log_uploader(token, instance_id, log_type='deprovisioning')

        self.logger.info('faking deprovisioning')
        if os.path.isdir(instance_dir):
            cmd = 'time ping -c 5 localhost'
            self.run_logged_process(cmd=cmd, cwd=instance_dir, shell=True, log_uploader=uploader)
        else:
            self.logger.info('no instance dir found, assuming instance was never provisioned')

        # use instance id as a part of the name to make tombstones always unique
        if os.path.isdir(instance_dir):
            os.rename(instance_dir, '%s.deleted.%s' % (instance_dir, instance_id))
Example #13
0
    def do_deprovision(self, token, instance_id):
        log_uploader = self.create_prov_log_uploader(token,
                                                     instance_id,
                                                     log_type='deprovisioning')
        log_uploader.info("Deprovisioning instance %s\n" % instance_id)
        pbclient = PBClient(token,
                            self.config['INTERNAL_API_BASE_URL'],
                            ssl_verify=False)
        oss = self.get_oss()
        instance = pbclient.get_instance_description(instance_id)
        instance_data = instance['instance_data']
        if 'server_id' not in instance_data:
            log_uploader.info("Skipping, no server id in instance data")
            return

        server_id = instance_data['server_id']

        log_uploader.info("Destroying server instance . . ")
        oss.deprovision_instance(server_id)
        log_uploader.info("Deprovisioning ready\n")
Example #14
0
    def provision(self, token, instance_id):
        self.logger.debug('starting provisioning')
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)

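        # Mark the instance as provisioning, run the driver-specific do_provision() and
        # store the resulting state (defaulting to running); errors mark it failed and are re-raised.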
        try:
            pbclient.do_instance_patch(instance_id, {'state': Instance.STATE_PROVISIONING})
            self.logger.debug('calling subclass do_provision')

            new_state = self.do_provision(token, instance_id)
            if not new_state:
                new_state = Instance.STATE_RUNNING

            pbclient.do_instance_patch(instance_id, {'state': new_state})
        except Exception as e:
            self.logger.exception('do_provision raised %s' % e)
            pbclient.do_instance_patch(instance_id, {'state': Instance.STATE_FAILED})
            raise e
Example #15
0
def periodic_update():
    token = get_token()
    pbclient = PBClient(token,
                        local_config['INTERNAL_API_BASE_URL'],
                        ssl_verify=False)
    instances = pbclient.get_instances()

    deprovision_list = []
    update_list = []
    for instance in instances:
        logger.debug('checking instance for actions %s' % instance['name'])
        deprovision_required = False
        if instance.get('state') in [Instance.STATE_RUNNING]:
            if not instance.get('lifetime_left') and instance.get(
                    'maximum_lifetime'):
                deprovision_required = True

            if deprovision_required:
                deprovision_list.append(instance)

        elif instance.get('state') not in [Instance.STATE_FAILED]:
            update_list.append(instance)

    if len(deprovision_list) > 10:
        deprovision_list = random.sample(deprovision_list, 10)
    for instance in deprovision_list:
        logger.info(
            'deprovisioning triggered for %s (reason: maximum lifetime exceeded)'
            % instance.get('id'))
        pbclient.do_instance_patch(instance['id'], {'to_be_deleted': True})
        run_update.delay(instance.get('id'))

    if len(update_list) > 10:
        update_list = random.sample(update_list, 10)
    for instance in update_list:
        run_update.delay(instance.get('id'))
Example #16
0
    def do_deprovision(self, token, instance_id):
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)

        instance = pbclient.get_instance_description(instance_id)
        cluster_name = instance['name']

        instance_dir = '%s/%s' % (self.config['INSTANCE_DATA_DIR'], cluster_name)

        # check if provisioning has failed before even creating an instance state directory
        if not os.path.exists(instance_dir):
            return

        uploader = self.create_prov_log_uploader(token, instance_id, log_type='deprovisioning')
        # run deprovisioning
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py down'
        self.run_logged_process(cmd=cmd, cwd=instance_dir, env=self.create_openstack_env(), log_uploader=uploader)

        # clean generated security and server groups
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py cleanup'
        self.run_logged_process(cmd=cmd, cwd=instance_dir, env=self.create_openstack_env(), log_uploader=uploader)

        # destroy volumes
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py destroy_volumes'
        self.run_logged_process(cmd=cmd, cwd=instance_dir, env=self.create_openstack_env(), log_uploader=uploader)

        # remove generated key from OpenStack
        args = ['nova', 'keypair-delete', '%s' % cluster_name]
        p = subprocess.Popen(args, cwd=instance_dir, env=self.create_openstack_env())
        p.wait()

        # use instance id as a part of the name to make tombstones always unique
        os.rename(instance_dir, '%s.deleted.%s' % (instance_dir, instance_id))
Example #17
0
    def do_provision(self, token, instance_id):
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)

        instance = pbclient.get_instance_description(instance_id)
        cluster_name = instance['name']

        instance_dir = '%s/%s' % (self.config['INSTANCE_DATA_DIR'], cluster_name)

        # will fail if there is already a directory for this instance
        os.makedirs(instance_dir)

        # fetch config for this cluster
        # config = self.get_blueprint_description(token, instance['blueprint_id'])

        # fetch user public key and save it
        key_data = pbclient.get_user_key_data(instance['user_id']).json()
        user_key_file = '%s/userkey.pub' % instance_dir
        if not key_data:
            error_body = {'state': Instance.STATE_FAILED, 'error_msg': 'user\'s public key is missing'}
            pbclient.do_instance_patch(instance_id, error_body)
            raise RuntimeError("User's public key missing")

        with open(user_key_file, 'w') as kf:
            kf.write(key_data[0]['public_key'])

        uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')

        self.logger.info('faking provisioning')
        cmd = 'time ping -c 10 localhost'
        self.run_logged_process(cmd=cmd, cwd=instance_dir, shell=True, log_uploader=uploader)
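        # Fabricate a random public IP so the rest of the pipeline can be exercised
        # without real infrastructure.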
        public_ip = '%s.%s.%s.%s' % (randint(1, 254), randint(1, 254), randint(1, 254), randint(1, 254))
        instance_data = {
            'endpoints': [
                {'name': 'SSH', 'access': 'ssh cloud-user@%s' % public_ip},
                {'name': 'Some Web Interface', 'access': 'http://%s/service-x' % public_ip},
            ]
        }
        pbclient.do_instance_patch(
            instance_id,
            {
                'public_ip': public_ip, 'instance_data': json.dumps(instance_data)
            }
        )
Example #18
0
    def do_provision(self, token, instance_id):
        pbclient = PBClient(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)
        cluster_name = instance['name']

        instance_dir = '%s/%s' % (self.config['INSTANCE_DATA_DIR'], cluster_name)

        # will fail if there is already a directory for this instance
        os.makedirs(instance_dir)

        # generate pvc config for this cluster
        blueprint_config = pbclient.get_blueprint_description(instance['blueprint_id'])['config']

        self.logger.debug('Blueprint config: %s' % blueprint_config)

        cluster_config = self.create_cluster_config(blueprint_config, cluster_name)
        with open('%s/cluster.yml' % instance_dir, 'w') as cf:
            cf.write(cluster_config)
            cf.write('\n')

        # figure out the number of nodes from config provisioning-data
        if 'number_of_nodes' in blueprint_config:
            num_nodes = int(blueprint_config['number_of_nodes'])
        else:
            self.logger.warn('number of nodes in cluster not defined, using default: 2')
            num_nodes = 2

        # fetch user public key and save it
        key_data = pbclient.get_user_key_data(instance['user_id']).json()
        user_key_file = '%s/userkey.pub' % instance_dir
        if not key_data:
            pbclient.do_instance_patch(instance_id, {'state': Instance.STATE_FAILED})
            raise RuntimeError("User's public key missing")

        with open(user_key_file, 'w') as kf:
            kf.write(key_data[0]['public_key'])

        uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')
        # generate keypair for this cluster
        key_file = '%s/key.priv' % instance_dir
        if not os.path.isfile(key_file):
            with open(key_file, 'w') as keyfile:
                args = ['nova', 'keypair-add', '%s' % cluster_name]
                p = subprocess.Popen(args, cwd=instance_dir, stdout=keyfile, env=self.create_openstack_env())
                p.wait()
            os.chmod(key_file, stat.S_IRUSR)

        # run provisioning
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py up %d' % num_nodes
        self.logger.debug('spawning "%s"' % cmd)
        self.run_logged_process(cmd=cmd, cwd=instance_dir, env=self.create_openstack_env(), log_uploader=uploader)

        # add user key for ssh access
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py add_key userkey.pub'
        self.logger.debug('spawning "%s"' % cmd)
        self.run_logged_process(cmd=cmd, cwd=instance_dir, env=self.create_openstack_env(), log_uploader=uploader)

        # get public IP
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py info'
        p = subprocess.Popen(shlex.split(cmd), cwd=instance_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             env=self.create_openstack_env())
        out, err = p.communicate()
        public_ip = None
        for line in out.splitlines():
            line = line.strip()
            if line.startswith('public ip:'):
                public_ip = line.split(':')[1].strip()
                break
        if public_ip:
            instance_data = {
                'endpoints': [
                    {'name': 'SSH', 'access': 'ssh cloud-user@%s' % public_ip},
                ]
            }
            pbclient.do_instance_patch(
                instance_id,
                {'public_ip': public_ip, 'instance_data': json.dumps(instance_data)}
            )

        # run info as the last command to show the service endpoints at the end of the log
        cmd = '/webapps/pouta_blueprints/venv/bin/python /opt/pvc/python/poutacluster.py info'
        self.logger.debug('spawning "%s"' % cmd)
        self.run_logged_process(cmd=cmd, cwd=instance_dir, env=self.create_openstack_env(), log_uploader=uploader)
Example #19
0
    def do_provision(self, token, instance_id):
        self.logger.debug("do_provision %s" % instance_id)

        pbclient = PBClient(token,
                            self.config['INTERNAL_API_BASE_URL'],
                            ssl_verify=False)
        instance = pbclient.get_instance_description(instance_id)

        instance_name = instance['name']
        instance_user = instance['user_id']

        # fetch config
        blueprint_config = pbclient.get_blueprint_description(
            instance['blueprint_id'])
        config = blueprint_config['config']

        log_uploader = self.create_prov_log_uploader(token,
                                                     instance_id,
                                                     log_type='provisioning')
        log_uploader.info("Provisioning OpenStack instance (%s)\n" %
                          instance_id)

        # fetch user public key
        key_data = pbclient.get_user_key_data(instance_user).json()
        if not key_data:
            error = 'user\'s public key is missing'
            error_body = {'state': Instance.STATE_FAILED, 'error_msg': error}
            pbclient.do_instance_patch(instance_id, error_body)
            self.logger.debug(error)
            raise RuntimeError(error)

        oss = self.get_oss()

        result = oss.provision_instance(instance_name,
                                        config['image'],
                                        config['flavor'],
                                        public_key=key_data[0]['public_key'],
                                        userdata=config.get('userdata'))

        if 'error' in result:
            log_uploader.warn('Provisioning failed %s' % result['error'])
            return

        ip = result['address_data']['public_ip']
        instance_data = {
            'server_id': result['server_id'],
            'floating_ip': ip,
            'allocated_from_pool':
            result['address_data']['allocated_from_pool'],
            'security_group_id': result['security_group'],
            'endpoints': [
                {
                    'name': 'SSH',
                    'access': 'ssh cloud-user@%s' % ip
                },
            ]
        }
        log_uploader.info("Publishing server data\n")
        pbclient.do_instance_patch(instance_id, {
            'instance_data': json.dumps(instance_data),
            'public_ip': ip
        })
        log_uploader.info("Provisioning complete\n")
Example #20
0
def get_provisioning_type(token, instance_id):
    pbclient = PBClient(token, local_config['INTERNAL_API_BASE_URL'], ssl_verify=False)

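    # The instance's parent data is its blueprint; the plugin it references names the provisioning type.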
    blueprint = pbclient.get_instance_parent_data(instance_id)
    plugin_id = blueprint['plugin']
    return pbclient.get_plugin_data(plugin_id)['name']
Example #21
0
def get_pb_client(token, api_base_url, ssl_verify):
    return PBClient(token, api_base_url, ssl_verify)