Exemple #1
0
def upload_uniq_image(osp_connection: Connection, cloud: str,
                      cluster_name: str, images_dir: str, installer: str):
    """Upload a cluster-private RHCOS image instead of sharing one.

    Resolves the image URL and version from the installer, reuses a
    locally cached qcow2 when one exists (downloading it otherwise),
    uploads it as a private image named after the cluster, records the
    cloud/image metadata in <cluster_name>/fips.json, and returns the
    uploaded image's name.
    """
    inst_url, version = get_url(installer)
    image_name = f"osia-{cluster_name}-{version}"
    local_path = Path(images_dir).joinpath(f"rhcos-{version}.qcow2")
    if local_path.exists():
        logging.info("Found image at %s", local_path.name)
        source_file = local_path.as_posix()
    else:
        logging.info("Starting download of image %s", inst_url)
        source_file = download_image(inst_url, local_path.as_posix())

    logging.info("Starting upload of image into openstack")
    osp_connection.create_image(image_name,
                                filename=source_file,
                                container_format="bare",
                                disk_format="qcow2",
                                wait=True,
                                osia_clusters=cluster_name,
                                visibility='private')
    logging.info("Upload finished")
    uploaded = osp_connection.image.find_image(image_name)
    logging.info("Image uploaded as %s", uploaded.name)
    # Persist which cloud/image this cluster uses so teardown can find it.
    with open(Path(cluster_name).joinpath("fips.json"), "w") as fips_file:
        json.dump({'cloud': cloud, 'fips': [], 'image': image_name}, fips_file)
    return uploaded.name
Exemple #2
0
def update_aggregates_db(os_conn=None):
    """Refresh the cached host -> aggregate-name mapping in redis.

    Builds a connection lazily when the caller does not inject one, then
    stores the mapping (JSON-encoded) plus a refresh timestamp.
    """
    conn = os_conn or OpenStack(session=os.session,
                                cloud=CLOUD,
                                region_name=os.get_region())

    host_to_aggregate = {
        host: aggregate.name
        for aggregate in conn.list_aggregates()
        for host in aggregate.hosts
    }

    redis.set('aggregates', json.dumps(host_to_aggregate))
    redis.set('aggregates:timestamp', time.time())
Exemple #3
0
def _find_cluster_ports(osp_connection: Connection, cluster_name: str) -> Munch:
    """Return the ingress port belonging to *cluster_name*.

    Scans all ports and returns the first whose name starts with the
    cluster name and ends with 'ingress-port'.

    Raises:
        Exception: when no matching ingress port exists.
    """
    for candidate in osp_connection.list_ports():
        if (candidate.name.startswith(cluster_name)
                and candidate.name.endswith('ingress-port')):
            return candidate
    raise Exception(f"Ingress port for cluster {cluster_name} was not found")
def get_openstack_connection(region_name):
    """
    Build and return an OpenStack SDK Connection for *region_name*.

    This is the new, all-powerful Python API for OpenStack and should be
    preferred over the service-specific APIs such as the Nova API below.
    The returned Connection exposes one attribute per available service,
    e.g. "compute", "network", etc.
    """
    profile = Profile()
    profile.set_region(Profile.ALL, region_name)
    connection = Connection(
        profile=profile,
        user_agent='opencraft-im',
        auth_url=settings.OPENSTACK_AUTH_URL,
        project_name=settings.OPENSTACK_TENANT,
        username=settings.OPENSTACK_USER,
        password=settings.OPENSTACK_PASSWORD,
    )
    # API queries occasionally hit transient connection errors from the
    # provider; retry safe requests (per urllib3's definition) before
    # giving up so short-lived unavailability recovers gracefully.
    retry_adapter = requests.adapters.HTTPAdapter(max_retries=get_requests_retry())
    for scheme in ('http://', 'https://'):
        connection.session.session.mount(scheme, retry_adapter)
    return connection
 def read_input_inventory(self):
     """Build and return the TripleO ansible inventory as a dict.

     Reads OpenStack credentials from the standard OS_* environment
     variables, connects to the undercloud, resolves the plan name, and
     feeds a TripleoInventory with an authenticated heat client.
     Side effects: sets self.osc_conn, self.undercloud_stack,
     self.plan_name and self.hclient.
     """
     auth_url = os.environ.get('OS_AUTH_URL')
     os_username = os.environ.get('OS_USERNAME')
     # Fall back to the legacy OS_TENANT_NAME variable when the keystone
     # v3 OS_PROJECT_NAME is not set.
     os_project_name = os.environ.get('OS_PROJECT_NAME',
                                      os.environ.get('OS_TENANT_NAME'))
     os_password = os.environ.get('OS_PASSWORD')
     os_auth_token = os.environ.get('OS_AUTH_TOKEN')
     os_cacert = os.environ.get('OS_CACERT')
     ansible_ssh_user = os.environ.get('ANSIBLE_SSH_USER', 'heat-admin')
     self.osc_conn = Connection()
     # Takes the first stack reported by orchestration as the undercloud
     # stack (assumes at least one stack exists; raises StopIteration
     # otherwise).
     self.undercloud_stack = next(self.osc_conn.orchestration.stacks())
     # NOTE(review): 'STACK_NAME_NAME' looks like a typo for 'STACK_NAME'
     # -- confirm against callers before changing.
     self.plan_name = (os.environ.get('TRIPLEO_PLAN_NAME')
                       or os.environ.get('STACK_NAME_NAME')
                       or self.get_tripleo_plan_name())
     session = get_auth_session(auth_url, os_username, os_project_name,
                                os_password, os_auth_token, os_cacert)
     # Pin the heat client to the major API version the server reports.
     heat_api_version = (
         self.osc_conn.orchestration.get_api_major_version()[0])
     self.hclient = heat_client.Client(heat_api_version, session=session)
     inventory = TripleoInventory(session=session,
                                  hclient=self.hclient,
                                  auth_url=auth_url,
                                  cacert=os_cacert,
                                  project_name=os_project_name,
                                  username=os_username,
                                  ansible_ssh_user=ansible_ssh_user,
                                  plan_name=self.plan_name)
     return inventory.list()
Exemple #6
0
def _find_fit_network(osp_connection: Connection,
                      networks: List[str]) -> "Tuple[Optional[str], Optional[str]]":
    """Pick the best-fitting network out of *networks*.

    Computes a total/used IP ratio per network and delegates the choice
    to _find_best_fit, returning (network_id, network_name).

    Fixes over the original:
    - the return annotation said Optional[str] but a 2-tuple is returned
      (annotated as a string to avoid requiring a Tuple import);
    - a completely unused network (used_ips == 0) raised
      ZeroDivisionError; it is now treated as the best possible fit.
    """
    named_networks = {k['name']: k for k in osp_connection.list_networks() if k['name'] in networks}
    results = dict()
    for net_name in networks:
        net_avail = osp_connection.network.get_network_ip_availability(named_networks[net_name])
        used = net_avail['used_ips']
        # Higher ratio == more headroom; an untouched network is ideal.
        results[net_name] = (net_avail['total_ips'] / used) if used else float('inf')
    result = _find_best_fit(results)
    return (named_networks[result]['id'], result)
Exemple #7
0
def create_client():
    """Wrap a freshly-built admin OpenStack Connection in a Client."""
    connection = Connection(
        auth_url=config.openstack_auth_url,
        domain_name=config.openstack_domain_name,
        username=config.openstack_admin_username,
        password=config.openstack_admin_password,
        project_name=config.openstack_project_name,
    )
    return Client(connection)
def make_client(instance):
    """Return the instance_ha proxy for the masakari service.

    Builds a Connection from the client-manager *instance*'s session,
    interface, region and negotiated HA API version.
    """
    LOG.debug('Instantiating masakari service client')
    connection = Connection(
        session=instance.session,
        interface=instance.interface,
        region_name=instance.region_name,
        ha_api_version=instance._api_version[API_NAME],
    )
    return connection.instance_ha
Exemple #9
0
def resolve_image(osp_connection: Connection, cloud: str, cluster_name: str,
                  images_dir: str, installer: str, error: Optional[Exception]):
    """Function searches for image in openstack and creates it
    if it doesn't exist.

    When the "osia-rhcos-<version>" image already exists it is reused and
    tagged with this cluster's name; otherwise the qcow2 is found locally
    (or downloaded) and uploaded as a private image.  The cloud/image
    metadata is written to <cluster_name>/fips.json and the image name is
    returned.

    *error* is an internal retry sentinel: callers pass None; the single
    recursive retry passes the previous SDKException so a second failure
    aborts instead of looping.
    """
    inst_url, version = get_url(installer)
    image_name = f"osia-rhcos-{version}"
    image = osp_connection.image.find_image(image_name, ignore_missing=True)
    if image is None:
        image_path = Path(images_dir).joinpath(f"rhcos-{version}.qcow2")
        image_file = None
        if image_path.exists():
            logging.info("Found image at %s", image_path.name)
            image_file = image_path.as_posix()
        else:
            logging.info("Starting download of image %s", inst_url)
            image_file = download_image(inst_url, image_path.as_posix())

        logging.info("Starting upload of image into openstack")
        osp_connection.create_image(image_name,
                                    filename=image_file,
                                    container_format="bare",
                                    disk_format="qcow2",
                                    wait=True,
                                    osia_clusters=cluster_name,
                                    visibility='private')
        logging.info("Upload finished")
        image = osp_connection.image.find_image(image_name)
        logging.info("Image uploaded as %s", image.name)
    else:
        logging.info("Reusing found image in openstack %s", image.name)
        try:
            add_cluster(osp_connection, image, cluster_name)
        except SDKException as err:
            # Second failure in a row (error already set) is fatal.
            if error is not None:
                raise ImageException("Couldn't add cluster to image") from err
            # The image vanished between find_image and add_cluster;
            # retry once, forwarding err so the retry cannot recurse again.
            logging.warning(
                "Image disappeared while metadata were written, trying again")
            logging.debug("Openstack error: %s", err)
            return resolve_image(osp_connection, cloud, cluster_name,
                                 images_dir, installer, err)
    with open(Path(cluster_name).joinpath("fips.json"), "w") as fips:
        obj = {'cloud': cloud, 'fips': [], 'image': image_name}
        json.dump(obj, fips)
    return image.name
Exemple #10
0
def os_connect():
    """Create an OpenStack Connection scoped to the configured region."""
    region_profile = Profile()
    region_profile.set_region(Profile.ALL, settings.OS_REGION_NAME)

    return Connection(
        profile=region_profile,
        user_agent='cloudvcl',
        auth_url=settings.OS_AUTH_URL,
        project_name=settings.OS_PROJECT_NAME,
        username=settings.OS_USERNAME,
        password=settings.OS_PASSWORD,
        user_domain_name='default',
        project_domain_name='default',
    )
Exemple #11
0
    def __init__(self):
        """Wire up the Ansible module plumbing and an object-store client."""
        module = AnsibleModule(
            swift_full_argument_spec(**self.argument_spec),
            **self.module_kwargs)
        self.ansible = module
        self.params = module.params
        self.module_name = module._name
        self.results = {'changed': False}
        # Aliases so callers may use either the short or the *_json name.
        self.exit = self.exit_json = module.exit_json
        self.fail = self.fail_json = module.fail_json

        self.cloud = OpenStackConfig().get_one()
        self.client = Connection(config=self.cloud).object_store
    def generate_env_specific_variables(self):
        """Collect overcloud endpoint, password and cinder-backend facts.

        Populates self.internal_lb_vip / self.external_lb_vip from the
        EndpointMap stack resource, self.galera_password from the stack
        environment, and self.cinder_backend_fact from the overcloud's
        cinder backend pools.  Temporarily loads the overcloud rc file for
        the osc connection and restores the undercloud rc at the end.
        """
        endpointmap_resource = self.hclient.resources.get(
            self.plan_name, 'EndpointMap')
        endpointmap_resource_dict = endpointmap_resource.to_dict(
        )['attributes']['endpoint_map']

        self.internal_lb_vip = endpointmap_resource_dict['KeystoneInternal'][
            'host']
        self.external_lb_vip = endpointmap_resource_dict['KeystonePublic'][
            'host']

        stack_env_dict = self.osc_conn.orchestration.get_stack_environment(
            self.plan_name).to_dict()['parameter_defaults']

        # get galera root password
        self.galera_password = stack_env_dict['MysqlRootPassword']

        # load overcloud env and osc
        self.load_rc_file(stack_name=self.plan_name)
        tmp_osc_conn = Connection()

        # get cinder_backend_volume fact
        self.cinder_backend_fact = {}
        known_backends = ('solidfire', 'netapp', 'ceph')
        for cinder_backend_pool in tmp_osc_conn.volume.backend_pools():
            backend_host = cinder_backend_pool.name.split('#')[0]
            # (NOTE:tonytan4ever): skip transient legacy backends
            if 'legacy' in backend_host:
                continue
            cinder_backend_pool_dict = cinder_backend_pool.to_dict(
            )['capabilities']
            backend_name = cinder_backend_pool_dict['volume_backend_name']
            # A pool may match several known backends; record every match,
            # exactly as the original per-backend 'if' checks did.
            for backend in known_backends:
                if backend in backend_name:
                    self.cinder_backend_fact[backend] = {
                        'volume_driver': 'abc',
                        'host': backend_host
                    }
        # reset to undercloud rc for osc
        self.load_rc_file()
Exemple #13
0
def _find_fit_network(
        osp_connection: Connection,
        networks: List[str]) -> Tuple[Optional[str], Optional[str]]:
    """Pick the best-fitting network out of *networks*.

    Sums IPv4 subnet totals/usage per network, scores each as
    total_ips / used_ips, and lets _find_best_fit choose.  Returns
    (network_id, network_name).

    Fixes over the original:
    - a network with no IPv4 subnets made ``zip(*[])`` yield nothing and
      the unpack raised ValueError; such networks are now skipped;
    - a completely unused network (used_ips == 0) raised
      ZeroDivisionError; it now scores float('inf'), i.e. best possible.
    """
    named_networks = {
        k['name']: k
        for k in osp_connection.list_networks() if k['name'] in networks
    }
    results = {}
    for net_name in networks:
        net_avail = osp_connection.network.get_network_ip_availability(
            named_networks[net_name])
        subnet_usage = [(subnet['total_ips'], subnet['used_ips'])
                        for subnet in net_avail.subnet_ip_availability
                        if subnet['ip_version'] == 4]
        if not subnet_usage:
            # Nothing allocatable here: leave it out of the candidates.
            continue
        total_ips = sum(total for total, _ in subnet_usage)
        used_ips = sum(used for _, used in subnet_usage)
        results[net_name] = total_ips / used_ips if used_ips else float('inf')
    result = _find_best_fit(results)
    return named_networks[result]['id'], result
Exemple #14
0
# Use the 'default' cloud unless we are on an OSP cloud, where the cloud
# parameter must be None (NOTE:tonytan4ever).
cloud = None if os.getenv('OS_CLOUDNAME') is not None else 'default'

if os.path.exists(OPENRC) or os.path.exists(STACKRC):
    try:
        OSC_CONFIG = get_cloud_region(cloud=cloud)
    except ConfigException as e:
        # (NOTE:tonytan4ever) Liberty cloud does not have 'default' config
        if 'Cloud default was not found' in str(e):
            OSC_CONFIG = get_cloud_region(cloud=None)
        else:
            raise e
    OSC_CLIENT = Connection(config=OSC_CONFIG, verify=False)


def get_os_component_major_api_version(component_name):
    supported_os_component_osc_mapping = {
        'nova': 'compute',
        'neutron': 'network',
        'cinder': 'volume',
        'glance': 'image',
        'keystone': 'identity',
        'ironic': 'baremetal_introspection',
        'heat': 'orchestration',
        'swfit': 'object_store'
    }
    if component_name not in supported_os_component_osc_mapping.keys():
        raise NameError("Not a supported OpenStack component name: %s,"
Exemple #15
0
def resurrect_instances(dead_hv, spare_hv, update_db=True):
    """Move every instance off a dead hypervisor onto an idle spare.

    Verifies the spare is empty/up/disabled/in the same zone and that the
    dead host really is unreachable, rewrites the nova DB host/node fields
    for the affected instances, updates the redis inventory, hard-reboots
    the running instances and rebinds their ports to the spare, then swaps
    the nova-compute enable/disable state between the two hosts.

    Fixes over the original:
    - ``spare_hv`` was rebound to ``escape_string(spare_hv)`` before the
      SQL loop, so the escaped value (bytes with some drivers) leaked into
      the later port-binding update, log messages and enable_service call;
      the queries are now parameterized and ``spare_hv`` stays untouched.
    """
    assert dead_hv != spare_hv

    os_conn = OpenStack(session=os.session,
                        cloud=CLOUD,
                        region_name=os.get_region())

    if update_db:
        _logger.info('updating redis database')
        refresh_redis_inventory(True)

    # Sanity checks: the spare must run nothing, be administratively
    # disabled (marked as a spare), be alive, and share the dead host's AZ.
    spare_hv_r = redis.get('hypervisors', json.loads)[spare_hv]
    assert spare_hv_r['running_vms'] == 0
    dead_service = spare_service = None
    for svc in os_conn.compute.services():
        if svc.host == dead_hv:
            dead_service = svc
        elif svc.host == spare_hv:
            spare_service = svc
            assert svc.status == 'disabled'

    assert dead_service is not None
    assert spare_service is not None
    assert spare_service.state == 'up'
    assert spare_service.zone == dead_service.zone
    assert 'spare' in spare_service.disables_reason.lower()

    _logger.info(f'verifying that {dead_hv} is dead')
    if not nmap_scan([dead_hv], [22, 111, 16509]):
        raise Exception(f'hypervisor {dead_hv} does not seem to be dead!')

    # Collect instances on the dead host and retarget the cached records.
    instance_list = []
    servers = json.loads(redis.get('servers'))
    for _, server in servers.items():
        if server['hypervisor_hostname'] == spare_hv:
            raise Exception(f'spare hypervisor {spare_hv} has vms assigned!')
        if server['hypervisor_hostname'] == dead_hv:
            instance_list.append(server['id'])
            server['hypervisor_hostname'] = spare_hv

    if not instance_list:
        _logger.warning(f'{dead_hv} does not run any instances')
        return

    _logger.info('updating database records in nova')
    db_conn = mysql_connect(host=MYSQL_HOST,
                            user=MYSQL_USER,
                            passwd=MYSQL_PASS,
                            db='nova')
    try:
        # Parameterized query: the driver handles quoting, replacing the
        # previous escape_string + f-string interpolation.
        query = ('update instances set host = %s, node = %s '
                 'where uuid = %s')
        with db_conn.cursor() as cursor:
            for uuid in instance_list:
                _logger.debug(query)
                cursor.execute(query, (spare_hv, spare_hv, uuid))
        db_conn.commit()
    finally:
        db_conn.close()

    _logger.info('updating servers inventory db')
    redis.set('servers', json.dumps(servers))

    # Best-effort per instance: collect errors and report them all at once.
    exceptions = []
    for uuid in instance_list:
        try:
            if servers[uuid]['vm_state'] == 'stopped':
                _logger.info(f'instance {uuid} is stoppped, not rebooting')
                continue

            _logger.info(f'hard rebooting instance {uuid}')
            os_conn.compute.reboot_server(uuid, 'HARD')
            for ifce in os_conn.compute.server_interfaces(uuid):
                _logger.info(f'updating port binding on {ifce.port_id}')
                port = os_conn.get_port(ifce.port_id)
                if port:
                    os_conn.network.update_port(
                        port, **{'binding:host_id': spare_hv})
        except Exception as e:
            exceptions.append(str(e))

    if exceptions:
        raise Exception('\n'.join(exceptions))

    _logger.info(f'disabling nova on {dead_hv}, enabling nova on {spare_hv}')
    os_conn.compute.disable_service(dead_service, dead_hv, 'nova-compute',
                                    f'sonny resurrection on {spare_hv}')
    os_conn.compute.enable_service(spare_service, spare_hv, 'nova-compute')