def _get_oneview_conf(self):
    """Register and load the ``[oneview]`` options from ``sync.conf``.

    :returns: the populated ``oneview`` option group of the global config.
    """
    opts = [
        cfg.StrOpt('manager_url',
                   help='URL where OneView is available'),
        cfg.StrOpt('username',
                   help='OneView username to be used'),
        cfg.StrOpt('password',
                   help='OneView password to be used'),
        cfg.Opt('allow_insecure_connections',
                type=types.Boolean(),
                default=False,
                # typo fix: "conection" -> "connection"
                help='Option to allow insecure connection with OneView'),
        cfg.StrOpt('tls_cacert_file',
                   default=None,
                   help='Path to CA certificate'),
    ]
    # Lower-case local name: this is oslo.config's global CONF object,
    # not a module-level constant of this file.
    conf = cfg.CONF
    conf.register_opts(opts, group='oneview')
    # Parse sync.conf so the options below carry file-provided values.
    conf(default_config_files=['sync.conf'])
    return conf.oneview
def _get_client(client_name, url, region, access, secret, ca_bundle):
    """Create a botocore client for *client_name* against *url*.

    *ca_bundle* may be a boolean-like string ("true"/"false") to toggle TLS
    verification, or a filesystem path to a CA bundle.
    """
    env_mapping = {
        'config_file': (None, 'AWS_CONFIG_FILE', None, None),
        'region': ('region', 'AWS_DEFAULT_REGION', region, None),
    }
    session = botocore.session.get_session(env_mapping)

    client_kwargs = dict(
        region_name=region,
        endpoint_url=url,
        aws_access_key_id=access,
        aws_secret_access_key=secret,
    )
    if ca_bundle:
        # Prefer a boolean interpretation; anything unparsable is passed
        # through verbatim as a CA bundle path.
        try:
            client_kwargs['verify'] = types.Boolean()(ca_bundle)
        except Exception:
            client_kwargs['verify'] = ca_bundle
    return session.create_client(client_name, **client_kwargs)
class BooleanTypeTests(TypeTestHelper, unittest.TestCase):
    """Tests for the oslo.config Boolean type converter."""

    # Converter under test; TypeTestHelper's assert helpers read this
    # attribute (presumably via self.type — TODO confirm in the helper).
    type = types.Boolean()

    # Truthy spellings accepted by the converter.
    def test_True(self):
        self.assertConvertedValue('True', True)

    def test_yes(self):
        self.assertConvertedValue('yes', True)

    def test_on(self):
        self.assertConvertedValue('on', True)

    def test_1(self):
        self.assertConvertedValue('1', True)

    # Falsy spellings accepted by the converter.
    def test_False(self):
        self.assertConvertedValue('False', False)

    def test_no(self):
        self.assertConvertedValue('no', False)

    def test_off(self):
        self.assertConvertedValue('off', False)

    def test_0(self):
        self.assertConvertedValue('0', False)

    # Anything outside the recognized spellings must be rejected.
    def test_other_values_produce_error(self):
        self.assertInvalid('foo')

    def test_repr(self):
        self.assertEqual('Boolean', repr(types.Boolean()))

    def test_equal(self):
        self.assertEqual(types.Boolean(), types.Boolean())

    def test_not_equal_to_other_class(self):
        self.assertFalse(types.Boolean() == types.String())
def build_ansible_inventory():
    """Get inventory list from config files

    returns python dict representing ansible inventory according to
    ansible inventory file yaml definition
    http://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
    """
    # TODO(radez): consider take advantage of ansible inventory grouping
    driver_tag = 'ansible:'
    booleans = ['manage_vlans']
    # Hoisted loop invariant: one converter instance serves every host.
    to_bool = types.Boolean()

    inventory = {}
    for conffile in CONF.config_file:
        # parse each config file
        sections = {}
        parser = cfg.ConfigParser(conffile, sections)
        try:
            parser.parse()
        except IOError as e:
            # Best-effort: log the unreadable file and continue with
            # whatever (possibly nothing) was parsed into `sections`.
            LOG.error(str(e))

        # filter out sections that begin with the driver's tag
        hosts = {k: v for k, v in sections.items()
                 if k.startswith(driver_tag)}

        # munge the oslo_config data removing the device tag and
        # turning lists with single item strings into strings
        for host in hosts:
            dev_id = host.partition(driver_tag)[2]
            dev_cfg = {k: v[0] for k, v in hosts[host].items()}
            for b in booleans:
                if b in dev_cfg:
                    dev_cfg[b] = to_bool(dev_cfg[b])
            inventory[dev_id] = dev_cfg

    LOG.info('Ansible Host List: %s', ', '.join(inventory))
    return {'all': {'hosts': inventory}}
def __init__(self):
    """Get inventory list from config files

    builds a Network-Runner inventory object and a mac_map dictionary
    according to ansible inventory file yaml definition
    http://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
    """
    self.inventory = {}
    self.mac_map = {}
    # Hoisted loop invariant: one converter instance serves every host.
    to_bool = types.Boolean()

    for conffile in CONF.config_file:
        # parse each config file
        sections = {}
        parser = cfg.ConfigParser(conffile, sections)
        try:
            parser.parse()
        except IOError as e:
            # Best-effort: log the unreadable file and continue with
            # whatever (possibly nothing) was parsed into `sections`.
            LOG.error(str(e))

        # filter out sections that begin with the driver's tag
        hosts = {k: v for k, v in sections.items()
                 if k.startswith(c.DRIVER_TAG)}

        # munge the oslo_config data removing the device tag and
        # turning lists with single item strings into strings
        for host in hosts:
            dev_id = host.partition(c.DRIVER_TAG)[2]
            dev_cfg = {k: v[0] for k, v in hosts[host].items()}
            for b in c.BOOLEANS:
                if b in dev_cfg:
                    dev_cfg[b] = to_bool(dev_cfg[b])
            self.inventory[dev_id] = dev_cfg

            # If mac is defined add it to the mac_map
            if 'mac' in dev_cfg:
                self.mac_map[dev_cfg['mac'].upper()] = dev_id

    LOG.info('Ansible Host List: %s', ', '.join(self.inventory))
def test_not_equal_to_other_class(self):
    """A Boolean type must not compare equal to a different type class."""
    other_type = types.String()
    self.assertFalse(types.Boolean() == other_type)
def test_equal(self):
    """Two independently constructed Boolean types compare equal."""
    first = types.Boolean()
    second = types.Boolean()
    self.assertEqual(first, second)
def test_repr(self):
    """repr() of a Boolean type is the bare class name."""
    actual_repr = repr(types.Boolean())
    self.assertEqual('Boolean', actual_repr)
def _sync_network_views(self, discovered_netviews, dns_views):
    """Discover network views and sync with db.

    The discovered network view json contains the following data:

    - network view
    - cloud_info for delegated member if cloud platform is supported
    - mapping conditional EAs

    So discovered information will be updated in tables such as
    infoblox_network_views and infoblox_mapping_conditions.

    :param discovered_netviews: discovered network view json
    :return: None
    """
    session = self._context.session
    self._load_persisted_mappings()

    # netview name -> delegated member id, returned to the caller.
    discovered_delegations = dict()
    persisted_netview_ids = utils.get_values_from_records(
        'id', self.db_network_views)
    discovered_netview_ids = []
    for netview in discovered_netviews:
        netview_name = netview['name']
        is_default = netview[const.IS_DEFAULT]
        netview_id = utils.get_network_view_id(self._grid_id,
                                               netview['_ref'])

        # A netview "participates" only when this grid id appears in its
        # cloud-adapter-id EA; non-participating views are skipped.
        cloud_adapter_id_vals = utils.get_ea_value(
            const.EA_CLOUD_ADAPTER_ID, netview, True)
        if cloud_adapter_id_vals is None:
            participated = False
        else:
            cloud_adapter_ids = [
                gid for gid in cloud_adapter_id_vals
                if int(gid) == self._grid_id
            ]
            participated = True if cloud_adapter_ids else False
        if not participated:
            continue

        # Missing/empty EA value defaults the view to not shared.
        shared_val = utils.get_ea_value(const.EA_IS_SHARED, netview)
        is_shared = types.Boolean()(shared_val) if shared_val else False

        # authority member is default to GM
        gm_row = utils.find_one_in_list('member_type',
                                        const.MEMBER_TYPE_GRID_MASTER,
                                        self.db_members)
        authority_member_id = gm_row.member_id

        # get delegation member if cloud platform is supported
        delegated_member = self._get_delegated_member(netview)
        if delegated_member:
            authority_member_id = delegated_member.member_id
            discovered_delegations[netview_name] = (
                delegated_member.member_id)

        dns_view = (dns_views[netview_name]
                    if dns_views.get(netview_name) else None)

        # see if the network view already exists in db
        netview_row = utils.find_one_in_list('id', netview_id,
                                             self.db_network_views)
        if netview_row:
            dbi.update_network_view(session, netview_id, netview_name,
                                    authority_member_id, is_shared,
                                    dns_view, participated, is_default)
        else:
            # The default netview/dnsview are stored under well-known
            # internal names instead of their discovered names.
            internal_netview = (const.DEFAULT_NETWORK_VIEW if is_default
                                else netview_name)
            internal_dnsview = (const.DEFAULT_DNS_VIEW if is_default
                                else dns_view)
            dbi.add_network_view(session, netview_id, netview_name,
                                 self._grid_id, authority_member_id,
                                 is_shared, dns_view, internal_netview,
                                 internal_dnsview, participated,
                                 is_default)
        discovered_netview_ids.append(netview_id)

        # update mapping conditions for the current network view
        self._update_mapping_conditions(netview, netview_id,
                                        participated)

    # we have added new network views. now let's remove persisted
    # network views not found from discovery
    persisted_set = set(persisted_netview_ids)
    removable_set = persisted_set.difference(discovered_netview_ids)
    removable_netviews = list(removable_set)
    if removable_netviews:
        dbi.remove_network_views(session, removable_netviews)
    session.flush()
    return discovered_delegations
def sync_members(self):
    """Synchronizes grid members.

    Members in the active grid are discovered from NIOS backend and
    grid members are in sync in neutron db. The members who are no
    longer in used are set to 'OFF' status.
    """
    session = self._context.session
    grid_id = self._grid_config.grid_id

    db_grids = dbi.get_grids(session)
    db_grid = utils.find_one_in_list('grid_id', grid_id, db_grids)
    gm_member_id = db_grid.gm_id

    db_members = dbi.get_members(session, grid_id=grid_id)
    gm_member = utils.find_one_in_list('member_id', gm_member_id,
                                       db_members)

    discovered_members = self._discover_members()
    if not discovered_members:
        # Nothing discovered: leave the persisted members untouched.
        return

    dns_member_settings = self._discover_dns_settings()
    dhcp_member_settings = self._discover_dhcp_settings()

    discovered_licenses = self._discover_member_licenses()
    discovered_member_ids = []

    for member in discovered_members:
        member_name = member['host_name']
        member_ip, member_ipv6 = self._get_lan1_ips(member)
        # Prefer the IPv4 LAN1 address for WAPI; fall back to IPv6.
        member_wapi = member_ip if member_ip else member_ipv6
        member_hwid = member['node_info'][0].get('hwid')
        member_status = self._get_member_status(
            member['node_info'][0]['service_status'])
        member_type = self._get_member_type(discovered_licenses,
                                            member_name, member_hwid)

        require_db_update = False
        if member_type == const.MEMBER_TYPE_GRID_MASTER:
            # The GM keeps its persisted id and uses the configured
            # grid master host as its WAPI endpoint.
            if gm_member:
                require_db_update = True
            member_id = gm_member_id
            member_wapi = self._grid_config.grid_master_host
        else:
            # no need to process 'Is Cloud Member' flag for non GM members
            ea_is_cloud_member = utils.get_ea_value(
                const.EA_IS_CLOUD_MEMBER, member)
            is_cloud_member = (types.Boolean()(ea_is_cloud_member)
                               if ea_is_cloud_member else False)
            if not is_cloud_member:
                continue

            db_member = utils.find_one_in_list('member_name',
                                               member_name, db_members)
            if db_member:
                require_db_update = True
                member_id = db_member.member_id
            else:
                # New member: derive a stable id from grid id + name.
                member_id = utils.get_hash(str(grid_id) + member_name)

        member_dhcp_ip, member_dhcp_ipv6 = self._get_dhcp_ips(
            member, dhcp_member_settings)
        member_dns_ip, member_dns_ipv6 = self._get_dns_ips(
            member, dns_member_settings)

        if require_db_update:
            dbi.update_member(session, member_id, grid_id, member_name,
                              member_ip, member_ipv6, member_type,
                              member_status, member_dhcp_ip,
                              member_dhcp_ipv6, member_dns_ip,
                              member_dns_ipv6, member_wapi)
        else:
            dbi.add_member(session, member_id, grid_id, member_name,
                           member_ip, member_ipv6, member_type,
                           member_status, member_dhcp_ip,
                           member_dhcp_ipv6, member_dns_ip,
                           member_dns_ipv6, member_wapi)

        discovered_member_ids.append(member_id)

    # deleting members are delicate operation so we won't allow it
    # but we will set member status to OFF to unused members.
    db_member_ids = utils.get_values_from_records('member_id',
                                                  db_members)
    persisted_set = set(db_member_ids)
    discovered_set = set(discovered_member_ids)
    disable_set = persisted_set.difference(discovered_set)
    disabling_member_ids = list(disable_set)
    for member_id in disabling_member_ids:
        dbi.update_member(session, member_id, grid_id,
                          member_status=const.MEMBER_STATUS_OFF)
    session.flush()
# [oneview] configuration options used by this module.
opts = [
    cfg.StrOpt('manager_url',
               help='URL where OneView is available'),
    cfg.StrOpt('username',
               help='OneView username to be used'),
    cfg.StrOpt('password',
               help='OneView password to be used'),
    cfg.StrOpt('nova_username',
               help='Nova username'),
    cfg.StrOpt('nova_user_pass',
               help='Nova password'),
    cfg.StrOpt('nova_user_tenant',
               help='Nova user tenant'),
    cfg.Opt('allow_insecure_connections',
            type=types.Boolean(),
            default=False,
            # typo fix: "conection" -> "connection"
            help='Option to allow insecure connection with OneView'),
    cfg.StrOpt('tls_cacert_file',
               default=None,
               help='Path to CA certificate'),
    cfg.IntOpt('max_retries',
               default=100,
               help='Max connection retries to check changes on OneView'),
]

CONF = cfg.CONF
CONF.register_opts(opts, group='oneview')

# Power state labels as exchanged with the OneView API.
ONEVIEW_POWER_ON = 'On'
ONEVIEW_POWER_OFF = 'Off'