def upgrade(migrate_engine):
    """Create the instance_types table and seed it with the legacy flavors.

    Binds the shared metadata to the engine supplied by the migration
    framework -- never creates its own engine.
    """
    meta.bind = migrate_engine

    try:
        instance_types.create()
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while creating instance_types table')
        raise

    # Here are the old static instance types
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}

    try:
        insert_stmt = instance_types.insert()
        for name, values in INSTANCE_TYPES.iteritems():
            # FIXME(kpepple) should we be seeding created_at / updated_at ?
            # now = datetime.datatime.utcnow()
            insert_stmt.execute({'name': name,
                                 'memory_mb': values["memory_mb"],
                                 'vcpus': values["vcpus"],
                                 'deleted': False,
                                 'local_gb': values["local_gb"],
                                 'flavorid': values["flavorid"]})
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while seeding instance_types table')
        raise
def downgrade(migrate_engine):
    """Drop the tables created by this migration (best effort).

    Dropping remains best-effort (no re-raise) so a partially-applied
    upgrade can still be rolled back, but failures are now recorded
    with a traceback instead of being silently swallowed.
    """
    meta.bind = migrate_engine
    for table in (flavors, backend, sm_vol):
        try:
            table.drop()
        except Exception:
            logging.info(repr(table))
            # Previously only the table repr was logged and the error was
            # discarded; log the traceback like the sibling migrations do.
            logging.exception('Exception while dropping table')
def upgrade(migrate_engine):
    """Create the new tables, then widen/extend existing tables.

    Don't create your own engine; bind migrate_engine to the shared
    metadata. On any create failure, roll back the tables created so
    far and re-raise.
    """
    meta.bind = migrate_engine

    new_tables = [certificates, console_pools, consoles,
                  instance_actions, iscsi_targets]
    for tbl in new_tables:
        try:
            tbl.create()
        except Exception:
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            # Undo any tables already created by this migration.
            meta.drop_all(tables=new_tables)
            raise

    auth_tokens.c.user_id.alter(type=String(length=255,
                                            convert_unicode=False,
                                            assert_unicode=None,
                                            unicode_error=None,
                                            _warn_on_bytestring=False))

    instances.create_column(instances_availability_zone)
    instances.create_column(instances_locked)
    networks.create_column(networks_cidr_v6)
    networks.create_column(networks_ra_server)
    services.create_column(services_availability_zone)
def upgrade(migrate_engine):
    """Add the new tables and the new columns/alterations to old tables.

    Binds migrate_engine to the shared metadata rather than creating a
    fresh engine; failed creates drop everything made so far and re-raise.
    """
    meta.bind = migrate_engine

    pending = [certificates, console_pools, consoles, instance_actions,
               iscsi_targets]
    for current in pending:
        try:
            current.create()
        except Exception:
            logging.info(repr(current))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=pending)
            raise

    # Widen user_id and disable the unicode coercion knobs.
    new_user_id_type = String(length=255,
                              convert_unicode=False,
                              assert_unicode=None,
                              unicode_error=None,
                              _warn_on_bytestring=False)
    auth_tokens.c.user_id.alter(type=new_user_id_type)

    # Attach the new columns to their tables.
    for tbl, col in ((instances, instances_availability_zone),
                     (instances, instances_locked),
                     (networks, networks_cidr_v6),
                     (networks, networks_ra_server),
                     (services, services_availability_zone)):
        tbl.create_column(col)
def downgrade(migrate_engine):
    """Recreate the export_devices table removed by the upgrade."""
    meta.bind = migrate_engine
    try:
        export_devices.create()
    except Exception:
        # Record the table definition plus the traceback, then propagate
        # so the migration framework sees the failure.
        logging.info(repr(export_devices))
        logging.exception('Exception while creating table')
        raise
def upgrade(migrate_engine):
    """Create the instance_faults table.

    Don't create your own engine; bind migrate_engine to the shared
    metadata.
    """
    meta.bind = migrate_engine
    try:
        instance_faults.create()
    except Exception:
        logging.info(repr(instance_faults))
        # Previously the failure was swallowed after an info log, letting
        # the migration "succeed" without the table. Log the traceback and
        # re-raise, matching the other table-creation migrations here.
        logging.exception('Exception while creating table')
        raise
def upgrade(migrate_engine):
    """Create the zones table.

    Don't create your own engine; bind migrate_engine to the shared
    metadata.
    """
    meta.bind = migrate_engine
    for table in (zones,):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            # Previously the exception was swallowed with only the table
            # repr logged; log the traceback and re-raise so a failed
            # create doesn't silently mark the migration as applied.
            logging.exception('Exception while creating table')
            raise
def upgrade(migrate_engine):
    """Create the zones table.

    Don't create your own engine; bind migrate_engine to the shared
    metadata.
    """
    meta.bind = migrate_engine
    for table in (zones, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            # Previously the exception was swallowed with only the table
            # repr logged; log the traceback and re-raise so a failed
            # create doesn't silently mark the migration as applied.
            logging.exception('Exception while creating table')
            raise
def upgrade(migrate_engine):
    """Create the provider_fw_rules table.

    The migration framework supplies the engine; bind it to the shared
    metadata instead of building a new one. The single-element loop of
    the original is unrolled -- only one table is created here.
    """
    meta.bind = migrate_engine
    try:
        provider_fw_rules.create()
    except Exception:
        logging.info(repr(provider_fw_rules))
        logging.exception('Exception while creating table')
        raise
def upgrade(migrate_engine):
    """Create the block_device_mapping table.

    Binds the framework-supplied engine to the shared metadata. If the
    create fails, any partial state is dropped before re-raising.
    """
    meta.bind = migrate_engine
    try:
        block_device_mapping.create()
    except Exception:
        logging.info(repr(block_device_mapping))
        logging.exception('Exception while creating table')
        # Clean up whatever partially materialized before propagating.
        meta.drop_all(tables=[block_device_mapping])
        raise
def upgrade(migrate_engine):
    """Create the instance_type_extra_specs table.

    Don't create your own engine; bind migrate_engine to the shared
    metadata.
    """
    meta.bind = migrate_engine
    for tbl in (instance_type_extra_specs_table, ):
        try:
            tbl.create()
        except Exception:
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            raise
def upgrade(migrate_engine):
    """Create the provider_fw_rules table.

    Don't create your own engine; bind migrate_engine to the shared
    metadata. Failures are logged with a traceback and re-raised.
    """
    meta.bind = migrate_engine
    for new_table in (provider_fw_rules, ):
        try:
            new_table.create()
        except Exception:
            logging.info(repr(new_table))
            logging.exception('Exception while creating table')
            raise
def upgrade(migrate_engine):
    """Create the virtual_storage_arrays table.

    Don't create your own engine; bind migrate_engine to the shared
    metadata.
    """
    meta.bind = migrate_engine
    try:
        virtual_storage_arrays.create()
    except Exception:
        # BUG FIX: this previously logged repr(table), but no name `table`
        # exists in this scope -- the NameError would have masked the real
        # create failure. Log the actual table object instead.
        logging.info(repr(virtual_storage_arrays))
        logging.exception('Exception while creating table')
        raise
def upgrade(migrate_engine):
    """Create the volume-type tables and link volumes to a type.

    Iterates the module-level ``new_tables`` collection, creating each
    table; afterwards attaches the ``volume_type_id`` column to volumes.
    """
    meta.bind = migrate_engine
    for tbl in new_tables:
        try:
            tbl.create()
        except Exception:
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            raise
    volumes.create_column(volume_type_id)
def upgrade(migrate_engine):
    """Create the snapshots table.

    Binds the framework-supplied engine to the shared metadata; on
    failure any partial state is dropped before re-raising.
    """
    meta.bind = migrate_engine
    try:
        snapshots.create()
    except Exception:
        logging.info(repr(snapshots))
        logging.exception('Exception while creating table')
        # Roll back whatever partially materialized before propagating.
        meta.drop_all(tables=[snapshots])
        raise
def upgrade(migrate_engine):
    """Create compute_nodes and record which host an instance launched on.

    Don't create your own engine; bind migrate_engine to the shared
    metadata. A failed create is rolled back and re-raised.
    """
    meta.bind = migrate_engine

    try:
        compute_nodes.create()
    except Exception:
        logging.info(repr(compute_nodes))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[compute_nodes])
        raise

    instances.create_column(instances_launched_on)
def upgrade(migrate_engine):
    """Create the builds table and add the architecture column to instances.

    Don't create your own engine; bind migrate_engine to the shared
    metadata.
    """
    meta.bind = migrate_engine
    for table in (builds, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            # Previously the failure was swallowed after an info log and the
            # migration carried on regardless; log the traceback and
            # re-raise, matching the sibling table-creation migrations.
            logging.exception('Exception while creating table')
            raise

    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    # Add columns to existing tables
    instances.create_column(architecture)
def upgrade(migrate_engine):
    """Create the initial schema: every base table in dependency order.

    Don't create your own engine; bind migrate_engine to the shared
    metadata. A failed create drops everything made so far, then
    re-raises.
    """
    meta.bind = migrate_engine

    all_tables = [auth_tokens,
                  instances, key_pairs, networks, fixed_ips, floating_ips,
                  quotas, security_groups, security_group_inst_assoc,
                  security_group_rules, services, users, projects,
                  user_project_association, user_project_role_association,
                  user_role_association, volumes, export_devices]

    for tbl in all_tables:
        try:
            tbl.create()
        except Exception:
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=all_tables)
            raise
def upgrade(migrate_engine):
    """Create the full initial table set.

    Binds the framework-supplied engine to the shared metadata; when any
    table fails to create, everything created so far is dropped and the
    error is propagated.
    """
    meta.bind = migrate_engine

    schema = [
        auth_tokens, instances, key_pairs, networks, fixed_ips,
        floating_ips, quotas, security_groups, security_group_inst_assoc,
        security_group_rules, services, users, projects,
        user_project_association, user_project_role_association,
        user_role_association, volumes, export_devices,
    ]

    for current in schema:
        try:
            current.create()
        except Exception:
            logging.info(repr(current))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=schema)
            raise
def setup_basic_filtering(self, instance, network_info):
    """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
    logging.info('called setup_basic_filtering in nwfilter')

    if self.handle_security_groups:
        # The security-group path installs its own filter set which would
        # override anything defined here, so bail out early.
        return

    logging.info('ensuring static filters')
    self._ensure_static_filters()

    if instance['image_ref'] == str(FLAGS.vpn_image_id):
        base_filter = 'engine-vpn'
    else:
        base_filter = 'engine-base'

    for network, mapping in network_info:
        nic_id = mapping['mac'].replace(':', '')
        instance_filter_name = self._instance_filter_name(instance, nic_id)
        container = self._filter_container(instance_filter_name,
                                           [base_filter])
        self._define_filter(container)
def setup_basic_filtering(self, instance, network_info):
    """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
    logging.info('called setup_basic_filtering in nwfilter')

    # Security groups install their own filter set that overrides this
    # one anyway, so there's no point defining ours.
    if self.handle_security_groups:
        return

    logging.info('ensuring static filters')
    self._ensure_static_filters()

    is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
    parent_filter = 'engine-vpn' if is_vpn else 'engine-base'

    for (network, mapping) in network_info:
        nic_id = mapping['mac'].replace(':', '')
        filter_name = self._instance_filter_name(instance, nic_id)
        self._define_filter(
            self._filter_container(filter_name, [parent_filter]))
def test_module_level_methods_handle_context_arg(self):
    """Module-level log helpers must accept a ``context`` keyword."""
    log.info("foo", context=_fake_context())
    # Reaching this point means no exception was raised.
    # assert_ is a deprecated unittest alias; use assertTrue instead.
    self.assertTrue(True)
def _tenant_usages_for_period(self, context, period_start, period_stop,
                              tenant_id=None, detailed=True):
    """Summarize instance usage per tenant over [period_start, period_stop].

    Returns the per-tenant summary dicts (vcpu/memory/disk hours, window
    bounds and, when ``detailed`` is true, per-server usage records).
    """
    compute_api = api.API()
    instances = compute_api.get_active_by_window(context,
                                                 period_start,
                                                 period_stop,
                                                 tenant_id)
    from engine import log as logging
    logging.info(instances)
    rval = {}
    flavors = {}

    for instance in instances:
        info = {}
        info['hours'] = self._hours_for(instance,
                                        period_start,
                                        period_stop)
        flavor_type = instance['instance_type_id']

        # Cache flavor lookups across instances of the same type.
        if not flavors.get(flavor_type):
            try:
                it_ref = compute_api.get_instance_type(context, flavor_type)
                flavors[flavor_type] = it_ref
            except exception.InstanceTypeNotFound:
                # can't bill if there is no instance type
                continue

        flavor = flavors[flavor_type]

        info['name'] = instance['display_name']
        info['memory_mb'] = flavor['memory_mb']
        info['local_gb'] = flavor['local_gb']
        info['vcpus'] = flavor['vcpus']
        info['tenant_id'] = instance['project_id']
        info['flavor'] = flavor['name']
        info['started_at'] = instance['launched_at']
        info['ended_at'] = instance['terminated_at']

        if info['ended_at']:
            info['state'] = 'terminated'
        else:
            info['state'] = instance['vm_state']

        now = datetime.utcnow()
        if info['state'] == 'terminated':
            delta = info['ended_at'] - info['started_at']
        else:
            delta = now - info['started_at']

        # BUG FIX: uptime is in seconds, but the original computed
        # days * 24 * 60 (minutes) + seconds, mixing units.
        info['uptime'] = delta.days * 24 * 3600 + delta.seconds

        if info['tenant_id'] not in rval:
            summary = {}
            summary['tenant_id'] = info['tenant_id']
            if detailed:
                summary['server_usages'] = []
            summary['total_local_gb_usage'] = 0
            summary['total_vcpus_usage'] = 0
            summary['total_memory_mb_usage'] = 0
            summary['total_hours'] = 0
            summary['start'] = period_start
            summary['stop'] = period_stop
            rval[info['tenant_id']] = summary

        summary = rval[info['tenant_id']]
        summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
        summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
        summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                             info['hours'])
        summary['total_hours'] += info['hours']
        if detailed:
            summary['server_usages'].append(info)

    return rval.values()
def _tenant_usages_for_period(self, context, period_start, period_stop,
                              tenant_id=None, detailed=True):
    """Summarize instance usage per tenant over [period_start, period_stop].

    Returns the per-tenant summary dicts (vcpu/memory/disk hours, window
    bounds and, when ``detailed`` is true, per-server usage records).
    """
    compute_api = api.API()
    instances = compute_api.get_active_by_window(context,
                                                 period_start,
                                                 period_stop,
                                                 tenant_id)
    from engine import log as logging
    logging.info(instances)
    rval = {}
    flavors = {}

    for instance in instances:
        info = {}
        info['hours'] = self._hours_for(instance,
                                        period_start,
                                        period_stop)
        flavor_type = instance['instance_type_id']

        # Cache flavor lookups across instances of the same type.
        if not flavors.get(flavor_type):
            try:
                it_ref = compute_api.get_instance_type(context,
                                                       flavor_type)
                flavors[flavor_type] = it_ref
            except exception.InstanceTypeNotFound:
                # can't bill if there is no instance type
                continue

        flavor = flavors[flavor_type]

        info['name'] = instance['display_name']
        info['memory_mb'] = flavor['memory_mb']
        info['local_gb'] = flavor['local_gb']
        info['vcpus'] = flavor['vcpus']
        info['tenant_id'] = instance['project_id']
        info['flavor'] = flavor['name']
        info['started_at'] = instance['launched_at']
        info['ended_at'] = instance['terminated_at']

        if info['ended_at']:
            info['state'] = 'terminated'
        else:
            info['state'] = instance['vm_state']

        now = datetime.utcnow()
        if info['state'] == 'terminated':
            delta = info['ended_at'] - info['started_at']
        else:
            delta = now - info['started_at']

        # BUG FIX: uptime is in seconds, but the original computed
        # days * 24 * 60 (minutes) + seconds, mixing units.
        info['uptime'] = delta.days * 24 * 3600 + delta.seconds

        if info['tenant_id'] not in rval:
            summary = {}
            summary['tenant_id'] = info['tenant_id']
            if detailed:
                summary['server_usages'] = []
            summary['total_local_gb_usage'] = 0
            summary['total_vcpus_usage'] = 0
            summary['total_memory_mb_usage'] = 0
            summary['total_hours'] = 0
            summary['start'] = period_start
            summary['stop'] = period_stop
            rval[info['tenant_id']] = summary

        summary = rval[info['tenant_id']]
        summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
        summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
        summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                             info['hours'])
        summary['total_hours'] += info['hours']
        if detailed:
            summary['server_usages'].append(info)

    return rval.values()
def upgrade(migrate_engine):
    """Preload instance_info_caches with a JSON network-info blob per instance.

    Walks every instance, reconstructs its network info (VIFs, networks,
    subnets, fixed and floating IPs) from the relational tables, then
    writes one row per instance into instance_info_caches.
    """
    meta.bind = migrate_engine

    # grab tables
    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    vifs = Table('virtual_interfaces', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    floating_ips = Table('floating_ips', meta, autoload=True)

    # all of these functions return a python list of python dicts
    # that have nothing to do with sqlalchemy objects whatsoever
    # after returning

    def get_instances():
        # want all instances whether there is network info or not
        s = select([instances.c.id, instances.c.uuid])
        keys = ('id', 'uuid')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_vifs_by_instance_id(instance_id):
        s = select([vifs.c.id, vifs.c.uuid, vifs.c.address,
                    vifs.c.network_id],
                   vifs.c.instance_id == instance_id)
        keys = ('id', 'uuid', 'address', 'network_id')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_network_by_id(network_id):
        s = select([networks.c.uuid, networks.c.label,
                    networks.c.project_id, networks.c.dns1,
                    networks.c.dns2, networks.c.cidr, networks.c.cidr_v6,
                    networks.c.gateway, networks.c.gateway_v6,
                    networks.c.injected, networks.c.multi_host,
                    networks.c.bridge, networks.c.bridge_interface,
                    networks.c.vlan],
                   networks.c.id == network_id)
        keys = ('uuid', 'label', 'project_id', 'dns1', 'dns2', 'cidr',
                'cidr_v6', 'gateway', 'gateway_v6', 'injected',
                'multi_host', 'bridge', 'bridge_interface', 'vlan')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_fixed_ips_by_vif_id(vif_id):
        s = select([fixed_ips.c.id, fixed_ips.c.address],
                   fixed_ips.c.virtual_interface_id == vif_id)
        keys = ('id', 'address')
        fixed_ip_list = [dict(zip(keys, row)) for row in s.execute()]
        # fixed ips have floating ips, so here they are
        for fixed_ip in fixed_ip_list:
            fixed_ip['version'] = 4
            fixed_ip['floating_ips'] = \
                get_floating_ips_by_fixed_ip_id(fixed_ip['id'])
            fixed_ip['type'] = 'fixed'
            del fixed_ip['id']
        return fixed_ip_list

    def get_floating_ips_by_fixed_ip_id(fixed_ip_id):
        s = select([floating_ips.c.address],
                   floating_ips.c.fixed_ip_id == fixed_ip_id)
        # BUG FIX: this was ('address') -- a plain string, not a 1-tuple --
        # so zip() paired each row with the characters 'a', 'd', 'd', ...
        # producing garbage keys in the floating-ip dicts.
        keys = ('address',)
        floating_ip_list = [dict(zip(keys, row)) for row in s.execute()]
        for floating_ip in floating_ip_list:
            floating_ip['version'] = 4
            floating_ip['type'] = 'floating'
        return floating_ip_list

    def _ip_dict_from_string(ip_string, ip_type):
        # Returns None for an empty/None address. Renamed the second
        # parameter from `type` to avoid shadowing the builtin.
        if ip_string:
            ip = {'address': ip_string, 'type': ip_type}
            if ':' in ip_string:
                ip['version'] = 6
            else:
                ip['version'] = 4
            return ip

    def _get_fixed_ipv6_dict(cidr, mac, project_id):
        ip_string = ipv6.to_global(cidr, mac, project_id)
        return {'version': 6,
                'address': ip_string,
                'floating_ips': []}

    def _create_subnet(version, network, vif):
        if version == 4:
            cidr = network['cidr']
            gateway = network['gateway']
            ips = get_fixed_ips_by_vif_id(vif['id'])
        else:
            cidr = network['cidr_v6']
            gateway = network['gateway_v6']
            ips = [_get_fixed_ipv6_dict(network['cidr_v6'],
                                        vif['address'],
                                        network['project_id'])]
        # NOTE(tr3buchet) routes is left empty for now because there
        # is no good way to generate them or determine which is default
        subnet = {'version': version,
                  'cidr': cidr,
                  'dns': [],
                  'gateway': _ip_dict_from_string(gateway, 'gateway'),
                  'routes': [],
                  'ips': ips}
        if network['dns1'] and network['dns1']['version'] == version:
            subnet['dns'].append(network['dns1'])
        if network['dns2'] and network['dns2']['version'] == version:
            subnet['dns'].append(network['dns2'])
        return subnet

    # preload caches table
    # list is made up of a row(instance_id, nw_info_json) for each instance
    for instance in get_instances():
        logging.info("Updating %s" % (instance['uuid']))
        instance_id = instance['id']
        instance_uuid = instance['uuid']

        # instances have vifs so an instance nw_info is
        # a list of dicts, 1 dict for each vif
        nw_info = get_vifs_by_instance_id(instance_id)
        logging.info("VIFs for Instance %s: \n %s" %
                     (instance['uuid'], nw_info))

        for vif in nw_info:
            network = get_network_by_id(vif['network_id'])[0]
            logging.info("Network for Instance %s: \n %s" %
                         (instance['uuid'], network))

            # vifs have a network which has subnets, so create the subnets
            # subnets contain all of the ip information
            network['subnets'] = []
            network['dns1'] = _ip_dict_from_string(network['dns1'], 'dns')
            network['dns2'] = _ip_dict_from_string(network['dns2'], 'dns')

            # engine networks can only have 2 subnets
            if network['cidr']:
                network['subnets'].append(_create_subnet(4, network, vif))
            if network['cidr_v6']:
                network['subnets'].append(_create_subnet(6, network, vif))

            # put network together to fit model
            network['id'] = network.pop('uuid')
            network['meta'] = {}

            # NOTE(tr3buchet) this isn't absolutely necessary as hydration
            # would still work with these as keys, but cache generated by
            # the model would show these keys as a part of meta. i went
            # ahead and set it up the same way just so it looks the same
            if network['project_id']:
                network['meta']['project_id'] = network['project_id']
            del network['project_id']
            if network['injected']:
                network['meta']['injected'] = network['injected']
            del network['injected']
            if network['multi_host']:
                network['meta']['multi_host'] = network['multi_host']
            del network['multi_host']
            if network['bridge_interface']:
                network['meta']['bridge_interface'] = \
                    network['bridge_interface']
            del network['bridge_interface']
            if network['vlan']:
                network['meta']['vlan'] = network['vlan']
            del network['vlan']

            # ip information now lives in the subnet, pull them out of network
            del network['dns1']
            del network['dns2']
            del network['cidr']
            del network['cidr_v6']
            del network['gateway']
            del network['gateway_v6']

            # don't need meta if it's empty
            if not network['meta']:
                del network['meta']

            # put vif together to fit model
            del vif['network_id']
            vif['id'] = vif.pop('uuid')
            vif['network'] = network
            # vif['meta'] could also be set to contain rxtx data here
            # but it isn't exposed in the api and is still being rewritten
            logging.info("VIF network for instance %s: \n %s" %
                         (instance['uuid'], vif['network']))

        # jsonify nw_info
        row = {'created_at': utils.utcnow(),
               'updated_at': utils.utcnow(),
               'instance_id': instance_uuid,
               'network_info': json.dumps(nw_info)}

        # write row to table
        insert = instance_info_caches.insert().values(**row)
        migrate_engine.execute(insert)