Esempio n. 1
0
    def setup_basic_filtering(self, instance, network_info=None):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
        logging.info('called setup_basic_filtering in nwfilter')

        if not network_info:
            network_info = netutils.get_network_info(instance)

        if self.handle_security_groups:
            # Security groups install their own filter set, which would
            # override a basic one anyway, so skip the work entirely.
            return

        logging.info('ensuring static filters')
        self._ensure_static_filters()

        # VPN instances hang off a dedicated base filter.
        is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
        base_filter = 'nova-vpn' if is_vpn else 'nova-base'

        for _network, vif_mapping in network_info:
            nic_id = vif_mapping['mac'].replace(':', '')
            filter_name = self._instance_filter_name(instance, nic_id)
            self._define_filter(
                self._filter_container(filter_name, [base_filter]))
Esempio n. 2
0
def upgrade(migrate_engine):
    """Create the instance_types table and seed the legacy static types.

    Binds the migrate-supplied engine to the shared metadata; never
    create a separate engine inside a migration.
    """
    meta.bind = migrate_engine
    try:
        instance_types.create()
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while creating instance_types table')
        raise

    # The old static instance types, keyed by flavor name.
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5),
    }
    try:
        insert_stmt = instance_types.insert()
        # FIXME(kpepple) should we be seeding created_at / updated_at ?
        for name, spec in INSTANCE_TYPES.iteritems():
            row = {'name': name,
                   'deleted': False,
                   'memory_mb': spec["memory_mb"],
                   'vcpus': spec["vcpus"],
                   'local_gb': spec["local_gb"],
                   'flavorid': spec["flavorid"]}
            insert_stmt.execute(row)
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while seeding instance_types table')
        raise
Esempio n. 3
0
def upgrade(migrate_engine):
    """Create new tables, widen auth_tokens.user_id, and add columns
    to several existing tables.
    """
    meta.bind = migrate_engine

    # Create the new tables first; on any failure, drop them all and
    # re-raise so migrate records the migration as failed.
    new_tables = [certificates, console_pools, consoles, instance_actions,
                  iscsi_targets]
    for new_table in new_tables:
        try:
            new_table.create()
        except Exception:
            logging.info(repr(new_table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=new_tables)
            raise

    # user_id becomes a plain 255-char string column.
    auth_tokens.c.user_id.alter(type=String(length=255,
                                            convert_unicode=False,
                                            assert_unicode=None,
                                            unicode_error=None,
                                            _warn_on_bytestring=False))

    # Column additions to pre-existing tables.
    instances.create_column(instances_availability_zone)
    instances.create_column(instances_locked)
    networks.create_column(networks_cidr_v6)
    networks.create_column(networks_ra_server)
    services.create_column(services_availability_zone)
Esempio n. 4
0
def upgrade(migrate_engine):
    """Create several new tables and extend existing ones."""
    meta.bind = migrate_engine

    pending = [certificates,
               console_pools,
               consoles,
               instance_actions,
               iscsi_targets]
    for tbl in pending:
        try:
            tbl.create()
        except Exception:
            # Undo everything created so far before propagating.
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=pending)
            raise

    # Widen user_id to a 255-character string.
    auth_tokens.c.user_id.alter(type=String(length=255,
                                            convert_unicode=False,
                                            assert_unicode=None,
                                            unicode_error=None,
                                            _warn_on_bytestring=False))

    instances.create_column(instances_availability_zone)
    instances.create_column(instances_locked)
    networks.create_column(networks_cidr_v6)
    networks.create_column(networks_ra_server)
    services.create_column(services_availability_zone)
Esempio n. 5
0
    def setup_basic_filtering(self, instance, network_info=None):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
        logging.info('called setup_basic_filtering in nwfilter')

        if not network_info:
            network_info = netutils.get_network_info(instance)

        # When security groups are handled here, their filter set
        # replaces the basic one, so there is nothing to do.
        if self.handle_security_groups:
            return

        logging.info('ensuring static filters')
        self._ensure_static_filters()

        if instance['image_ref'] == str(FLAGS.vpn_image_id):
            base_filter = 'nova-vpn'
        else:
            base_filter = 'nova-base'

        # One per-NIC filter, derived from the MAC with colons stripped.
        for _net, mapping in network_info:
            nic_id = mapping['mac'].replace(':', '')
            name = self._instance_filter_name(instance, nic_id)
            container = self._filter_container(name, [base_filter])
            self._define_filter(container)
Esempio n. 6
0
def downgrade(migrate_engine):
    """Re-create the export_devices table removed by the upgrade."""
    meta.bind = migrate_engine
    for restored in (export_devices,):
        try:
            restored.create()
        except Exception:
            logging.info(repr(restored))
            logging.exception('Exception while creating table')
            raise
def upgrade(migrate_engine):
    """Create the instance_faults table.

    Binds the migrate-supplied engine to the shared metadata rather
    than creating a new engine.
    """
    meta.bind = migrate_engine
    try:
        instance_faults.create()
    except Exception:
        logging.info(repr(instance_faults))
        # Previously the exception was silently swallowed here, so a
        # failed create left the migration marked successful with no
        # table. Log and re-raise, consistent with the other
        # migrations in this file.
        logging.exception('Exception while creating table')
        raise
def downgrade(migrate_engine):
    """Restore the export_devices table dropped by the upgrade."""
    meta.bind = migrate_engine
    try:
        export_devices.create()
    except Exception:
        # Dump the table definition for debugging, then propagate so
        # migrate records the failure.
        logging.info(repr(export_devices))
        logging.exception('Exception while creating table')
        raise
def upgrade(migrate_engine):
    """Create the instance_faults table, propagating any failure."""
    meta.bind = migrate_engine
    try:
        instance_faults.create()
    except Exception:
        logging.info(repr(instance_faults))
        # The original except body only logged the table repr and fell
        # through, hiding the failure from migrate; add the exception
        # log and re-raise like the sibling migrations.
        logging.exception('Exception while creating table')
        raise
Esempio n. 10
0
def downgrade(migrate_engine):
    """Reverse the upgrade by dropping the local_volumes table."""
    # Operations to reverse the above upgrade go here.
    meta.bind = migrate_engine
    try:
        local_volumes.drop(migrate_engine)
    except Exception:
        logging.info(repr(local_volumes))
        # The old message said "instance_types", copied from an earlier
        # migration; this migration drops local_volumes.
        logging.exception('Exception while dropping local_volumes table')
        raise
Esempio n. 11
0
def upgrade(migrate_engine):
    """Create the zones table.

    Binds the migrate-supplied engine to the shared metadata; never
    create a separate engine inside a migration.
    """
    meta.bind = migrate_engine
    for table in (zones, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            # Previously the error was swallowed after logging the table
            # repr, so migrate would record success with no table; log
            # the exception and re-raise like the other migrations.
            logging.exception('Exception while creating table')
            raise
Esempio n. 12
0
def upgrade(migrate_engine):
    """Create the block_device_mapping table, rolling back on failure."""
    meta.bind = migrate_engine
    new_tables = [block_device_mapping]
    for new_table in new_tables:
        try:
            new_table.create()
        except Exception:
            # Log, undo the partial create, and propagate.
            logging.info(repr(new_table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=new_tables)
            raise
Esempio n. 13
0
def upgrade(migrate_engine):
    """Create the virtual_storage_arrays table."""
    meta.bind = migrate_engine

    try:
        virtual_storage_arrays.create()
    except Exception:
        # The original logged repr(table), but no name `table` exists in
        # this scope -- the except handler itself raised NameError and
        # masked the real failure. Log the table actually being created.
        logging.info(repr(virtual_storage_arrays))
        logging.exception('Exception while creating table')
        raise
Esempio n. 14
0
def upgrade(migrate_engine):
    """Create the virtual_storage_arrays table."""
    meta.bind = migrate_engine

    try:
        virtual_storage_arrays.create()
    except Exception:
        # Fix: `table` was undefined here (NameError inside the except
        # handler, masking the original error); log the actual table.
        logging.info(repr(virtual_storage_arrays))
        logging.exception('Exception while creating table')
        raise
def upgrade(migrate_engine):
    """Create the provider_fw_rules table."""
    meta.bind = migrate_engine
    # Single table; the tuple loop in older revisions is unrolled here.
    try:
        provider_fw_rules.create()
    except Exception:
        logging.info(repr(provider_fw_rules))
        logging.exception('Exception while creating table')
        raise
Esempio n. 16
0
def upgrade(migrate_engine):
    """Create the local_volumes table.

    Binds the migrate-supplied engine to the shared metadata.
    """
    meta.bind = migrate_engine
    try:
        local_volumes.create()
    except Exception:
        logging.info(repr(local_volumes))
        # The old message said "instance_types table", copied from an
        # earlier migration; this migration creates local_volumes.
        logging.exception('Exception while creating local_volumes table')
        raise
def upgrade(migrate_engine):
    """Create the block_device_mapping table; undo and re-raise on failure."""
    meta.bind = migrate_engine
    created = [block_device_mapping]
    try:
        block_device_mapping.create()
    except Exception:
        logging.info(repr(block_device_mapping))
        logging.exception('Exception while creating table')
        # Drop the partially created table so the schema stays clean.
        meta.drop_all(tables=created)
        raise
def upgrade(migrate_engine):
    """Create the instance-type extra-specs table."""
    meta.bind = migrate_engine
    # Only one table here; unroll the single-element loop.
    try:
        instance_type_extra_specs_table.create()
    except Exception:
        logging.info(repr(instance_type_extra_specs_table))
        logging.exception('Exception while creating table')
        raise
def upgrade(migrate_engine):
    """Create the provider firewall rules table."""
    meta.bind = migrate_engine
    for fw_table in (provider_fw_rules,):
        try:
            fw_table.create()
        except Exception:
            # Dump the definition, log the traceback, and propagate.
            logging.info(repr(fw_table))
            logging.exception('Exception while creating table')
            raise
def upgrade(migrate_engine):
    """Create the instance_type_extra_specs table."""
    meta.bind = migrate_engine
    for tbl in (instance_type_extra_specs_table, ):
        try:
            tbl.create()
        except Exception:
            # Log and re-raise so migrate records the failure.
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            raise
def upgrade(migrate_engine):
    """Create every table in module-level ``new_tables`` and attach the
    volume_type_id column to volumes.
    """
    meta.bind = migrate_engine

    for pending in new_tables:
        try:
            pending.create()
        except Exception:
            logging.info(repr(pending))
            logging.exception('Exception while creating table')
            raise

    # Existing volumes gain a type reference.
    volumes.create_column(volume_type_id)
def upgrade(migrate_engine):
    """Create the tables listed in ``new_tables``; add volume_type_id."""
    meta.bind = migrate_engine

    for tbl in new_tables:
        try:
            tbl.create()
        except Exception:
            # Log the failing table's definition before propagating.
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            raise

    volumes.create_column(volume_type_id)
def upgrade(migrate_engine):
    """Create the snapshots table, rolling it back on failure."""
    meta.bind = migrate_engine

    created = [snapshots]
    for snap_table in created:
        try:
            snap_table.create()
        except Exception:
            logging.info(repr(snap_table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=created)
            raise
def upgrade(migrate_engine):
    """Create the snapshots table; clean up and re-raise on error."""
    meta.bind = migrate_engine

    try:
        snapshots.create()
    except Exception:
        # Log what we tried to create, drop any partial result,
        # and let migrate see the failure.
        logging.info(repr(snapshots))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[snapshots])
        raise
Esempio n. 25
0
def upgrade(migrate_engine):
    """Create compute_nodes and record which host an instance launched on."""
    meta.bind = migrate_engine

    created = [compute_nodes]
    for node_table in created:
        try:
            node_table.create()
        except Exception:
            logging.info(repr(node_table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=created)
            raise

    # New column on the existing instances table.
    instances.create_column(instances_launched_on)
Esempio n. 26
0
def upgrade(migrate_engine):
    """Add the compute_nodes table and the instances.launched_on column."""
    meta.bind = migrate_engine

    try:
        compute_nodes.create()
    except Exception:
        # Drop the partial table and propagate so migrate records
        # the failed run.
        logging.info(repr(compute_nodes))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[compute_nodes])
        raise

    instances.create_column(instances_launched_on)
def upgrade(migrate_engine):
    """Create the bare-metal (phy_*) tables.

    On failure, every table in the group is dropped (drop_all skips
    tables that were never created), so a failed run leaves no
    partial schema behind.
    """
    meta.bind = migrate_engine

    tables = [phy_hosts, phy_interfaces, phy_deployments, phy_pxe_ips]
    for table in tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            # Previously only phy_hosts was dropped even though four
            # tables may have been created; drop the whole group.
            meta.drop_all(tables=tables)
            raise
def upgrade(migrate_engine):
    """Create the builds table and add instances.architecture."""
    meta.bind = migrate_engine
    for table in (builds, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            # Previously the error was swallowed and execution fell
            # through to the column addition below against a broken
            # schema; log and re-raise like the sibling migrations.
            logging.exception('Exception while creating table')
            raise

    instances = Table('instances', meta, autoload=True,
                      autoload_with=migrate_engine)

    # Add columns to existing tables
    instances.create_column(architecture)
Esempio n. 29
0
def upgrade(migrate_engine):
    """Create the builds table, then add instances.architecture."""
    meta.bind = migrate_engine
    for table in (builds, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            # Fix: don't swallow the error and continue to the column
            # addition below; log the traceback and propagate.
            logging.exception('Exception while creating table')
            raise

    instances = Table('instances',
                      meta,
                      autoload=True,
                      autoload_with=migrate_engine)

    # Add columns to existing tables
    instances.create_column(architecture)
Esempio n. 30
0
def upgrade(migrate_engine):
    """Create the initial schema: every base table, in dependency order.

    On any failure the whole group is dropped and the error re-raised.
    """
    meta.bind = migrate_engine

    # NOTE: list order matters -- referenced tables come before the
    # tables that reference them.
    initial_tables = [auth_tokens,
                      instances, key_pairs, networks, fixed_ips,
                      floating_ips, quotas, security_groups,
                      security_group_inst_assoc, security_group_rules,
                      services, users, projects,
                      user_project_association,
                      user_project_role_association,
                      user_role_association, volumes, export_devices]
    for tbl in initial_tables:
        try:
            tbl.create()
        except Exception:
            logging.info(repr(tbl))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=initial_tables)
            raise
Esempio n. 31
0
def upgrade(migrate_engine):
    """Create the full initial table set, rolling back on any failure."""
    meta.bind = migrate_engine

    # Creation order is significant (foreign-key dependencies).
    schema = [
        auth_tokens, instances, key_pairs, networks, fixed_ips,
        floating_ips, quotas, security_groups,
        security_group_inst_assoc, security_group_rules, services,
        users, projects, user_project_association,
        user_project_role_association, user_role_association,
        volumes, export_devices,
    ]
    for entry in schema:
        try:
            entry.create()
        except Exception:
            logging.info(repr(entry))
            logging.exception('Exception while creating table')
            # Remove everything created so far before re-raising.
            meta.drop_all(tables=schema)
            raise
Esempio n. 32
0
def _run_wsgi(paste_config_file, apis):
    """Load each configured API from the paste config and start a server.

    APIs without a paste configuration are skipped. Returns the running
    server, or None when no API could be loaded.
    """
    logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
    apps = []
    for api in apis:
        # NOTE: the log format below interpolates from locals(), so the
        # names `api` and `config` must keep these exact spellings.
        config = wsgi.load_paste_configuration(paste_config_file, api)
        if config is None:
            logging.debug(_("No paste configuration for app: %s"), api)
            continue
        logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
        logging.info(_("Running %s API"), api)
        app = wsgi.load_paste_app(paste_config_file, api)
        listen_port = getattr(FLAGS, "%s_listen_port" % api)
        listen_host = getattr(FLAGS, "%s_listen" % api)
        apps.append((app, listen_port, listen_host))

    if not apps:
        logging.error(_("No known API applications configured in %s."),
                      paste_config_file)
        return

    server = wsgi.Server()
    for app_spec in apps:
        server.start(*app_spec)
    return server
Esempio n. 33
0
def upgrade(migrate_engine):
    """Create instance_types and seed it with the legacy flavors."""
    meta.bind = migrate_engine
    try:
        instance_types.create()
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while creating instance_types table')
        raise

    # Here are the old static instance types
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
    try:
        ins = instance_types.insert()
        # FIXME(kpepple) should we be seeding created_at / updated_at ?
        for name, values in INSTANCE_TYPES.iteritems():
            ins.execute({'name': name,
                         'memory_mb': values["memory_mb"],
                         'vcpus': values["vcpus"],
                         'deleted': False,
                         'local_gb': values["local_gb"],
                         'flavorid': values["flavorid"]})
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while seeding instance_types table')
        raise
Esempio n. 34
0
 def test_module_level_methods_handle_context_arg(self):
     """Module-level log helpers must accept a ``context`` kwarg."""
     log.info("foo", context=_fake_context())
     # self.assert_ is a long-deprecated unittest alias (removed in
     # Python 3.12); assertTrue is the supported spelling.
     self.assertTrue(True)  # didn't raise exception
Esempio n. 35
0
    def _tenant_usages_for_period(self, context, period_start,
                                  period_stop, tenant_id=None, detailed=True):
        """Build per-tenant usage summaries for instances active in the
        window [period_start, period_stop].

        :param context: request context for compute API / DB calls
        :param period_start: start of the accounting window (datetime)
        :param period_stop: end of the accounting window (datetime)
        :param tenant_id: optional tenant filter
        :param detailed: when True, each summary also carries per-server
            usage records under 'server_usages'
        :returns: list of per-tenant summary dicts
        """
        compute_api = api.API()
        instances = compute_api.get_active_by_window(context,
                                                     period_start,
                                                     period_stop,
                                                     tenant_id)
        # NOTE(review): debug leftover -- logs the entire instance list
        # on every call via a function-local import; consider removing.
        from nova import log as logging
        logging.info(instances)
        rval = {}
        flavors = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance,
                                            period_start,
                                            period_stop)
            flavor_type = instance['instance_type_id']

            # Cache instance-type lookups; instances whose type no
            # longer exists cannot be billed and are skipped.
            if not flavors.get(flavor_type):
                try:
                    it_ref = compute_api.get_instance_type(context,
                                                           flavor_type)
                    flavors[flavor_type] = it_ref
                except exception.InstanceTypeNotFound:
                    # can't bill if there is no instance type
                    continue

            flavor = flavors[flavor_type]

            info['name'] = instance['display_name']
            info['memory_mb'] = flavor['memory_mb']
            info['local_gb'] = flavor['local_gb']
            info['vcpus'] = flavor['vcpus']
            info['tenant_id'] = instance['project_id']
            info['flavor'] = flavor['name']
            info['started_at'] = instance['launched_at']
            info['ended_at'] = instance['terminated_at']

            if info['ended_at']:
                info['state'] = 'terminated'
            else:
                info['state'] = instance['vm_state']

            now = datetime.utcnow()

            if info['state'] == 'terminated':
                delta = info['ended_at'] - info['started_at']
            else:
                delta = now - info['started_at']

            # Uptime in seconds. The previous expression used
            # days * 24 * 60 (minutes) plus delta.seconds (seconds),
            # mixing units; 24 * 3600 converts whole days to seconds
            # before adding the sub-day remainder.
            info['uptime'] = delta.days * 24 * 3600 + delta.seconds

            if not info['tenant_id'] in rval:
                summary = {}
                summary['tenant_id'] = info['tenant_id']
                if detailed:
                    summary['server_usages'] = []
                summary['total_local_gb_usage'] = 0
                summary['total_vcpus_usage'] = 0
                summary['total_memory_mb_usage'] = 0
                summary['total_hours'] = 0
                summary['start'] = period_start
                summary['stop'] = period_stop
                rval[info['tenant_id']] = summary

            summary = rval[info['tenant_id']]
            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
            summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                                 info['hours'])
            summary['total_hours'] += info['hours']
            if detailed:
                summary['server_usages'].append(info)

        return rval.values()
Esempio n. 36
0
    def instance_rules(self, instance, network_info=None):
        """Build the iptables/ip6tables rule lists for one instance.

        Returns a ``(ipv4_rules, ipv6_rules)`` pair of rule-argument
        strings: base hygiene rules (drop invalid, allow established),
        DHCP/RA allowances, optional project-network allowances, the
        instance's security-group rules, and a final fallback jump.
        """
        if not network_info:
            network_info = netutils.get_network_info(instance)
        ctxt = context.get_admin_context()

        ipv4_rules = []
        ipv6_rules = []

        # Always drop invalid packets
        ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
        ipv6_rules += ['-m state --state ' 'INVALID -j DROP']

        # Allow established connections
        ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']

        # Allow DHCP responses from each network's gateway.
        dhcp_servers = [network['gateway'] for (network, _m) in network_info]

        for dhcp_server in dhcp_servers:
            ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                              '-j ACCEPT' % (dhcp_server,))

        # Allow project network traffic
        if FLAGS.allow_project_net_traffic:
            cidrs = [network['cidr'] for (network, _m) in network_info]
            for cidr in cidrs:
                ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))

        # We wrap these in FLAGS.use_ipv6 because they might cause
        # a DB lookup. The other ones are just list operations, so
        # they're not worth the clutter.
        if FLAGS.use_ipv6:
            # Allow RA responses
            gateways_v6 = [network['gateway_v6'] for (network, _) in
                           network_info]
            for gateway_v6 in gateways_v6:
                ipv6_rules.append(
                        '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))

            # Allow project network traffic (IPv6)
            if FLAGS.allow_project_net_traffic:
                cidrv6s = [network['cidr_v6'] for (network, _m)
                          in network_info]

                for cidrv6 in cidrv6s:
                    ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))

        security_groups = db.security_group_get_by_instance(ctxt,
                                                            instance['id'])

        # then, security group chains and rules
        for security_group in security_groups:
            rules = db.security_group_rule_get_by_security_group(ctxt,
                                                          security_group['id'])

            # NOTE: the loop below deliberately rebinds `rules` to one of
            # ipv4_rules/ipv6_rules per rule; the iterator created here
            # is unaffected by that rebinding.
            for rule in rules:
                logging.info('%r', rule)

                if not rule.cidr:
                    # Eventually, a mechanism to grant access for security
                    # groups will turn up here. It'll use ipsets.
                    continue

                version = netutils.get_ip_version(rule.cidr)
                if version == 4:
                    rules = ipv4_rules
                else:
                    rules = ipv6_rules

                protocol = rule.protocol
                # ICMP has a different protocol name in ip6tables.
                if version == 6 and rule.protocol == 'icmp':
                    protocol = 'icmpv6'

                args = ['-p', protocol, '-s', rule.cidr]

                if rule.protocol in ['udp', 'tcp']:
                    # Single port vs. port range.
                    if rule.from_port == rule.to_port:
                        args += ['--dport', '%s' % (rule.from_port,)]
                    else:
                        args += ['-m', 'multiport',
                                 '--dports', '%s:%s' % (rule.from_port,
                                                        rule.to_port)]
                elif rule.protocol == 'icmp':
                    # from_port/to_port carry ICMP type/code; -1 means "any".
                    icmp_type = rule.from_port
                    icmp_code = rule.to_port

                    if icmp_type == -1:
                        icmp_type_arg = None
                    else:
                        icmp_type_arg = '%s' % icmp_type
                        if not icmp_code == -1:
                            icmp_type_arg += '/%s' % icmp_code

                    if icmp_type_arg:
                        if version == 4:
                            args += ['-m', 'icmp', '--icmp-type',
                                     icmp_type_arg]
                        elif version == 6:
                            args += ['-m', 'icmp6', '--icmpv6-type',
                                     icmp_type_arg]

                args += ['-j ACCEPT']
                # `rules` aliases ipv4_rules or ipv6_rules here, so this
                # += mutates the matching version-specific list in place.
                rules += [' '.join(args)]

        # Anything not matched above falls through to the shared
        # security-group fallback chain.
        ipv4_rules += ['-j $sg-fallback']
        ipv6_rules += ['-j $sg-fallback']

        return ipv4_rules, ipv6_rules
Esempio n. 37
0
    def _tenant_usages_for_period(self,
                                  context,
                                  period_start,
                                  period_stop,
                                  tenant_id=None,
                                  detailed=True):
        """Summarize per-tenant resource usage for instances active in
        [period_start, period_stop].

        :param context: request context for compute API / DB calls
        :param period_start: window start (datetime)
        :param period_stop: window stop (datetime)
        :param tenant_id: optional tenant filter
        :param detailed: include per-server records under 'server_usages'
        :returns: list of per-tenant summary dicts
        """
        compute_api = api.API()
        instances = compute_api.get_active_by_window(context, period_start,
                                                     period_stop, tenant_id)
        # NOTE(review): debug leftover -- logs the entire instance list
        # on every call via a function-local import; consider removing.
        from nova import log as logging
        logging.info(instances)
        rval = {}
        flavors = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance, period_start,
                                            period_stop)
            flavor_type = instance['instance_type_id']

            # Cache instance-type lookups; skip (don't bill) instances
            # whose type no longer exists.
            if not flavors.get(flavor_type):
                try:
                    it_ref = compute_api.get_instance_type(
                        context, flavor_type)
                    flavors[flavor_type] = it_ref
                except exception.InstanceTypeNotFound:
                    # can't bill if there is no instance type
                    continue

            flavor = flavors[flavor_type]

            info['name'] = instance['display_name']
            info['memory_mb'] = flavor['memory_mb']
            # Local disk is root plus ephemeral.
            info['local_gb'] = flavor['root_gb'] + flavor['ephemeral_gb']
            info['vcpus'] = flavor['vcpus']
            info['tenant_id'] = instance['project_id']
            info['flavor'] = flavor['name']
            info['started_at'] = instance['launched_at']
            info['ended_at'] = instance['terminated_at']

            if info['ended_at']:
                info['state'] = 'terminated'
            else:
                info['state'] = instance['vm_state']

            now = datetime.utcnow()

            if info['state'] == 'terminated':
                delta = info['ended_at'] - info['started_at']
            else:
                delta = now - info['started_at']

            # Uptime in seconds. The previous expression used
            # days * 24 * 60 (minutes) plus delta.seconds (seconds),
            # mixing units; 24 * 3600 converts whole days to seconds
            # before adding the sub-day remainder.
            info['uptime'] = delta.days * 24 * 3600 + delta.seconds

            if not info['tenant_id'] in rval:
                summary = {}
                summary['tenant_id'] = info['tenant_id']
                if detailed:
                    summary['server_usages'] = []
                summary['total_local_gb_usage'] = 0
                summary['total_vcpus_usage'] = 0
                summary['total_memory_mb_usage'] = 0
                summary['total_hours'] = 0
                summary['start'] = period_start
                summary['stop'] = period_stop
                rval[info['tenant_id']] = summary

            summary = rval[info['tenant_id']]
            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
            summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                                 info['hours'])
            summary['total_hours'] += info['hours']
            if detailed:
                summary['server_usages'].append(info)

        return rval.values()
def upgrade(migrate_engine):
    """Backfill instance_info_caches with serialized network info.

    For every instance, gather its virtual interfaces, their networks,
    fixed IPs and floating IPs, reshape them into the network-info model
    (a list of vif dicts) and insert one JSON row per instance into the
    instance_info_caches table.

    :param migrate_engine: SQLAlchemy engine bound by the migration runner
    """
    meta.bind = migrate_engine
    # grab tables
    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    vifs = Table('virtual_interfaces', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    floating_ips = Table('floating_ips', meta, autoload=True)

    # all of these functions return a python list of python dicts
    # that have nothing to do with sqlalchemy objects whatsoever
    # after returning
    def get_instances():
        # want all instances whether there is network info or not
        s = select([instances.c.id, instances.c.uuid])
        keys = ('id', 'uuid')

        return [dict(zip(keys, row)) for row in s.execute()]

    def get_vifs_by_instance_id(instance_id):
        # one dict per virtual interface owned by the instance
        s = select([vifs.c.id, vifs.c.uuid, vifs.c.address, vifs.c.network_id],
                   vifs.c.instance_id == instance_id)
        keys = ('id', 'uuid', 'address', 'network_id')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_network_by_id(network_id):
        # returns a (possibly empty) list; callers must handle no match
        s = select([networks.c.uuid, networks.c.label,
                    networks.c.project_id,
                    networks.c.dns1, networks.c.dns2,
                    networks.c.cidr, networks.c.cidr_v6,
                    networks.c.gateway, networks.c.gateway_v6,
                    networks.c.injected, networks.c.multi_host,
                    networks.c.bridge, networks.c.bridge_interface,
                    networks.c.vlan],
                   networks.c.id == network_id)
        keys = ('uuid', 'label', 'project_id', 'dns1', 'dns2',
                'cidr', 'cidr_v6', 'gateway', 'gateway_v6',
                'injected', 'multi_host', 'bridge', 'bridge_interface', 'vlan')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_fixed_ips_by_vif_id(vif_id):
        s = select([fixed_ips.c.id, fixed_ips.c.address],
                   fixed_ips.c.virtual_interface_id == vif_id)
        keys = ('id', 'address')
        fixed_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        # fixed ips have floating ips, so here they are
        for fixed_ip in fixed_ip_list:
            fixed_ip['version'] = 4
            fixed_ip['floating_ips'] =\
                   get_floating_ips_by_fixed_ip_id(fixed_ip['id'])
            fixed_ip['type'] = 'fixed'
            del fixed_ip['id']

        return fixed_ip_list

    def get_floating_ips_by_fixed_ip_id(fixed_ip_id):
        s = select([floating_ips.c.address],
                   floating_ips.c.fixed_ip_id == fixed_ip_id)
        # FIX: ('address') without a trailing comma is a plain string, so
        # zip() iterated its characters and produced keys 'a', 'd', 'r', ...
        # instead of a single 'address' key. A 1-tuple needs the comma.
        keys = ('address',)
        floating_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        for floating_ip in floating_ip_list:
            floating_ip['version'] = 4
            floating_ip['type'] = 'floating'

        return floating_ip_list

    def _ip_dict_from_string(ip_string, type):
        # implicitly returns None when ip_string is empty/None
        # (e.g. an unset dns or gateway column)
        if ip_string:
            ip = {'address': ip_string,
                  'type': type}
            if ':' in ip_string:
                ip['version'] = 6
            else:
                ip['version'] = 4

            return ip

    def _get_fixed_ipv6_dict(cidr, mac, project_id):
        # v6 fixed addresses are derived from cidr/mac/project, not stored
        ip_string = ipv6.to_global(cidr, mac, project_id)
        return {'version': 6,
                'address': ip_string,
                'floating_ips': []}

    def _create_subnet(version, network, vif):
        # build the subnet dict for either the v4 or the v6 half of a network;
        # expects network['dns1']/['dns2'] to already be ip dicts (or None)
        if version == 4:
            cidr = network['cidr']
            gateway = network['gateway']
            ips = get_fixed_ips_by_vif_id(vif['id'])
        else:
            cidr = network['cidr_v6']
            gateway = network['gateway_v6']
            ips = [_get_fixed_ipv6_dict(network['cidr_v6'],
                                        vif['address'],
                                        network['project_id'])]

        # NOTE(tr3buchet) routes is left empty for now because there
        # is no good way to generate them or determine which is default
        subnet = {'version': version,
                  'cidr': cidr,
                  'dns': [],
                  'gateway': _ip_dict_from_string(gateway, 'gateway'),
                  'routes': [],
                  'ips': ips}

        if network['dns1'] and network['dns1']['version'] == version:
            subnet['dns'].append(network['dns1'])
        if network['dns2'] and network['dns2']['version'] == version:
            subnet['dns'].append(network['dns2'])

        return subnet

    def _update_network(vif, network):
        # mutate the network dict in place to match the network-info model
        # vifs have a network which has subnets, so create the subnets
        # subnets contain all of the ip information
        network['subnets'] = []

        network['dns1'] = _ip_dict_from_string(network['dns1'], 'dns')
        network['dns2'] = _ip_dict_from_string(network['dns2'], 'dns')

        # nova networks can only have 2 subnets
        if network['cidr']:
            network['subnets'].append(_create_subnet(4, network, vif))
        if network['cidr_v6']:
            network['subnets'].append(_create_subnet(6, network, vif))

        # put network together to fit model
        network['id'] = network.pop('uuid')
        network['meta'] = {}

        # NOTE(tr3buchet) this isn't absolutely necessary as hydration
        # would still work with these as keys, but cache generated by
        # the model would show these keys as a part of meta. i went
        # ahead and set it up the same way just so it looks the same
        if network['project_id']:
            network['meta']['project_id'] = network['project_id']
        del network['project_id']
        if network['injected']:
            network['meta']['injected'] = network['injected']
        del network['injected']
        if network['multi_host']:
            network['meta']['multi_host'] = network['multi_host']
        del network['multi_host']
        if network['bridge_interface']:
            network['meta']['bridge_interface'] = \
                                              network['bridge_interface']
        del network['bridge_interface']
        if network['vlan']:
            network['meta']['vlan'] = network['vlan']
        del network['vlan']

        # ip information now lives in the subnet, pull them out of network
        del network['dns1']
        del network['dns2']
        del network['cidr']
        del network['cidr_v6']
        del network['gateway']
        del network['gateway_v6']

        # don't need meta if it's empty
        if not network['meta']:
            del network['meta']

    # preload caches table
    # list is made up of a row(instance_id, nw_info_json) for each instance
    for instance in get_instances():
        logging.info("Updating %s" % (instance['uuid']))
        instance_id = instance['id']
        instance_uuid = instance['uuid']

        # instances have vifs so an instance nw_info is
        # a list of dicts, 1 dict for each vif
        nw_info = get_vifs_by_instance_id(instance_id)
        logging.info("VIFs for Instance %s: \n %s" % \
                        (instance['uuid'], nw_info))
        for vif in nw_info:
            networks_ = get_network_by_id(vif['network_id'])
            if networks_:
                network = networks_[0]
                logging.info("Network for Instance %s: \n %s" % \
                        (instance['uuid'], network))
                _update_network(vif, network)
            else:
                # vif points at a network row that no longer exists
                network = None

            # put vif together to fit model
            del vif['network_id']
            vif['id'] = vif.pop('uuid')
            vif['network'] = network
            # vif['meta'] could also be set to contain rxtx data here
            # but it isn't exposed in the api and is still being rewritten

            logging.info("VIF network for instance %s: \n %s" % \
                        (instance['uuid'], vif['network']))

        # jsonify nw_info
        row = {'created_at': utils.utcnow(),
               'updated_at': utils.utcnow(),
               'instance_id': instance_uuid,
               'network_info': json.dumps(nw_info)}

        # write row to table
        insert = instance_info_caches.insert().values(**row)
        migrate_engine.execute(insert)
Esempio n. 39
0
 def test_module_level_methods_handle_context_arg(self):
     """Module-level log helpers must accept a ``context`` keyword arg."""
     # success is simply not raising TypeError on the extra kwarg
     log.info("foo", context=_fake_context())
     self.assert_(True)  # didn't raise exception
def upgrade(migrate_engine):
    """Backfill instance_info_caches with serialized network info.

    For every instance, gather its virtual interfaces, their networks,
    fixed IPs and floating IPs, reshape them into the network-info model
    (a list of vif dicts) and insert one JSON row per instance into the
    instance_info_caches table.

    :param migrate_engine: SQLAlchemy engine bound by the migration runner
    """
    meta.bind = migrate_engine
    # grab tables
    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    vifs = Table('virtual_interfaces', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    floating_ips = Table('floating_ips', meta, autoload=True)

    # all of these functions return a python list of python dicts
    # that have nothing to do with sqlalchemy objects whatsoever
    # after returning
    def get_instances():
        # want all instances whether there is network info or not
        s = select([instances.c.id, instances.c.uuid])
        keys = ('id', 'uuid')

        return [dict(zip(keys, row)) for row in s.execute()]

    def get_vifs_by_instance_id(instance_id):
        # one dict per virtual interface owned by the instance
        s = select([vifs.c.id, vifs.c.uuid, vifs.c.address, vifs.c.network_id],
                   vifs.c.instance_id == instance_id)
        keys = ('id', 'uuid', 'address', 'network_id')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_network_by_id(network_id):
        # returns a (possibly empty) list; callers must handle no match
        s = select([
            networks.c.uuid, networks.c.label, networks.c.project_id,
            networks.c.dns1, networks.c.dns2, networks.c.cidr,
            networks.c.cidr_v6, networks.c.gateway, networks.c.gateway_v6,
            networks.c.injected, networks.c.multi_host, networks.c.bridge,
            networks.c.bridge_interface, networks.c.vlan
        ], networks.c.id == network_id)
        keys = ('uuid', 'label', 'project_id', 'dns1', 'dns2', 'cidr',
                'cidr_v6', 'gateway', 'gateway_v6', 'injected', 'multi_host',
                'bridge', 'bridge_interface', 'vlan')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_fixed_ips_by_vif_id(vif_id):
        s = select([fixed_ips.c.id, fixed_ips.c.address],
                   fixed_ips.c.virtual_interface_id == vif_id)
        keys = ('id', 'address')
        fixed_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        # fixed ips have floating ips, so here they are
        for fixed_ip in fixed_ip_list:
            fixed_ip['version'] = 4
            fixed_ip['floating_ips'] =\
                   get_floating_ips_by_fixed_ip_id(fixed_ip['id'])
            fixed_ip['type'] = 'fixed'
            del fixed_ip['id']

        return fixed_ip_list

    def get_floating_ips_by_fixed_ip_id(fixed_ip_id):
        s = select([floating_ips.c.address],
                   floating_ips.c.fixed_ip_id == fixed_ip_id)
        # FIX: ('address') without a trailing comma is a plain string, so
        # zip() iterated its characters and produced keys 'a', 'd', 'r', ...
        # instead of a single 'address' key. A 1-tuple needs the comma.
        keys = ('address',)
        floating_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        for floating_ip in floating_ip_list:
            floating_ip['version'] = 4
            floating_ip['type'] = 'floating'

        return floating_ip_list

    def _ip_dict_from_string(ip_string, type):
        # implicitly returns None when ip_string is empty/None
        # (e.g. an unset dns or gateway column)
        if ip_string:
            ip = {'address': ip_string, 'type': type}
            if ':' in ip_string:
                ip['version'] = 6
            else:
                ip['version'] = 4

            return ip

    def _get_fixed_ipv6_dict(cidr, mac, project_id):
        # v6 fixed addresses are derived from cidr/mac/project, not stored
        ip_string = ipv6.to_global(cidr, mac, project_id)
        return {'version': 6, 'address': ip_string, 'floating_ips': []}

    def _create_subnet(version, network, vif):
        # build the subnet dict for either the v4 or the v6 half of a network;
        # expects network['dns1']/['dns2'] to already be ip dicts (or None)
        if version == 4:
            cidr = network['cidr']
            gateway = network['gateway']
            ips = get_fixed_ips_by_vif_id(vif['id'])
        else:
            cidr = network['cidr_v6']
            gateway = network['gateway_v6']
            ips = [
                _get_fixed_ipv6_dict(network['cidr_v6'], vif['address'],
                                     network['project_id'])
            ]

        # NOTE(tr3buchet) routes is left empty for now because there
        # is no good way to generate them or determine which is default
        subnet = {
            'version': version,
            'cidr': cidr,
            'dns': [],
            'gateway': _ip_dict_from_string(gateway, 'gateway'),
            'routes': [],
            'ips': ips
        }

        if network['dns1'] and network['dns1']['version'] == version:
            subnet['dns'].append(network['dns1'])
        if network['dns2'] and network['dns2']['version'] == version:
            subnet['dns'].append(network['dns2'])

        return subnet

    # preload caches table
    # list is made up of a row(instance_id, nw_info_json) for each instance
    for instance in get_instances():
        logging.info("Updating %s" % (instance['uuid']))
        instance_id = instance['id']
        instance_uuid = instance['uuid']

        # instances have vifs so an instance nw_info is
        # a list of dicts, 1 dict for each vif
        nw_info = get_vifs_by_instance_id(instance_id)
        logging.info("VIFs for Instance %s: \n %s" % \
                        (instance['uuid'], nw_info))
        for vif in nw_info:
            # FIX: indexing [0] on an empty result raised IndexError when a
            # vif referenced a deleted network row; fall back to None instead
            networks_ = get_network_by_id(vif['network_id'])
            if networks_:
                network = networks_[0]
                logging.info("Network for Instance %s: \n %s" % \
                        (instance['uuid'], network))

                # vifs have a network which has subnets, so create the subnets
                # subnets contain all of the ip information
                network['subnets'] = []

                network['dns1'] = _ip_dict_from_string(network['dns1'], 'dns')
                network['dns2'] = _ip_dict_from_string(network['dns2'], 'dns')

                # nova networks can only have 2 subnets
                if network['cidr']:
                    network['subnets'].append(_create_subnet(4, network, vif))
                if network['cidr_v6']:
                    network['subnets'].append(_create_subnet(6, network, vif))

                # put network together to fit model
                network['id'] = network.pop('uuid')
                network['meta'] = {}

                # NOTE(tr3buchet) this isn't absolutely necessary as hydration
                # would still work with these as keys, but cache generated by
                # the model would show these keys as a part of meta. i went
                # ahead and set it up the same way just so it looks the same
                if network['project_id']:
                    network['meta']['project_id'] = network['project_id']
                del network['project_id']
                if network['injected']:
                    network['meta']['injected'] = network['injected']
                del network['injected']
                if network['multi_host']:
                    network['meta']['multi_host'] = network['multi_host']
                del network['multi_host']
                if network['bridge_interface']:
                    network['meta']['bridge_interface'] = \
                                                  network['bridge_interface']
                del network['bridge_interface']
                if network['vlan']:
                    network['meta']['vlan'] = network['vlan']
                del network['vlan']

                # ip information now lives in the subnet, pull them out
                del network['dns1']
                del network['dns2']
                del network['cidr']
                del network['cidr_v6']
                del network['gateway']
                del network['gateway_v6']

                # don't need meta if it's empty
                if not network['meta']:
                    del network['meta']
            else:
                network = None

            # put vif together to fit model
            del vif['network_id']
            vif['id'] = vif.pop('uuid')
            vif['network'] = network
            # vif['meta'] could also be set to contain rxtx data here
            # but it isn't exposed in the api and is still being rewritten

            logging.info("VIF network for instance %s: \n %s" % \
                        (instance['uuid'], vif['network']))

        # jsonify nw_info
        row = {
            'created_at': utils.utcnow(),
            'updated_at': utils.utcnow(),
            'instance_id': instance_uuid,
            'network_info': json.dumps(nw_info)
        }

        # write row to table
        insert = instance_info_caches.insert().values(**row)
        migrate_engine.execute(insert)