Example 1
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                logging.debug(_("The service database object disappeared, "
                                "Recreating it."))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            db.service_update(ctxt,
                             self.service_id,
                             {'report_count': service_ref['report_count'] + 1})

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, "model_disconnected", False):
                self.model_disconnected = False
                logging.error(_("Recovered model server connection!"))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, "model_disconnected", False):
                self.model_disconnected = True
                logging.exception(_("model server went away"))
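The report_state variants in this collection (Examples 1, 2 and 9) share a small guard for a flaky backend: the first failure is logged with a full traceback, later failures stay quiet, and the first success afterwards logs a single recovery message. A minimal standalone sketch of that guard; the names PeriodicReporter and check_backend are illustrative, not part of the Nova code:

import logging

logger = logging.getLogger(__name__)


class PeriodicReporter(object):
    """Sketch of the model_disconnected guard used by report_state above."""

    def __init__(self, check_backend):
        self.check_backend = check_backend  # callable that touches the backend
        self.backend_disconnected = False

    def tick(self):
        try:
            self.check_backend()
            if self.backend_disconnected:
                self.backend_disconnected = False
                logger.error('Recovered backend connection!')
        except Exception:
            # Log the traceback only on the first failure; stay quiet after.
            if not self.backend_disconnected:
                self.backend_disconnected = True
                logger.exception('backend went away')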
Example 2
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = FLAGS.node_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                logging.debug(_('The service database object disappeared, '
                                'Recreating it.'))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone

            db.service_update(ctxt,
                             self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                logging.error(_('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                logging.exception(_('model server went away'))
Example 3
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    for table in tables:
        try:
            table.create()
        except Exception:
            logging.exception(repr(table))
Example 4
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    tables = [
        certificates, console_pools, consoles, instance_actions, iscsi_targets
    ]
    for table in tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=tables)
            raise

    auth_tokens.c.user_id.alter(type=String(length=255,
                                            convert_unicode=False,
                                            assert_unicode=None,
                                            unicode_error=None,
                                            _warn_on_bytestring=False))

    instances.create_column(instances_availability_zone)
    instances.create_column(instances_locked)
    networks.create_column(networks_cidr_v6)
    networks.create_column(networks_ra_server)
    services.create_column(services_availability_zone)
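Most of the migration upgrade() functions in this collection repeat the same guard around table.create(): log repr(table) at info level, log the exception with its traceback, optionally drop everything created so far, and re-raise so the migration fails visibly. A hedged sketch of that guard factored into a helper; the helper name is illustrative and plain logging is assumed, as in the snippets above:

import logging


def create_tables_or_rollback(meta, migrate_engine, tables):
    """Create each table; on failure, log it, drop what was created, re-raise.

    Sketch of the pattern used by the upgrade() functions above, assuming
    SQLAlchemy Table objects bound to the shared MetaData instance.
    """
    meta.bind = migrate_engine
    for table in tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            # Best-effort cleanup so a partially applied migration does not
            # leave stray tables behind.
            meta.drop_all(tables=tables)
            raise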
Example 5
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    for table in tables:
        try:
            table.drop()
        except Exception:
            logging.exception(repr(table))
Example 6
def upgrade(migrate_engine):
    # Upgrade operations go here
    # Don't create your own engine; bind migrate_engine
    # to your metadata
    meta.bind = migrate_engine
    try:
        instance_types.create()
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while creating instance_types table')
        raise

    # Here are the old static instance types
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
    try:
        i = instance_types.insert()
        for name, values in INSTANCE_TYPES.iteritems():
            # FIXME(kpepple) should we be seeding created_at / updated_at ?
            # now = datetime.datetime.utcnow()
            i.execute({'name': name, 'memory_mb': values["memory_mb"],
                        'vcpus': values["vcpus"], 'deleted': False,
                        'local_gb': values["local_gb"],
                        'flavorid': values["flavorid"]})
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while seeding instance_types table')
        raise
Example 7
    def _process_data(self, msg_id, ctxt, method, args):
        """Thread that maigcally looks for a method on the proxy
        object and calls it.
        """

        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        try:
            rval = node_func(context=ctxt, **node_args)
            if msg_id:
                # Check if the result was a generator
                if isinstance(rval, types.GeneratorType):
                    for x in rval:
                        msg_reply(msg_id, x, None)
                else:
                    msg_reply(msg_id, rval, None)

                # This final None tells multicall that it is done.
                msg_reply(msg_id, None, None)
            elif isinstance(rval, types.GeneratorType):
                # NOTE(vish): this iterates through the generator
                list(rval)
        except Exception as e:
            logging.exception('Exception during message handling')
            if msg_id:
                msg_reply(msg_id, None, sys.exc_info())
        return
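On the reply side, _process_data answers a multicall by sending one msg_reply per item the generator yields and a final reply whose result is None. A hypothetical consumer of that reply stream; reply_queue and RemoteError here are illustrative stand-ins, not the actual nova.rpc API:

class RemoteError(Exception):
    """Illustrative stand-in for the error raised when the server replied
    with a failure instead of a result."""


def consume_multicall(reply_queue):
    """Yield results until the terminating None reply arrives.

    Assumes reply_queue.get() returns the (result, failure) pairs that
    msg_reply() produced on the server side.
    """
    while True:
        result, failure = reply_queue.get()
        if failure is not None:
            # The server sent sys.exc_info(); surface it to the caller.
            raise RemoteError(failure)
        if result is None:
            # The final None reply means the server-side generator is done.
            return
        yield result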
Example 8
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    tables = [certificates, console_pools, consoles, instance_actions,
              iscsi_targets]
    for table in tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=tables)
            raise

    auth_tokens.c.user_id.alter(type=String(length=255,
                                            convert_unicode=False,
                                            assert_unicode=None,
                                            unicode_error=None,
                                            _warn_on_bytestring=False))

    instances.create_column(instances_availability_zone)
    instances.create_column(instances_locked)
    networks.create_column(networks_cidr_v6)
    networks.create_column(networks_ra_server)
    services.create_column(services_availability_zone)
Example 9
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                logging.debug(
                    _('The service database object disappeared, '
                      'Recreating it.'))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            db.service_update(
                ctxt, self.service_id,
                {'report_count': service_ref['report_count'] + 1})

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                logging.error(_('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                logging.exception(_('model server went away'))
Example 10
    def _process_data(self, msg_id, ctxt, method, args):
        """Thread that maigcally looks for a method on the proxy
        object and calls it.
        """

        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        try:
            rval = node_func(context=ctxt, **node_args)
            if msg_id:
                # Check if the result was a generator
                if isinstance(rval, types.GeneratorType):
                    for x in rval:
                        msg_reply(msg_id, x, None)
                else:
                    msg_reply(msg_id, rval, None)

                # This final None tells multicall that it is done.
                msg_reply(msg_id, None, None)
            elif isinstance(rval, types.GeneratorType):
                # NOTE(vish): this iterates through the generator
                list(rval)
        except Exception as e:
            logging.exception("Exception during message handling")
            if msg_id:
                msg_reply(msg_id, None, sys.exc_info())
        return
Example 11
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    try:
        export_devices.create()
    except Exception:
        logging.info(repr(export_devices))
        logging.exception('Exception while creating table')
        raise
Example 12
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    try:
        export_devices.create()
    except Exception:
        logging.info(repr(export_devices))
        logging.exception('Exception while creating table')
        raise
Example 13
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    meta.bind = migrate_engine
    try:
        local_volumes.drop(migrate_engine)
    except Exception:
        logging.info(repr(local_volumes))
        logging.exception('Exception while dropping local_volumes table')
        raise
Example 14
    def _live_migration_common_check(self, context, instance_ref, dest):
        """Live migration common check routine.

        The checks below follow
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        # Checking shared storage connectivity
        self.mounted_on_same_shared_storage(context, instance_ref, dest)

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host (where instance was launched) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(context,
                                           instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.Invalid(_("host %s where instance was launched "
                                      "does not exist.")
                                       % instance_ref['launched_on'])
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.Invalid(_("Different hypervisor type"
                                      "(%(orig_hypervisor)s->"
                                      "%(dest_hypervisor)s)") % locals())

        # Checking hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.Invalid(_("Older hypervisor version"
                                      "(%(orig_hypervisor)s->"
                                      "%(dest_hypervisor)s)") % locals())

        # Checking cpuinfo.
        try:
            rpc.call(context,
                     db.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": 'compare_cpu',
                      "args": {'cpu_info': oservice_ref['cpu_info']}})

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(_("host %(dest)s is not compatible with "
                                "original host %(src)s.") % locals())
            raise
Example 15
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        s3_images.create()
    except Exception:
        logging.exception("Exception while creating table 's3_images'")
        meta.drop_all(tables=[s3_images])
        raise
Example 16
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        s3_images.create()
    except Exception:
        logging.exception("Exception while creating table 's3_images'")
        meta.drop_all(tables=[s3_images])
        raise
Example 17
    def _live_migration_common_check(self, context, instance_ref, dest):
        """Live migration common check routine.

        The checks below follow
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        # Checking shared storage connectivity
        self.mounted_on_same_shared_storage(context, instance_ref, dest)

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host (where instance was launched) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(
                context, instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()

        # Checking hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            rpc.call(
                context, db.queue_get_for(context, FLAGS.compute_topic, dest),
                {
                    "method": 'compare_cpu',
                    "args": {
                        'cpu_info': oservice_ref['cpu_info']
                    }
                })

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(
                _("host %(dest)s is not compatible with "
                  "original host %(src)s.") % locals())
            raise
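Compared with Example 14, this version replaces the string-formatted exception.Invalid(...) calls with dedicated exception classes (SourceHostUnavailable, InvalidHypervisorType, DestinationHypervisorTooOld), which keeps the call sites short and the messages in one place. A hedged sketch of how such classes can be declared around a message template; the base class and message texts below are illustrative, not the exact nova.exception code:

class MigrationCheckError(Exception):
    """Illustrative base class: formats a message template with kwargs."""

    message = 'An unknown migration check failed.'

    def __init__(self, **kwargs):
        super(MigrationCheckError, self).__init__(self.message % kwargs)


class SourceHostUnavailable(MigrationCheckError):
    message = 'The host where the instance was launched is not available.'


class InvalidHypervisorType(MigrationCheckError):
    message = 'Source and destination use different hypervisor types.'


class DestinationHypervisorTooOld(MigrationCheckError):
    message = 'The destination hypervisor is older than the source one.'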
Example 18
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    for table in (migrations, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            raise
Example 19
def upgrade(migrate_engine):
    # Upgrade operations go here
    # Don't create your own engine; bind migrate_engine
    # to your metadata
    meta.bind = migrate_engine
    try:
        local_volumes.create()
    except Exception:
        logging.info(repr(local_volumes))
        logging.exception('Exception while creating local_volumes table')
        raise
Example 20
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    try:
        block_device_mapping.create()
    except Exception:
        logging.info(repr(block_device_mapping))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[block_device_mapping])
        raise
Example 21
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    for table in (migrations, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            raise
Example 22
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    try:
        virtual_storage_arrays.create()
    except Exception:
        logging.info(repr(virtual_storage_arrays))
        logging.exception('Exception while creating table')
        raise
Example 23
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    try:
        virtual_storage_arrays.create()
    except Exception:
        logging.info(repr(virtual_storage_arrays))
        logging.exception('Exception while creating table')
        raise
Example 24
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    try:
        block_device_mapping.create()
    except Exception:
        logging.info(repr(block_device_mapping))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[block_device_mapping])
        raise
Example 25
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    try:
        snapshots.create()
    except Exception:
        logging.info(repr(snapshots))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[snapshots])
        raise
Example 26
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    for table in new_tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            raise

    volumes.create_column(volume_type_id)
Example 27
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    try:
        snapshots.create()
    except Exception:
        logging.info(repr(snapshots))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[snapshots])
        raise
Example 28
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    for table in new_tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            raise

    volumes.create_column(volume_type_id)
Example 29
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    for table in (instance_metadata_table, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            raise

    quotas.create_column(quota_metadata_items)
Example 30
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    for table in (instance_metadata_table, ):
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            raise

    quotas.create_column(quota_metadata_items)
Example 31
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    try:
        compute_nodes.create()
    except Exception:
        logging.info(repr(compute_nodes))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[compute_nodes])
        raise

    instances.create_column(instances_launched_on)
Example 32
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    try:
        compute_nodes.create()
    except Exception:
        logging.info(repr(compute_nodes))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[compute_nodes])
        raise

    instances.create_column(instances_launched_on)
Example 33
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    try:
        phy_hosts.create()
        phy_interfaces.create()
        phy_deployments.create()
        phy_pxe_ips.create()
    except Exception:
        logging.info(repr(phy_hosts))
        logging.exception('Exception while creating table')
        meta.drop_all(tables=[phy_hosts])
        raise
Example 34
def serve_wsgi(cls, conf=None):
    try:
        service = cls.create(conf)
    except Exception:
        logging.exception('in WsgiService.create()')
        raise
    finally:
        # After we've loaded up all our dynamic bits, check
        # whether we should print help
        flags.DEFINE_flag(flags.HelpFlag())
        flags.DEFINE_flag(flags.HelpshortFlag())
        flags.DEFINE_flag(flags.HelpXMLFlag())
        FLAGS.ParseNewFlags()

    service.start()

    return service
Example 35
    def start(self, interval, now=True):
        self._running = True
        done = event.Event()

        def _inner():
            if not now:
                greenthread.sleep(interval)
            try:
                while self._running:
                    self.f(*self.args, **self.kw)
                    greenthread.sleep(interval)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                logging.exception('in looping call')
                done.send_exception(*sys.exc_info())
                return
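The except LoopingCallDone branch above is the loop's exit path: the periodic function raises LoopingCallDone, optionally carrying a value, and _inner forwards that value through the done event rather than treating it as an error. A minimal, eventlet-free sketch of the same control flow; the retvalue attribute mirrors the example above, everything else is illustrative:

class LoopingCallDone(Exception):
    """Raised by the periodic function to break out of the loop cleanly."""

    def __init__(self, retvalue=True):
        super(LoopingCallDone, self).__init__()
        self.retvalue = retvalue


def loop(func):
    """Call func repeatedly until it raises LoopingCallDone; return its value."""
    try:
        while True:
            func()
    except LoopingCallDone as e:
        return e.retvalue


calls = []


def tick():
    calls.append(len(calls))
    if len(calls) == 3:
        raise LoopingCallDone(retvalue=calls)


print(loop(tick))  # -> [0, 1, 2]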
Example 36
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    tables = [auth_tokens,
              instances, key_pairs, networks, fixed_ips, floating_ips,
              quotas, security_groups, security_group_inst_assoc,
              security_group_rules, services, users, projects,
              user_project_association, user_project_role_association,
              user_role_association, volumes, export_devices]
    for table in tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=tables)
            raise
Example 37
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    tables = [
        auth_tokens, instances, key_pairs, networks, fixed_ips, floating_ips,
        quotas, security_groups, security_group_inst_assoc,
        security_group_rules, services, users, projects,
        user_project_association, user_project_role_association,
        user_role_association, volumes, export_devices
    ]
    for table in tables:
        try:
            table.create()
        except Exception:
            logging.info(repr(table))
            logging.exception('Exception while creating table')
            meta.drop_all(tables=tables)
            raise
Example 38
    def _receive(self, message_data, message):
        """Magically looks for a method on the proxy object and calls it.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        LOG.debug(_('received %s') % message_data)
        msg_id = message_data.pop('_msg_id', None)

        ctxt = _unpack_context(message_data)

        method = message_data.get('method')
        args = message_data.get('args', {})
        message.ack()
        if not method:
            # NOTE(vish): we may not want to ack here, but that means that bad
            #             messages stay in the queue indefinitely, so for now
            #             we just log the message and send an error string
            #             back to the caller
            LOG.warn(_('no method for message: %s') % message_data)
            msg_reply(msg_id, _('No method for message: %s') % message_data)
            return

        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        try:
            rval = node_func(context=ctxt, **node_args)
            if msg_id:
                msg_reply(msg_id, rval, None)
        except Exception as e:
            logging.exception('Exception during message handling')
            if msg_id:
                msg_reply(msg_id, None, sys.exc_info())
        return
Example 39
File: rpc.py Project: termie/pupa
    def _receive(self, message_data, message):
        """Magically looks for a method on the proxy object and calls it.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        LOG.debug(_('received %s') % message_data)
        msg_id = message_data.pop('_msg_id', None)

        ctxt = _unpack_context(message_data)

        method = message_data.get('method')
        args = message_data.get('args', {})
        message.ack()
        if not method:
            # NOTE(vish): we may not want to ack here, but that means that bad
            #             messages stay in the queue indefinitely, so for now
            #             we just log the message and send an error string
            #             back to the caller
            LOG.warn(_('no method for message: %s') % message_data)
            msg_reply(msg_id, _('No method for message: %s') % message_data)
            return

        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        try:
            rval = node_func(context=ctxt, **node_args)
            if msg_id:
                msg_reply(msg_id, rval, None)
        except Exception as e:
            logging.exception('Exception during message handling')
            if msg_id:
                msg_reply(msg_id, None, sys.exc_info())
        return
Example 40
def upgrade(migrate_engine):
    # Upgrade operations go here
    # Don't create your own engine; bind migrate_engine
    # to your metadata
    meta.bind = migrate_engine
    try:
        instance_types.create()
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while creating instance_types table')
        raise

    # Here are the old static instance types
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)
    }
    try:
        i = instance_types.insert()
        for name, values in INSTANCE_TYPES.iteritems():
            # FIXME(kpepple) should we be seeding created_at / updated_at ?
            # now = datetime.datetime.utcnow()
            i.execute({
                'name': name,
                'memory_mb': values["memory_mb"],
                'vcpus': values["vcpus"],
                'deleted': False,
                'local_gb': values["local_gb"],
                'flavorid': values["flavorid"]
            })
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while seeding instance_types table')
        raise
Example 41
def serve(*services):
    try:
        if not services:
            services = [Service.create()]
    except Exception:
        logging.exception('in Service.create()')
        raise
    finally:
        # After we've loaded up all our dynamic bits, check
        # whether we should print help
        flags.DEFINE_flag(flags.HelpFlag())
        flags.DEFINE_flag(flags.HelpshortFlag())
        flags.DEFINE_flag(flags.HelpXMLFlag())
        FLAGS.ParseNewFlags()

    name = '_'.join(x.binary for x in services)
    logging.debug(_("Serving %s"), name)
    logging.debug(_("Full set of FLAGS:"))
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        logging.debug("%(flag)s : %(flag_get)s" % locals())

    for x in services:
        x.start()
Example 42
def serve(*services):
    try:
        if not services:
            services = [Service.create()]
    except Exception:
        logging.exception('in Service.create()')
        raise
    finally:
        # After we've loaded up all our dynamic bits, check
        # whether we should print help
        flags.DEFINE_flag(flags.HelpFlag())
        flags.DEFINE_flag(flags.HelpshortFlag())
        flags.DEFINE_flag(flags.HelpXMLFlag())
        FLAGS.ParseNewFlags()

    name = '_'.join(x.binary for x in services)
    logging.debug(_('Serving %s'), name)
    logging.debug(_('Full set of FLAGS:'))
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        logging.debug('%(flag)s : %(flag_get)s' % locals())

    for x in services:
        x.start()
Example 43
    def _live_migration_common_check(self, context, instance_ref, dest,
                                     block_migration):
        """Live migration common check routine.

        The checks below follow
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if True, check for block_migration.

        """

        # Checking shared storage connectivity
        # if block migration, instances_paths should not be on shared storage.
        try:
            self.mounted_on_same_shared_storage(context, instance_ref, dest)
            if block_migration:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)
        except exception.FileNotFound:
            if not block_migration:
                src = instance_ref['host']
                ipath = FLAGS.instances_path
                logging.error(
                    _("Cannot confirm tmpfile at %(ipath)s is on "
                      "same shared storage between %(src)s "
                      "and %(dest)s.") % locals())
                raise

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host (where instance was launched) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(
                context, instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()

        # Checking hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            rpc.call(
                context, db.queue_get_for(context, FLAGS.compute_topic, dest),
                {
                    "method": 'compare_cpu',
                    "args": {
                        'cpu_info': oservice_ref['cpu_info']
                    }
                })

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(
                _("host %(dest)s is not compatible with "
                  "original host %(src)s.") % locals())
            raise
Example 44
    def _live_migration_common_check(self, context, instance_ref, dest,
                                     block_migration):
        """Live migration common check routine.

        The checks below follow
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if True, check for block_migration.

        """

        # Checking shared storage connectivity
        # if block migration, instances_paths should not be on shared storage.
        try:
            self.mounted_on_same_shared_storage(context, instance_ref, dest)
            if block_migration:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)
        except exception.FileNotFound:
            if not block_migration:
                src = instance_ref['host']
                ipath = FLAGS.instances_path
                logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                                "same shared storage between %(src)s "
                                "and %(dest)s.") % locals())
                raise

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host (where instance was launched) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(context,
                                           instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()

        # Checking hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            rpc.call(context,
                     db.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": 'compare_cpu',
                      "args": {'cpu_info': oservice_ref['cpu_info']}})

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(_("host %(dest)s is not compatible with "
                                "original host %(src)s.") % locals())
            raise