Example #1
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    try:
        compute_nodes.create_column(disk_available_least)
    except Exception:
        logging.error(_("progress column not added to compute_nodes table"))
        raise
Example #2
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    try:
        compute_nodes.create_column(disk_available_least)
    except Exception:
        logging.error(_("progress column not added to compute_nodes table"))
        raise
Example #3
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                logging.debug(_("The service database object disappeared, "
                                "Recreating it."))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            db.service_update(ctxt,
                             self.service_id,
                             {'report_count': service_ref['report_count'] + 1})

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, "model_disconnected", False):
                self.model_disconnected = False
                logging.error(_("Recovered model server connection!"))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, "model_disconnected", False):
                self.model_disconnected = True
                logging.exception(_("model server went away"))
Example #4
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        return

    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    vifs = Table('virtual_interfaces', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    floating_ips = Table('floating_ips', meta, autoload=True)

    try:
        ForeignKeyConstraint(columns=[fixed_ips.c.network_id],
                             refcolumns=[networks.c.id]).create()

        ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
                             refcolumns=[vifs.c.id]).create()

        ForeignKeyConstraint(columns=[fixed_ips.c.instance_id],
                             refcolumns=[instances.c.id]).create()

        ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id],
                             refcolumns=[fixed_ips.c.id]).create()
    except Exception:
        logging.error(_("foreign key constraint couldn't be added"))
        raise
Example #5
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    instance_actions = _get_table('instance_actions')
    instances = _get_table('instances')
    uuid_column = Column('instance_uuid', String(36),
                         ForeignKey('instances.uuid'))
    uuid_column = Column('instance_uuid', String(36))
    uuid_column.create(instance_actions)

    try:
        instance_actions.update().values(
            instance_uuid=select(
                [instances.c.uuid],
                instances.c.id == instance_actions.c.instance_id)
        ).execute()
    except Exception:
        uuid_column.drop()
        raise

    try:
        fkey_name = list(instance_actions.c.instance_id.foreign_keys)[0].constraint.name
        ForeignKeyConstraint(columns=[instance_actions.c.instance_id],
                             refcolumns=[instances.c.id],
                             name=fkey_name).drop()
    except Exception:
        logging.error(_("foreign key constraint couldn't be removed"))
        raise

    instance_actions.c.instance_id.drop()
Example #6
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    instance_actions = _get_table("instance_actions")
    instances = _get_table("instances")
    uuid_column = Column("instance_uuid", String(36), ForeignKey("instances.uuid"))
    uuid_column = Column("instance_uuid", String(36))
    uuid_column.create(instance_actions)

    try:
        instance_actions.update().values(
            instance_uuid=select([instances.c.uuid], instances.c.id == instance_actions.c.instance_id)
        ).execute()
    except Exception:
        uuid_column.drop()
        raise

    if not dialect.startswith("sqlite"):
        fkeys = list(instance_actions.c.instance_id.foreign_keys)
        if fkeys:
            try:
                fkey_name = fkeys[0].constraint.name
                ForeignKeyConstraint(
                    columns=[instance_actions.c.instance_id], refcolumns=[instances.c.id], name=fkey_name
                ).drop()
            except Exception:
                logging.error(_("foreign key constraint couldn't be removed"))
                raise

    instance_actions.c.instance_id.drop()
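The two migrations above share the same shape: add the new instance_uuid column, backfill it from instances.uuid with a correlated subquery, then drop the old instance_id foreign key. The sketch below reproduces just the backfill step with plain SQLAlchemy Core against an in-memory SQLite database; it uses the SQLAlchemy 1.4+ select()/scalar_subquery() syntax rather than the legacy select([...]) form in the migration, and the table layout is illustrative only.

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, select)

engine = create_engine('sqlite://')
meta = MetaData()
instances = Table('instances', meta,
                  Column('id', Integer, primary_key=True),
                  Column('uuid', String(36)))
instance_actions = Table('instance_actions', meta,
                         Column('id', Integer, primary_key=True),
                         Column('instance_id', Integer),
                         Column('instance_uuid', String(36)))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(instances.insert(), [{'id': 1, 'uuid': 'aaa-111'}])
    conn.execute(instance_actions.insert(), [{'id': 1, 'instance_id': 1}])

    # Correlated subquery: copy instances.uuid into each instance_actions
    # row whose instance_id matches, as the migration's update() does.
    conn.execute(instance_actions.update().values(
        instance_uuid=select(instances.c.uuid)
        .where(instances.c.id == instance_actions.c.instance_id)
        .scalar_subquery()))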
Example #7
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                logging.debug(
                    _('The service database object disappeared, '
                      'Recreating it.'))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            db.service_update(
                ctxt, self.service_id,
                {'report_count': service_ref['report_count'] + 1})

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                logging.error(_('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                logging.exception(_('model server went away'))
Example #8
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = FLAGS.node_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                logging.debug(_('The service database object disappeared, '
                                'Recreating it.'))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone

            db.service_update(ctxt,
                             self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                logging.error(_('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                logging.exception(_('model server went away'))
Example #9
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        networks.create_column(priority)
    except Exception:
        logging.error(_("priority column not added to networks table"))
        raise
Example #10
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        networks.create_column(priority)
    except Exception:
        logging.error(_("priority column not added to networks table"))
        raise
Example #11
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        instances.create_column(progress)
    except Exception:
        logging.error(_("progress column not added to instances table"))
        raise
Example #12
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    try:
        instances.create_column(progress)
    except Exception:
        logging.error(_("progress column not added to instances table"))
        raise
Example #13
    def _xvp_start(self):
        if self._xvp_check_running():
            return
        logging.debug(_('Starting xvp'))
        try:
            utils.execute('xvp', '-p', FLAGS.console_xvp_pid, '-c',
                          FLAGS.console_xvp_conf, '-l', FLAGS.console_xvp_log)
        except exception.ProcessExecutionError, err:
            logging.error(_('Error starting xvp: %s') % err)
Example #14
File: xvp.py Project: yosh/nova
    def _xvp_start(self):
        if self._xvp_check_running():
            return
        logging.debug(_("Starting xvp"))
        try:
            utils.execute('xvp -p %s -c %s -l %s' %
                          (FLAGS.console_xvp_pid,
                           FLAGS.console_xvp_conf,
                           FLAGS.console_xvp_log))
        except exception.ProcessExecutionError, err:
            logging.error(_("Error starting xvp: %s") % err)
Example #15
    def _xvp_start(self):
        if self._xvp_check_running():
            return
        logging.debug(_('Starting xvp'))
        try:
            utils.execute('xvp',
                          '-p', FLAGS.console_xvp_pid,
                          '-c', FLAGS.console_xvp_conf,
                          '-l', FLAGS.console_xvp_log)
        except exception.ProcessExecutionError, err:
            logging.error(_('Error starting xvp: %s') % err)
Example #16
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # grab tables and (column for dropping later)
    networks = Table('networks', meta, autoload=True)

    try:
        networks.create_column(priority)
    except Exception:
        logging.error(_("priority column not added to networks table"))
        raise
Example #17
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # load instances for fk
    instances = Table('instances', meta, autoload=True)

    # create instance_info_caches table
    try:
        instance_info_caches.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(instance_info_caches))
        raise
Example #18
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # load instances for fk
    instances = Table('instances', meta, autoload=True)

    # create instance_info_caches table
    try:
        instance_info_caches.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(instance_info_caches))
        raise
Example #19
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name

    # drop foreignkey if not sqlite
    try:
        if not dialect.startswith('sqlite'):
            ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
                                 refcolumns=[virtual_interfaces.c.id]).drop()
    except Exception:
        logging.error(_("foreign key constraint couldn't be dropped"))
        raise
Example #20
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # load instances for fk
    instances = Table('projects', meta, autoload=True)

    # create dns_domains table
    try:
        dns_domains.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(dns_domains))
        raise
Example #21
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""

        tid = self.db.volume_get_iscsi_target_num(context, volume_id)
        try:
            self.tgtadm.show_target(tid)
        except exception.ProcessExecutionError, e:
            # Instances remount read-only in this case.
            # /etc/init.d/iscsitarget restart and rebooting nova-volume
            # is better since ensure_export() works at boot time.
            logging.error(_("Cannot confirm exported volume " "id:%(volume_id)s.") % locals())
            raise
Example #22
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""

        tid = self.db.volume_get_iscsi_target_num(context, volume_id)
        try:
            self.tgtadm.show_target(tid)
        except exception.ProcessExecutionError, e:
            # Instances remount read-only in this case.
            # /etc/init.d/iscsitarget restart and rebooting nova-volume
            # is better since ensure_export() works at boot time.
            logging.error(_("Cannot confirm exported volume "
                            "id:%(volume_id)s.") % locals())
            raise
Example #23
    def log_error(self, exception):
        """Something went wrong. Check to see if zone should be
           marked as offline."""
        self.last_exception = exception
        self.last_exception_time = utils.utcnow()
        api_url = self.api_url
        logging.warning(_("'%(exception)s' error talking to " "zone %(api_url)s") % locals())

        max_errors = FLAGS.zone_failures_to_offline
        self.attempt += 1
        if self.attempt >= max_errors:
            self.is_active = False
            logging.error(
                _("No answer from zone %(api_url)s " "after %(max_errors)d " "attempts. Marking inactive.") % locals()
            )
Example #24
    def log_error(self, exception):
        """Something went wrong. Check to see if zone should be
           marked as offline."""
        self.last_exception = exception
        self.last_exception_time = utils.utcnow()
        api_url = self.api_url
        logging.warning(_("'%(exception)s' error talking to "
                          "zone %(api_url)s") % locals())

        max_errors = FLAGS.zone_failures_to_offline
        self.attempt += 1
        if self.attempt >= max_errors:
            self.is_active = False
            logging.error(_("No answer from zone %(api_url)s "
                            "after %(max_errors)d "
                            "attempts. Marking inactive.") % locals())
Example #25
    def _get_snapshot(self, instance):
        #TODO(sirp): Add quiesce and VSS locking support when Windows support
        # is added

        logging.debug(_("Starting snapshot for VM %s"), instance)
        vm_ref = VMHelper.lookup(self._session, instance.name)

        label = "%s-snapshot" % instance.name
        try:
            template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
                self._session, instance.id, vm_ref, label)
            return template_vm_ref, template_vdi_uuids
        except self.XenAPI.Failure, exc:
            logging.error(
                _("Unable to Snapshot %(vm_ref)s: %(exc)s") % locals())
            return
Example #26
    def _get_snapshot(self, instance):
        #TODO(sirp): Add quiesce and VSS locking support when Windows support
        # is added

        logging.debug(_("Starting snapshot for VM %s"), instance)
        vm_ref = VMHelper.lookup(self._session, instance.name)

        label = "%s-snapshot" % instance.name
        try:
            template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
                self._session, instance.id, vm_ref, label)
            return template_vm_ref, template_vdi_uuids
        except self.XenAPI.Failure, exc:
            logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
                    % locals())
            return
Example #27
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name

    # grab tables
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)

    # add foreignkey if not sqlite
    try:
        if not dialect.startswith('sqlite'):
            ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
                                 refcolumns=[virtual_interfaces.c.id]).create()
    except Exception:
        logging.error(_("foreign key constraint couldn't be added"))
        raise
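Several of these migrations guard the constraint work with a dialect check because SQLite cannot add or drop foreign keys on an existing table. A tiny sketch of that guard, with an illustrative database URL:

from sqlalchemy import create_engine

engine = create_engine('sqlite:///example.db')  # illustrative URL
dialect = engine.url.get_dialect().name
if dialect.startswith('sqlite'):
    # mirror the migrations above: skip FK changes entirely on SQLite
    print('skipping foreign key constraint changes on %s' % dialect)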
Example #28
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.

        At first, dest host creates temp file, and src host can see
        it if they mounts same shared storage. Then src host erase it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref['host']
        dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

        try:
            # create tmpfile at dest host
            filename = rpc.call(context, dst_t,
                                {"method": 'create_shared_storage_test_file'})

            # make sure existence at src host.
            rpc.call(
                context, src_t, {
                    "method": 'check_shared_storage_test_file',
                    "args": {
                        'filename': filename
                    }
                })

        except rpc.RemoteError:
            ipath = FLAGS.instances_path
            logging.error(
                _("Cannot confirm tmpfile at %(ipath)s is on "
                  "same shared storage between %(src)s "
                  "and %(dest)s.") % locals())
            raise

        finally:
            rpc.call(
                context, dst_t, {
                    "method": 'cleanup_shared_storage_test_file',
                    "args": {
                        'filename': filename
                    }
                })
Example #29
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        return

    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    vifs = Table('virtual_interfaces', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    floating_ips = Table('floating_ips', meta, autoload=True)

    try:
        fkeys = list(fixed_ips.c.network_id.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            ForeignKeyConstraint(columns=[fixed_ips.c.network_id],
                                 refcolumns=[networks.c.id],
                                 name=fkey_name).drop()

        fkeys = list(fixed_ips.c.virtual_interface_id.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
                                 refcolumns=[vifs.c.id],
                                 name=fkey_name).drop()

        fkeys = list(fixed_ips.c.instance_id.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            ForeignKeyConstraint(columns=[fixed_ips.c.instance_id],
                                 refcolumns=[instances.c.id],
                                 name=fkey_name).drop()

        fkeys = list(floating_ips.c.fixed_ip_id.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id],
                                 refcolumns=[fixed_ips.c.id],
                                 name=fkey_name).drop()

    except Exception:
        logging.error(_("foreign key constraint couldn't be removed"))
        raise
Example #30
    def test_instance_update_state(self):
        # TODO(termie): what is this code even testing?
        def instance(num):
            return {
                'reservation_id': 'r-1',
                'instance_id': 'i-%s' % num,
                'image_id': 'ami-%s' % num,
                'private_dns_name': '10.0.0.%s' % num,
                'dns_name': '10.0.0%s' % num,
                'ami_launch_index': str(num),
                'instance_type': 'fake',
                'availability_zone': 'fake',
                'key_name': None,
                'kernel_id': 'fake',
                'ramdisk_id': 'fake',
                'groups': ['default'],
                'product_codes': None,
                'state': 0x01,
                'user_data': ''}
        rv = self.cloud._format_describe_instances(self.context)
        logging.error(str(rv))
        self.assertEqual(len(rv['reservationSet']), 0)
Example #31
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # grab tables and (column for dropping later)
    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    c = instances.columns['mac_address']

    # add interface column to networks table
    # values will have to be set manually before running nova
    try:
        networks.create_column(interface)
    except Exception:
        logging.error(_("interface column not added to networks table"))
        raise

    # create virtual_interfaces table
    try:
        virtual_interfaces.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
        raise

    # add virtual_interface_id column to fixed_ips table
    try:
        fixed_ips.create_column(virtual_interface_id)
    except Exception:
        logging.error(_("VIF column not added to fixed_ips table"))
        raise

    # populate the virtual_interfaces table
    # extract data from existing instance and fixed_ip tables
    s = select(
        [instances.c.id, instances.c.mac_address, fixed_ips.c.network_id],
        fixed_ips.c.instance_id == instances.c.id)
    keys = ('instance_id', 'address', 'network_id')
    join_list = [dict(zip(keys, row)) for row in s.execute()]
    logging.debug(_("join list for moving mac_addresses |%s|"), join_list)

    # insert data into the table
    if join_list:
        i = virtual_interfaces.insert()
        i.execute(join_list)

    # populate the fixed_ips virtual_interface_id column
    s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
               fixed_ips.c.instance_id != None)

    for row in s.execute():
        m = select([virtual_interfaces.c.id]).\
            where(virtual_interfaces.c.instance_id == row['instance_id']).\
            as_scalar()
        u = fixed_ips.update().values(virtual_interface_id=m).\
            where(fixed_ips.c.id == row['id'])
        u.execute()

    # drop the mac_address column from instances
    c.drop()
Example #32
def _run_wsgi(paste_config_file, apis):
    logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
    apps = []
    for api in apis:
        config = wsgi.load_paste_configuration(paste_config_file, api)
        if config is None:
            logging.debug(_("No paste configuration for app: %s"), api)
            continue
        logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
        logging.info(_("Running %s API"), api)
        app = wsgi.load_paste_app(paste_config_file, api)
        apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
                     getattr(FLAGS, "%s_listen" % api)))
    if len(apps) == 0:
        logging.error(_("No known API applications configured in %s."),
                      paste_config_file)
        return

    server = wsgi.Server()
    for app in apps:
        server.start(*app)
    return server
Example #33
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # grab tables and (column for dropping later)
    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    c = instances.columns['mac_address']

    # add interface column to networks table
    # values will have to be set manually before running nova
    try:
        networks.create_column(interface)
    except Exception:
        logging.error(_("interface column not added to networks table"))
        raise

    # create virtual_interfaces table
    try:
        virtual_interfaces.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
        raise

    # add virtual_interface_id column to fixed_ips table
    try:
        fixed_ips.create_column(virtual_interface_id)
    except Exception:
        logging.error(_("VIF column not added to fixed_ips table"))
        raise

    # populate the virtual_interfaces table
    # extract data from existing instance and fixed_ip tables
    s = select([instances.c.id, instances.c.mac_address,
                fixed_ips.c.network_id],
               fixed_ips.c.instance_id == instances.c.id)
    keys = ('instance_id', 'address', 'network_id')
    join_list = [dict(zip(keys, row)) for row in s.execute()]
    logging.debug(_("join list for moving mac_addresses |%s|"), join_list)

    # insert data into the table
    if join_list:
        i = virtual_interfaces.insert()
        i.execute(join_list)

    # populate the fixed_ips virtual_interface_id column
    s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
               fixed_ips.c.instance_id != None)

    for row in s.execute():
        m = select([virtual_interfaces.c.id]).\
            where(virtual_interfaces.c.instance_id == row['instance_id']).\
            as_scalar()
        u = fixed_ips.update().values(virtual_interface_id=m).\
            where(fixed_ips.c.id == row['id'])
        u.execute()

    # drop the mac_address column from instances
    c.drop()
Example #34
    def snapshot(self, instance, image_id):
        """ Create snapshot from a running VM instance

        :param instance: instance to be snapshotted
        :param image_id: id of image to upload to

        Steps involved in a XenServer snapshot:

        1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
            creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
            Snapshot VHD

        2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
            a 'base-copy' VDI.  The base_copy is immutable and may be chained
            with other base_copies.  If chained, the base_copies
            coalesce together, so, we must wait for this coalescing to occur to
            get a stable representation of the data on disk.

        3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
            that will bundle the VHDs together and then push the bundle into
            Glance.
        """

        #TODO(sirp): Add quiesce and VSS locking support when Windows support
        # is added

        logging.debug(_("Starting snapshot for VM %s"), instance)
        vm_ref = VMHelper.lookup(self._session, instance.name)

        label = "%s-snapshot" % instance.name
        try:
            template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
                self._session, instance.id, vm_ref, label)
        except self.XenAPI.Failure, exc:
            logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
                    % locals())
            return
Example #35
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.

        At first, dest host creates temp file, and src host can see
        it if they mounts same shared storage. Then src host erase it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref['host']
        dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
        src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

        try:
            # create tmpfile at dest host
            filename = rpc.call(context, dst_t,
                                {"method": 'create_shared_storage_test_file'})

            # make sure existence at src host.
            rpc.call(context, src_t,
                     {"method": 'check_shared_storage_test_file',
                      "args": {'filename': filename}})

        except rpc.RemoteError:
            ipath = FLAGS.instances_path
            logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                            "same shared storage between %(src)s "
                            "and %(dest)s.") % locals())
            raise

        finally:
            rpc.call(context, dst_t,
                     {"method": 'cleanup_shared_storage_test_file',
                      "args": {'filename': filename}})
Example #36
def downgrade(migrate_engine):
    try:
        dns_domains.drop()
    except Exception:
        logging.error(_("dns_domains table not dropped"))
        raise
Example #37
def downgrade(migrate_engine):
    logging.error(_("Can't downgrade without losing data"))
    raise Exception
Example #38
def downgrade(migrate_engine):
    try:
        instance_info_caches.drop()
    except Exception:
        logging.error(_("instance_info_caches tables not dropped"))
        raise
Example #39
    def _live_migration_common_check(self, context, instance_ref, dest,
                                     block_migration):
        """Live migration common check routine.

        Below checkings are followed by
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration if True, check for block_migration.

        """

        # Checking shared storage connectivity
        # if block migration, instances_paths should not be on shared storage.
        try:
            self.mounted_on_same_shared_storage(context, instance_ref, dest)
            if block_migration:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)
        except exception.FileNotFound:
            if not block_migration:
                src = instance_ref['host']
                ipath = FLAGS.instances_path
                logging.error(
                    _("Cannot confirm tmpfile at %(ipath)s is on "
                      "same shared storage between %(src)s "
                      "and %(dest)s.") % locals())
                raise

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host( where instance was launched at) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(
                context, instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()

        # Checkng hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            rpc.call(
                context, db.queue_get_for(context, FLAGS.compute_topic, dest),
                {
                    "method": 'compare_cpu',
                    "args": {
                        'cpu_info': oservice_ref['cpu_info']
                    }
                })

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(
                _("host %(dest)s is not compatible with "
                  "original host %(src)s.") % locals())
            raise
Example #40
def downgrade(migrate_engine):
    logging.error(_("Can't downgrade without losing data"))
    raise Exception
Example #41
def downgrade(migrate_engine):
    try:
        instance_info_caches.drop()
    except Exception:
        logging.error(_("instance_info_caches tables not dropped"))
        raise
Example #42
    def _live_migration_common_check(self, context, instance_ref, dest,
                                     block_migration):
        """Live migration common check routine.

        Below checkings are followed by
        http://wiki.libvirt.org/page/TodoPreMigrationChecks

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration if True, check for block_migration.

        """

        # Checking shared storage connectivity
        # if block migration, instances_paths should not be on shared storage.
        try:
            self.mounted_on_same_shared_storage(context, instance_ref, dest)
            if block_migration:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)
        except exception.FileNotFound:
            if not block_migration:
                src = instance_ref['host']
                ipath = FLAGS.instances_path
                logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                                "same shared storage between %(src)s "
                                "and %(dest)s.") % locals())
                raise

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host( where instance was launched at) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(context,
                                           instance_ref['launched_on'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
        dest_hypervisor = dservice_ref['hypervisor_type']
        if orig_hypervisor != dest_hypervisor:
            raise exception.InvalidHypervisorType()

        # Checkng hypervisor version.
        orig_hypervisor = oservice_ref['hypervisor_version']
        dest_hypervisor = dservice_ref['hypervisor_version']
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            rpc.call(context,
                     db.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": 'compare_cpu',
                      "args": {'cpu_info': oservice_ref['cpu_info']}})

        except rpc.RemoteError:
            src = instance_ref['host']
            logging.exception(_("host %(dest)s is not compatible with "
                                "original host %(src)s.") % locals())
            raise
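A minimal sketch of the hypervisor compatibility rules enforced above (same hypervisor type, and the destination's hypervisor version must be at least the source's), with the rpc and database lookups replaced by plain dictionaries and the exception classes defined locally for illustration:

class InvalidHypervisorType(Exception):
    pass


class DestinationHypervisorTooOld(Exception):
    pass


def check_hypervisor_compatibility(src_node, dest_node):
    """src_node/dest_node are dicts shaped like the compute_node records."""
    if src_node['hypervisor_type'] != dest_node['hypervisor_type']:
        raise InvalidHypervisorType()
    if src_node['hypervisor_version'] > dest_node['hypervisor_version']:
        raise DestinationHypervisorTooOld()


# passes: same type, destination version is newer
check_hypervisor_compatibility(
    {'hypervisor_type': 'QEMU', 'hypervisor_version': 8000},
    {'hypervisor_type': 'QEMU', 'hypervisor_version': 8002})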