def report_state(self):
    """Update the state of this service in the datastore."""
    admin_ctxt = context.get_admin_context()
    my_zone = FLAGS.node_availability_zone
    try:
        try:
            service_ref = db.service_get(admin_ctxt, self.service_id)
        except exception.NotFound:
            # Row vanished (e.g. manual cleanup) -- recreate and refetch.
            logging.debug(_('The service database object disappeared, '
                            'Recreating it.'))
            self._create_service_ref(admin_ctxt)
            service_ref = db.service_get(admin_ctxt, self.service_id)

        updates = {'report_count': service_ref['report_count'] + 1}
        if my_zone != service_ref['availability_zone']:
            updates['availability_zone'] = my_zone
        db.service_update(admin_ctxt, self.service_id, updates)

        # TODO(termie): make this pattern be more elegant.
        if getattr(self, 'model_disconnected', False):
            self.model_disconnected = False
            logging.error(_('Recovered model server connection!'))

    # TODO(vish): this should probably only catch connection errors
    except Exception:  # pylint: disable=W0702
        if not getattr(self, 'model_disconnected', False):
            self.model_disconnected = True
            logging.exception(_('model server went away'))
def upgrade(migrate_engine):
    """Add the ``progress`` column to the instances table.

    ``meta``, ``instances`` and ``progress`` are module-level objects
    defined elsewhere in this migration script.
    """
    meta.bind = migrate_engine
    try:
        instances.create_column(progress)
    except Exception:
        # Log for the operator, then re-raise so the migration is
        # recorded as failed rather than silently skipped.
        logging.error(_("progress column not added to instances table"))
        raise
def upgrade(migrate_engine):
    """Add the ``priority`` column to the networks table.

    ``meta``, ``networks`` and ``priority`` are module-level objects
    defined elsewhere in this migration script.
    """
    meta.bind = migrate_engine
    try:
        networks.create_column(priority)
    except Exception:
        # Log for the operator, then re-raise so the migration is
        # recorded as failed rather than silently skipped.
        logging.error(_("priority column not added to networks table"))
        raise
def _xvp_start(self):
    """Start the xvp console proxy daemon if it is not already running.

    Failures to launch are logged but not raised; the caller is
    expected to retry on the next pass.
    """
    if self._xvp_check_running():
        return
    logging.debug(_('Starting xvp'))
    try:
        utils.execute('xvp',
                      '-p', FLAGS.console_xvp_pid,
                      '-c', FLAGS.console_xvp_conf,
                      '-l', FLAGS.console_xvp_log)
    # FIX: `except X, err` is Python-2-only syntax (removed in Py3);
    # `except X as err` is valid on 2.6+ and 3.x alike.
    except exception.ProcessExecutionError as err:
        logging.error(_('Error starting xvp: %s') % err)
def downgrade(migrate_engine):
    """Drop the fixed_ips -> virtual_interfaces foreign key.

    sqlite cannot ALTER constraints, so there is nothing to drop there.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # the constraint was never created on sqlite
        return
    try:
        ForeignKeyConstraint(
            columns=[fixed_ips.c.virtual_interface_id],
            refcolumns=[virtual_interfaces.c.id]).drop()
    except Exception:
        logging.error(_("foreign key constraint couldn't be dropped"))
        raise
def upgrade(migrate_engine):
    """Create the instance_info_caches table."""
    meta.bind = migrate_engine
    # reflect instances into the metadata so the new table's foreign
    # key can resolve; the returned Table object itself is not needed
    Table('instances', meta, autoload=True)
    try:
        instance_info_caches.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(instance_info_caches))
        raise
def check_for_export(self, context, volume_id):
    """Make sure volume is exported.

    :raises: ProcessExecutionError if the iSCSI target cannot be shown.
    """
    tid = self.db.volume_get_iscsi_target_num(context, volume_id)
    try:
        self.tgtadm.show_target(tid)
    # FIX: `except X, e` is Python-2-only syntax (removed in Py3) and
    # `e` was never used -- drop the binding entirely.
    except exception.ProcessExecutionError:
        # Instances remount read-only in this case.
        # /etc/init.d/iscsitarget restart and rebooting engine-volume
        # is better since ensure_export() works at boot time.
        logging.error(_("Cannot confirm exported volume "
                        "id:%(volume_id)s.") % locals())
        raise
def check_for_export(self, context, volume_id):
    """Make sure volume is exported.

    :raises: ProcessExecutionError if the iSCSI target cannot be shown.
    """
    tid = self.db.volume_get_iscsi_target_num(context, volume_id)
    try:
        self.tgtadm.show_target(tid)
    # FIX: `except X, e` is Python-2-only syntax (removed in Py3) and
    # `e` was never used -- drop the binding entirely.
    except exception.ProcessExecutionError:
        # Instances remount read-only in this case.
        # /etc/init.d/iscsitarget restart and rebooting engine-volume
        # is better since ensure_export() works at boot time.
        logging.error(_("Cannot confirm exported volume "
                        "id:%(volume_id)s.") % locals())
        raise
def log_error(self, exception):
    """Record a failed call to a child zone and mark the zone
    inactive once FLAGS.zone_failures_to_offline attempts have
    failed in a row.
    """
    self.last_exception = exception
    self.last_exception_time = utils.utcnow()
    self.attempt += 1
    api_url = self.api_url
    logging.warning(_("'%(exception)s' error talking to "
                      "zone %(api_url)s")
                    % {'exception': exception, 'api_url': api_url})
    max_errors = FLAGS.zone_failures_to_offline
    if self.attempt >= max_errors:
        self.is_active = False
        logging.error(_("No answer from zone %(api_url)s "
                        "after %(max_errors)d "
                        "attempts. Marking inactive.")
                      % {'api_url': api_url, 'max_errors': max_errors})
def upgrade(migrate_engine):
    """Add a foreign key from fixed_ips.virtual_interface_id to
    virtual_interfaces.id.  Skipped on sqlite, which cannot ALTER
    constraints.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    # reflect both tables regardless of dialect (keeps metadata state
    # identical on every backend)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
    if dialect.startswith('sqlite'):
        return
    try:
        ForeignKeyConstraint(
            columns=[fixed_ips.c.virtual_interface_id],
            refcolumns=[virtual_interfaces.c.id]).create()
    except Exception:
        logging.error(_("foreign key constraint couldn't be added"))
        raise
def downgrade(migrate_engine):
    """Re-add the virtual_interfaces -> networks foreign key that the
    upgrade removed.  Skipped on sqlite, where the upgrade was also a
    no-op.
    """
    # Operations to reverse the above upgrade go here.
    meta.bind = migrate_engine
    if migrate_engine.url.get_dialect().name.startswith('sqlite'):
        return
    nets = Table('networks', meta, autoload=True)
    ifaces = Table('virtual_interfaces', meta, autoload=True)
    try:
        ForeignKeyConstraint(columns=[ifaces.c.network_id],
                             refcolumns=[nets.c.id]).create()
    except Exception:
        logging.error(_("foreign key constraint couldn't be added"))
        raise
def upgrade(migrate_engine):
    """Add a foreign key from fixed_ips.virtual_interface_id to
    virtual_interfaces.id.  Skipped on sqlite, which cannot ALTER
    constraints.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    # reflect both tables regardless of dialect (keeps metadata state
    # identical on every backend)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
    if dialect.startswith('sqlite'):
        return
    try:
        ForeignKeyConstraint(
            columns=[fixed_ips.c.virtual_interface_id],
            refcolumns=[virtual_interfaces.c.id]).create()
    except Exception:
        logging.error(_("foreign key constraint couldn't be added"))
        raise
def log_error(self, exception):
    """Record a failed call to a child zone; after
    FLAGS.zone_failures_to_offline consecutive failures the zone is
    flagged inactive.
    """
    self.last_exception = exception
    self.last_exception_time = utils.utcnow()
    api_url = self.api_url
    values = {'exception': exception, 'api_url': api_url}
    logging.warning(_("'%(exception)s' error talking to "
                      "zone %(api_url)s") % values)

    max_errors = FLAGS.zone_failures_to_offline
    self.attempt += 1
    if self.attempt < max_errors:
        return
    self.is_active = False
    logging.error(_("No answer from zone %(api_url)s "
                    "after %(max_errors)d "
                    "attempts. Marking inactive.")
                  % {'api_url': api_url, 'max_errors': max_errors})
def upgrade(migrate_engine):
    """Drop the foreign key from virtual_interfaces.network_id to
    networks.id.  Skipped on sqlite, which cannot ALTER constraints.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine
    if migrate_engine.url.get_dialect().name.startswith('sqlite'):
        return
    nets = Table('networks', meta, autoload=True)
    ifaces = Table('virtual_interfaces', meta, autoload=True)
    try:
        # the constraint name is backend-generated, so read it off the
        # reflected column before dropping
        fkey_name = list(ifaces.c.network_id.foreign_keys)[0].constraint.name
        ForeignKeyConstraint(columns=[ifaces.c.network_id],
                             refcolumns=[nets.c.id],
                             name=fkey_name).drop()
    except Exception:
        logging.error(_("foreign key constraint couldn't be removed"))
        raise
def upgrade(migrate_engine): meta.bind = migrate_engine # grab tables and (column for dropping later) instances = Table('instances', meta, autoload=True) networks = Table('networks', meta, autoload=True) fixed_ips = Table('fixed_ips', meta, autoload=True) c = instances.columns['mac_address'] # add interface column to networks table # values will have to be set manually before running engine try: networks.create_column(interface) except Exception: logging.error(_("interface column not added to networks table")) raise # create virtual_interfaces table try: virtual_interfaces.create() except Exception: logging.error(_("Table |%s| not created!"), repr(virtual_interfaces)) raise # add virtual_interface_id column to fixed_ips table try: fixed_ips.create_column(virtual_interface_id) except Exception: logging.error(_("VIF column not added to fixed_ips table")) raise # populate the virtual_interfaces table # extract data from existing instance and fixed_ip tables s = select([instances.c.id, instances.c.mac_address, fixed_ips.c.network_id], fixed_ips.c.instance_id == instances.c.id) keys = ('instance_id', 'address', 'network_id') join_list = [dict(zip(keys, row)) for row in s.execute()] logging.debug(_("join list for moving mac_addresses |%s|"), join_list) # insert data into the table if join_list: i = virtual_interfaces.insert() i.execute(join_list) # populate the fixed_ips virtual_interface_id column s = select([fixed_ips.c.id, fixed_ips.c.instance_id], fixed_ips.c.instance_id != None) for row in s.execute(): m = select([virtual_interfaces.c.id]).\ where(virtual_interfaces.c.instance_id == row['instance_id']).\ as_scalar() u = fixed_ips.update().values(virtual_interface_id=m).\ where(fixed_ips.c.id == row['id']) u.execute() # drop the mac_address column from instances c.drop()
def upgrade(migrate_engine): meta.bind = migrate_engine # grab tables and (column for dropping later) instances = Table('instances', meta, autoload=True) networks = Table('networks', meta, autoload=True) fixed_ips = Table('fixed_ips', meta, autoload=True) c = instances.columns['mac_address'] # add interface column to networks table # values will have to be set manually before running engine try: networks.create_column(interface) except Exception: logging.error(_("interface column not added to networks table")) raise # create virtual_interfaces table try: virtual_interfaces.create() except Exception: logging.error(_("Table |%s| not created!"), repr(virtual_interfaces)) raise # add virtual_interface_id column to fixed_ips table try: fixed_ips.create_column(virtual_interface_id) except Exception: logging.error(_("VIF column not added to fixed_ips table")) raise # populate the virtual_interfaces table # extract data from existing instance and fixed_ip tables s = select( [instances.c.id, instances.c.mac_address, fixed_ips.c.network_id], fixed_ips.c.instance_id == instances.c.id) keys = ('instance_id', 'address', 'network_id') join_list = [dict(zip(keys, row)) for row in s.execute()] logging.debug(_("join list for moving mac_addresses |%s|"), join_list) # insert data into the table if join_list: i = virtual_interfaces.insert() i.execute(join_list) # populate the fixed_ips virtual_interface_id column s = select([fixed_ips.c.id, fixed_ips.c.instance_id], fixed_ips.c.instance_id != None) for row in s.execute(): m = select([virtual_interfaces.c.id]).\ where(virtual_interfaces.c.instance_id == row['instance_id']).\ as_scalar() u = fixed_ips.update().values(virtual_interface_id=m).\ where(fixed_ips.c.id == row['id']) u.execute() # drop the mac_address column from instances c.drop()
def downgrade(migrate_engine):
    """Refuse to downgrade: the upgrade discarded data that cannot be
    reconstructed.

    :raises Exception: always
    """
    logging.error(_("Can't downgrade without losing data"))
    # FIX: raise an instance carrying the reason instead of the bare,
    # message-less Exception class; callers catching Exception still work
    raise Exception(_("Can't downgrade without losing data"))
def downgrade(migrate_engine):
    """Drop the instance_info_caches table created by the upgrade.

    ``instance_info_caches`` is the module-level Table object defined
    elsewhere in this migration script.
    """
    try:
        instance_info_caches.drop()
    except Exception:
        # Log for the operator, then re-raise so the migration is
        # recorded as failed rather than silently skipped.
        logging.error(_("instance_info_caches tables not dropped"))
        raise