Example #1
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)d exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
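
Note: the status word returned by os.waitpid() packs the exit code and
the killing signal together. A minimal standalone sketch of the decoding
used above (the helper name is hypothetical, not part of the original
module):

    import os

    def describe_exit(pid, status):
        # os.WIFSIGNALED() is true when the child died from a signal;
        # otherwise os.WEXITSTATUS() extracts its exit code.
        if os.WIFSIGNALED(status):
            return 'child %d killed by signal %d' % (pid, os.WTERMSIG(status))
        return 'child %d exited with status %d' % (pid, os.WEXITSTATUS(status))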
Example #2
    def setup_basic_filtering(self, instance, network_info):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
        LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
                 instance=instance)

        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return

        LOG.info(_LI('Ensuring static filters'), instance=instance)
        self._ensure_static_filters()

        nodhcp_base_filter = self.get_base_filter_list(instance, False)
        dhcp_base_filter = self.get_base_filter_list(instance, True)

        for vif in network_info:
            _base_filter = nodhcp_base_filter
            for subnet in vif['network']['subnets']:
                if subnet.get_meta('dhcp_server'):
                    _base_filter = dhcp_base_filter
                    break
            self._define_filter(self._get_instance_filter_xml(instance,
                                                              _base_filter,
                                                              vif))
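
A note on the data shape: the loop above assumes each vif behaves like a
nested mapping whose subnets expose get_meta(). A hypothetical minimal
stand-in, for illustration only:

    class Subnet(dict):
        # Sketch of the subnet model object assumed above; the real
        # network model class is richer than this.
        def get_meta(self, key, default=None):
            return self.get('meta', {}).get(key, default)

    vif = {'network': {'subnets': [
        Subnet(meta={'dhcp_server': '192.168.1.1'})]}}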
Example #3
    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This allows workers to start
            # up quickly while ensuring we don't rapidly fork children
            # that die instantly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid
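
The restart loop above hinges on _is_sighup_and_daemon(). A plausible
sketch of that predicate, assuming a SIGHUP restart is only honoured
when running as a daemon (is_daemon() is a hypothetical stand-in):

    import signal

    def is_daemon():
        # Hypothetical stand-in; a real check would consult how the
        # process was launched.
        return True

    def _is_sighup_and_daemon(signo):
        # Restart rather than exit only when SIGHUP arrives and the
        # process is a daemon; any other signal means shutdown.
        return signo == signal.SIGHUP and is_daemon()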
Example #4
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                if self.sigcaught:
                    signame = _signo_to_signame(self.sigcaught)
                    LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed.  Cleaning up."))

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
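
_signo_to_signame() is used here only for readable log messages; it can
be as small as a dict lookup over the signals the launcher expects (a
sketch, assuming only these three are ever caught):

    import signal

    def _signo_to_signame(signo):
        # Map the signals handled by the launcher to printable names.
        return {signal.SIGTERM: 'SIGTERM',
                signal.SIGINT: 'SIGINT',
                signal.SIGHUP: 'SIGHUP'}.get(signo, str(signo))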
Example #5
    def _remove_base_file(self, base_file):
        """Remove a single base file if it is old enough.

        Returns nothing.
        """
        if not os.path.exists(base_file):
            LOG.debug('Cannot remove %s, it does not exist',
                      base_file)
            return

        mtime = os.path.getmtime(base_file)
        age = time.time() - mtime

        maxage = CONF.libvirt.remove_unused_resized_minimum_age_seconds
        if base_file in self.originals:
            maxage = CONF.remove_unused_original_minimum_age_seconds

        if age < maxage:
            LOG.info(_LI('Base file too young to remove: %s'),
                     base_file)
        else:
            LOG.info(_LI('Removing base file: %s'), base_file)
            try:
                os.remove(base_file)
                signature = get_info_filename(base_file)
                if os.path.exists(signature):
                    os.remove(signature)
            except OSError as e:
                LOG.error(_LE('Failed to remove %(base_file)s, '
                              'error was %(error)s'),
                          {'base_file': base_file,
                           'error': e})
Example #6
    def _wait_for_exit_or_signal(self, ready_callback=None):
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))

        return status, signo
Example #7
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {'filename': self.fname,
                                                    'exception': e})
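
On POSIX, the trylock() called above can be a one-line non-blocking
fcntl lock; a minimal sketch (a Windows build would use msvcrt.locking()
instead):

    import fcntl

    def trylock(self):
        # LOCK_NB makes lockf raise IOError (EACCES/EAGAIN) immediately
        # instead of blocking, which would stall every green thread in
        # the process.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)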
Example #8
    def _preserve_multipath_id(self, connection_info):
        if self['connection_info'] and 'data' in self['connection_info']:
            if 'multipath_id' in self['connection_info']['data']:
                connection_info['data']['multipath_id'] = (
                    self['connection_info']['data']['multipath_id'])
                LOG.info(_LI('preserve multipath_id %s'),
                         connection_info['data']['multipath_id'])
Example #9
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(
                        _("Unable to acquire lock on"
                          " `%(filename)s` due to"
                          " %(exception)s") % {
                              'filename': self.fname,
                              'exception': e,
                          })
Example #10
def write_stored_info(target, field=None, value=None):
    """Write information about an image."""

    if not field:
        return

    info_file = get_info_filename(target)
    LOG.info(_LI('Writing stored info to %s'), info_file)
    fileutils.ensure_tree(os.path.dirname(info_file))

    lock_name = 'info-%s' % os.path.split(target)[-1]
    lock_path = os.path.join(CONF.instances_path, 'locks')

    @utils.synchronized(lock_name, external=True, lock_path=lock_path)
    def write_file(info_file, field, value):
        d = {}

        if os.path.exists(info_file):
            with open(info_file, 'r') as f:
                d = _read_possible_json(f.read(), info_file)

        d[field] = value
        d['%s-timestamp' % field] = time.time()

        with open(info_file, 'w') as f:
            f.write(json.dumps(d))

    write_file(info_file, field, value)
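
For symmetry, a hedged sketch of reading a stored field back, reusing
the module's get_info_filename() and _read_possible_json() helpers (the
real module's reader may have a different signature):

    def read_stored_info(target, field=None):
        # Return the stored value for `field`, or None when no info
        # file has been written yet.
        info_file = get_info_filename(target)
        if not os.path.exists(info_file):
            return None
        with open(info_file, 'r') as f:
            return _read_possible_json(f.read(), info_file).get(field)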
Example #11
def _set_session_sql_mode(dbapi_con, connection_rec, connection_proxy,
                          sql_mode=None):
    """Set the sql_mode session variable.

    MySQL supports several server modes. The default is None, but sessions
    may choose to enable server modes like TRADITIONAL, ANSI,
    several STRICT_* modes and others.

    Note: passing in '' (empty string) for sql_mode clears
    the SQL mode for the session, overriding a potentially set
    server default. Passing in None (the default) makes this
    a no-op, meaning if a server-side SQL mode is set, it still applies.
    """
    cursor = dbapi_con.cursor()
    if sql_mode is not None:
        cursor.execute("SET SESSION sql_mode = %s", [sql_mode])

    # Check against the real effective SQL mode. Even when unset by
    # our own config, the server may still be operating in a specific
    # SQL mode as set by the server configuration
    cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
    row = cursor.fetchone()
    if row is None:
        LOG.warning(_LW("Unable to detect effective SQL mode"))
        return
    realmode = row[1]
    LOG.info(_LI("MySQL server mode set to %s") % realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ("TRADITIONAL" in realmode.upper() or "STRICT_ALL_TABLES" in realmode.upper()):
        LOG.warning(_LW("MySQL SQL mode is '%s', " "consider enabling TRADITIONAL or STRICT_ALL_TABLES") % realmode)
Example #12
        def inner_verify_checksum():
            (stored_checksum, stored_timestamp) = read_stored_checksum(
                base_file, timestamped=True)
            if stored_checksum:
                # NOTE(mikal): Checksums are timestamped. If we have recently
                # checksummed (possibly on another compute node if we are using
                # shared storage), then we don't need to checksum again.
                if (stored_timestamp and
                    time.time() - stored_timestamp <
                        CONF.libvirt.checksum_interval_seconds):
                    return True

                # NOTE(mikal): If there is no timestamp, then the checksum was
                # performed by a previous version of the code.
                if not stored_timestamp:
                    write_stored_info(base_file, field='sha1',
                                      value=stored_checksum)

                current_checksum = _hash_file(base_file)

                if current_checksum != stored_checksum:
                    LOG.error(_LE('image %(id)s at (%(base_file)s): image '
                                  'verification failed'),
                              {'id': img_id,
                               'base_file': base_file})
                    return False

                else:
                    return True

            else:
                LOG.info(_LI('image %(id)s at (%(base_file)s): image '
                             'verification skipped, no hash stored'),
                         {'id': img_id,
                          'base_file': base_file})

                # NOTE(mikal): If the checksum file is missing, then we should
                # create one. We don't create checksums when we download images
                # from glance because that would delay VM startup.
                if CONF.libvirt.checksum_base_images and create_if_missing:
                    LOG.info(_LI('%(id)s (%(base_file)s): generating '
                                 'checksum'),
                             {'id': img_id,
                              'base_file': base_file})
                    write_stored_checksum(base_file)

                return None
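
_hash_file() is not shown here; a plausible implementation hashes the
base file in chunks so large images need not fit in memory (a sketch,
matching the sha1 field used by the stored-info helpers):

    import hashlib

    def _hash_file(filename):
        # Read in 32 KiB chunks and return the hex SHA-1 digest.
        checksum = hashlib.sha1()
        with open(filename, 'rb') as f:
            for chunk in iter(lambda: f.read(32768), b''):
                checksum.update(chunk)
        return checksum.hexdigest()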
Example #13
    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)
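
The mechanism behind _pipe_watcher() is an ordinary pipe shared across
fork(); a minimal sketch of the setup this method assumes (names
shortened for illustration):

    import os

    rfd, wfd = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child: drop the inherited write end, keep the read end.
        os.close(wfd)
        readpipe = os.fdopen(rfd, 'r')
        # readpipe.read() now blocks until every copy of the write end
        # is closed -- i.e. until the parent has exited.
    else:
        # Parent: drop the read end and hold the write end for life.
        os.close(rfd)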
Example #14
    def _age_and_verify_cached_images(self, context, all_instances, base_dir):
        LOG.debug('Verify base images')
        # Determine what images are on disk because they're in use
        for img in self.used_images:
            fingerprint = hashlib.sha1(img).hexdigest()
            LOG.debug('Image id %(id)s yields fingerprint %(fingerprint)s',
                      {'id': img,
                       'fingerprint': fingerprint})
            for result in self._find_base_file(base_dir, fingerprint):
                base_file, image_small, image_resized = result
                self._handle_base_image(img, base_file)

                if not image_small and not image_resized:
                    self.originals.append(base_file)

        # Elements remaining in unexplained_images might be in use
        inuse_backing_images = self._list_backing_images()
        for backing_path in inuse_backing_images:
            if backing_path not in self.active_base_files:
                self.active_base_files.append(backing_path)

        # Anything left is an unknown base image
        for img in self.unexplained_images:
            LOG.warning(_LW('Unknown base file: %s'), img)
            self.removable_base_files.append(img)

        # Dump these lists
        if self.active_base_files:
            LOG.info(_LI('Active base files: %s'),
                     ' '.join(self.active_base_files))
        if self.corrupt_base_files:
            LOG.info(_LI('Corrupt base files: %s'),
                     ' '.join(self.corrupt_base_files))

        if self.removable_base_files:
            LOG.info(_LI('Removable base files: %s'),
                     ' '.join(self.removable_base_files))

            if self.remove_unused_base_images:
                for base_file in self.removable_base_files:
                    self._remove_base_file(base_file)

        # That's it
        LOG.debug('Verification complete')
Example #15
    def unfilter_instance(self, instance, network_info):
        # NOTE(salvatore-orlando):
        # Overriding base class method for applying nwfilter operation
        if self.instance_info.pop(instance['id'], None):
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
            self.nwfilter.unfilter_instance(instance, network_info)
        else:
            LOG.info(_LI('Attempted to unfilter instance which is not '
                         'filtered'), instance=instance)
Example #16
    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks."""
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so go ahead and initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                task = value
                name = task.__name__

                if task._periodic_spacing < 0:
                    LOG.info(_LI('Skipping periodic task %(task)s because '
                                 'its interval is negative'),
                             {'task': name})
                    continue
                if not task._periodic_enabled:
                    LOG.info(_LI('Skipping periodic task %(task)s because '
                                 'it is disabled'),
                             {'task': name})
                    continue

                # A periodic spacing of zero indicates that this task should
                # be run on the default interval to avoid running too
                # frequently.
                if task._periodic_spacing == 0:
                    task._periodic_spacing = DEFAULT_INTERVAL

                cls._periodic_tasks.append((name, task))
                cls._periodic_spacing[name] = task._periodic_spacing
Example #17
    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks."""
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so go ahead and initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                task = value
                name = task.__name__

                if task._periodic_spacing < 0:
                    LOG.info(
                        _LI('Skipping periodic task %(task)s because '
                            'its interval is negative'), {'task': name})
                    continue
                if not task._periodic_enabled:
                    LOG.info(
                        _LI('Skipping periodic task %(task)s because '
                            'it is disabled'), {'task': name})
                    continue

                # A periodic spacing of zero indicates that this task should
                # be run on the default interval to avoid running too
                # frequently.
                if task._periodic_spacing == 0:
                    task._periodic_spacing = DEFAULT_INTERVAL

                cls._periodic_tasks.append((name, task))
                cls._periodic_spacing[name] = task._periodic_spacing
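
A decorator along the following lines would set the attributes this
metaclass looks for; this is a sketch, not the project's actual
periodic_task decorator:

    def periodic_task(spacing=0, enabled=True):
        def decorator(f):
            # Attributes consumed by _PeriodicTasksMeta above.
            f._periodic_task = True
            f._periodic_spacing = spacing
            f._periodic_enabled = enabled
            return f
        return decorator

A method on a class built with this metaclass would then opt in with,
for example, @periodic_task(spacing=60).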
Example #18
def remove_external_lock_file(name, lock_file_prefix=None):
    """Remove an external lock file when it's not used anymore
    This will be helpful when we have a lot of lock files
    """
    with internal_lock(name):
        lock_file_path = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(lock_file_path)
        except OSError:
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': lock_file_path})
Example #19
    def _inner_do_refresh_rules(self, instance, network_info, ipv4_rules,
                                ipv6_rules):
        chain_name = self._instance_chain_name(instance)
        if not self.iptables.ipv4['filter'].has_chain(chain_name):
            LOG.info(_LI('instance chain %s disappeared during refresh, '
                         'skipping'), chain_name,
                     instance=instance)
            return
        self.remove_filters_for_instance(instance)
        self.add_filters_for_instance(instance, network_info, ipv4_rules,
                                      ipv6_rules)
Example #20
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted`, if use_soft_delete is True) old
    duplicate rows from the table named `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = sqlalchemy.sql.select(
        columns_for_select, group_by=columns_for_group_by,
        having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = sqlalchemy.sql.select(
            [table.c.id]).where(delete_condition)
        rows = migrate_engine.execute(rows_to_delete_select).fetchall()
        for del_row in rows:
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from "
                         "table: %(table)s"),
                     dict(id=del_row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
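
Assuming an `instances` table whose `uuid` column carries the unique
constraint (names and URL are illustrative only), a call would look
like:

    import sqlalchemy

    engine = sqlalchemy.create_engine('mysql://user:secret@localhost/nova')
    # Soft-delete every row but the newest for each duplicated uuid.
    drop_old_duplicate_entries_from_table(engine, 'instances', True, 'uuid')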
Example #21
def is_mounted(mount_path, source=None):
    """Check if the given source is mounted at given destination point."""
    try:
        check_cmd = ['findmnt', '--target', mount_path]
        if source:
            check_cmd.extend(['--source', source])

        utils.execute(*check_cmd)
        return True
    except processutils.ProcessExecutionError:
        return False
    except OSError as exc:
        # Log at info level since it is not required to have this tool
        # installed.
        if exc.errno == errno.ENOENT:
            LOG.info(_LI("findmnt tool is not installed"))
        return False
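
A typical call site, with a hypothetical mount point and NFS export for
illustration:

    if not is_mounted('/var/lib/nova/mnt', source='192.168.0.5:/export'):
        # Not mounted (or findmnt unavailable): remount or raise,
        # depending on the caller's policy.
        pass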
Example #22
def validate_num_values(vals, default=None, cast_to=int, based_on=min):
    """Returns a corretly casted value based on a set of values.

    This method is useful to work with per-aggregate filters, It takes
    a set of values then return the 'based_on'{min/max} converted to
    'cast_to' of the set or the default value.

    Note: The cast implies a possible ValueError
    """
    num_values = len(vals)
    if num_values == 0:
        return default

    if num_values > 1:
        LOG.info(
            _LI("%(num_values)d values found, "
                "of which the minimum value will be used."),
            {"num_values": num_values})

    return cast_to(based_on(vals))
Example #23
def validate_num_values(vals, default=None, cast_to=int, based_on=min):
    """Returns a corretly casted value based on a set of values.

    This method is useful to work with per-aggregate filters, It takes
    a set of values then return the 'based_on'{min/max} converted to
    'cast_to' of the set or the default value.

    Note: The cast implies a possible ValueError
    """
    num_values = len(vals)
    if num_values == 0:
        return default

    if num_values > 1:
        LOG.info(_LI("%(num_values)d values found, "
                     "of which the minimum value will be used."),
                 {'num_values': num_values})

    return cast_to(based_on(vals))
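
For example, with single-character string values the minimum wins and
is cast to int (inputs are hypothetical):

    validate_num_values({'4', '8'}, default=6)  # -> 4 (and logs the info)
    validate_num_values(set(), default=6)       # -> 6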
Example #24
def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,  # So we don't exit the entire process
        'quit': _dont_use_this,  # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)

    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
        {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server,
                     sock,
                     locals=backdoor_locals)
    return port
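
_parse_port_range() presumably accepts either a single port or a
'start:end' range; a hedged sketch consistent with the call above:

    def _parse_port_range(port_range):
        # Accept '4444' as well as '4444:4450'.
        if ':' not in port_range:
            start = end = int(port_range)
        else:
            start, end = map(int, port_range.split(':', 1))
        return start, end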
Example #25
    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fall back into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo
Example #26
def _set_session_sql_mode(dbapi_con,
                          connection_rec,
                          connection_proxy,
                          sql_mode=None):
    """Set the sql_mode session variable.

    MySQL supports several server modes. The default is None, but sessions
    may choose to enable server modes like TRADITIONAL, ANSI,
    several STRICT_* modes and others.

    Note: passing in '' (empty string) for sql_mode clears
    the SQL mode for the session, overriding a potentially set
    server default. Passing in None (the default) makes this
    a no-op, meaning if a server-side SQL mode is set, it still applies.
    """
    cursor = dbapi_con.cursor()
    if sql_mode is not None:
        cursor.execute("SET SESSION sql_mode = %s", [sql_mode])

    # Check against the real effective SQL mode. Even when unset by
    # our own config, the server may still be operating in a specific
    # SQL mode as set by the server configuration
    cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
    row = cursor.fetchone()
    if row is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return
    realmode = row[1]
    LOG.info(_LI('MySQL server mode set to %s'), realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ('TRADITIONAL' in realmode.upper()
            or 'STRICT_ALL_TABLES' in realmode.upper()):
        LOG.warning(
            _LW("MySQL SQL mode is '%s', "
                "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
            realmode)
Example #27
def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
        {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
Example #28
    def launch_service(self, service, workers=1):
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)
Example #29
    def _handle_base_image(self, img_id, base_file):
        """Handle the checks for a single base image."""

        image_bad = False
        image_in_use = False

        LOG.info(_LI('image %(id)s at (%(base_file)s): checking'),
                 {'id': img_id,
                  'base_file': base_file})

        if base_file in self.unexplained_images:
            self.unexplained_images.remove(base_file)

        if (base_file and os.path.exists(base_file)
                and os.path.isfile(base_file)):
            # _verify_checksum returns True if the checksum is ok, and None if
            # there is no checksum file
            checksum_result = self._verify_checksum(img_id, base_file)
            if checksum_result is not None:
                image_bad = not checksum_result

            # Give other threads a chance to run
            time.sleep(0)

        instances = []
        if img_id in self.used_images:
            local, remote, instances = self.used_images[img_id]

            if local > 0 or remote > 0:
                image_in_use = True
                LOG.info(_LI('image %(id)s at (%(base_file)s): '
                             'in use: on this node %(local)d local, '
                             '%(remote)d on other nodes sharing this instance '
                             'storage'),
                         {'id': img_id,
                          'base_file': base_file,
                          'local': local,
                          'remote': remote})

                self.active_base_files.append(base_file)

                if not base_file:
                    LOG.warning(_LW('image %(id)s at (%(base_file)s): warning '
                                    '-- an absent base file is in use! '
                                    'instances: %(instance_list)s'),
                                {'id': img_id,
                                 'base_file': base_file,
                                 'instance_list': ' '.join(instances)})

        if image_bad:
            self.corrupt_base_files.append(base_file)

        if base_file:
            if not image_in_use:
                LOG.debug('image %(id)s at (%(base_file)s): image is not in '
                          'use',
                          {'id': img_id,
                           'base_file': base_file})
                self.removable_base_files.append(base_file)

            else:
                LOG.debug('image %(id)s at (%(base_file)s): image is in '
                          'use',
                          {'id': img_id,
                           'base_file': base_file})
                if os.path.exists(base_file):
                    virtutils.chown(base_file, os.getuid())
                    os.utime(base_file, None)