Example #1
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
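
Note: the pattern above is plain `os.waitpid` bookkeeping. A minimal standalone sketch of the same idea (standard library only; `reap_one` is a hypothetical helper, not part of the code above):

import errno
import os

def reap_one():
    """Reap a single exited child without blocking."""
    try:
        # WNOHANG makes waitpid return (0, 0) when no child has exited yet.
        pid, status = os.waitpid(0, os.WNOHANG)
    except OSError as exc:
        if exc.errno != errno.ECHILD:  # ECHILD means there are no children
            raise
        return None
    if pid == 0:
        return None
    if os.WIFSIGNALED(status):
        return pid, 'killed by signal %d' % os.WTERMSIG(status)
    return pid, 'exited with status %d' % os.WEXITSTATUS(status)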
Example #2
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                if self.sigcaught:
                    signame = _signo_to_signame(self.sigcaught)
                    LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed.  Cleaning up."))

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Example #3
    def from_file(self, input_file=None, delimiter=None):
        input_file = str(input_file) if input_file is not None else None

        if input_file is None or not os.path.exists(input_file):
            raise Exception('TLD Input file Not Found')

        LOG.info(_LI("Importing TLDs from %s"), input_file)

        error_lines = []
        tlds_added = 0

        with open(input_file) as inf:
            csv.register_dialect('import-tlds', delimiter=str(delimiter))
            reader = csv.DictReader(inf,
                                    fieldnames=['name', 'description'],
                                    restkey='extra_fields',
                                    dialect='import-tlds')
            for line in reader:
                # check if there are more than 2 fields
                if 'extra_fields' in line:
                    error_lines.append("InvalidLine --> " +
                                       self._convert_tld_dict_to_str(line))
                else:
                    tlds_added += self._validate_and_create_tld(
                        line, error_lines)

        LOG.info(_LI("Number of tlds added: %d"), tlds_added)

        errors = len(error_lines)
        if errors > 0:
            LOG.error(_LE("Number of errors: %d"), errors)
            # Sorting the errors and printing them so that it is easier to
            # read the errors
            LOG.error(_LE("Error Lines:\n%s"), '\n'.join(sorted(error_lines)))
Example #4
    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid
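
Note: the fork dance reduced to a standalone sketch (standard library only): `os.fork()` returns 0 in the child and the child's pid in the parent, and the child must leave via `os._exit()` so it never falls back into the parent's code path.

import os
import time

pid = os.fork()
if pid == 0:
    # Child: do the work, then exit without running parent cleanup code.
    print('child %d working' % os.getpid())
    time.sleep(1)
    os._exit(0)

# Parent: remember the pid and reap it later.
print('started child %d' % pid)
_, status = os.waitpid(pid, 0)
print('child exited with status %d' % os.WEXITSTATUS(status))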
Example #5
    def __init__(self, *args, **kwargs):
        notify_endpoint = notify.NotifyEndpoint()
        kwargs['endpoints'] = [notify_endpoint]
        super(Service, self).__init__(*args, **kwargs)

        # Create an instance of the RequestHandler class
        self.handler = handler.RequestHandler()

        # Bind to the TCP port
        LOG.info(_LI('Opening TCP Listening Socket on %(host)s:%(port)d') %
                 {'host': CONF['service:mdns'].host,
                  'port': CONF['service:mdns'].port})
        self._sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        self._sock_tcp.bind((CONF['service:mdns'].host,
                             CONF['service:mdns'].port))
        self._sock_tcp.listen(CONF['service:mdns'].tcp_backlog)

        # Bind to the UDP port
        LOG.info(_LI('Opening UDP Listening Socket on %(host)s:%(port)d') %
                 {'host': CONF['service:mdns'].host,
                  'port': CONF['service:mdns'].port})
        self._sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._sock_udp.bind((CONF['service:mdns'].host,
                             CONF['service:mdns'].port))
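
Note: the same dual-socket setup as a self-contained sketch, with hard-coded values standing in for the `CONF['service:mdns']` options (host, port and backlog below are assumptions for illustration):

import socket

host, port = '127.0.0.1', 5354

sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# SO_REUSEADDR lets the service rebind quickly after a restart.
sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_tcp.bind((host, port))
sock_tcp.listen(100)

# DNS is served over both transports, so a UDP socket binds the same endpoint.
sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_udp.bind((host, port))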
Example #6
    def sync(self, version=None):
        url = cfg.CONF['storage:sqlalchemy'].database_connection

        if not os.path.exists(REPOSITORY):
            raise Exception('Migration Repository Not Found')

        try:
            target_version = int(version) if version is not None else None

            current_version = versioning_api.db_version(url=url,
                                                        repository=REPOSITORY)
        except DatabaseNotControlledError:
            raise Exception('Database not yet initialized')

        LOG.info(_LI("Attempting to synchronize database from version "
                 "'%s' to '%s'"),
                 current_version,
                 target_version if target_version is not None else "latest")

        if target_version and target_version < current_version:
            versioning_api.downgrade(url=url, repository=REPOSITORY,
                                     version=version)
        else:
            versioning_api.upgrade(url=url, repository=REPOSITORY,
                                   version=version)

        LOG.info(_LI('Database synchronized successfully'))
Example #7
    def sync(self, version=None):
        url = cfg.CONF['backend:powerdns'].database_connection

        if not os.path.exists(REPOSITORY):
            raise Exception('Migration Repository Not Found')

        try:
            target_version = int(version) if version is not None else None

            current_version = versioning_api.db_version(url=url,
                                                        repository=REPOSITORY)
        except DatabaseNotControlledError:
            raise Exception('PowerDNS database not yet initialized')

        LOG.info(_LI("Attempting to synchronize PowerDNS database "
                     "from version '%s' to '%s'"),
                 current_version,
                 target_version if target_version is not None else "latest")

        if target_version and target_version < current_version:
            versioning_api.downgrade(url=url, repository=REPOSITORY,
                                     version=version)
        else:
            versioning_api.upgrade(url=url, repository=REPOSITORY,
                                   version=version)

        LOG.info(_LI('PowerDNS database synchronized successfully'))
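
Note: both `sync` variants drive sqlalchemy-migrate's `versioning_api`. If a migrate repository is in place, roughly the same calls work standalone; this is a sketch, and the connection string and repository path below are assumptions:

from migrate.versioning import api as versioning_api

url = 'sqlite:////tmp/designate.sqlite'  # assumed connection string
repository = '/path/to/migrate_repo'     # assumed repository path

current = versioning_api.db_version(url=url, repository=repository)
versioning_api.upgrade(url=url, repository=repository, version=None)  # latest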
Example #8
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called.  Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed.  Cleaning up."))

        self.stop()
Example #9
    def from_file(self, input_file=None, delimiter=None):
        input_file = str(input_file) if input_file is not None else None

        if input_file is None or not os.path.exists(input_file):
            raise Exception('TLD Input file Not Found')

        LOG.info(_LI("Importing TLDs from %s"), input_file)

        error_lines = []
        tlds_added = 0

        with open(input_file) as inf:
            csv.register_dialect('import-tlds', delimiter=str(delimiter))
            reader = csv.DictReader(inf,
                                    fieldnames=['name', 'description'],
                                    restkey='extra_fields',
                                    dialect='import-tlds')
            for line in reader:
                # check if there are more than 2 fields
                if 'extra_fields' in line:
                    error_lines.append("InvalidLine --> " +
                                       self._convert_tld_dict_to_str(line))
                else:
                    tlds_added += self._validate_and_create_tld(line,
                                                                error_lines)

        LOG.info(_LI("Number of tlds added: %d"), tlds_added)

        errors = len(error_lines)
        if errors > 0:
            LOG.error(_LE("Number of errors: %d"), errors)
            # Sorting the errors and printing them so that it is easier to
            # read the errors
            LOG.error(_LE("Error Lines:\n%s"), '\n'.join(sorted(error_lines)))
Example #10
    def init(self):
        url = cfg.CONF['storage:sqlalchemy'].database_connection

        try:
            LOG.info(_LI('Attempting to initialize database'))
            versioning_api.version_control(url=url, repository=REPOSITORY)
            LOG.info(_LI('Database initialized successfully'))
        except DatabaseAlreadyControlledError:
            raise Exception('Database already initialized')
Example #11
    def init(self):
        url = cfg.CONF['backend:powerdns'].database_connection

        if not os.path.exists(REPOSITORY):
            raise Exception('Migration Repository Not Found')

        try:
            LOG.info(_LI('Attempting to initialize PowerDNS database'))
            versioning_api.version_control(url=url, repository=REPOSITORY)
            LOG.info(_LI('PowerDNS database initialized successfully'))
        except DatabaseAlreadyControlledError:
            raise Exception('PowerDNS Database already initialized')
Example #12
    def _wait_for_exit_or_signal(self, ready_callback=None):
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))

        return status, signo
Example #13
    def delete_recordset(self, context, domain_id, recordset_id,
                         increment_serial=True):
        LOG.info(_LI("delete_recordset: Calling central's delete_recordset."))
        return self.client.call(context, 'delete_recordset',
                                domain_id=domain_id,
                                recordset_id=recordset_id,
                                increment_serial=increment_serial)
Example #14
    def find_servers(self, context, criterion=None, marker=None, limit=None,
                     sort_key=None, sort_dir=None):
        LOG.info(_LI("find_servers: Calling central's find_servers."))

        return self.client.call(context, 'find_servers', criterion=criterion,
                                marker=marker, limit=limit, sort_key=sort_key,
                                sort_dir=sort_dir)
Example #15
    def __init__(self, application):
        super(MaintenanceMiddleware, self).__init__(application)

        LOG.info(_LI('Starting designate maintenance middleware'))

        self.enabled = cfg.CONF['service:api'].maintenance_mode
        self.role = cfg.CONF['service:api'].maintenance_mode_role
Example #16
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {'filename': self.fname,
                                                    'exception': e})
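
Note: on POSIX, `trylock()` typically wraps a non-blocking `fcntl.flock`; a failed attempt raises an error with `EACCES` or `EAGAIN`, which is exactly what the loop above retries on. A minimal sketch of that mechanism (standard library, POSIX only; the lock path is made up):

import errno
import fcntl
import time

lockfile = open('/tmp/example.lock', 'w')
while True:
    try:
        # LOCK_NB turns the blocking flock into a try-lock.
        fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        break
    except (IOError, OSError) as e:
        if e.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        time.sleep(0.01)  # lock held elsewhere; back off briefly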
Example #17
    def notify_zone_changed(self, context, zone_name):
        LOG.info(_LI("notify_zone_changed: Calling mdns's notify_zone_changed "
                     "for zone '%(zone_name)s'") % {'zone_name': zone_name})
        # The notify_zone_changed method is a cast rather than a call since
        # the caller need not wait for the notify to complete.
        return self.notify_client.cast(
            context, 'notify_zone_changed', zone_name=zone_name)
Example #18
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(
                        _("Unable to acquire lock on"
                          " `%(filename)s` due to"
                          " %(exception)s") % {
                              'filename': self.fname,
                              'exception': e,
                          })
Example #19
    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)
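
Note: the trick here is that the parent holds the write end of a pipe and never writes to it; when the parent dies, the kernel closes that end and the child's blocking `read()` returns EOF. A standalone sketch of the mechanism (the pipe setup is assumed; the real code wires this up at fork time):

import os
import sys

read_fd, write_fd = os.pipe()

if os.fork() == 0:
    # Child: keep only the read end; the inherited write end must be closed
    # or the pipe would never report EOF.
    os.close(write_fd)
    readpipe = os.fdopen(read_fd, 'r')
    readpipe.read()  # blocks until the parent's write end closes
    print('parent died, exiting')
    sys.exit(1)

# Parent: keep write_fd open; the kernel closes it when the parent exits.
os.close(read_fd)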
Example #20
    def create_record(self, context, domain_id, recordset_id, record,
                      increment_serial=True):
        LOG.info(_LI("create_record: Calling central's create_record."))
        return self.client.call(context, 'create_record',
                                domain_id=domain_id,
                                recordset_id=recordset_id,
                                record=record,
                                increment_serial=increment_serial)
Example #21
    def update_recordset(self, context, domain_id, recordset_id, values,
                         increment_serial=True):
        LOG.info(_LI("update_recordset: Calling central's update_recordset."))
        return self.client.call(context, 'update_recordset',
                                domain_id=domain_id,
                                recordset_id=recordset_id,
                                values=values,
                                increment_serial=increment_serial)
Example #22
    def _handle_udp(self):
        LOG.info(_LI("_handle_udp thread started"))
        while True:
            # TODO(kiall): Determine the appropriate default value for
            #              UDP recvfrom.
            payload, addr = self._sock_udp.recvfrom(8192)
            LOG.warn(_LW("Handling UDP Request from: %s") % addr)

            self.tg.add_thread(self._handle, addr, payload)
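
Note: the UDP receive loop reduced to a runnable sketch, with a plain function call standing in for the thread-group dispatch (the 8192-byte buffer mirrors the example's choice; host and port are assumptions):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 5354))

def handle(addr, payload):
    print('got %d bytes from %s' % (len(payload), addr))

while True:
    # recvfrom returns one whole datagram (truncated if larger than the buffer)
    payload, addr = sock.recvfrom(8192)
    handle(addr, payload)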
Example #23
    def _handle_tcp(self):
        LOG.info(_LI("_handle_tcp thread started"))
        while True:
            client, addr = self._sock_tcp.accept()
            LOG.warn(_LW("Handling TCP Request from: %s") % addr)

            payload = client.recv(65535)

            self.tg.add_thread(self._handle, addr, payload, client)
Example #24
    def _handle_tcp(self):
        LOG.info(_LI("_handle_tcp thread started"))
        while True:
            client, addr = self._sock_tcp.accept()
            LOG.warn(_LW("Handling TCP Request from: %(host)s:%(port)d") %
                     {'host': addr[0], 'port': addr[1]})

            payload = client.recv(65535)

            self.tg.add_thread(self._handle, addr, payload, client)
Example #25
def remove_external_lock_file(name, lock_file_prefix=None):
    """Remove an external lock file when it's not used anymore
    This will be helpful when we have a lot of lock files
    """
    with internal_lock(name):
        lock_file_path = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(lock_file_path)
        except OSError:
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': lock_file_path})
Example #26
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table named `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = sqlalchemy.sql.select(
        columns_for_select,
        group_by=columns_for_group_by,
        having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = sqlalchemy.sql.select(
            [table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(
                _LI("Deleting duplicated row with id: %(id)s from table: "
                    "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
Example #27
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table named `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = sqlalchemy.sql.select(
        columns_for_select, group_by=columns_for_group_by,
        having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = sqlalchemy.sql.select(
            [table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
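
Note: the core of both variants is a `GROUP BY ... HAVING count(id) > 1` query. A sketch of the same query in present-day SQLAlchemy style (the `records` table and its columns are invented for illustration):

from sqlalchemy import Column, Integer, MetaData, String, Table, func, select

metadata = MetaData()
records = Table('records', metadata,
                Column('id', Integer, primary_key=True),
                Column('name', String(255)))

# One row per duplicated `name`, carrying the id of the newest copy to keep.
duplicates = (
    select(func.max(records.c.id), records.c.name)
    .group_by(records.c.name)
    .having(func.count(records.c.id) > 1)
)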
Example #28
def syncipaservers2des(servers, designatereq, designateurl):
    # get existing servers from designate
    dservers = {}
    srvurl = designateurl + "/servers"
    resp = designatereq.get(srvurl)
    LOG.debug("Response: %s" % pprint.pformat(resp.json()))
    if resp and resp.status_code == 200 and resp.json() and \
            'servers' in resp.json():
        for srec in resp.json()['servers']:
            dservers[srec['name']] = srec['id']
    else:
        LOG.warn(_LW("No servers in designate"))

    # first - add servers from ipa not already in designate
    for server in servers:
        if server in dservers:
            LOG.info(_LI("Skipping ipa server %s already in designate")
                     % server)
        else:
            desreq = {"name": server}
            resp = designatereq.post(srvurl, data=json.dumps(desreq))
            LOG.debug("Response: %s" % pprint.pformat(resp.json()))
            if resp.status_code == 200:
                LOG.info(_LI("Added server %s to designate") % server)
            else:
                raise AddServerError("Unable to add %s: %s" %
                                     (server, pprint.pformat(resp.json())))

    # next - delete servers in designate not in ipa
    for server, sid in dservers.iteritems():
        if server not in servers:
            delresp = designatereq.delete(srvurl + "/" + sid)
            if delresp.status_code == 200:
                LOG.info(_LI("Deleted server %s") % server)
            else:
                raise DeleteServerError("Unable to delete %s: %s" %
                                        (server,
                                         pprint.pformat(delresp.json())))
Example #29
def syncipaservers2des(servers, designatereq, designateurl):
    # get existing servers from designate
    dservers = {}
    srvurl = designateurl + "/servers"
    resp = designatereq.get(srvurl)
    LOG.debug("Response: %s" % pprint.pformat(resp.json()))
    if resp and resp.status_code == 200 and resp.json() and \
            'servers' in resp.json():
        for srec in resp.json()['servers']:
            dservers[srec['name']] = srec['id']
    else:
        LOG.warn(_LW("No servers in designate"))

    # first - add servers from ipa not already in designate
    for server in servers:
        if server in dservers:
            LOG.info(
                _LI("Skipping ipa server %s already in designate") % server)
        else:
            desreq = {"name": server}
            resp = designatereq.post(srvurl, data=json.dumps(desreq))
            LOG.debug("Response: %s" % pprint.pformat(resp.json()))
            if resp.status_code == 200:
                LOG.info(_LI("Added server %s to designate") % server)
            else:
                raise AddServerError("Unable to add %s: %s" %
                                     (server, pprint.pformat(resp.json())))

    # next - delete servers in designate not in ipa
    for server, sid in dservers.iteritems():
        if server not in servers:
            delresp = designatereq.delete(srvurl + "/" + sid)
            if delresp.status_code == 200:
                LOG.info(_LI("Deleted server %s") % server)
            else:
                raise DeleteServerError(
                    "Unable to delete %s: %s" %
                    (server, pprint.pformat(delresp.json())))
Example #30
    def delete_domain(self, context, domain):
        LOG.info(_LI('Deleting domain %s / %s'), domain['id'], domain['name'])
        url = '/Zone/%s' % domain['name'].rstrip('.')
        client = self.get_client()
        try:
            client.delete(url)
        except DynClientError as e:
            if e.http_status == 404:
                LOG.warn(_LW("Attempt to delete %s / %s caused 404, "
                             "ignoring."), domain['id'], domain['name'])
            else:
                raise
        client.logout()
Example #31
def auth_pipeline_factory(loader, global_conf, **local_conf):
    """
    A paste pipeline replica that keys off of auth_strategy.

    Code nabbed from cinder.
    """
    pipeline = local_conf[cfg.CONF['service:api'].auth_strategy]
    pipeline = pipeline.split()
    LOG.info(_LI('Getting auth pipeline: %s') % pipeline[:-1])
    filters = [loader.get_filter(n) for n in pipeline[:-1]]
    app = loader.get_app(pipeline[-1])
    filters.reverse()
    for filter in filters:
        app = filter(app)
    return app
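
Note: the `filters.reverse()` plus wrap loop is what makes the pipeline read left to right in the paste config: the last filter applied is the first one a request hits. A tiny stand-in with plain callables (all names hypothetical):

def app(request):
    return 'handled %s' % request

def make_filter(tag):
    def filter_factory(inner):
        def wrapped(request):
            return '%s(%s)' % (tag, inner(request))
        return wrapped
    return filter_factory

filters = [make_filter('auth'), make_filter('maintenance')]
filters.reverse()
pipeline = app
for f in filters:
    pipeline = f(pipeline)

print(pipeline('GET /'))  # auth(maintenance(handled GET /))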
Example #32
    def __init__(self, *args, **kwargs):
        # Parse the slave-nameserver-ips-and-ports.
        self._slave_server_ips = []
        self._slave_server_ports = []
        self._total_slave_nameservers = 0
        for slave in CONF['service:mdns'].slave_nameserver_ips_and_ports:
            slave_details = slave.split(':')

            # Check each entry to ensure that it has an IP and port.
            if (len(slave_details) != 2):
                raise exceptions.ConfigurationError(
                    "'slave-nameserver-ips-and-ports' in ['service:mdns'] is "
                    "not in the correct format. Expected format 'ipaddress:"
                    "port'. Got %(list_item)s" % {'list_item': slave})

            self._slave_server_ips.append(slave_details[0])
            self._slave_server_ports.append(int(slave_details[1]))
            self._total_slave_nameservers += 1

        LOG.info(_LI("slave nameserver ips = %(slave_server_ips)s") %
                 {"slave_server_ips": self._slave_server_ips})
        LOG.info(_LI("slave nameserver ports = %(slave_server_ports)s") %
                 {"slave_server_ports": self._slave_server_ports})
        LOG.info(_LI("started mdns notify endpoint"))
Example #33
    def delete_domain(self, context, domain):
        LOG.info(_LI('Deleting domain %s / %s'), domain['id'], domain['name'])
        url = '/Zone/%s' % domain['name'].rstrip('.')
        client = self.get_client()
        try:
            client.delete(url)
        except DynClientError as e:
            if e.http_status == 404:
                LOG.warn(
                    _LW("Attempt to delete %s / %s caused 404, "
                        "ignoring."), domain['id'], domain['name'])
            else:
                raise
        client.logout()
Example #34
    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Example #35
    def create_domain(self, context, domain):
        LOG.info(_LI('Creating domain %s / %s'), domain['id'], domain['name'])

        url = '/Secondary/%s' % domain['name'].rstrip('.')
        data = {'masters': cfg.CONF[GROUP].masters}

        if cfg.CONF[GROUP].contact_nickname is not None:
            data['contact_nickname'] = cfg.CONF[GROUP].contact_nickname

        if cfg.CONF[GROUP].tsig_key_name is not None:
            data['tsig_key_name'] = cfg.CONF[GROUP].tsig_key_name

        client = self.get_client()
        client.post(url, data=data)
        client.put(url, data={'activate': True})
        client.logout()
Example #36
def rec2des(rec, zonename):
    """Convert an IPA record to Designate format.  A single IPA record
    returned from the search may translate into multiple Designate records.
    IPA dnsrecord_find returns a "name".  Each DNS name may contain
    multiple record types.  Each record type may contain multiple
    values.  Each one of these values must be added separately to
    Designate.  This function returns all of those as a list of
    dict designate records.
    """
    # convert record name
    if rec['idnsname'][0] == '@':
        name = zonename
    else:
        name = rec['idnsname'][0] + "." + zonename
    # find all record types
    rectypes = []
    for k in rec:
        if k.endswith("record"):
            if k in iparectype2designate:
                rectypes.append(k)
            else:
                LOG.info(_LI("Skipping unknown record type "
                             "%(type)s in %(name)s") %
                         {'type': k, 'name': name})

    desrecs = []
    for rectype in rectypes:
        dtype = iparectype2designate[rectype]
        for ddata in rec[rectype]:
            desreq = {'name': name, 'type': dtype}
            if dtype == 'SRV' or dtype == 'MX':
                # split off the priority and send in a separate field
                idx = ddata.find(' ')
                desreq['priority'] = int(ddata[:idx])
                if dtype == 'SRV' and not ddata.endswith("."):
                    # if server is specified as relative, add zonename
                    desreq['data'] = ddata[(idx + 1):] + "." + zonename
                else:
                    desreq['data'] = ddata[(idx + 1):]
            else:
                desreq['data'] = ddata
            if rec.get('description', [None])[0]:
                desreq['description'] = rec.get('description')[0]
            if rec.get('ttl', [None])[0]:
                desreq['ttl'] = int(rec['dnsttl'][0])
            desrecs.append(desreq)
    return desrecs
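
Note: the MX/SRV branch splits the leading priority off the record data; in the code above, relative targets additionally get qualified with the zone name for SRV records. A focused sketch of that transformation, folding both cases together (record values are made up):

def split_priority(ddata, zonename):
    """Return (priority, data) for an MX/SRV-style 'prio target' string."""
    idx = ddata.find(' ')
    priority = int(ddata[:idx])
    data = ddata[idx + 1:]
    if not data.endswith('.'):
        # Relative target: qualify it with the zone name.
        data = data + '.' + zonename
    return priority, data

print(split_priority('10 mail', 'example.com.'))
# -> (10, 'mail.example.com.')
print(split_priority('0 mx1.example.net.', 'example.com.'))
# -> (0, 'mx1.example.net.')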
Example #37
def rec2des(rec, zonename):
    """Convert an IPA record to Designate format.  A single IPA record
    returned from the search may translate into multiple Designate records.
    IPA dnsrecord_find returns a "name".  Each DNS name may contain
    multiple record types.  Each record type may contain multiple
    values.  Each one of these values must be added separately to
    Designate.  This function returns all of those as a list of
    dict designate records.
    """
    # convert record name
    if rec['idnsname'][0] == '@':
        name = zonename
    else:
        name = rec['idnsname'][0] + "." + zonename
    # find all record types
    rectypes = []
    for k in rec:
        if k.endswith("record"):
            if k in iparectype2designate:
                rectypes.append(k)
            else:
                LOG.info(
                    _LI("Skipping unknown record type %s in %s"), k, name)

    desrecs = []
    for rectype in rectypes:
        dtype = iparectype2designate[rectype]
        for ddata in rec[rectype]:
            desreq = {'name': name, 'type': dtype}
            if dtype == 'SRV' or dtype == 'MX':
                # split off the priority and send in a separate field
                idx = ddata.find(' ')
                desreq['priority'] = int(ddata[:idx])
                if dtype == 'SRV' and not ddata.endswith("."):
                    # if server is specified as relative, add zonename
                    desreq['data'] = ddata[(idx + 1):] + "." + zonename
                else:
                    desreq['data'] = ddata[(idx + 1):]
            else:
                desreq['data'] = ddata
            if rec.get('description', [None])[0]:
                desreq['description'] = rec.get('description')[0]
            if rec.get('ttl', [None])[0]:
                desreq['ttl'] = int(rec['dnsttl'][0])
            desrecs.append(desreq)
    return desrecs
Example #38
    def create_domain(self, context, domain):
        LOG.info(_LI('Creating domain %s / %s'), domain['id'], domain['name'])

        url = '/Secondary/%s' % domain['name'].rstrip('.')
        data = {
            'masters': cfg.CONF[GROUP].masters
        }

        if cfg.CONF[GROUP].contact_nickname is not None:
            data['contact_nickname'] = cfg.CONF[GROUP].contact_nickname

        if cfg.CONF[GROUP].tsig_key_name is not None:
            data['tsig_key_name'] = cfg.CONF[GROUP].tsig_key_name

        client = self.get_client()
        client.post(url, data=data)
        client.put(url, data={'activate': True})
        client.logout()
Example #39
def init(default_rule=None):
    policy_files = utils.find_config(cfg.CONF.policy_file)

    if len(policy_files) == 0:
        msg = 'Unable to determine appropriate policy json file'
        raise exceptions.ConfigurationError(msg)

    LOG.info(_LI('Using policy_file found at: %s') % policy_files[0])

    with open(policy_files[0]) as fh:
        policy_string = fh.read()
    rules = policy.Rules.load_json(policy_string, default_rule=default_rule)

    global _ENFORCER
    if not _ENFORCER:
        LOG.debug("Enforcer is not present, recreating.")
        _ENFORCER = policy.Enforcer()

    _ENFORCER.set_rules(rules)
Example #40
    def __init__(self, backlog=128, threads=1000):

        api_paste_config = cfg.CONF['service:api'].api_paste_config
        config_paths = utils.find_config(api_paste_config)

        if len(config_paths) == 0:
            msg = 'Unable to determine appropriate api-paste-config file'
            raise exceptions.ConfigurationError(msg)

        LOG.info(_LI('Using api-paste-config found at: %s') % config_paths[0])

        policy.init()

        application = deploy.loadapp("config:%s" % config_paths[0],
                                     name='osapi_dns')

        super(Service, self).__init__(application=application,
                                      host=cfg.CONF['service:api'].api_host,
                                      port=cfg.CONF['service:api'].api_port,
                                      backlog=backlog,
                                      threads=threads)
Example #41
    def _extract_zones(self):
        zones = []
        for zone in self._zone_regex.finditer(self._conf):
            content = zone.group('content')
            name = zone.group('name')
            # Make sure it's a master zone:
            if self._type_master_regex.search(content):
                zonefile = self._zonefile_regex.search(content).group('file')
                try:
                    zone_object = dns.zone.from_file(zonefile,
                                                     allow_include=True)
                except dns.zone.UnknownOrigin:
                    LOG.info(
                        _LI('%s is missing $ORIGIN, inserting %s') %
                        (zonefile, name))
                    zone_object = dns.zone.from_file(zonefile,
                                                     allow_include=True,
                                                     origin=name)
                except dns.zone.NoSOA:
                    LOG.error(_LE('%s has no SOA') % zonefile)
                    continue
                zones.append(Zone(zone_object))
        return zones
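
Note: dnspython's zone loader does the heavy lifting above; `dns.zone.from_file` (like its sibling `from_text`) raises `UnknownOrigin` when the file lacks an $ORIGIN and no origin argument is given, and `NoSOA` when the apex SOA is missing. A small sketch using `from_text` (the zone data is made up):

import dns.zone

zone_text = """
$TTL 3600
@ IN SOA ns1.example.com. admin.example.com. 1 7200 900 1209600 86400
@ IN NS ns1.example.com.
www IN A 192.0.2.10
"""

zone = dns.zone.from_text(zone_text, origin='example.com.')
for name, ttl, rdata in zone.iterate_rdatas():
    print(name, ttl, rdata)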
Example #42
    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fall back into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo
Example #43
def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
        {'port': port, 'pid': os.getpid()}
    )
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
Example #44
    def __init__(self, application):
        super(FaultWrapperMiddleware, self).__init__(application)

        LOG.info(_LI('Starting designate faultwrapper middleware'))
Example #45
    def __init__(self, application):
        super(NoAuthContextMiddleware, self).__init__(application)

        LOG.info(_LI('Starting designate noauthcontext middleware'))
Example #46
    def __init__(self, application):
        super(KeystoneContextMiddleware, self).__init__(application)

        LOG.info(_LI('Starting designate keystonecontext middleware'))