Example #1
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table("records", meta, autoload=True)

    # Create the new inherit_ttl column
    inherit_ttl = Column("inherit_ttl", Boolean(), default=True)
    inherit_ttl.create(records_table)

    # Semi-Populate the new inherit_ttl column. We'll need to do a cross-db
    # join from powerdns.records -> powerdns.domains -> designate.domains, so
    # we can't perform the second half here.
    query = records_table.update().values(inherit_ttl=False)
    query = query.where(records_table.c.ttl != None)
    query.execute()

    # If there are records without an explicitly configured TTL, we'll need
    # a manual post-migration step.
    query = records_table.count(records_table.c.ttl == None)
    c = query.execute().first()[0]

    if c > 0:
        pmq = (
            "UPDATE powerdns.records JOIN powerdns.domains ON powerdns.reco"
            "rds.domain_id = powerdns.domains.id JOIN designate.domains ON "
            "powerdns.domains.designate_id = designate.domains.id SET power"
            "dns.records.ttl = designate.domains.ttl WHERE powerdns.records"
            ".inherit_ttl = 1;"
        )

        LOG.warn(_LW("**** A manual post-migration step is required ****"))
        LOG.warn(_LW("Please issue this query: %s" % pmq))
Example #2
    def _send_notify_message(self, context, zone_name, notify_message, dest_ip,
                             dest_port, timeout):
        """
        :param context: The user context.
        :param zone_name: The zone name for which a NOTIFY needs to be sent.
        :param notify_message: The notify message that needs to be sent to the
        slave name servers.
        :param dest_ip: The destination ip.
        :param dest_port: The destination port.
        :param timeout: The timeout in seconds to wait for a response.
        :return: The DNS response on success, or the exception encountered.
        """
        try:
            response = dns.query.udp(
                notify_message, dest_ip, port=dest_port, timeout=timeout)

            # Check that we actually got a NOERROR in the rcode
            if dns.rcode.from_flags(
                    response.flags, response.ednsflags) != dns.rcode.NOERROR:
                LOG.warn(_LW("Failed to get NOERROR while trying to notify "
                             "change in %(zone)s to %(server)s:%(port)d. "
                             "Response message = %(resp)s") %
                         {'zone': zone_name, 'server': dest_ip,
                          'port': dest_port, 'resp': str(response)})
            return response
        except dns.exception.Timeout as timeout_exception:
            LOG.warn(_LW("Got Timeout while trying to notify change in"
                         " %(zone)s to %(server)s:%(port)d. ") %
                     {'zone': zone_name, 'server': dest_ip, 'port': dest_port})
            return timeout_exception
        except dns.query.BadResponse as badResponse:
            LOG.warn(_LW("Got BadResponse while trying to notify "
                         "change in %(zone)s to %(server)s:%(port)d") %
                     {'zone': zone_name, 'server': dest_ip, 'port': dest_port})
            return badResponse
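
For context, a minimal sketch of how the notify_message argument might be built with dnspython; the zone name and master address are placeholders, not values from the original code:

import dns.message
import dns.opcode
import dns.rdatatype

# dnspython represents a NOTIFY as a query for the zone's SOA with the
# opcode switched to NOTIFY ('example.org.' is a placeholder).
notify_message = dns.message.make_query('example.org.', dns.rdatatype.SOA)
notify_message.set_opcode(dns.opcode.NOTIFY)

# response = self._send_notify_message(
#     context, 'example.org.', notify_message, '192.0.2.1', 53, timeout=10)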
Example #3
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_LW('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
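
This _inner closure matches the shape of oslo-incubator's FixedIntervalLoopingCall; a hedged sketch of how such a wrapper is typically driven (the class name and start() signature are assumptions based on that lineage):

def _report_state():
    # Periodic task body; raise LoopingCallDone(retvalue) to stop cleanly.
    LOG.debug('Reporting state')

timer = FixedIntervalLoopingCall(_report_state)
timer.start(interval=60, initial_delay=30)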
Example #4
    def __init__(self,
                 auth_token=None,
                 user=None,
                 tenant=None,
                 domain=None,
                 user_domain=None,
                 project_domain=None,
                 is_admin=False,
                 read_only=False,
                 show_deleted=False,
                 request_id=None,
                 instance_uuid=None,
                 roles=None,
                 service_catalog=None,
                 all_tenants=False,
                 **kwargs):
        if kwargs:
            LOG.warn(
                _LW('Arguments dropped when creating context: %s') %
                str(kwargs))
        super(DesignateContext, self).__init__(auth_token=auth_token,
                                               user=user,
                                               tenant=tenant,
                                               domain=domain,
                                               user_domain=user_domain,
                                               project_domain=project_domain,
                                               is_admin=is_admin,
                                               read_only=read_only,
                                               show_deleted=show_deleted,
                                               request_id=request_id,
                                               instance_uuid=instance_uuid)

        self.roles = roles or []
        self.service_catalog = service_catalog
        self.all_tenants = all_tenants
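
A short usage sketch: unrecognised keyword arguments are logged and dropped rather than raising (the argument values are illustrative):

ctxt = DesignateContext(user='demo', tenant='demo-project',
                        roles=['admin'],
                        color='blue')  # unknown argument: warned and dropped
assert ctxt.all_tenants is False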
Example #5
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Add the hash column, start with allowing NULLs
    hash_column = Column('hash', String(32), nullable=True, default=None,
                         unique=True)
    hash_column.create(records_table, unique_name='unique_record')

    sync_domains = []

    # Fill out the hash values. We need to do this in a way that lets us track
    # which domains need to be re-synced, so having the DB do this directly
    # won't work.
    for record in records_table.select().execute():
        try:
            records_table.update()\
                         .where(records_table.c.id == record.id)\
                         .values(hash=_build_hash(record))\
                         .execute()
        except IntegrityError:
            if record.domain_id not in sync_domains:
                sync_domains.append(record.domain_id)
                LOG.warn(_LW("Domain '%s' needs to be synchronised") %
                         record.domain_id)

            records_table.delete()\
                         .where(records_table.c.id == record.id)\
                         .execute()

    # Finally, the column should not be nullable.
    records_table.c.hash.alter(nullable=False)
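
The migration relies on a _build_hash helper that is not shown here. A hypothetical sketch of its shape, assuming an MD5 digest (whose 32-character hex form matches the String(32) column above) over the fields that identify a record; the exact field list is an assumption:

import hashlib

def _build_hash(record):
    # Hypothetical: digest the identifying fields so true duplicates
    # collide on the new unique 'hash' column.
    md5 = hashlib.md5()
    md5.update('%s:%s:%s' % (record.domain_id, record.type, record.data))
    return md5.hexdigest()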
Example #6
    def sync_domain(self, context, domain, rdata):
        """
        Re-Sync a DNS domain

        This is the default, naive, domain synchronization implementation.
        """
        # First up, delete the domain from the backend.
        try:
            self.delete_domain(context, domain)
        except exceptions.DomainNotFound as e:
            # NOTE(Kiall): This means a domain was missing from the backend.
            #              Good thing we're doing a sync!
            LOG.warn(
                _LW("Failed to delete domain '%s' during sync. "
                    "Message: %s"), domain['id'], str(e))

        # Next, re-create the domain in the backend.
        self.create_domain(context, domain)

        # Finally, re-create the records for the domain.
        for recordset, records in rdata:
            # Re-create the recordset, then its records, in the backend.
            self.create_recordset(context, domain, recordset)
            for record in records:
                self.create_record(context, domain, recordset, record)
Example #7
        def _call(endpoint, region, *args, **kw):
            client = get_client(context, endpoint=endpoint)
            LOG.debug("Attempting to fetch FloatingIPs from %s @ %s" %
                      (endpoint, region))
            try:
                fips = client.list_floatingips(*args, **kw)
            except neutron_exceptions.Unauthorized as e:
                # NOTE: 401 might be that the user doesn't have neutron
                # activated in a particular region, we'll just log the failure
                # and go on with our lives.
                LOG.warn(_LW("Calling Neutron resulted in a 401, "
                             "please investigate."))
                LOG.exception(e)
                return
            except Exception as e:
                LOG.error(_LE('Failed calling Neutron '
                              '%(region)s - %(endpoint)s') %
                          {'region': region, 'endpoint': endpoint})
                LOG.exception(e)
                failed.append((e, endpoint, region))
                return

            for fip in fips['floatingips']:
                data.append({
                    'id': fip['id'],
                    'address': fip['floating_ip_address'],
                    'region': region
                })

            LOG.debug("Added %i FloatingIPs from %s @ %s" %
                      (len(data), endpoint, region))
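
A hedged sketch of how the enclosing function might fan _call out across the service catalog; the GreenPool usage and the endpoints list are assumptions, while data and failed are the lists the closure appends to:

import eventlet

endpoints = [('http://127.0.0.1:9696', 'RegionOne')]  # placeholder catalog
data, failed = [], []

pool = eventlet.GreenPool()
for endpoint, region in endpoints:
    pool.spawn_n(_call, endpoint, region)
pool.waitall()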
Example #8
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
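
For context, a hedged sketch of the supervision loop that typically sits around _wait_child in this style of process launcher; every name other than _wait_child is an assumption:

import time

def _respawn_loop(self):
    while self.running:                  # hypothetical shutdown flag
        wrap = self._wait_child()
        if wrap is None:
            time.sleep(0.01)             # nothing exited; avoid busy-looping
            continue
        self._start_child(wrap)          # hypothetical respawn helper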
Example #9
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL and DB2 connections are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    cursor = dbapi_conn.cursor()
    try:
        ping_sql = 'select 1'
        if engine.name == 'ibm_db_sa':
            # DB2 requires a table expression
            ping_sql = 'select 1 from (values (1)) AS t1'
        cursor.execute(ping_sql)
    except Exception as ex:
        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
            msg = _LW('Database server has gone away: %s') % ex
            LOG.warning(msg)

            # if the database server has gone away, all connections in the pool
            # have become invalid and we can safely close all of them here,
            # rather than waste time on checking of every single connection
            engine.dispose()

            # this will be handled by SQLAlchemy and will force it to create
            # a new connection and retry the original action
            raise sqla_exc.DisconnectionError(msg)
        else:
            raise
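
The listener is attached to the pool's 'checkout' event with the engine bound via functools.partial; the same wiring appears in the create_engine example below, so this is only pulled out for emphasis (the connection URL is a placeholder):

import functools
import sqlalchemy

engine = sqlalchemy.create_engine('mysql://user:secret@127.0.0.1/designate')
ping_callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', ping_callback)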
Example #10
def _mysql_check_effective_sql_mode(engine):
    """Logs a message based on the effective SQL mode for MySQL connections."""
    realmode = _mysql_get_effective_sql_mode(engine)

    if realmode is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return

    LOG.debug('MySQL server mode set to %s', realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ('TRADITIONAL' in realmode.upper() or
            'STRICT_ALL_TABLES' in realmode.upper()):
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
                    realmode)
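
A hypothetical sketch of the _mysql_get_effective_sql_mode helper this check depends on, assuming it simply reads the session variable back from the server:

def _mysql_get_effective_sql_mode(engine):
    # Ask the server which sql_mode the session actually ended up with.
    row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
    return row[1] if row else None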
Example #12
    def sync_domain(self, context, domain, rdata):
        """
        Re-Sync a DNS domain

        This is the default, naive, domain synchronization implementation.
        """
        # First up, delete the domain from the backend.
        try:
            self.delete_domain(context, domain)
        except exceptions.DomainNotFound as e:
            # NOTE(Kiall): This means a domain was missing from the backend.
            #              Good thing we're doing a sync!
            LOG.warn(_LW("Failed to delete domain '%(domain)s' during sync. "
                         "Message: %(message)s") %
                     {'domain': domain['id'], 'message': str(e)})

        # Next, re-create the domain in the backend.
        self.create_domain(context, domain)

        # Finally, re-create the records for the domain.
        for recordset, records in rdata:
            # Re-create the recordset, then its records, in the backend.
            self.create_recordset(context, domain, recordset)
            for record in records:
                self.create_record(context, domain, recordset, record)
Example #14
def _mysql_check_effective_sql_mode(engine):
    """Logs a message based on the effective SQL mode for MySQL connections."""
    realmode = _mysql_get_effective_sql_mode(engine)

    if realmode is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return

    LOG.debug('MySQL server mode set to %s', realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ('TRADITIONAL' in realmode.upper()
            or 'STRICT_ALL_TABLES' in realmode.upper()):
        LOG.warning(
            _LW("MySQL SQL mode is '%s', "
                "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
            realmode)
Example #15
    def _handle_tcp(self):
        LOG.info(_LI("_handle_tcp thread started"))
        while True:
            client, addr = self._sock_tcp.accept()
            LOG.warn(_LW("Handling TCP Request from: %s") % addr)

            payload = client.recv(65535)

            self.tg.add_thread(self._handle, addr, payload, client)
Example #16
    def _handle_udp(self):
        LOG.info(_LI("_handle_udp thread started"))
        while True:
            # TODO(kiall): Determine the appropriate default value for
            #              UDP recvfrom.
            payload, addr = self._sock_udp.recvfrom(8192)
            LOG.warn(_LW("Handling UDP Request from: %s") % addr)

            self.tg.add_thread(self._handle, addr, payload)
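
A hedged sketch of how the listening sockets these handlers read from might be created; the helper name, bind address, and port are assumptions:

import socket

def _start_sockets(self, host='0.0.0.0', port=5354):
    # UDP socket consumed by _handle_udp
    self._sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self._sock_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._sock_udp.bind((host, port))

    # TCP socket consumed by _handle_tcp
    self._sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._sock_tcp.bind((host, port))
    self._sock_tcp.listen(100)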
Example #17
 def __init__(self, keytab, hostname):
     # store the kerberos credentials in memory rather than on disk
     os.environ['KRB5CCNAME'] = "MEMORY:" + str(uuid.uuid4())
     self.token = None
     self.keytab = keytab
     self.hostname = hostname
     if self.keytab:
         os.environ['KRB5_CLIENT_KTNAME'] = self.keytab
     else:
         LOG.warn(_LW('No IPA client kerberos keytab file given'))
Example #19
    def _handle_tcp(self):
        LOG.info(_LI("_handle_tcp thread started"))
        while True:
            client, addr = self._sock_tcp.accept()
            LOG.warn(_LW("Handling TCP Request from: %(host)s:%(port)d") %
                     {'host': addr[0], 'port': addr[1]})

            payload = client.recv(65535)

            self.tg.add_thread(self._handle, addr, payload, client)
Example #20
    def _init_extensions(self):
        """ Loads and prepares all enabled extensions """

        enabled_notification_handlers = \
            cfg.CONF['service:sink'].enabled_notification_handlers

        notification_handlers = notification_handler.get_notification_handlers(
            enabled_notification_handlers)

        if len(notification_handlers) == 0:
            LOG.warn(_LW('No designate-sink handlers enabled or loaded'))

        return notification_handlers
Example #21
 def delete_domain(self, context, domain):
     LOG.info(_LI('Deleting domain %s / %s'), domain['id'], domain['name'])
     url = '/Zone/%s' % domain['name'].rstrip('.')
     client = self.get_client()
     try:
         client.delete(url)
     except DynClientError as e:
         if e.http_status == 404:
             LOG.warn(_LW("Attempt to delete %s / %s caused 404, "
                      "ignoring."), domain['id'], domain['name'])
         else:
             raise
     client.logout()
Example #22
    def process_request(self, request):
        # If maintenance mode is not enabled, pass the request on as soon as
        # possible
        if not self.enabled:
            return None

        # If the caller has the bypass role, let them through
        if ('context' in request.environ
                and self.role in request.environ['context'].roles):
            LOG.warn(_LW('Request authorized to bypass maintenance mode'))
            return None

        # Otherwise, reject the request with a 503 Service Unavailable
        return flask.Response(status=503, headers={'Retry-After': 60})
Example #23
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    serial = Column('serial', Integer(), default=timeutils.utcnow_ts,
                    nullable=False, server_default="1")
    serial.create(domains_table, populate_default=True)

    # Do we have any domains?
    domain_count = domains_table.count().execute().first()[0]

    if domain_count > 0:
        LOG.warn(_LW('A sync-domains is now required in order for the '
                     'API-provided and backend-provided serial numbers '
                     'to align'))
Example #24
 def delete_domain(self, context, domain):
     LOG.info(_LI('Deleting domain %s / %s'), domain['id'], domain['name'])
     url = '/Zone/%s' % domain['name'].rstrip('.')
     client = self.get_client()
     try:
         client.delete(url)
     except DynClientError as e:
         if e.http_status == 404:
             LOG.warn(
                 _LW("Attempt to delete %s / %s caused 404, "
                     "ignoring."), domain['id'], domain['name'])
         else:
             raise
     client.logout()
Example #25
def ping_listener(dbapi_conn, connection_rec, connection_proxy):
    """
    Ensures that MySQL connections checked out of the
    pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    try:
        dbapi_conn.cursor().execute('select 1')
    except dbapi_conn.OperationalError as ex:
        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
            LOG.warn(_LW('Got mysql server has gone away: %s'), ex)
            raise DisconnectionError("Database server went away")
        else:
            raise
Example #26
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)

    serial = Column('serial',
                    Integer(),
                    default=timeutils.utcnow_ts,
                    nullable=False,
                    server_default="1")
    serial.create(domains_table, populate_default=True)

    # Do we have any domains?
    domain_count = domains_table.count().execute().first()[0]

    if domain_count > 0:
        LOG.warn(
            _LW('A sync-domains is now required in order for the '
                'API-provided and backend-provided serial numbers to align'))
Example #27
    def sync_record(self, context, domain, recordset, record):
        """
        Re-Sync a DNS record.

        This is the default, naive, record synchronization implementation.
        """
        # First up, delete the record from the backend.
        try:
            self.delete_record(context, domain, recordset, record)
        except exceptions.RecordNotFound as e:
            # NOTE(Kiall): This means a record was missing from the backend.
            #              Good thing we're doing a sync!
            LOG.warn(
                _LW("Failed to delete record '%s' in domain '%s' "
                    "during sync. Message: %s"), record['id'], domain['id'],
                str(e))

        # Finally, re-create the record in the backend.
        self.create_record(context, domain, recordset, record)
Example #28
    def sync_record(self, context, domain, recordset, record):
        """
        Re-Sync a DNS record.

        This is the default, naive, record synchronization implementation.
        """
        # First up, delete the record from the backend.
        try:
            self.delete_record(context, domain, recordset, record)
        except exceptions.RecordNotFound as e:
            # NOTE(Kiall): This means a record was missing from the backend.
            #              Good thing we're doing a sync!
            LOG.warn(_LW("Failed to delete record '%(record)s' "
                         "in domain '%(domain)s' during sync. "
                         "Message: %(message)s") %
                     {'record': record['id'], 'domain': domain['id'],
                      'message': str(e)})

        # Finally, re-create the record in the backend.
        self.create_record(context, domain, recordset, record)
Example #29
    def start(self):
        super(Bind9Backend, self).start()

        domains = self.central_service.find_domains(self.admin_context)

        for domain in domains:
            rndc_op = 'reload'
            rndc_call = self._rndc_base() + [rndc_op]
            rndc_call.extend([domain['name']])

            try:
                LOG.debug('Calling RNDC with: %s' % " ".join(rndc_call))
                utils.execute(*rndc_call)
            except utils.processutils.ProcessExecutionError as proc_exec_err:
                stderr = proc_exec_err.stderr
                if stderr.count("rndc: 'reload' failed: not found") != 0:
                    LOG.warn(_LW("Domain %s (%s) missing from backend, "
                             "recreating"), domain['name'], domain['id'])
                    self._sync_domain(domain, new_domain_flag=True)
                else:
                    raise proc_exec_err
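
The _rndc_base helper is not shown. A hypothetical sketch, assuming it returns the static part of the rndc command line; the host, port, and key-file attributes are assumptions:

def _rndc_base(self):
    # Hypothetical: flags common to every rndc invocation.
    call = ['rndc', '-s', self._rndc_host, '-p', str(self._rndc_port)]
    if self._rndc_key_file:
        call.extend(['-k', self._rndc_key_file])
    return call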
Example #30
    def __init__(
        self,
        auth_token=None,
        user=None,
        tenant=None,
        domain=None,
        user_domain=None,
        project_domain=None,
        is_admin=False,
        read_only=False,
        show_deleted=False,
        request_id=None,
        instance_uuid=None,
        roles=None,
        service_catalog=None,
        all_tenants=False,
        **kwargs
    ):
        if kwargs:
            LOG.warn(_LW("Arguments dropped when creating context: %s") % str(kwargs))
        roles = roles or []
        super(DesignateContext, self).__init__(
            auth_token=auth_token,
            user=user,
            tenant=tenant,
            domain=domain,
            user_domain=user_domain,
            project_domain=project_domain,
            is_admin=is_admin,
            read_only=read_only,
            show_deleted=show_deleted,
            request_id=request_id,
            instance_uuid=instance_uuid,
        )

        self.roles = roles
        self.service_catalog = service_catalog
        self.all_tenants = all_tenants
Example #31
def syncipaservers2des(servers, designatereq, designateurl):
    # get existing servers from designate
    dservers = {}
    srvurl = designateurl + "/servers"
    resp = designatereq.get(srvurl)
    LOG.debug("Response: %s" % pprint.pformat(resp.json()))
    if resp and resp.status_code == 200 and resp.json() and \
            'servers' in resp.json():
        for srec in resp.json()['servers']:
            dservers[srec['name']] = srec['id']
    else:
        LOG.warn(_LW("No servers in designate"))

    # first - add servers from ipa not already in designate
    for server in servers:
        if server in dservers:
            LOG.info(
                _LI("Skipping ipa server %s already in designate") % server)
        else:
            desreq = {"name": server}
            resp = designatereq.post(srvurl, data=json.dumps(desreq))
            LOG.debug("Response: %s" % pprint.pformat(resp.json()))
            if resp.status_code == 200:
                LOG.info(_LI("Added server %s to designate") % server)
            else:
                raise AddServerError("Unable to add %s: %s" %
                                     (server, pprint.pformat(resp.json())))

    # next - delete servers in designate not in ipa
    for server, sid in dservers.iteritems():
        if server not in servers:
            delresp = designatereq.delete(srvurl + "/" + sid)
            if delresp.status_code == 200:
                LOG.info(_LI("Deleted server %s") % server)
            else:
                raise DeleteServerError(
                    "Unable to delete %s: %s" %
                    (server, pprint.pformat(delresp.json())))
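
A hedged usage sketch; the requests session setup, the v1 endpoint, and the token are placeholders:

import requests

designatereq = requests.Session()
designatereq.headers.update({'Content-Type': 'application/json',
                             'X-Auth-Token': 'ADMIN_TOKEN'})  # placeholder
designateurl = 'http://127.0.0.1:9001/v1'

ipa_servers = {'ns1.example.com.', 'ns2.example.com.'}
syncipaservers2des(ipa_servers, designatereq, designateurl)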
Example #32
def syncipaservers2des(servers, designatereq, designateurl):
    # get existing servers from designate
    dservers = {}
    srvurl = designateurl + "/servers"
    resp = designatereq.get(srvurl)
    LOG.debug("Response: %s" % pprint.pformat(resp.json()))
    if resp and resp.status_code == 200 and resp.json() and \
            'servers' in resp.json():
        for srec in resp.json()['servers']:
            dservers[srec['name']] = srec['id']
    else:
        LOG.warn(_LW("No servers in designate"))

    # first - add servers from ipa not already in designate
    for server in servers:
        if server in dservers:
            LOG.info(_LI("Skipping ipa server %s already in designate")
                     % server)
        else:
            desreq = {"name": server}
            resp = designatereq.post(srvurl, data=json.dumps(desreq))
            LOG.debug("Response: %s" % pprint.pformat(resp.json()))
            if resp.status_code == 200:
                LOG.info(_LI("Added server %s to designate") % server)
            else:
                raise AddServerError("Unable to add %s: %s" %
                                     (server, pprint.pformat(resp.json())))

    # next - delete servers in designate not in ipa
    for server, sid in dservers.iteritems():
        if server not in servers:
            delresp = designatereq.delete(srvurl + "/" + sid)
            if delresp.status_code == 200:
                LOG.info(_LI("Deleted server %s") % server)
            else:
                raise DeleteServerError("Unable to delete %s: %s" %
                                        (server,
                                         pprint.pformat(delresp.json())))
Example #33
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Add the hash column, start with allowing NULLs
    hash_column = Column('hash',
                         String(32),
                         nullable=True,
                         default=None,
                         unique=True)
    hash_column.create(records_table, unique_name='unique_record')

    sync_domains = []

    # Fill out the hash values. We need to do this in a way that lets us track
    # which domains need to be re-synced, so having the DB do this directly
    # won't work.
    for record in records_table.select().execute():
        try:
            records_table.update()\
                         .where(records_table.c.id == record.id)\
                         .values(hash=_build_hash(record))\
                         .execute()
        except IntegrityError:
            if record.domain_id not in sync_domains:
                sync_domains.append(record.domain_id)
                LOG.warn(
                    _LW("Domain '%s' needs to be synchronised") %
                    record.domain_id)

            records_table.delete()\
                         .where(records_table.c.id == record.id)\
                         .execute()

    # Finally, the column should not be nullable.
    records_table.c.hash.alter(nullable=False)
Example #34
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it's 'id'
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
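
A hedged usage sketch, assuming an ORM session and an Instance model with created_at and id columns; the unique 'id' column is included so the compound sort key is stable across pages:

query = session.query(Instance)
page = paginate_query(query, Instance, limit=100,
                      sort_keys=['created_at', 'id'],
                      marker=last_seen_instance,  # the row object, not its id
                      sort_dir='asc').all()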
Example #35
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  idle_timeout=3600,
                  connection_debug=0, max_pool_size=None, max_overflow=None,
                  pool_timeout=None, sqlite_synchronous=True,
                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine."""

    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

    logger = logging.getLogger('sqlalchemy.engine')

    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ['mysql', 'ibm_db_sa']:
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            if mysql_sql_mode:
                _mysql_set_mode_callback(engine, mysql_sql_mode)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
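
A short usage sketch; the connection URL is a placeholder. Note that max_retries=-1 is treated by the loop above as "retry forever":

engine = create_engine('mysql://user:secret@127.0.0.1/designate',
                       idle_timeout=3600,
                       max_pool_size=10,
                       max_retries=-1,      # -1 => keep retrying indefinitely
                       retry_interval=10)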
Example #36
def get_engine(config_group):
    """Return a SQLAlchemy engine."""
    global _ENGINES

    database_connection = cfg.CONF[config_group].database_connection

    if config_group not in _ENGINES:
        connection_dict = sqlalchemy.engine.url.make_url(
            database_connection)

        engine_args = {
            "pool_recycle": cfg.CONF[config_group].idle_timeout,
            "echo": False,
            'convert_unicode': True,
        }

        # Map our SQL debug level to SQLAlchemy's options
        if cfg.CONF[config_group].connection_debug >= 100:
            engine_args['echo'] = 'debug'
        elif cfg.CONF[config_group].connection_debug >= 50:
            engine_args['echo'] = True

        if "sqlite" in connection_dict.drivername:
            engine_args["poolclass"] = NullPool

            if database_connection == "sqlite://":
                engine_args["poolclass"] = StaticPool
                engine_args["connect_args"] = {'check_same_thread': False}

        _ENGINES[config_group] = sqlalchemy.create_engine(database_connection,
                                                          **engine_args)

        if 'mysql' in connection_dict.drivername:
            sqlalchemy.event.listen(_ENGINES[config_group],
                                    'checkout',
                                    ping_listener)
        elif "sqlite" in connection_dict.drivername:
            if not cfg.CONF[config_group].sqlite_synchronous:
                sqlalchemy.event.listen(_ENGINES[config_group],
                                        'connect',
                                        synchronous_switch_listener)
            sqlalchemy.event.listen(_ENGINES[config_group],
                                    'connect',
                                    add_regexp_listener)

        if (cfg.CONF[config_group].connection_trace and
                _ENGINES[config_group].dialect.dbapi.__name__ == 'MySQLdb'):
            import MySQLdb.cursors
            _do_query = debug_mysql_do_query()
            setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)

        try:
            _ENGINES[config_group].connect()
        except OperationalError as e:
            if not is_db_connection_error(e.args[0]):
                raise

            remaining = cfg.CONF[config_group].max_retries
            if remaining == -1:
                remaining = 'infinite'
            while True:
                LOG.warn(_LW('SQL connection failed. %s attempts left.') %
                         remaining)
                if remaining != 'infinite':
                    remaining -= 1
                time.sleep(cfg.CONF[config_group].retry_interval)
                try:
                    _ENGINES[config_group].connect()
                    break
                except OperationalError as e:
                    if (remaining != 'infinite' and remaining == 0) or \
                            not is_db_connection_error(e.args[0]):
                        raise
    return _ENGINES[config_group]
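
Because engines are cached in the module-level _ENGINES dict, repeated calls with the same config group return the same object; a short sketch (the group name is an assumption):

engine = get_engine('storage:sqlalchemy')
assert get_engine('storage:sqlalchemy') is engine  # served from the cache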
Example #37
def paginate_query(query,
                   model,
                   limit,
                   sort_keys,
                   marker=None,
                   sort_dir=None,
                   sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it's 'id'
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert (not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert (len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(
                _("Unknown sort direction, "
                  "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
Example #38
def create_engine(sql_connection,
                  sqlite_fk=False,
                  mysql_sql_mode=None,
                  idle_timeout=3600,
                  connection_debug=0,
                  max_pool_size=None,
                  max_overflow=None,
                  pool_timeout=None,
                  sqlite_synchronous=True,
                  connection_trace=False,
                  max_retries=10,
                  retry_interval=10):
    """Return a new SQLAlchemy engine."""

    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

    logger = logging.getLogger('sqlalchemy.engine')

    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ['mysql', 'ibm_db_sa']:
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            if mysql_sql_mode:
                _mysql_set_mode_callback(engine, mysql_sql_mode)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine