Beispiel #1
0
def run(config, section_name):
    '''
    Sets up logging for the entire process and then starts the corresponding handler.

    - config : Configuration of the application.
    - section_name : Name of the section in the config.

    Returns the instantiated handler, or None when instantiation or
    startup fails (the error is logged, not raised).
    '''
    # NOTE: the original placed this docstring *after* the import and
    # monkey_patch calls, which made it a no-op string expression rather
    # than the function's docstring.
    import eventlet
    # Apply monkey_patch at the earliest (thread module excluded).
    eventlet.monkey_patch(thread=False)

    logUtils.setup_logging(config)
    logger = logUtils.get_logger(__name__)
    solr_url = config['DEFAULT']['solr_url']
    db_info = session.DBInfo(config['DEFAULT']['nova_db_server'],
                             config['DEFAULT']['nova_db_port'],
                             config['DEFAULT']['nova_db_creds'])
    amqp_info = amqp_client.AmqpInfo(config['DEFAULT']['amqp_server'],
                                     config['DEFAULT']['amqp_port'],
                                     config['DEFAULT']['amqp_creds'])

    # Pull out the name of the handler for the specific core.
    section = config[section_name]
    handlerName = section["handlerName"]
    interval = int(section["interval"])

    try:
        # Instantiate the handler.
        handlerKls = get_class(handlerName)
        handler = handlerKls(solr_url, db_info, amqp_info, interval)
        logger.info(str(handler) + ' instantiated to perform work.')
        # The handler is expected to have an on_start() and a setup_amqp()
        # method that do the real work: on_start runs on a green thread,
        # while setup_amqp runs on (and keeps busy) the main thread.
        eventlet.spawn_n(handler.on_start)
        handler.setup_amqp()
        return handler
    except Exception as inst:
        logger.exception(inst)
        # Explicitly surface the failure mode to callers.
        return None
Beispiel #2
0
def main():
    """Entry point for the trove guest agent service.

    Parses CLI args, configures logging/debugging, decides whether to
    monkey-patch the ``thread`` module, then runs the datastore manager
    as an RPC service, blocking until it exits.
    """
    cfg.parse_args(sys.argv)
    logging.setup(CONF, None)

    debug_utils.setup()

    # Patch 'thread' module if debug is disabled
    # (debuggers need real native threads to work).
    if not debug_utils.enabled():
        eventlet.monkey_patch(thread=True)

    from trove.guestagent import dbaas
    manager = dbaas.datastore_registry().get(CONF.datastore_manager)
    if not manager:
        msg = ("Manager class not registered for datastore manager %s" %
               CONF.datastore_manager)
        raise RuntimeError(msg)

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(CONF)

    from trove.common.rpc import service as rpc_service
    from trove.common.rpc import version as rpc_version
    server = rpc_service.RpcService(
        manager=manager, host=CONF.guest_id,
        rpc_api_version=rpc_version.RPC_API_VERSION)

    # wait() blocks until the launched service terminates.
    launcher = openstack_service.launch(CONF, server)
    launcher.wait()
def main():
    """Entry point for the Mellanox eSwitch neutron agent.

    Exits with status 1 on configuration or initialisation failure;
    otherwise runs the agent's daemon loop until it exits.
    """
    # Green-patch the stdlib before any other work happens.
    eventlet.monkey_patch()
    cfg.CONF(project='neutron')
    logging_config.setup_logging(cfg.CONF)

    try:
        interface_mappings = q_utils.parse_mappings(
            cfg.CONF.ESWITCH.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_("Parsing physical_interface_mappings failed: %s."
                    " Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_("Interface mappings: %s"), interface_mappings)

    try:
        agent = MlnxEswitchNeutronAgent(interface_mappings)
    except Exception as e:
        LOG.error(_("Failed on Agent initialisation : %s."
                    " Agent terminated!"), e)
        sys.exit(1)

    # Start everything.
    LOG.info(_("Agent initialised successfully, now running... "))
    agent.daemon_loop()
    sys.exit(0)
Beispiel #4
0
    def _open(self, scheme='mongodb://'):
        """Open a MongoClient for the broker URI and store/return the database.

        Raises NotImplementedError when the server is older than MongoDB 1.3.
        """
        hostname, dbname, options = self._parse_uri(scheme=scheme)

        client_opts = self._prepare_client_options(options)
        client_opts['host'] = hostname

        # Green-patch the stdlib to match the detected async environment so
        # the driver's sockets cooperate with the event loop.
        environment = _detect_environment()
        if environment == 'gevent':
            from gevent import monkey
            monkey.patch_all()
        elif environment == 'eventlet':
            from eventlet import monkey_patch
            monkey_patch()

        connection = MongoClient(**client_opts)
        database = connection[dbname]

        server_version = connection.server_info()['version']
        major_minor = tuple(int(part) for part in server_version.split('.')[:2])
        if major_minor < (1, 3):
            raise NotImplementedError(
                'Kombu requires MongoDB version 1.3+ (server is {0})'.format(
                    server_version))

        self._create_broadcast(database, options)

        self._client = database
Beispiel #5
0
 def __init__(self, listener, application=None, backlog=None):
     """Create a WSGI server socket bound to *listener*.

     - listener : (host, port) tuple to bind.
     - application : WSGI application callable served by this server.
     - backlog : listen backlog forwarded to create_socket.
     """
     # Green-patch the stdlib before the listening socket is created.
     eventlet.monkey_patch()
     host, port = listener
     self.socket = create_socket(host, port, self.address_family,
                                 self.socket_type, backlog=backlog)
     # Allow fast rebinding of the port after a restart.
     self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     self.application = application
Beispiel #6
0
def main():
    """Entry point for the Open vSwitch neutron agent.

    Exits with status 1 when building the agent config map fails;
    otherwise runs the agent daemon loop until it exits.
    """
    # Green-patch before anything else runs.
    eventlet.monkey_patch()
    cfg.CONF.register_opts(ip_lib.OPTS)
    cfg.CONF(project='neutron')
    logging_config.setup_logging(cfg.CONF)
    legacy.modernize_quantum_config(cfg.CONF)

    try:
        agent_config = create_agent_config_map(cfg.CONF)
    except ValueError as e:
        LOG.error(_('%s Agent terminated!'), e)
        sys.exit(1)

    is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper']
    if is_xen_compute_host:
        # Force ip_lib to always use the root helper to ensure that ip
        # commands target xen dom0 rather than domU.
        cfg.CONF.set_default('ip_lib_force_root', True)

    plugin = OVSNeutronAgent(**agent_config)

    # Start everything.
    LOG.info(_("Agent initialized successfully, now running... "))
    plugin.daemon_loop()
    sys.exit(0)
Beispiel #7
0
def server(host=RESTAPI_DEFAULT_ADDRESS, port=RESTAPI_DEFAULT_PORT, adapter=RESTAPI_DEFAULT_ADAPTER):
    """
    REST-JSON API server

    Binds the bottle application to host:port using the given adapter
    ('wsgiref', 'gevent' or 'eventlet') and blocks serving requests.
    """
    DataStore.admin_id = hexencode(os.urandom(16))
    Database.filepath = tempfile.mkstemp(prefix="sqlmapipc-", text=False)[1]
    # Persist the admin id so local tooling can authenticate; per the
    # original note the API is only reachable locally, so writing it to
    # disk is considered safe.  Use a context manager so the handle is
    # closed even if the write fails.
    with open('/www/xseclab.com/termite/.sqlmapadminid', 'w') as file_object:
        file_object.write(DataStore.admin_id)

    logger.info("Running REST-JSON API server at '%s:%d'.." % (host, port))
    logger.info("Admin ID: %s" % DataStore.admin_id)
    logger.debug("IPC database: %s" % Database.filepath)

    # Initialize IPC database
    DataStore.current_db = Database()
    DataStore.current_db.connect()
    DataStore.current_db.init()

    # Run RESTful API
    try:
        if adapter == "gevent":
            from gevent import monkey
            monkey.patch_all()
        elif adapter == "eventlet":
            import eventlet
            eventlet.monkey_patch()
        logger.debug("Using adapter '%s' to run bottle" % adapter)
        run(host=host, port=port, quiet=True, debug=False, server=adapter)
    except socket.error as ex:
        # 'as' form is valid on Python 2.6+ and 3.x (the original comma
        # form is Python-2 only).
        if "already in use" in getSafeExString(ex):
            logger.error("Address already in use ('%s:%s')" % (host, port))
        else:
            raise
Beispiel #8
0
def setup(url=None, optional=False):
    """Initialise the oslo.messaging layer.

    - url : optional transport URL; 'fake://' URLs trigger time patching.
    - optional : when True, an unset transport URL is tolerated (an
      explicitly configured-but-invalid URL is still re-raised).

    Populates the module-level TRANSPORT and NOTIFIER singletons.
    """
    global TRANSPORT, NOTIFIER

    if url and url.startswith("fake://"):
        # NOTE(sileht): oslo.messaging fake driver uses time.sleep
        # for task switch, so we need to monkey_patch it
        eventlet.monkey_patch(time=True)

    # Create the transport only once; subsequent calls reuse it.
    if not TRANSPORT:
        oslo.messaging.set_transport_defaults('heat')
        exmods = ['heat.common.exception']
        try:
            TRANSPORT = oslo.messaging.get_transport(
                cfg.CONF, url, allowed_remote_exmods=exmods, aliases=_ALIASES)
        except oslo.messaging.InvalidTransportURL as e:
            TRANSPORT = None
            if not optional or e.url:
                # NOTE(sileht): oslo.messaging is configured but unloadable
                # so reraise the exception
                raise

    # Lazily build the notifier once a transport is available.
    if not NOTIFIER and TRANSPORT:
        serializer = RequestContextSerializer(JsonPayloadSerializer())
        NOTIFIER = oslo.messaging.Notifier(TRANSPORT, serializer=serializer)
Beispiel #9
0
def main():
    """Entry point for the Linux bridge quantum agent (RPC variant).

    Builds the physical-network -> interface map from configuration and
    runs the agent daemon loop; exits with status 1 on a bad mapping.
    """
    eventlet.monkey_patch()
    cfg.CONF(args=sys.argv, project='quantum')

    # (TODO) gary - swap with common logging
    logging_config.setup_logging(cfg.CONF)

    interface_mappings = {}
    for mapping in cfg.CONF.LINUX_BRIDGE.physical_interface_mappings:
        try:
            physical_network, physical_interface = mapping.split(':')
            interface_mappings[physical_network] = physical_interface
            # Pass format args lazily so the message is only rendered
            # when the DEBUG level is actually enabled.
            LOG.debug("physical network %s mapped to physical interface %s",
                      physical_network, physical_interface)
        except ValueError as ex:
            LOG.error("Invalid physical interface mapping: %s - %s. "
                      "Agent terminated!", mapping, ex)
            sys.exit(1)

    polling_interval = cfg.CONF.AGENT.polling_interval
    root_helper = cfg.CONF.AGENT.root_helper
    plugin = LinuxBridgeQuantumAgentRPC(interface_mappings,
                                        polling_interval,
                                        root_helper)
    LOG.info("Agent initialized successfully, now running... ")
    plugin.daemon_loop()
    sys.exit(0)
Beispiel #10
0
def main():
    """Main method for cleaning up network namespaces.

    This method will make two passes checking for namespaces to delete. The
    process will identify candidates, sleep, and call garbage collect. The
    garbage collection will re-verify that the namespace meets the criteria for
    deletion (ie it is empty). The period of sleep and the 2nd pass allow
    time for the namespace state to settle, so that the check prior deletion
    will re-confirm the namespace is empty.

    The utility is designed to clean-up after the forced or unexpected
    termination of Quantum agents.

    The --force flag should only be used as part of the cleanup of a devstack
    installation as it will blindly purge namespaces and their devices. This
    option also kills any lingering DHCP instances.
    """
    eventlet.monkey_patch()

    conf = setup_conf()
    conf(sys.argv)

    # First pass: collect the namespaces that currently qualify for removal.
    candidates = []
    for ns in ip_lib.IPWrapper.get_namespaces(conf.root_helper):
        if eligible_for_deletion(conf, ns, conf.force):
            candidates.append(ns)

    if not candidates:
        return

    # Let namespace state settle before the destructive second pass.
    eventlet.sleep(2)

    for namespace in candidates:
        destroy_namespace(conf, namespace, conf.force)
Beispiel #11
0
    def _open(self, scheme='mongodb://'):
        """Open the MongoDB connection and return the database handle.

        Raises VersionMismatch for servers older than 1.3, or older than
        2.2 when TTL indexes are requested.
        """
        hostname, dbname, options = self._parse_uri(scheme=scheme)

        client_opts = self._prepare_client_options(options)
        client_opts['host'] = hostname

        # Green-patch the stdlib to match the detected async environment.
        environment = _detect_environment()
        if environment == 'gevent':
            from gevent import monkey
            monkey.patch_all()
        elif environment == 'eventlet':
            from eventlet import monkey_patch
            monkey_patch()

        connection = MongoClient(**client_opts)
        database = connection[dbname]

        version_str = connection.server_info()['version']
        version = tuple(int(part) for part in version_str.split('.'))

        if version < (1, 3):
            raise VersionMismatch(E_SERVER_VERSION.format(version_str))
        if self.ttl and version < (2, 2):
            raise VersionMismatch(E_NO_TTL_INDEXES.format(version_str))

        return database
Beispiel #12
0
def _monkey_patch():
    """Green-patch os/select/socket/time; keep native threads when the
    debugger flag is present (debuggers rely on real threads)."""
    eventlet.monkey_patch(
        os=True,
        select=True,
        socket=True,
        thread='--use-debugger' not in sys.argv,
        time=True)
Beispiel #13
0
def prepare_service(argv=None):
    """Prepare the gringotts service: green-patching, i18n, RPC/log
    defaults, config parsing, and registration of service submodules.

    - argv : full argument vector; defaults to sys.argv.
    """
    eventlet.monkey_patch()
    gettextutils.install('gringotts', lazy=False)
    # Override the default control_exchange, default is 'openstack'
    rpc.set_defaults(control_exchange='gringotts')
    # Quieten chatty third-party libraries by default.
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'sqlalchemy=WARN',
                                         'keystoneclient=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    if argv is None:
        argv = sys.argv
    cfg.CONF(argv[1:], project='gringotts')
    log.setup('gringotts')

    # NOTE(suo): Import services submodules so their methods register.
    # Using `from gringotts.services import *` would cause a SyntaxWarning,
    # so every submodule is imported by name instead.
    from gringotts import services
    for m in services.SUBMODULES:
        importutils.import_module("gringotts.services.%s" % m)
    LOG.warn('Loaded resources: %s' % services.RESOURCE_GET_MAP.keys())
Beispiel #14
0
def main():
    """Start the neutron API (WSGI) and RPC services in one process.

    Both run on a shared green pool and are linked so that when either
    dies the other is killed too.  Exits via sys.exit on config or
    runtime errors; a KeyboardInterrupt shuts down quietly.
    """
    eventlet.monkey_patch()

    # the configuration will be read into the cfg.CONF global data structure
    config.parse(sys.argv[1:])
    if not cfg.CONF.config_file:
        sys.exit(
            _(
                "ERROR: Unable to find configuration file via the default"
                " search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and"
                " the '--config-file' option!"
            )
        )
    try:
        pool = eventlet.GreenPool()

        neutron_api = service.serve_wsgi(service.NeutronApiService)
        api_thread = pool.spawn(neutron_api.wait)

        try:
            neutron_rpc = service.serve_rpc()
        except NotImplementedError:
            LOG.info(_("RPC was already started in parent process by plugin."))
        else:
            rpc_thread = pool.spawn(neutron_rpc.wait)

            # api and rpc should die together.  When one dies, kill the other.
            rpc_thread.link(lambda gt: api_thread.kill())
            api_thread.link(lambda gt: rpc_thread.kill())

        pool.waitall()
    except KeyboardInterrupt:
        pass
    except RuntimeError as e:
        sys.exit(_("ERROR: %s") % e)
def main():
    """Entry point for the Linux bridge neutron agent (with MTU hack).

    Exits with status 1 when the interface mappings cannot be parsed;
    otherwise runs the agent daemon loop until it exits.
    """
    eventlet.monkey_patch()
    cfg.CONF(project='neutron')

    # fix-neutron-agent-for-mtu-config hack
    # (registers interface OPTS so network_device_mtu is readable below).
    cfg.CONF.register_opts(interface.OPTS)
    logging_config.setup_logging(cfg.CONF)
    LOG.info(_("network_device_mtu: %s"), str(cfg.CONF.network_device_mtu))
    try:
        interface_mappings = q_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_("Parsing physical_interface_mappings failed: %s."
                    " Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_("Interface mappings: %s"), interface_mappings)

    polling_interval = cfg.CONF.AGENT.polling_interval
    root_helper = cfg.CONF.AGENT.root_helper
    agent = LinuxBridgeNeutronAgentRPC(interface_mappings,
                                       polling_interval,
                                       root_helper)
    LOG.info(_("Agent initialized successfully, now running... "))
    agent.daemon_loop()
    sys.exit(0)
def main():
    """Run the neutron network-metadata proxy daemon."""
    eventlet.monkey_patch()
    cli_opts = [
        cfg.StrOpt('network_id'),
        cfg.StrOpt('router_id'),
        cfg.StrOpt('pid_file'),
        cfg.BoolOpt('daemonize', default=True),
        cfg.IntOpt('metadata_port',
                   default=9697,
                   help=_("TCP Port to listen for metadata server "
                          "requests.")),
    ]

    cfg.CONF.register_cli_opts(cli_opts)
    # Don't get the default configuration file
    cfg.CONF(project='neutron', default_config_files=[])
    config.setup_logging(cfg.CONF)
    utils.log_opt_values(LOG)

    daemon = ProxyDaemon(cfg.CONF.pid_file,
                         cfg.CONF.metadata_port,
                         network_id=cfg.CONF.network_id,
                         router_id=cfg.CONF.router_id)

    # Either detach into the background or serve in the foreground.
    if not cfg.CONF.daemonize:
        daemon.run()
    else:
        daemon.start()
Beispiel #17
0
def init():
    """Bootstrap the interpreter: green-patch the stdlib, evaluate the
    bundled s-expression bootstrap files, and prune the imperative loop
    constructs from the global language tables."""
    import eventlet

    def eval_from_file(path_obj):
        """Evaluate sexpression in given file.
        """
        with path_obj.open() as fobj:
            expr = fobj.read()
        eval_sexp_str(expr)

    # os patching is skipped on Windows -- presumably problematic there;
    # TODO confirm against eventlet's platform support.
    if platform().lower().startswith('win'):
        eventlet.monkey_patch(os=False)
    else:
        eventlet.monkey_patch()
    expr_path = Path(__file__).absolute().parents[1] / 'sexpressions'
    eval_from_file(expr_path / 'main.expr')
    # Interpreter-specific bootstrap code (CPython vs PyPy).
    if not IS_PYPY:
        eval_from_file(expr_path / 'cpython.expr')
    else:
        eval_from_file(expr_path / 'pypy.expr')

    eval_from_file(expr_path / 'del_hidden.expr')

    # Remove imperative looping constructs from the language tables.
    for syntax in {'for', 'each', 'while', 'break', 'continue'}:
        del syntax_table[syntax]
        del global_env[syntax]
        del global_scope[syntax]

    # Make the current working directory importable.
    sys.path.append(os.getcwd())
def _patch_eventlet():
    """Green-patch the stdlib; optionally turn on eventlet's hub
    blocking detection (controlled by the EVENTLET_NOBLOCK env var)."""
    import eventlet
    import eventlet.debug
    eventlet.monkey_patch()
    blocking_timeout = int(os.environ.get('EVENTLET_NOBLOCK', 0))
    if blocking_timeout:
        eventlet.debug.hub_blocking_detection(blocking_timeout)
Beispiel #19
0
def main():
    """Run the quantum network-metadata proxy daemon."""
    eventlet.monkey_patch()
    cli_opts = [
        cfg.StrOpt('network_id'),
        cfg.StrOpt('router_id'),
        cfg.StrOpt('pid_file'),
        cfg.BoolOpt('daemonize', default=True),
        cfg.IntOpt('metadata_port',
                   default=9697,
                   help="TCP Port to listen for metadata server requests."),
    ]

    cfg.CONF.register_cli_opts(cli_opts)
    cfg.CONF(project='quantum')
    config.setup_logging(cfg.CONF)

    daemon = ProxyDaemon(cfg.CONF.pid_file,
                         cfg.CONF.metadata_port,
                         network_id=cfg.CONF.network_id,
                         router_id=cfg.CONF.router_id)

    # Either detach into the background or serve in the foreground.
    if not cfg.CONF.daemonize:
        daemon.run()
    else:
        daemon.start()
def main():
    """Entry point for the Ryu OpenFlow OVS neutron agent.

    Exits with status 1 when the Ryu REST API is unreachable during
    initialisation; otherwise runs the agent daemon loop.
    """
    eventlet.monkey_patch()
    cfg.CONF(project='neutron')

    logging_config.setup_logging(cfg.CONF)

    integ_br = cfg.CONF.OVS.integration_bridge
    polling_interval = cfg.CONF.AGENT.polling_interval
    root_helper = cfg.CONF.AGENT.root_helper

    tunnel_ip = _get_tunnel_ip()
    LOG.debug(_('tunnel_ip %s'), tunnel_ip)
    ovsdb_port = cfg.CONF.OVS.ovsdb_port
    LOG.debug(_('ovsdb_port %s'), ovsdb_port)
    ovsdb_ip = _get_ovsdb_ip()
    LOG.debug(_('ovsdb_ip %s'), ovsdb_ip)
    try:
        agent = OVSNeutronOFPRyuAgent(integ_br, tunnel_ip, ovsdb_ip,
                                      ovsdb_port, polling_interval,
                                      root_helper)
    except httplib.HTTPException as e:
        LOG.error(_("Initialization failed: %s"), e)
        sys.exit(1)

    LOG.info(_("Ryu initialization on the node is done. "
               "Agent initialized successfully, now running..."))
    agent.daemon_loop()
    sys.exit(0)
def main():
    """Run the standalone metadata proxy: one WSGI listener per network.

    Reads a JSON config file mapping network ids to
    {'listen_port': ..., 'ip_instance_map': ...}, optionally daemonizes,
    then serves every listener from one green pool until all exit.

    Raises SystemError when the config file cannot be opened or parsed.
    """
    eventlet.monkey_patch()

    parser = argparse.ArgumentParser()
    parser.add_argument("-D", "--no-daemon", help="don't daemonize",
                        action="store_false", dest='daemonize', default=True)
    parser.add_argument("config_file", help="Proxy configuration file")
    args = parser.parse_args()

    try:
        # Use a context manager so the handle is closed promptly instead
        # of being leaked (the original used json.load(open(...))).
        with open(args.config_file) as config_fp:
            config_dict = json.load(config_fp)
    except IOError:
        raise SystemError('Unable to open config file at %s.' %
                          args.config_file)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any parse error is still reported.
        raise SystemError('Unable to parse config file at %s.' %
                          args.config_file)

    if args.daemonize:
        daemonize()

    pool = eventlet.GreenPool(1000)

    tenant_id = config_dict.pop('tenant_id')
    for network_id, config in config_dict.items():
        app = NetworkMetadataProxyHandler(tenant_id,
                                          network_id,
                                          config['ip_instance_map'])
        socket = eventlet.listen(('127.0.0.1', config['listen_port']),
                                 backlog=128)
        pool.spawn_n(eventlet.wsgi.server, socket, app, custom_pool=pool)

    pool.waitall()
Beispiel #22
0
def get_realtime_bid_ask(code, index=0, proxy=None, timeout=5):
    """Fetch the realtime bid/ask quote for a Hong Kong stock from Sina.

    - code : numeric HK stock code (zero-padded to five digits).
    - index : data source selector; only source 0 is implemented.
    - proxy : optional 'http://...'/'https://...' proxy URL.
    - timeout : request timeout in seconds (also enforced via eventlet).

    Returns (timestamp, bid, ask) or None on any failure.
    """
    index %= 2
    if not proxy:
        proxies = None
    elif proxy.startswith("https"):
        proxies = {"https": proxy}
    else:
        proxies = {"http": proxy}
    if index != 0:
        # Only data source 0 is implemented.
        return None

    params = {"list": "rt_hk%05d" % code}
    headers = {"Referer": "http://stock.finance.sina.com.cn/hkstock/quotes/%05d.html" % code}
    try:
        # Patch only the socket module so eventlet.Timeout can interrupt
        # the blocking HTTP request.
        eventlet.monkey_patch(all=False, os=False, select=False, socket=True, thread=False, time=False)
        with eventlet.Timeout(timeout):
            req = requests.get("http://hq.sinajs.cn/", params=params, headers=headers, proxies=proxies, timeout=timeout)
    except BaseException:
        # Deliberately broad: eventlet.Timeout derives from BaseException,
        # so 'except Exception' would let the timeout escape.
        return None
    if not req or req.status_code != requests.codes.ok:
        return None
    try:
        tokens = req.text.split(",")
        return (datetime.now().timestamp(), float(tokens[9]), float(tokens[10]))
    except (IndexError, ValueError):
        # Malformed or short payload: report "no quote" rather than raising.
        return None
Beispiel #23
0
def main():
    """Start a do-nothing daemon used by functional tests: it writes its
    pid file, stays detached in the background, and sleeps forever."""

    class SimpleDaemon(daemon.Daemon):
        """The purpose of this daemon is to serve as an example, and also as
        a dummy daemon, which can be invoked by functional testing, it
        does nothing but setting the pid file, and staying detached in the
        background.
        """

        def run(self):
            # Idle forever; green sleep keeps the hub responsive.
            while True:
                eventlet.sleep(10)

    eventlet.monkey_patch()
    opts = [
        cfg.StrOpt('uuid',
                   help=_('uuid provided from the command line '
                          'so external_process can track us via /proc/'
                          'cmdline interface.'),
                   required=True),
        cfg.StrOpt('pid_file',
                   help=_('Location of pid file of this process.'),
                   required=True)
    ]

    cfg.CONF.register_cli_opts(opts)
    # Don't get the default configuration file
    cfg.CONF(project='neutron', default_config_files=[])
    simple_daemon = SimpleDaemon(cfg.CONF.pid_file,
                                 uuid=cfg.CONF.uuid)
    simple_daemon.start()
Beispiel #24
0
    def setup(cls):
        """Prepare the eventlet worker: verify the library version,
        green-patch the stdlib (leaving `os` unpatched) and patch
        sendfile support.

        Raises RuntimeError when eventlet is older than 0.9.7.
        """
        import eventlet

        if eventlet.version_info < (0, 9, 7):
            raise RuntimeError("You need eventlet >= 0.9.7")
        eventlet.monkey_patch(os=False)
        patch_sendfile()
Beispiel #25
0
def _kombu_mongo_open(self, scheme='mongodb://'):
    """Patched kombu Mongo transport open: connect and store/return the
    database handle.

    Raises NotImplementedError when the server is older than MongoDB 1.3.
    """
    hostname, dbname, options = self._parse_uri(scheme=scheme)
    # This option is intentionally discarded before connecting.
    options.pop('auto_start_request', None)

    # Green-patch the stdlib to match the detected async environment.
    environment = _detect_environment()
    if environment == 'gevent':
        from gevent import monkey
        monkey.patch_all()
    elif environment == 'eventlet':
        from eventlet import monkey_patch
        monkey_patch()

    connection = MongoClient(
        host=hostname, ssl=options['ssl'],
        connectTimeoutMS=options['connectTimeoutMS'],
    )
    database = connection[dbname]

    server_version = connection.server_info()['version']
    major_minor = tuple(int(part) for part in server_version.split('.')[:2])
    if major_minor < (1, 3):
        raise NotImplementedError(
            'Kombu requires MongoDB version 1.3+ (server is {0})'.format(
                server_version))

    self._create_broadcast(database, options)

    self._client = database
Beispiel #26
0
def initialize(extra_opts=None, pre_logging=None):
    """Common process bootstrap: i18n, green-patching, config and logging.

    - extra_opts : optional extra CLI options to register on the config.
    - pre_logging : optional callable invoked with the config before
      logging is set up.

    Returns the populated config object so other scripts can use it.
    """
    # Initialize localization support (the underscore character).

    import gettext
    gettext.install('transformer', unicode=1)  # NOTE: Python-2 signature

    # Apply whole eventlet.monkey_patch excluding 'thread' module.
    # Decision for 'thread' module patching will be made
    # after debug_utils is set up.
    import eventlet
    eventlet.monkey_patch(all=True, thread=False)

    # Import only the modules necessary to initialize logging and determine if
    # debug_utils are enabled.
    import sys
    from transformer.common import cfg
    from transformer.openstack.common import log as logging

    conf = cfg.CONF
    if extra_opts:
        conf.register_cli_opts(extra_opts)

    cfg.parse_args(sys.argv)
    if pre_logging:
        pre_logging(conf)

    logging.setup(None)

    # Initialize Transformer database.
#     from transformer.db import get_db_api
#     get_db_api().configure_db(conf)

    return conf  # May be used by other scripts
Beispiel #27
0
def server(host=RESTAPI_DEFAULT_ADDRESS, port=RESTAPI_DEFAULT_PORT, server_name="wsgiref"):
    """
    REST-JSON API server

    Serves the bottle application on host:port with the chosen backend
    ('wsgiref', 'gevent' or 'eventlet') and blocks until it exits.
    """
    DataStore.admin_id = hexencode(os.urandom(16))
    Database.filepath = tempfile.mkstemp(prefix="sqlmapipc-", text=False)[1]

    logger.info("Running REST-JSON API server at '%s:%d'.." % (host, port))
    logger.info("Admin ID: %s" % DataStore.admin_id)
    logger.debug("IPC database: %s" % Database.filepath)

    # Initialize IPC database
    DataStore.current_db = Database()
    DataStore.current_db.connect()
    DataStore.current_db.init()

    # Run RESTful API
    try:
        if server_name == "gevent":
            from gevent import monkey

            monkey.patch_all()
        elif server_name == "eventlet":
            import eventlet

            eventlet.monkey_patch()
        # Message wording matches the sibling implementation of this
        # function elsewhere in the codebase.
        logger.debug("Using adapter '{0}' to run bottle".format(server_name))
        run(host=host, port=port, quiet=True, debug=False, server=server_name)
    except socket.error as ex:
        # 'as' form is valid on Python 2.6+ and 3.x (the original comma
        # form is Python-2 only).
        if "already in use" in getSafeExString(ex):
            logger.error("Address already in use ('%s:%s')" % (host, port))
        else:
            raise
Beispiel #28
0
def main():
    """Entry point for the Linux bridge quantum agent (RPC or DB mode).

    Exits with status 1 when the interface mappings cannot be parsed;
    otherwise runs the chosen agent's daemon loop until it exits.
    """
    eventlet.monkey_patch()
    cfg.CONF(args=sys.argv, project='quantum')

    # (TODO) gary - swap with common logging
    logging_config.setup_logging(cfg.CONF)

    try:
        interface_mappings = q_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_("Parsing physical_interface_mappings failed: %s."
                    " Agent terminated!"), e)
        sys.exit(1)
    # Pass the mapping lazily instead of eagerly %-formatting it,
    # matching the error call above and skipping work when INFO is off.
    LOG.info(_("Interface mappings: %s"), interface_mappings)

    polling_interval = cfg.CONF.AGENT.polling_interval
    reconnect_interval = cfg.CONF.DATABASE.reconnect_interval
    root_helper = cfg.CONF.AGENT.root_helper
    rpc = cfg.CONF.AGENT.rpc
    # RPC mode talks to the plugin over the message bus; DB mode polls
    # the database directly.
    if rpc:
        plugin = LinuxBridgeQuantumAgentRPC(interface_mappings,
                                            polling_interval,
                                            root_helper)
    else:
        db_connection_url = cfg.CONF.DATABASE.sql_connection
        plugin = LinuxBridgeQuantumAgentDB(interface_mappings,
                                           polling_interval,
                                           reconnect_interval,
                                           root_helper,
                                           db_connection_url)
    LOG.info("Agent initialized successfully, now running... ")
    plugin.daemon_loop()
    sys.exit(0)
Beispiel #29
0
def main():
    """Entry point for the Synergy service: parse config, configure
    loggers, then start the server and block until it finishes."""
    try:
        eventlet.monkey_patch(os=False)

        # the configuration will be into the cfg.CONF global data structure
        config.parseArgs(args=sys.argv[1:],
                         default_config_files=["/etc/synergy/synergy.conf"])

        if not cfg.CONF.config_file:
            sys.exit("ERROR: Unable to find configuration file via the "
                     "default search paths (~/.synergy/, ~/, /etc/synergy/"
                     ", /etc/) and the '--config-file' option!")

        setLogger(name="synergy")
        setLogger(name="oslo.messaging._drivers")

        global LOG

        LOG = logging.getLogger(__name__)
        LOG.info("Starting Synergy...")

        server = Synergy()
        server.start()

        LOG.info("Synergy started")
    except Exception as ex:
        # NOTE(review): if the failure happens before LOG is assigned
        # above, this relies on the module-level LOG being usable --
        # confirm it is initialised at import time.
        LOG.error("unrecoverable error: %s" % ex)
Beispiel #30
0
def eventlet_monkey_patch():
    """Apply eventlet's monkey patch.

    This call should be the first call in application. It's safe to call
    monkey_patch multiple times.
    """
    # Which stdlib modules get patched is driven by the module-level
    # EVENTLET_MONKEY_PATCH_MODULES mapping.
    eventlet.monkey_patch(**EVENTLET_MONKEY_PATCH_MODULES)
Beispiel #31
0
    'settings',
    'application',
    'Middleware',
    'dispatch',
    'version',
    'get_services',
    'get_service_dir',
]
version = '0.0.1'

import sys
# Python-2-only hack: reload exposes the removed setdefaultencoding so
# the process-wide default string encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')  #@UndefinedVariable

import eventlet
# Green-patch the stdlib at import time, before any sockets are created.
eventlet.monkey_patch()


class ServosError(Exception):
    """Base exception for servos-specific errors."""
    pass


class Middleware(object):
    """Base class for middleware: wraps an application together with the
    framework settings."""

    # Relative position in the middleware chain -- presumably lower ORDER
    # runs earlier; confirm against the dispatcher.
    ORDER = 500

    def __init__(self, application, settings):
        self.application = application
        self.settings = settings


import servos.core.dispatch as dispatch
from os.path import join as path_join
from unittest import main
from uuid import uuid4

from eventlet import GreenPool, Timeout
import eventlet
from sqlite3 import connect
from swiftclient import client

from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.utils import hash_path, readconf
from test.probe.common import kill_nonprimary_server, \
    kill_server, ReplProbeTest, start_server

# Patch only the socket module; everything else stays native.
eventlet.monkey_patch(all=False, socket=True)


def get_db_file_path(obj_dir):
    """Return the path of the lexically-last file in *obj_dir* whose name
    ends in 'db', or None when there is none."""
    db_names = (name for name in sorted(listdir(obj_dir), reverse=True)
                if name.endswith('db'))
    match = next(db_names, None)
    if match is not None:
        return path_join(obj_dir, match)


class TestContainerFailures(ReplProbeTest):
    def test_one_node_fails(self):
        # Create container1 with a unique name and locate its ring nodes.
        container1 = 'container-%s' % uuid4()
        cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
        client.put_container(self.url, self.token, container1)
Beispiel #33
0
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
:mod:`Ironic.tests` -- ironic Unittests
=====================================================

.. automodule:: ironic.tests
   :platform: Unix
"""

# TODO(deva): move eventlet imports to ironic.__init__ once we move to PBR

import eventlet

eventlet.monkey_patch(os=False)

# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
# Install a no-op gettext '_' so translated strings pass through
# unchanged during tests.
setattr(__builtin__, '_', lambda x: x)
Beispiel #34
0
 def tearDownClass(cls):
     """Class-level teardown: invoke monkey_patch with every module flag
     off, so no additional stdlib modules get patched."""
     eventlet.monkey_patch(os=False,
                           select=False,
                           socket=False,
                           thread=False,
                           time=False)
import hashlib
import base64

import six
import eventlet
import easyimap
from flanker import mime

from st2reactor.sensor.base import PollingSensor

__all__ = ['IMAPSensor']

# Patch every supported stdlib module so IMAP polling cooperates with
# green threads.
eventlet.monkey_patch(os=True,
                      select=True,
                      socket=True,
                      thread=True,
                      time=True)

# Sensor configuration defaults.
DEFAULT_DOWNLOAD_ATTACHMENTS = False
DEFAULT_MAX_ATTACHMENT_SIZE = 1024
DEFAULT_ATTACHMENT_DATASTORE_TTL = 1800


class IMAPSensor(PollingSensor):
    def __init__(self, sensor_service, config=None, poll_interval=10):
        """Create the IMAP polling sensor.

        - sensor_service : st2 sensor service (used here for logging).
        - config : optional sensor configuration.
        - poll_interval : seconds between polls (default 10).
        """
        super(IMAPSensor, self).__init__(sensor_service=sensor_service,
                                         config=config,
                                         poll_interval=poll_interval)

        # Trigger reference dispatched for incoming messages.
        self._trigger = 'email.imap.message'
        self._logger = self._sensor_service.get_logger(__name__)
Beispiel #36
0
# noinspection PyUnresolvedReferences
from oslo_service import service
# noinspection PyUnresolvedReferences
from oslo_service.periodic_task import PeriodicTasks

# noinspection PyUnresolvedReferences
from cephclient import wrapper

from ceph_manager.monitor import Monitor
from ceph_manager import constants

from ceph_manager.i18n import _LI
from ceph_manager.i18n import _LW
from retrying import retry

eventlet.monkey_patch(all=True)

CONF = cfg.CONF
CONF.register_opts([
    cfg.StrOpt('sysinv_api_bind_ip',
               default='0.0.0.0',
               help='IP for the Ceph Manager server to bind to')
])
CONF.logging_default_format_string = ('%(asctime)s.%(msecs)03d %(process)d '
                                      '%(levelname)s %(name)s [-] %(message)s')
logging.register_options(CONF)
logging.setup(CONF, __name__)
LOG = logging.getLogger(__name__)
CONF.rpc_backend = 'rabbit'

import eventlet
from eventlet import tpool
eventlet.monkey_patch()  # noqa

from os_win import utilsfactory
from os_win.utils.compute import _clusapi_utils
from os_win.utils.compute import clusterutils
import wmi

import argparse
import json
import time
import logging

parser = argparse.ArgumentParser()
parser.add_argument('--log-file', required=False)

args = parser.parse_args()
LOG = logging.getLogger()


def setup_logging():
    """Build a DEBUG-level stream handler with a timestamped log format.

    NOTE(review): in the code visible here the handler is never attached
    to any logger (no addHandler call) -- the snippet looks truncated;
    confirm against the full script.
    """
    log_level = logging.DEBUG

    handler = logging.StreamHandler()
    handler.setLevel(log_level)

    log_fmt = '[%(asctime)s] %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_fmt)
    handler.setFormatter(formatter)
Beispiel #38
0
import eventlet
eventlet.monkey_patch(socket=False)
from flask import Flask
from flask_socketio import SocketIO
from flask_cors import CORS

app = Flask(__name__, static_folder='image', static_url_path='/image')
CORS(app, supports_credentials=True)
UPLOAD_FOLDER = 'image/'
STATIC_FOLDER = 'https://keelung-eat.herokuapp.com/image/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['STATIC_FOLDER'] = STATIC_FOLDER

socketio = SocketIO(app, cors_allowed_origins='*')

import KeelungEat.http_route
import KeelungEat.ws_route
import KeelungEat.background_task
Beispiel #39
0
import eventlet

eventlet.monkey_patch(thread=False)

import sys
import workers

from config import Config
from database import (connect_mongo, ImageModel, create_from_json)

from flask import Flask
from flask_cors import CORS
from flask_socketio import SocketIO
from werkzeug.contrib.fixers import ProxyFix

from celery import Celery

from .watcher import run_watcher
from .api import blueprint as api
from .util import query_util
from .authentication import login_manager
from .sockets import socketio

import threading
import requests
import logging
import time
import os

connect_mongo('webserver')
import time
from threading import Thread, Event
from flask import Flask, flash, render_template, request, redirect, Response
from flask_socketio import SocketIO, join_room, emit, send
import eventlet 
import picommon
from pistrobe import PiStrobe
from piholder_web import heater_web
#from camera import Camera
from camera_base import CameraBase
from camera_pi import Camera
#from vimba_pi import Camera
from piflow_web import flow_web

eventlet.monkey_patch( os=True, select=True, socket=True, thread=False, time=True, psycopg=True )
#eventlet.monkey_patch()

exit_event = Event()

picommon.spi_init( 0, 2, 30000 )

debug_data = { 'update_count': 0 }

heaters_data = [ { 'status': '', 'temp_text': '', 'temp_c_actual': 0.0, 'temp_c_target': 0.0, 'pid_enabled': False,
                   'power_limit': 0, 'autotune_status': '', 'autotune_target_temp': 0.0, 'autotuning': False,
                   'stir_speed_text': '', 'stir_speed_target': 0, 'stir_enabled': False } for i in range(4) ]
heater1 = heater_web( 1, picommon.PORT_HEATER1 )
heater2 = heater_web( 2, picommon.PORT_HEATER2 )
heater3 = heater_web( 3, picommon.PORT_HEATER3 )
heater4 = heater_web( 4, picommon.PORT_HEATER4 )
heaters = [heater1, heater2, heater3, heater4]
Beispiel #41
0
from st2common.service_setup import deregister_service
from st2common.stream.listener import get_listener_if_set
from st2common.util.wsgi import shutdown_server_kill_pending_requests
from st2stream.signal_handlers import register_stream_signal_handlers
from st2stream import config

config.register_opts(ignore_errors=True)

from st2stream import app

__all__ = ["main"]

eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if "--use-debugger" in sys.argv else True,
    time=True,
)

LOG = logging.getLogger(__name__)
STREAM = "stream"

# How much time to give to the request in progress to finish in seconds before killing them
WSGI_SERVER_REQUEST_SHUTDOWN_TIME = 2


def _setup():
    capabilities = {
        "name": "stream",
        "listen_host": cfg.CONF.stream.host,
def skip_timeout(program, timeout):
    """Execute *program* through the shell, silently giving up after
    *timeout* seconds.

    The Timeout is created with exception=False, so when it fires the
    with-block is simply abandoned without raising. Note the fixed
    4-second sleep runs inside the timeout window, so the shell command
    only gets the remaining ``timeout - 4`` seconds.
    """
    eventlet.monkey_patch()
    deadline = eventlet.Timeout(timeout, False)
    with deadline:
        time.sleep(4)
        subprocess.call(program, shell=True)
Beispiel #43
0
Datei: demo3.py Projekt: Wjun0/im
# 1,协程打补丁,将IO操作变为异步
from eventlet import monkey_patch
monkey_patch()

import socketio
# 2,创建socketio服务器
sio = socketio.Server(async_model='eventlet')

# sio = socketio.AsyncServer()
# app = socketio.ASGIApp(sio)
# app = socketio.WSGIApp(sio,app)

# 3,创建应用,管理im服务器
app = socketio.Middleware(sio)

# 4,监听端口
import eventlet.wsgi
sock = eventlet.listen(('192.168.59.129', 9000))


@sio.event
def connect(sid, environ):
    """socket.io connect handler: logs the new session id and WSGI environ."""
    print('连接时自动调用')
    print('+++', sid, environ)


@sio.event
def disconnect(sid):
    """socket.io disconnect handler: logs the departing session id."""
    print('断开连接时调用', sid)

Beispiel #44
0
#coding:utf-8
import time
import eventlet  # import the eventlet module
eventlet.monkey_patch()  # required: patches blocking stdlib calls (incl. time.sleep)
with eventlet.Timeout(2, False):  # set a 2 second timeout; False = suppress the exception
    time.sleep(1)
    print('没有跳过这条输出')
print('跳过了输出')
Beispiel #45
0
# -*- coding:utf-8 -*-
"""
k线分钟线,5,10,60数据存储,不再批量更新
"""

import pandas as pd
import datetime
import json
import eventlet as et
import constant as ct
from utils.wapper_interface import gm_api as gm
from pymongo import MongoClient
from datetime import datetime
from datetime import timedelta
et.monkey_patch(os=False)


class KminDbCache():
    def __init__(self):
        """Create the wrapped market-data API client and a pool of 10 green threads."""
        # gm() is the wrapped gm_api interface from utils.wapper_interface.
        self.gm = gm()
        self.pool = et.GreenPool(10)

    def get_k_data(self, *args, **kwargs):
        """
              :param args: args 第一项肯定为code
              :param kwargs: ktype,startdate等肯定包含在kwargs中
              :return: 先读cache,如果最近时间大于请求时间,则返回,否则调用更新算法,只更新这一个code,更新数据库;
              默认为5分钟线数据,一个文件一存
              """
        collection_name = 'k_data_'
Beispiel #46
0
 def initialize_reactor(cls):
     """Monkey-patch the stdlib and lazily start the shared timer machinery.

     Idempotent with respect to the timers: the TimerManager, the green
     thread servicing timeouts and the new-timer Event are only created
     on the first call (guarded by cls._timers).
     """
     eventlet.monkey_patch()
     if not cls._timers:
         cls._timers = TimerManager()
         cls._timeout_watcher = eventlet.spawn(cls.service_timeouts)
         cls._new_timer = Event()
Beispiel #47
0
def run_gunicorn_eventlet():
    """Apply eventlet monkey patching the way gunicorn's EventletWorker
    does (os module left unpatched), then start gunicorn itself."""
    # this is taken from gunicorn EventletWorker.patch()
    import eventlet
    eventlet.monkey_patch(os=False)
    run_gunicorn()
Beispiel #48
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# noinspection PyBroadException
try:
    import eventlet

    eventlet.monkey_patch(all=True, thread=False)
except:
    pass

import argparse
import itertools
import logging
import logging.config
import os
import signal

import eventlet.wsgi
import sqlalchemy_utils
import yaml
from flask import Flask, request, g as flask_g
from flask_babel import get_locale, Babel
from flask_babel import gettext
from flask_cors import CORS
from flask_migrate import Migrate
from flask_restful import Api, abort
from flask_swagger_ui import get_swaggerui_blueprint

from limonero import CustomJSONEncoder as LimoneroJSONEncoder
from limonero.cache import cache
def main(upgrade_api_args):
    """Upgrade python source files against the API modify/delete dicts.

    - upgrade_api_args: dict with "args_file", "modify_dict" and
      "delete_dict" paths; a missing args_file or modify_dict aborts the
      run with exit(1).

    A directory input is processed in parallel (process or thread pool,
    selected by PROCESS_OR_THREAD); a single file is converted inline
    under a 30 second eventlet timeout.
    """
    if not upgrade_api_args.get("args_file", None):
        print(
            "\033[1;34mPlease set config file!! Default path is translate_src/conf/upgrade.conf\033[0m"
        )
        exit(1)
    if not upgrade_api_args.get("modify_dict", None):
        print(
            "\033[1;34mPlease set modify_dict file!! Default path is translate_src/dict/modify.dict\033[0m"
        )
        exit(1)

    upgrade_config_dict = load_config(upgrade_api_args["args_file"])
    # A directory input expands to a list of files; a single file stays a
    # plain path string (branched on below via isinstance).
    if not os.path.isfile(upgrade_config_dict["input_path"]):
        file_py_list = get_cur_file_list()
    else:
        file_py_list = upgrade_config_dict["input_path"]
    modify_dict = load_modify_dict(upgrade_api_args["modify_dict"])
    delete_list = load_delete_dict(upgrade_api_args["delete_dict"])
    delete_pattern = "|".join(delete_list)

    if isinstance(file_py_list, list):
        # NOTE(review): if PROCESS_OR_THREAD is neither value below,
        # `executor` is unbound and executor.shutdown() raises NameError.
        if PROCESS_OR_THREAD == "MULTI_PROCESS":
            executor = concurrent.futures.ProcessPoolExecutor(
                max_workers=MAX_WORKERS)
            future_list = []
            for path in file_py_list:
                future = executor.submit(transformer_file, upgrade_config_dict,
                                         path, modify_dict, True,
                                         delete_pattern)  # create the future instance
                future_list.append(future)

        elif PROCESS_OR_THREAD == "MULTI_THREAD":
            executor = concurrent.futures.ThreadPoolExecutor(
                max_workers=MAX_WORKERS)
            future_list = []
            for path in file_py_list:
                future = executor.submit(transformer_file, upgrade_config_dict,
                                         path, modify_dict, True,
                                         delete_pattern)  # create the future instance
                future_list.append(future)

        # shutdown() blocks until every submitted job has completed.
        executor.shutdown()
        for future in concurrent.futures.as_completed(future_list):
            if future.exception() is not None:
                print_info(
                    "\033[1;31m parallel error with future exception: %s \033[0m"
                    % (future.exception()))
            else:
                future.result()
        print_info("\033[1;33m all done.\033[0m")

    elif file_py_list is None:
        print_info(
            "\033[1;31mInput error: input must be a directory or a python file\033[0m"
        )
    else:
        try:
            eventlet.monkey_patch()
            # exception=False: when the 30s timeout fires the block is
            # silently abandoned rather than raising.
            with eventlet.Timeout(30, False):
                try:
                    transformer_file(upgrade_config_dict,
                                     upgrade_config_dict["input_path"],
                                     modify_dict,
                                     is_dir=False,
                                     delete_pattern=delete_pattern)
                except Exception as e:
                    print_info(
                        "\033[1;31m %s upgrade error, please check file, use a replacement policy and convert it manually, with error %s. \033[0m"
                        % (upgrade_config_dict["input_path"], e))
        except Exception as e:
            # NOTE(review): since the Timeout above is suppressed, a
            # timeout never propagates here; this handler only sees other
            # failures -- the "timeout" wording may be misleading.
            print_info(
                "\033[1;31m %s upgrade timeout, please check file, use a replacement policy and convert it manually, with error %s.\033[0m"
                % (upgrade_config_dict["input_path"], e))
Beispiel #50
0
 def patch(self):
     """Worker patch hook: select the eventlet hub, monkey-patch the
     stdlib, then apply the sendfile patch for green sockets."""
     hubs.use_hub()
     eventlet.monkey_patch()
     patch_sendfile()
Beispiel #51
0
import eventlet
import msgpack
import flask
from flask import (Flask, render_template, make_response,
                   copy_current_request_context, jsonify, request)
from flask_socketio import SocketIO
from jinja2 import Environment, FileSystemLoader, ChoiceLoader

from bowtie._component import Event, Component, COMPONENT_REGISTRY
from bowtie.pager import Pager
from bowtie.exceptions import (GridIndexError, NoSidebarError,
                               NotStatefulEvent, NoUnusedCellsError,
                               SpanOverlapError, SizeError, WebpackError,
                               YarnError)

eventlet.monkey_patch(time=True)

Route = namedtuple('Route', ['view', 'path', 'exact'])
_Import = namedtuple('_Import', ['module', 'component'])

_DIRECTORY = Path('build')
_WEBPACK = './node_modules/.bin/webpack'
_MIN_NODE_VERSION = 6, 11, 5


class Scheduler:
    """Run scheduled tasks."""
    def __init__(self, app, seconds, func):
        """Create a scheduled function."""
        self.app = app
        self.seconds = seconds
Beispiel #52
0
#!/bin/env python
import eventlet
eventlet.monkey_patch()  # Fix IO Block

from simple_app import create_app, socketio
from simple_app.main.schema import Schema

app = create_app(debug=True)

if __name__ == '__main__':
    Schema()
    socketio.run(app)
Beispiel #53
0
import eventlet

eventlet.monkey_patch(subprocess=True)
#from gevent import monkey
#monkey.patch_all(subprocess=True)

#import gevent

import os
import sys
import importlib
import static
import templates
import subprocess
import json
import random
import requests
import urlparse2
import time
import hmac
#import pylint_flask
import functools

import redis

from python_path import PythonPath

from cloudant.result import Result, ResultByKey
from cloudant.query import Query, QueryResult

#from AMQonlinestore.AMQ_Dba import cloudant_online_store
Beispiel #54
0
"""Run nameko services.  Given a python path to a module containing one or more
nameko services, will host and run them. By default this will try to find
classes that look like services (anything with nameko entrypoints), but a
specific service can be specified via ``nameko run module:ServiceClass``.  """

from __future__ import print_function

import eventlet
eventlet.monkey_patch()  # noqa (code before rest of imports)

import errno
import inspect
import logging
import logging.config
import os
import re
import signal
import sys

import yaml
from eventlet import backdoor
from nameko.constants import AMQP_URI_CONFIG_KEY
from nameko.exceptions import CommandError
from nameko.extensions import ENTRYPOINT_EXTENSIONS_ATTR
from nameko.runners import ServiceRunner
from nameko.cli.reloader import run_with_reloader

logger = logging.getLogger(__name__)

MISSING_MODULE_TEMPLATE = "^No module named '?{}'?$"
Beispiel #55
0
def _find_deleted_api(path, delete_pattern):
    """Return the first deleted-API name matched in *path*, or None.

    Reads the whole file and searches it with *delete_pattern* (a regex of
    alternatives built from the delete dict). Using a context manager
    closes the handle deterministically (the original leaked it).
    """
    with open(path, 'r') as source:
        content = source.readlines()
    match = re.search(delete_pattern, "\n".join(content))
    return match.group(0) if match else None


def main(upgrade_api_args):
    """Upgrade one python file, or a directory of files, against the API
    modify/delete dictionaries.

    - upgrade_api_args: dict with "args_file", "modify_dict" and
      "delete_dict" paths; a missing args_file or modify_dict aborts the
      run with exit(1).

    Files containing a deleted API are only reported; every other file is
    passed to transformer_file() under a 30 second eventlet timeout so a
    single pathological file cannot hang the whole upgrade.
    """
    if not upgrade_api_args.get("args_file", None):
        print(
            "\033[1;34mPlease set config file!! Default path is api_upgrade_src/conf/upgrade.conf\033[0m"
        )
        exit(1)
    if not upgrade_api_args.get("modify_dict", None):
        print(
            "\033[1;34mPlease set modify_dict file!! Default path is api_upgrade_src/dict/modify.dict\033[0m"
        )
        exit(1)

    upgrade_config_dict = load_config(upgrade_api_args["args_file"])
    # A directory input expands to a list of files; a single file stays a
    # plain path string (branched on below via isinstance).
    if not os.path.isfile(upgrade_config_dict["input_path"]):
        file_py_list = get_cur_file_list()
    else:
        file_py_list = upgrade_config_dict["input_path"]
    modify_dict = load_modify_dict(upgrade_api_args["modify_dict"])
    delete_list = load_delete_dict(upgrade_api_args["delete_dict"])
    delete_pattern = "|".join(delete_list)

    if isinstance(file_py_list, list):
        # Patch once, not per file: monkey_patch() is idempotent and the
        # original per-iteration calls were pure overhead.
        eventlet.monkey_patch()
        for path in file_py_list:
            delete_api = _find_deleted_api(path, delete_pattern)
            if delete_api:
                print_info(
                    "\033[1;31m %s API has been deleted, please check file %s, use a replacement policy and convert it manually\033[0m"
                    % (delete_api, path))
            else:
                try:
                    # exception=False: the timeout silently abandons the
                    # block instead of raising.
                    with eventlet.Timeout(30, False):
                        try:
                            transformer_file(upgrade_config_dict,
                                             path,
                                             modify_dict,
                                             is_dir=True)
                        except Exception as e:
                            print_info(
                                "\033[1;31m %s upgrade error, please check file, use a replacement policy and convert it manually, with error: %s. \033[0m"
                                % (path, e))
                except Exception:
                    # Narrowed from a bare except: report the failure
                    # without aborting the rest of the batch.
                    print_info(
                        "\033[1;31m %s upgrade timeout, please check file, use a replacement policy and convert it manually\033[0m"
                        % (path))
    elif file_py_list is None:
        print_info(
            "\033[1;31mInput error: input must be a directory or a python file\033[0m"
        )
    else:
        delete_api = _find_deleted_api(upgrade_config_dict["input_path"],
                                       delete_pattern)
        if delete_api:
            print_info(
                "\033[1;31m %s API has been deleted, please check file %s, use a replacement policy and convert it manually\033[0m"
                % (delete_api, upgrade_config_dict["input_path"]))
        else:
            try:
                eventlet.monkey_patch()
                with eventlet.Timeout(30, False):
                    try:
                        transformer_file(upgrade_config_dict,
                                         upgrade_config_dict["input_path"],
                                         modify_dict,
                                         is_dir=False)
                    except Exception:
                        # Narrowed from a bare except.
                        print_info(
                            "\033[1;31m %s upgrade error, please check file, use a replacement policy and convert it manually\033[0m"
                            % (upgrade_config_dict["input_path"]))
            except Exception:
                # Narrowed from a bare except.
                print_info(
                    "\033[1;31m %s upgrade timeout, please check file, use a replacement policy and convert it manually\033[0m"
                    % (upgrade_config_dict["input_path"]))
Beispiel #56
0
 def run_experiment(self):
     """Green-patch the socket and thread modules, open the experiment's
     sockets, then run the server loop."""
     eventlet.monkey_patch(socket=True, thread=True)
     self.init_socks()
     self.run_server()
Beispiel #57
0
#!/usr/bin/env python

import eventlet
eventlet.monkey_patch(socket=True, select=True, time=True)

import eventlet.wsgi
import socketio
import time
from flask import Flask, render_template

from bridge import Bridge
from conf import conf

sio = socketio.Server()
app = Flask(__name__)
msgs = []

dbw_enable = False


@sio.on('connect')
def connect(sid, environ):
    """socket.io connect handler: logs the new session id."""
    print("connect ", sid)


def send(topic, data):
    """Queue a (topic, data) message on the module-level ``msgs`` list.

    Direct emission was disabled in favor of buffering; the commented
    line below shows the original intent. (Removed the unused local
    ``s = 1`` left over in the original.)
    """
    msgs.append((topic, data))
    #sio.emit(topic, data=json.dumps(data), skip_sid=True)

Beispiel #58
0
#!/usr/bin/env python

import eventlet
from eventlet.green import urllib2

eventlet.monkey_patch(os=False, thread=False)

# URLs fetched concurrently by the GreenPool below.
urls = [
    "http://www.sina.com.cn",
    "http://baidu.com",
    "http://163.com",
]


def fetch(url):
    """Download *url* and return the response body (green urllib2, so the
    call yields cooperatively instead of blocking the process)."""
    return urllib2.urlopen(url).read()


pool = eventlet.GreenPool()

# imap() runs fetch() over urls on green threads and yields results in
# the same order as the inputs.
for body in pool.imap(fetch, urls):
    print("got body", len(body))
#!/usr/bin/env python
import eventlet
eventlet.monkey_patch(socket=True)
from flask import Flask, render_template, session, request, jsonify, url_for
from flask_socketio import SocketIO, emit, join_room, leave_room, \
    close_room, rooms, disconnect

from celery import Celery

#from webapp.blueprints.bptest2 import bptest2
#from webapp.blueprints.bptest2 import tasks

socketio = SocketIO()


CELERY_TASK_LIST = [
    'webapp.blueprints.bptest1.tasks',
    'webapp.blueprints.bptest2.tasks',
]


def create_celery_app(app=None):
    """
    Create a new Celery object and tie together the Celery config to the app's
    config. Wrap all tasks in the context of the application.
    :param app: Flask app
    :return: Celery app
    """
    app = app or create_app()

    celery = Celery(app.import_name, broker='redis://:devpassword@redis:6379/0',
Beispiel #60
0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import base64
import copy
import httplib
import netaddr
import threading
import time

import eventlet
eventlet.monkey_patch(thread=True)

from oslo.config import cfg
from six.moves import queue as Queue

from neutron.api.v2 import attributes
from neutron.common import log as call_log
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc