Example 1
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # Nova expects the token to be passed as a
        # query parameter of the GET request
        query = urlparse.urlparse(self.path).query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        if not token:
            # NoVNC uses its own convention of forwarding the token
            # from the request to a cookie header, so we should also
            # check for this behavior
            hcookie = self.headers.getheader('cookie')
            if hcookie:
                cookie = Cookie.SimpleCookie()
                cookie.load(hcookie)
                if 'token' in cookie:
                    token = cookie['token'].value

        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            raise exception.InvalidToken(token=token)

        self.msg(_('connect info: %s'), str(connect_info))
        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
                                                          'port': port})
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                        connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        raise Exception(_("Invalid Connection Info"))
                    tsock.recv(len(data))
                    break

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg(_("%(host)s:%(port)s: Target closed") %
                          {'host': host, 'port': port})
            raise
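The recurring comment above ("Reopen the eventlet hub to make sure we don't share an epoll fd with parent and/or siblings") is the reason use_hub() is called with no argument in these handlers. Below is a minimal sketch of that pattern in isolation, using only the public eventlet.hubs API and a POSIX os.fork(); it is not taken from any of the projects above.

import os

from eventlet import hubs


def child_main():
    # Discard the hub (and its epoll/poll fd) inherited from the parent
    # and let eventlet build a fresh, process-private one.
    hubs.use_hub()               # no argument: re-select the default hub
    hub = hubs.get_hub()         # instantiated lazily on first use
    print("child pid %d uses %s" % (os.getpid(), type(hub).__name__))


if __name__ == "__main__":
    hubs.get_hub()               # parent creates its hub first
    pid = os.fork()
    if pid == 0:
        child_main()
        os._exit(0)
    os.waitpid(pid, 0)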
Example 2
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # Zun expects the token to be passed as a
        # query parameter of the GET request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # From a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: https://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.ZunException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        uuid = urlparse.parse_qs(query).get("uuid", [""]).pop()
        exec_id = urlparse.parse_qs(query).get("exec_id", [""]).pop()

        ctx = context.get_admin_context(all_projects=True)

        if uuidutils.is_uuid_like(uuid):
            container = objects.Container.get_by_uuid(ctx, uuid)
        else:
            container = objects.Container.get_by_name(ctx, uuid)

        if exec_id:
            self._new_exec_client(container, token, uuid, exec_id)
        else:
            self._new_websocket_client(container, token, uuid)
Example 3
 def test_explicit_hub(self):
     oldhub = hubs.get_hub()
     try:
         hubs.use_hub(Foo)
         assert isinstance(hubs.get_hub(), Foo), hubs.get_hub()
     finally:
         hubs._threadlocal.hub = oldhub
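The test above swaps in a custom hub class and restores the old hub object through hubs._threadlocal. A small helper in the same spirit is sketched below; temporary_hub() and the Foo stand-in class are assumptions written for this note, not part of eventlet or of the quoted test suite.

import contextlib

from eventlet import hubs


@contextlib.contextmanager
def temporary_hub(hub_class):
    """Install hub_class for the duration of the block, then restore."""
    old_hub = hubs.get_hub()
    try:
        hubs.use_hub(hub_class)
        yield hubs.get_hub()
    finally:
        # Same trick as the test: put the previous hub object back directly.
        hubs._threadlocal.hub = old_hub


class Foo(object):
    """Stand-in for the test's Foo hub class."""


with temporary_hub(Foo) as hub:
    assert isinstance(hub, Foo)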
Example 4
File: all.py Project: rdw/Eventlet
def restart_hub():
    from eventlet import hubs

    hub = hubs.get_hub()
    hub.abort()
    hub_shortname = hub.__module__.split(".")[-1]
    hubs.use_hub(hub_shortname)
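A hedged usage sketch for restart_hub() above: the short module name derived from hub.__module__ (for example 'epolls' or 'poll') is accepted by use_hub(), so the replacement hub is of the same type as the one that was aborted. The assertions below are illustrative expectations added for this note, not part of the original project.

from eventlet import hubs

old_hub = hubs.get_hub()
restart_hub()                    # the function defined in the example above
new_hub = hubs.get_hub()
assert new_hub is not old_hub    # a fresh hub instance was created
assert type(new_hub) is type(old_hub)
print("restarted hub: %s" % type(new_hub).__name__)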
Example 7
def restart_hub():
    from eventlet import hubs
    hub = hubs.get_hub()
    hub_shortname = hub.__module__.split('.')[-1]
    # don't restart the pyevent hub; it's not necessary
    if hub_shortname != 'pyevent':
        hub.abort()
        hubs.use_hub(hub_shortname)
Example 9
 def test_explicit_hub(self):
     if getattr(hubs.get_hub(), 'uses_twisted_reactor', None):
         # doesn't work with twisted
         return
     oldhub = hubs.get_hub()
     try:
         hubs.use_hub(Foo)
         self.assert_(isinstance(hubs.get_hub(), Foo), hubs.get_hub())
     finally:
         hubs._threadlocal.hub = oldhub
Example 11
    def new_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        cookie = Cookie.SimpleCookie()
        cookie.load(self.headers.getheader('cookie'))
        token = cookie['token'].value
        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            LOG.audit(_("Invalid Token: %s"), token)
            raise Exception(_("Invalid Token"))

        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg("connecting to: %s:%s" % (host, port))
        LOG.audit(_("connecting to: %(host)s:%(port)s"), {
            'host': host,
            'port': port
        })
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                       connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        LOG.audit(_("Invalid Connection Info %s"), token)
                        raise Exception(_("Invalid Connection Info"))
                    tsock.recv(len(data))
                    break

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg("%s:%s: Target closed" % (host, port))
                LOG.audit(_("%(host)s:%(port)s: Target closed"), {
                    'host': host,
                    'port': port
                })
            raise
Example 12
    def new_client(self):
        """
        Called after a new WebSocket connection has been established.
        """
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        cookie = Cookie.SimpleCookie()
        cookie.load(self.headers.getheader('cookie'))
        token = cookie['token'].value
        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            LOG.audit("Invalid Token: %s", token)
            raise Exception(_("Invalid Token"))

        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg("connecting to: %s:%s" % (host, port))
        LOG.audit("connecting to: %s:%s" % (host, port))
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                        connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        LOG.audit("Invalid Connection Info %s", token)
                        raise Exception(_("Invalid Connection Info"))
                    tsock.recv(len(data))
                    break

        if self.verbose and not self.daemon:
            print(self.traffic_legend)

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg("%s:%s: Target closed" % (host, port))
                LOG.audit("%s:%s: Target closed" % (host, port))
            raise
Example 13
def start(config, options):
    server_config = get_config_section_for_name(config, "server", options.name)
    if "hub_module" in server_config:
        hubs.use_hub(server_config.import_("hub_module"))

    if "fork_children" in server_config:
        if "prefork_listen" in server_config:
            _toaddr = lambda (host, port): (host, int(port))
            toaddr = lambda addrstr: _toaddr(addrstr.split(":", 1))
            addrs = map(toaddr, server_config["prefork_listen"].split(","))
            for addr in addrs:
                PREFORK_SOCKETS[addr] = eventlet.listen(addr)
        children = server_config["fork_children"].split(",")
        connections = {}
        for i, j in enumerate(children):
            for k, l in enumerate(children):
                if i != k and (i, k) not in connections:
                    a, b = socket.socketpair()
                    connections[(i, k)] = (l, a)
                    connections[(k, i)] = (j, b)
        for i, child in enumerate(children):
            print "forking", child
            pid = os.fork()
            if pid == 0:
                # child
                hub = hubs.get_hub()
                if hasattr(hub, "poll") and hasattr(hub.poll, "fileno"):
                    # probably epoll which uses a filedescriptor
                    # and thus forking without exec is bad using that
                    # poll instance.
                    hubs.use_hub(hubs.get_default_hub())
                myconns = []
                for (l, r), s in connections.items():
                    if l == i:
                        myconns.append(s)
                    else:
                        s[1].close()
                options.name = child
                start_single(config, options, myconns)
                sys.exit(1)
        while True:
            try:
                pid, exitstatus = os.wait()
                print "childprocess %d died" % pid
            except OSError, e:
                if e.errno == errno.ECHILD:
                    sys.exit(0)
                else:
                    raise
            except KeyboardInterrupt, e:
                print "quitting..."
Example 14
def main(bound_socket=None, bound_api_socket=None):
    hubs.use_hub("pyevent") # dlg test of alternate hub
    config = HookboxConfig()
    config.update_from_commandline_arguments(sys.argv)
    log.setup_logging(config)
    server = create_server(bound_socket, bound_api_socket, config, output_wrapper.outputter)
    if config['objgraph']:
        eventlet.spawn(run_objgraph, server, config)
    if config['debug']:
        eventlet.spawn(debugloop)
    try:
        server.run().wait()
    except KeyboardInterrupt:
        print "Ctr+C pressed; Exiting."
Example 15
    def safe_fork(user=None, group=None, umask=022):
        # Disable gc to avoid bug where gc -> file_dealloc ->
        # write to stderr -> hang.  http://bugs.python.org/issue1336
        # copy from subprocess.py
        gc_was_enabled = gc.isenabled()
        gc.disable()
        pid = os.fork()
        if pid == 0:
            # force stop eventlet loop
            def sysexit():
                os._exit(1)

            hub = hubs.get_hub()
            try:
                hub.abort(wait=False)
            except Exception:
                sysexit()

            # start new eventlet hub
            hubs.use_hub()
            hubs.get_hub()

            # set close-on-exec on the logging handlers
            logging.set_filehandler_close_exec()
            logging.set_syslog_handler_close_exec()

            # ignore all signals in the main loop
            signal_handler = SignalHandler()
            signal_handler.clear()

            # exit the process on any of these signals
            signal_handler.add_handler('SIGTERM', sysexit)
            signal_handler.add_handler('SIGINT', sysexit)
            signal_handler.add_handler('SIGHUP', sysexit)
            signal_handler.add_handler('SIGALRM', sysexit)

            systemutils.drop_privileges(user, group)
            os.umask(umask)

        else:
            if gc_was_enabled:
                gc.enable()
            # busy-loop 100 times to give the sub process
            # time to stop its hub
            i = 0
            while i < 100:
                i += 1
        return pid
Example 16
def greenify(cls_or_func):
    """Decorate classes or functions with this to make them spawn as 
    greenlets when initialized or called."""

    if not isinstance(get_hub(), TornadoHub):
        use_hub(TornadoHub)

    if inspect.isclass(cls_or_func) and tornado.web.RequestHandler in inspect.getmro(cls_or_func):
        execute = cls_or_func._execute
        cls_or_func._execute = lambda self, *args, **kwargs: eventlet.spawn_n(execute, self, *args, **kwargs)
        return cls_or_func
    else:
        def wrapper(*args, **kwargs):
            eventlet.spawn_n(cls_or_func, *args, **kwargs)
        setattr(wrapper, 'original', cls_or_func)
        return wrapper
Example 17
 def connect_container(self, method, account_name, container_name, \
                                                 container_path):
     self.logger.debug('Enter in connect_container')
     use_hub("selects")
     node = self.__service_locator.get_container_from_ring(\
         account_name, container_name)
     conn = None
     headers = None
     shift_param = 512
     with self.__account_updater_timeout.get_connection_timeout(\
         self.__conn_timeout):
         headers = self.__html_header_builder.get_headers(None)
         headers['x-updater-request'] = True
         headers['x-component-number'] = Calculation.evaluate(\
               container_name, shift_param) - 1
         headers['x-global-map-version'] = self.msg.get_global_map_version()
         conn = httplib.HTTPConnection(node['ip'], node['port'])
         conn.request(method, container_path, '', headers)
     return conn
Example 18
def start(config, options):
    if 'hub_module' in config.server:
        hubs.use_hub(config.server.import_('hub_module'))
    #from eventlet import patcher
    #patcher.monkey_patch(all=False, socket=True, select=True, os=True)
    server = Server(config, options)

    #def drop_to_shell(s, f):
        #from IPython.Shell import IPShell
        #s = IPShell([], {'server': server,
                         #'debug': debug,
                         #'stats': lambda: pprint.pprint(server.stats()),
                        #})
        #s.mainloop()
    #signal.signal(signal.SIGUSR2, drop_to_shell)

    if options.statdump:
        interval = options.statdump
        eventlet.spawn_after(interval, statdumper, server, interval)
    server.start()
Example 19
 def init_process(self):
     hubs.use_hub()
     super(EventletWorker, self).init_process()
Example 20
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon ``klass`` and runs the
    daemon with the specified ``once`` kwarg.  The section_name will be derived
    from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of :class:`Daemon`
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon :meth:`Daemon.run` method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    try:
        conf = utils.readconf(conf_file, section_name,
                              log_name=kwargs.get('log_name'))
    except (ValueError, IOError) as e:
        # The message will be printed to stderr
        # and results in an exit code of 1.
        sys.exit(e)

    use_hub(utils.get_hub())

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # optional nice/ionice priority scheduling
    utils.modify_priority(conf, logger)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
        utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))

    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)

    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()

    logger.notice('Starting %s', os.getpid())
    try:
        DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.notice('Exited %s', os.getpid())
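run_daemon() above calls use_hub(utils.get_hub()) so that Swift picks an appropriate hub name before the daemon starts any green I/O. A minimal sketch of that idea follows; select_hub_name() is a simplified stand-in written for this note, not Swift's utils.get_hub().

import select

from eventlet import hubs


def select_hub_name():
    # Prefer poll where the platform provides it, otherwise fall back to
    # the portable select-based hub.
    if hasattr(select, "poll"):
        return "poll"
    return "selects"


hubs.use_hub(select_hub_name())
print("using hub: %s" % type(hubs.get_hub()).__name__)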
Example 21
def use_hub(*a, **kw):
    warnings.warn("eventlet.api.use_hub has moved to eventlet.hubs.use_hub",
        DeprecationWarning, stacklevel=2)
    return hubs.use_hub(*a, **kw)
Example 22
def main(args):
    parser = argparse.ArgumentParser(
        description='Relink and cleanup objects to increase partition power')
    parser.add_argument('action', choices=['relink', 'cleanup'])
    parser.add_argument(
        'conf_file',
        nargs='?',
        help=('Path to config file with [object-relinker] section'))
    parser.add_argument('--swift-dir',
                        default=None,
                        dest='swift_dir',
                        help='Path to swift directory')
    parser.add_argument(
        '--policy',
        default=[],
        dest='policies',
        action='append',
        type=policy,
        help='Policy to relink; may specify multiple (default: all)')
    parser.add_argument('--devices',
                        default=None,
                        dest='devices',
                        help='Path to swift device directory')
    parser.add_argument('--user',
                        default=None,
                        dest='user',
                        help='Drop privileges to this user before relinking')
    parser.add_argument('--device',
                        default=[],
                        dest='device_list',
                        action='append',
                        help='Device name to relink (default: all)')
    parser.add_argument('--partition',
                        '-p',
                        default=[],
                        dest='partitions',
                        type=non_negative_int,
                        action='append',
                        help='Partition to relink (default: all)')
    parser.add_argument('--skip-mount-check',
                        default=False,
                        help='Don\'t test if disk is mounted',
                        action="store_true",
                        dest='skip_mount_check')
    parser.add_argument('--files-per-second',
                        default=None,
                        type=non_negative_float,
                        dest='files_per_second',
                        help='Used to limit I/O. Zero implies no limit '
                        '(default: no limit).')
    parser.add_argument('--stats-interval',
                        default=None,
                        type=non_negative_float,
                        dest='stats_interval',
                        help='Emit stats to recon roughly every N seconds. '
                        '(default: %d).' % DEFAULT_STATS_INTERVAL)
    parser.add_argument('--workers',
                        default=None,
                        type=auto_or_int,
                        help=('Process devices across N workers '
                              '(default: one worker per device)'))
    parser.add_argument('--logfile',
                        default=None,
                        dest='logfile',
                        help='Set log file name. Ignored if using conf_file.')
    parser.add_argument('--debug',
                        default=False,
                        action='store_true',
                        help='Enable debug mode')

    args = parser.parse_args(args)
    hubs.use_hub(get_hub())
    if args.conf_file:
        conf = readconf(args.conf_file, 'object-relinker')
        if args.debug:
            conf['log_level'] = 'DEBUG'
        user = args.user or conf.get('user')
        if user:
            drop_privileges(user)
        logger = get_logger(conf)
    else:
        conf = {'log_level': 'DEBUG' if args.debug else 'INFO'}
        if args.user:
            # Drop privs before creating log file
            drop_privileges(args.user)
            conf['user'] = args.user
        logging.basicConfig(
            format='%(message)s',
            level=logging.DEBUG if args.debug else logging.INFO,
            filename=args.logfile)
        logger = logging.getLogger()

    conf.update({
        'swift_dir':
        args.swift_dir or conf.get('swift_dir', '/etc/swift'),
        'devices':
        args.devices or conf.get('devices', '/srv/node'),
        'mount_check': (config_true_value(conf.get('mount_check', 'true'))
                        and not args.skip_mount_check),
        'files_per_second':
        (args.files_per_second if args.files_per_second is not None else
         non_negative_float(conf.get('files_per_second', '0'))),
        'policies':
        set(args.policies) or POLICIES,
        'partitions':
        set(args.partitions),
        'workers':
        config_auto_int_value(
            conf.get('workers') if args.workers is None else args.workers,
            'auto'),
        'recon_cache_path':
        conf.get('recon_cache_path', DEFAULT_RECON_CACHE_PATH),
        'stats_interval':
        non_negative_float(
            args.stats_interval
            or conf.get('stats_interval', DEFAULT_STATS_INTERVAL)),
    })
    return parallel_process(args.action == 'cleanup', conf, logger,
                            args.device_list)
Example 23
 def tearDown(self):
     use_hub(self.orig_hub)
Example 24
 def start():
     use_hub(TornadoHub)
     tornado.ioloop.IOLoop.instance().start()
Example 25
import eventlet, sys
from eventlet.green import socket, zmq
from eventlet.hubs import use_hub
use_hub('zeromq')

ADDR = 'ipc:///tmp/chat'

ctx = zmq.Context()

def publish(writer):

    print("connected")
    socket = ctx.socket(zmq.SUB)

    socket.setsockopt(zmq.SUBSCRIBE, "")
    socket.connect(ADDR)
    eventlet.sleep(0.1)

    while True:
        msg = socket.recv_pyobj()
        str_msg = "%s: %s" % msg
        writer.write(str_msg)
        writer.flush()


PORT=3001

def read_chat_forever(reader, pub_socket):

    line = reader.readline()
    who = 'someone'
Example 26
 def setup(cls):
     hubs.use_hub()
     eventlet.monkey_patch(os=False)
     patch_sendfile()
Example 27
    def _countManagedGreenlets(self):
        return len([ref for ref in self.greenlets if ref()])

    def _tryToQuit(self, ref_):
        self.greenlets = [ref for ref in self.greenlets if ref is not ref_]
        if self.stopping and len(self.greenlets) == 0:
            QCoreApplication.instance().quit()

    def _tryToQuit2(self, greenlet):
        self.greenlets = [ref for ref in self.greenlets if ref() is not greenlet]
        if self.stopping and len(self.greenlets) == 0:
            QCoreApplication.instance().quit()

Hub = QtHub
use_hub(sys.modules[__name__])

def start_application(quitOnLastWindowClosed = True):
    if quitOnLastWindowClosed:
        app = QCoreApplication.instance()
        if hasattr(app, "lastWindowClosed"):
            app.lastWindowClosed.connect(stop_application, Qt.UniqueConnection)
            app.setQuitOnLastWindowClosed(False)
    get_hub().switch()
    listenerCount = len(get_hub().listeners[BaseHub.READ]) + len(get_hub().listeners[BaseHub.WRITE])
    if listenerCount > 0:
        logger.warning("You have %d open socket left.", listenerCount)
    if len(get_hub().timers) > 0:
        logger.warning("You have left %d timers.", len(get_hub().timers))

def stop_application():
Example 28
    def new_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        cookie = Cookie.SimpleCookie()
        cookie.load(self.headers.getheader('cookie'))
        token = cookie['token'].value
        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            LOG.audit("Invalid Token: %s", token)
            raise Exception(_("Invalid Token"))

        # Verify Origin
        expected_origin_hostname = self.headers.getheader('Host')
        if ':' in expected_origin_hostname:
            e = expected_origin_hostname
            expected_origin_hostname = e.split(':')[0]
        origin_url = self.headers.getheader('Origin')
        # missing origin header indicates non-browser client which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail=detail)
            if expected_origin_hostname != origin_hostname:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail=detail)
            if not self.verify_origin_proto(connect_info['console_type'],
                                              origin.scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail=detail)

        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg("connecting to: %s:%s" % (host, port))
        LOG.audit("connecting to: %s:%s" % (host, port))
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                        connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        LOG.audit("Invalid Connection Info %s", token)
                        raise Exception(_("Invalid Connection Info"))
                    tsock.recv(len(data))
                    break

        if self.verbose and not self.daemon:
            print(self.traffic_legend)

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg("%s:%s: Target closed" % (host, port))
                LOG.audit("%s:%s: Target closed" % (host, port))
            raise
Example 29
def patch_eventlet():
  """Patch Eventlet so it works with Syncless in the same process.

  This has been tested with Eventlet 0.9.7.

  Please note that multithreaded use of Eventlet is untested. (It could be
  tested by creating a thread pool of database client connections.)
  
  The emulation works with both Stackless and greenlet.
  """
  # Make sure greenlet is loaded properly.
  from syncless.best_greenlet.greenlet import greenlet
  from syncless import coio
  from eventlet import hubs
  from eventlet.hubs import hub
  from eventlet.common import clear_sys_exc_info
  old_hub_class = str(getattr(hubs._threadlocal, 'Hub', None))
  if (old_hub_class.startswith('<class \'') and
      old_hub_class.endswith('.SynclessHub\'>')):
    return  # Already patched.
  assert hub.greenlet.greenlet is greenlet, (
      'greenlet class implementation mismatch: syncless uses %r, '
      'Eventlet uses %r; import syncless.best_greenlet first to resolve' %
      (greenlet, hub.greenlet.greenlet))
  assert not hasattr(hubs._threadlocal, 'hub'), (
      'too late, Eventlet hub already created; '
      'to resolve, call patch_eventlet() before doing blocking I/O')

  EVTYPE_STR_TO_MODE = {
      hub.READ: 1,
      hub.WRITE: 2,
  }

  class SynclessHub(hub.BaseHub):
    def __init__(self, *args, **kwargs):
      hub.BaseHub.__init__(self, *args, **kwargs)
      # Map file descriptors to coio.wakeup_info_event objects.
      self.wakeup_info = coio.wakeup_info()
      self.listeners_by_mode = [None, self.listeners[hub.READ],
                                self.listeners[hub.WRITE]]
      # The dict maps file descriptors to coio.wakeup_info_event objects.
      self.events_by_mode = [None, {}, {}]

    def add(self, evtype, fileno, cb):
      # Eventlet is slow in general, because it creates an FdListener
      # (self.lclass) for each non-ready read and write operation.
      mode = EVTYPE_STR_TO_MODE[evtype]
      listener = self.lclass(evtype, fileno, cb)
      listener.mode = mode
      listeners = self.listeners_by_mode[mode]
      if fileno in listeners:
        listeners[fileno].append(listener)
      else:
        # Eventlet is careful: it doesn't install multiple listeners for the
        # same (fd, mode), so it would work with libevent.
        self.events_by_mode[mode][fileno] = self.wakeup_info.create_event(
            fileno, mode)
        listeners[fileno] = [listener]
      return listener

    def remove(self, listener):
      listeners = self.listeners_by_mode[listener.mode]
      fileno = listener.fileno
      try:
        listeners[fileno].remove(listener)
        if not listeners[fileno]:
          del listeners[fileno]
          self.events_by_mode[listener.mode].pop(fileno).delete()
      except (KeyError, ValueError):
        pass

    def remove_descriptor(self, fileno):
      if self.listeners_by_mode[1].pop(fileno, None):
        self.events_by_mode[1].pop(fileno).delete()
      if self.listeners_by_mode[2].pop(fileno, None):
        self.events_by_mode[2].pop(fileno).delete()

    def wait(self, timeout=None):
      # This calls the Syncless main loop if needed.
      events = self.wakeup_info.tick(timeout)
      while events:
        fileno, mode = events.pop()
        listeners = self.listeners_by_mode[mode][fileno]
        try:
          if listeners:
            # By design, Eventlet calls only the first registered listener.
            listeners[0](fileno)
        except self.SYSTEM_EXCEPTIONS:
          raise
        except:
          self.squelch_exception(fileno, sys.exc_info())
          clear_sys_exc_info()

  hubs.use_hub(SynclessHub)
  assert SynclessHub is hubs._threadlocal.Hub
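SynclessHub above shows that use_hub() also accepts a hub class directly. The sketch below installs a much smaller custom hub, a selects-based subclass that only counts calls to wait(); CountingHub is an assumption written for this note and has nothing to do with Syncless.

import eventlet
from eventlet import hubs
from eventlet.hubs.selects import Hub as SelectsHub


class CountingHub(SelectsHub):
    """selects hub that records how many times wait() is entered."""

    def __init__(self, *args, **kwargs):
        super(CountingHub, self).__init__(*args, **kwargs)
        self.wait_calls = 0

    def wait(self, seconds=None):
        self.wait_calls += 1
        return super(CountingHub, self).wait(seconds)


hubs.use_hub(CountingHub)        # install the class; get_hub() instantiates it
assert isinstance(hubs.get_hub(), CountingHub)
eventlet.sleep(0.01)             # give the hub loop a reason to enter wait()
print("wait() entered %d time(s)" % hubs.get_hub().wait_calls)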
Example 30
 def __init__(self):
     self.host = config.get("HOST")
     self.port = int(config.get("PORT"))
     self.backlog = int(config.get("BACKLOG"))
     self.sock = None
     hubs.use_hub()
Example 31
    event2 = threading.Event()
    event1.set()
    thread1 = BenchThread(event1, event2)
    thread2 = BenchThread(event2, event1)
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()

print "Testing with %d context switches" % CONTEXT_SWITCHES
start = time.time()
test_thread()
print "threading: %.02f seconds" % (time.time() - start)

try:
    hubs.use_hub(pyevent)
    start = time.time()
    test_eventlet()
    print "pyevent:   %.02f seconds" % (time.time() - start)
except:
    print "pyevent hub unavailable"

try:
    hubs.use_hub(epolls)
    start = time.time()
    test_eventlet()
    print "epoll:     %.02f seconds" % (time.time() - start)
except:
    print "epoll hub unavailable"

try:
Example 32
 def patch(self):
     hubs.use_hub()
     eventlet.monkey_patch(os=False)
     patch_sendfile()
Example 33
def restart_hub():
    from eventlet import hubs
    hub = hubs.get_hub()
    hub.abort()
    hub_shortname = hub.__module__.split('.')[-1]
    hubs.use_hub(hub_shortname)
Example 35
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon ``klass`` and runs the
    daemon with the specified ``once`` kwarg.  The section_name will be derived
    from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of :class:`Daemon`
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon :meth:`Daemon.run` method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    try:
        conf = utils.readconf(conf_file, section_name,
                              log_name=kwargs.get('log_name'))
    except (ValueError, IOError) as e:
        # The message will be printed to stderr
        # and results in an exit code of 1.
        sys.exit(e)

    use_hub(utils.get_hub())

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # optional nice/ionice priority scheduling
    utils.modify_priority(conf, logger)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
        utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))

    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)

    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()

    logger.notice('Starting %s', os.getpid())
    try:
        DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.notice('Exited %s', os.getpid())
Example 36
    whataremyips,
    unlink_older_than,
    lock_path,
    compute_eta,
    get_logger,
    write_pickle,
    renamer,
    dump_recon_cache,
    rsync_ip,
    mkdirs,
)
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE

hubs.use_hub("poll")

PICKLE_PROTOCOL = 2
ONE_WEEK = 604800
HASH_FILE = "hashes.pkl"


def quarantine_renamer(device_path, corrupted_file_path):
    """
    In the case that a file is corrupted, move it to a quarantined
    area to allow replication to fix it.

    :params device_path: The path to the device the corrupted file is on.
    :params corrupted_file_path: The path to the file you want quarantined.

    :returns: path (str) of directory the file was moved to
Example 37
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # Nova expects the token to be passed as a
        # query parameter of the GET request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # From a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: http://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.NovaException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        if not token:
            # NoVNC uses its own convention of forwarding the token
            # from the request to a cookie header, so we should also
            # check for this behavior
            hcookie = self.headers.getheader('cookie')
            if hcookie:
                cookie = Cookie.SimpleCookie()
                for hcookie_part in hcookie.split(';'):
                    hcookie_part = hcookie_part.lstrip()
                    try:
                        cookie.load(hcookie_part)
                    except Cookie.CookieError:
                        # NOTE(stgleb): Do not print out cookie content
                        # for security reasons.
                        LOG.warning(_LW('Found malformed cookie'))
                    else:
                        if 'token' in cookie:
                            token = cookie['token'].value

        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            raise exception.InvalidToken(token=token)

        # Verify Origin
        expected_origin_hostname = self.headers.getheader('Host')
        if ':' in expected_origin_hostname:
            e = expected_origin_hostname
            if '[' in e and ']' in e:
                expected_origin_hostname = e.split(']')[0][1:]
            else:
                expected_origin_hostname = e.split(':')[0]
        expected_origin_hostnames = CONF.console.allowed_origins
        expected_origin_hostnames.append(expected_origin_hostname)
        origin_url = self.headers.getheader('Origin')
        # missing origin header indicates non-browser client which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail=detail)
            if origin_hostname not in expected_origin_hostnames:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail=detail)
            if not self.verify_origin_proto(connect_info, origin_scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail=detail)

        self.msg(_('connect info: %s'), str(connect_info))
        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg(
            _("connecting to: %(host)s:%(port)s") % {
                'host': host,
                'port': port
            })
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                       connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        raise exception.InvalidConnectionInfo()
                    tsock.recv(len(data))
                    break

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg(
                    _("%(host)s:%(port)s: "
                      "Websocket client or target closed") % {
                          'host': host,
                          'port': port
                      })
            raise
Example 38
    def _tryToQuit(self, ref_):
        self.greenlets = [ref for ref in self.greenlets if ref is not ref_]
        if self.stopping and len(self.greenlets) == 0:
            QCoreApplication.instance().quit()

    def _tryToQuit2(self, greenlet):
        self.greenlets = [
            ref for ref in self.greenlets if ref() is not greenlet
        ]
        if self.stopping and len(self.greenlets) == 0:
            QCoreApplication.instance().quit()


Hub = QtHub
use_hub(sys.modules[__name__])


def start_application(quitOnLastWindowClosed=True):
    if quitOnLastWindowClosed:
        app = QCoreApplication.instance()
        if hasattr(app, "lastWindowClosed"):
            app.lastWindowClosed.connect(stop_application, Qt.UniqueConnection)
            app.setQuitOnLastWindowClosed(False)
    get_hub().switch()
    listenerCount = len(get_hub().listeners[BaseHub.READ]) + len(
        get_hub().listeners[BaseHub.WRITE])
    if listenerCount > 0:
        logger.warning("You have %d open socket left.", listenerCount)
    if len(get_hub().timers) > 0:
        logger.warning("You have left %d timers.", len(get_hub().timers))
Example 39
def patch_eventlet():
    """Patch Eventlet so it works with Syncless in the same process.

  This has been tested with Eventlet 0.9.7.

  Please note that multithreaded use of Eventlet is untested. (It could be
  tested by creating a thread pool of database client connections.)
  
  The emulation works with both Stackless and greenlet.
  """
    # Make sure greenlet is loaded properly.
    from syncless.best_greenlet.greenlet import greenlet
    from syncless import coio
    from eventlet import hubs
    from eventlet.hubs import hub
    from eventlet.common import clear_sys_exc_info
    old_hub_class = str(getattr(hubs._threadlocal, 'Hub', None))
    if (old_hub_class.startswith('<class \'')
            and old_hub_class.endswith('.SynclessHub\'>')):
        return  # Already patched.
    assert hub.greenlet.greenlet is greenlet, (
        'greenlet class implementation mismatch: syncless uses %r, '
        'Eventlet uses %r; import syncless.best_greenlet first to resolve' %
        (greenlet, hub.greenlet.greenlet))
    assert not hasattr(hubs._threadlocal, 'hub'), (
        'too late, Eventlet hub already created; '
        'to resolve, call patch_eventlet() before doing blocking I/O')

    EVTYPE_STR_TO_MODE = {
        hub.READ: 1,
        hub.WRITE: 2,
    }

    class SynclessHub(hub.BaseHub):
        def __init__(self, *args, **kwargs):
            hub.BaseHub.__init__(self, *args, **kwargs)
            # Map file descriptors to coio.wakeup_info_event objects.
            self.wakeup_info = coio.wakeup_info()
            self.listeners_by_mode = [
                None, self.listeners[hub.READ], self.listeners[hub.WRITE]
            ]
            # The dict maps file descriptors to coio.wakeup_info_event objects.
            self.events_by_mode = [None, {}, {}]

        def add(self, evtype, fileno, cb):
            # Eventlet is slow in general, because it creates an FdListener
            # (self.lclass) for each non-ready read and write operation.
            mode = EVTYPE_STR_TO_MODE[evtype]
            listener = self.lclass(evtype, fileno, cb)
            listener.mode = mode
            listeners = self.listeners_by_mode[mode]
            if fileno in listeners:
                listeners[fileno].append(listener)
            else:
                # Eventlet is careful: it doesn't install multiple listeners for the
                # same (fd, mode), so it would work with libevent.
                self.events_by_mode[mode][
                    fileno] = self.wakeup_info.create_event(fileno, mode)
                listeners[fileno] = [listener]
            return listener

        def remove(self, listener):
            listeners = self.listeners_by_mode[listener.mode]
            fileno = listener.fileno
            try:
                listeners[fileno].remove(listener)
                if not listeners[fileno]:
                    del listeners[fileno]
                    self.events_by_mode[listener.mode].pop(fileno).delete()
            except (KeyError, ValueError):
                pass

        def remove_descriptor(self, fileno):
            if self.listeners_by_mode[1].pop(fileno, None):
                self.events_by_mode[1].pop(fileno).delete()
            if self.listeners_by_mode[2].pop(fileno, None):
                self.events_by_mode[2].pop(fileno).delete()

        def wait(self, timeout=None):
            # This calls the Syncless main loop if needed.
            events = self.wakeup_info.tick(timeout)
            while events:
                fileno, mode = events.pop()
                listeners = self.listeners_by_mode[mode][fileno]
                try:
                    if listeners:
                        # By design, Eventlet calls only the first registered listener.
                        listeners[0](fileno)
                except self.SYSTEM_EXCEPTIONS:
                    raise
                except:
                    self.squelch_exception(fileno, sys.exc_info())
                    clear_sys_exc_info()

    hubs.use_hub(SynclessHub)
    assert SynclessHub is hubs._threadlocal.Hub
Example 40
import errno
import uuid

import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
from eventlet.green import subprocess
from eventlet.support.greenlets import GreenletExit

from chase.common.ring import Ring
from chase.common.utils import whataremyips, unlink_older_than, lock_path, \
        compute_eta, get_logger, write_pickle, renamer, dump_recon_cache, \
        TRUE_VALUES
from chase.common.bufferedhttp import http_connect
from chase.common.daemon import Daemon

hubs.use_hub('poll')

PICKLE_PROTOCOL = 2
ONE_WEEK = 604800
HASH_FILE = 'hashes.pkl'


def quarantine_renamer(device_path, corrupted_file_path):
    """
    In the case that a file is corrupted, move it to a quarantined
    area to allow replication to fix it.

    :params device_path: The path to the device the corrupted file is on.
    :params corrupted_file_path: The path to the file you want quarantined.

    :returns: path (str) of directory the file was moved to
Example 41
def use_hub(*a, **kw):
    warnings.warn("eventlet.api.use_hub has moved to eventlet.hubs.use_hub",
                  DeprecationWarning,
                  stacklevel=2)
    return hubs.use_hub(*a, **kw)

Example 42
DEFAULT_PORT = 7777
DEFAULT_FILENAME = 'typescript'
DEFAULT_COMMAND = '/bin/sh'

CLIENTS = set()
CHILD_PID = None
CHILD_FD = None
LOGFILE_FD = None
LOGFILE = None
IO_SIZE = 1024
SOCKET_ENABLED = True


hubs.use_hub("selects")
eventlet.monkey_patch()


def reopen_logfile(*args):
    """Close and re-open the log file for rotation/trancating"""
    global LOGFILE_FD

    # Close the current file descriptor if needed
    if LOGFILE_FD is not None:
        LOGFILE_FD.close()

    # Re-open the log file
    LOGFILE_FD = open(LOGFILE, "wb", 1)

Example 44
 def tearDown(self):
     if not self.producer.stopped.ready():
         self.producer.stop()
     self.producer.stopped.wait()
     # reset the hub
     hubs.use_hub(hubs.poll)
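The tearDown above passes the module object hubs.poll rather than a name. Both spellings are accepted by use_hub(), as the short sketch below illustrates (the poll hub is only available on POSIX platforms).

from eventlet import hubs
from eventlet.hubs import poll as poll_hub_module

hubs.use_hub("poll")             # select the poll hub by its short name
hubs.use_hub(poll_hub_module)    # the imported module object works as well
print(type(hubs.get_hub()))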
Example 45
 def bench_eventlet(options):
     from eventlet import spawn, sleep
     if options.eventlet_hub is not None:
         from eventlet.hubs import use_hub
         use_hub(options.eventlet_hub)
     return test(spawn, sleep, options)
Example 46
So all messages are published to port 12345 and the device forwards all the
messages to 12346 where they are subscribed to
"""
import os
import sys
import eventlet
from collections import defaultdict
from eventlet import spawn_n, sleep
from eventlet import wsgi
from eventlet import websocket
from eventlet.green import zmq
from eventlet.hubs import get_hub, use_hub
from uuid import uuid1

use_hub('zeromq')
ctx = zmq.Context()


class IDName(object):

    def __init__(self):
        self.id = uuid1()
        self.name = None

    def __str__(self):
        if self.name:
            return self.name
        else:
            return str(self.id)
Example 47
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # Nova expects the token to be passed as a
        # query parameter of the GET request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # From a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: http://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.NovaException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        if not token:
            # NoVNC uses its own convention of forwarding the token
            # from the request to a cookie header, so we should also
            # check for this behavior
            hcookie = self.headers.getheader('cookie')
            if hcookie:
                cookie = Cookie.SimpleCookie()
                cookie.load(hcookie)
                if 'token' in cookie:
                    token = cookie['token'].value

        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            raise exception.InvalidToken(token=token)

        self.msg(_('connect info: %s'), str(connect_info))
        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg(
            _("connecting to: %(host)s:%(port)s") % {
                'host': host,
                'port': port
            })
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                       connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        raise exception.InvalidConnectionInfo()
                    tsock.recv(len(data))
                    break

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg(
                    _("%(host)s:%(port)s: Target closed") % {
                        'host': host,
                        'port': port
                    })
            raise
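
The snippet above reads the console token from the GET query string and falls back to the cookie header that noVNC sets. Below is a minimal, standalone sketch of just that lookup, written against the Python 3 stdlib names (urllib.parse, http.cookies) rather than the Python 2 modules the snippet assumes.

from http.cookies import SimpleCookie
from urllib.parse import parse_qs, urlparse


def extract_token(path, cookie_header=None):
    """Return the console token from the request path or the cookie header."""
    query = urlparse(path).query
    token = parse_qs(query).get("token", [""]).pop()
    if not token and cookie_header:
        cookie = SimpleCookie()
        cookie.load(cookie_header)
        if 'token' in cookie:
            token = cookie['token'].value
    return token


# extract_token('/vnc_auto.html?token=abc123')    -> 'abc123'
# extract_token('/vnc_auto.html', 'token=abc123') -> 'abc123'
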
Esempio n. 48
0
 def tearDown(self):
     use_hub(self.orig_hub)
Esempio n. 49
0
import eventlet, sys
from eventlet.green import socket, zmq
from eventlet.hubs import use_hub

use_hub("zeromq")

ADDR = "ipc:///tmp/chat"

ctx = zmq.Context()


def publish(writer):

    print "connected"
    socket = ctx.socket(zmq.SUB)

    socket.setsockopt(zmq.SUBSCRIBE, "")
    socket.connect(ADDR)
    eventlet.sleep(0.1)

    while True:
        msg = socket.recv_pyobj()
        str_msg = "%s: %s" % msg
        writer.write(str_msg)
        writer.flush()


PORT = 3001


def read_chat_forever(reader, pub_socket):
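
The excerpt stops at the read_chat_forever signature. As a hedged sketch only, a body along these lines would match the publish() side above, which unpacks each message with "%s: %s" % msg and therefore expects a two-item tuple; the sender label and the sleep(0) yield are illustrative assumptions, not the original code.

def read_chat_forever(reader, pub_socket):
    """Publish every line read from `reader` as a (sender, line) tuple."""
    sender = "chat"                    # hypothetical sender label
    line = reader.readline()
    while line:
        pub_socket.send_pyobj((sender, line))
        eventlet.sleep(0)              # yield so subscriber greenlets can run
        line = reader.readline()
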
Esempio n. 50
0
import signal
import sys

import eventlet
from eventlet import hubs

DEFAULT_PORT = 7777
DEFAULT_FILENAME = 'typescript'
DEFAULT_COMMAND = '/bin/sh'

CLIENTS = set()
CHILD_PID = None
CHILD_FD = None
LOGFILE_FD = None
LOGFILE = None
IO_SIZE = 1024
SOCKET_ENABLED = True

hubs.use_hub("selects")
eventlet.monkey_patch()


def reopen_logfile(*args):
    """Close and re-open the log file for rotation/trancating"""
    global LOGFILE_FD

    # Close the current file descriptor if needed
    if LOGFILE_FD is not None:
        LOGFILE_FD.close()

    # Re-open the log file
    LOGFILE_FD = open(LOGFILE, "wb", 1)
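
reopen_logfile is the kind of handler that is usually attached to SIGHUP, so an external logrotate can ask the running process to reopen its log file after rotating it. A minimal sketch of that wiring follows; the choice of SIGHUP is an assumption, not something shown in the excerpt.

# Reopen the log file whenever the process receives SIGHUP (POSIX only).
signal.signal(signal.SIGHUP, reopen_logfile)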

Esempio n. 51
0
from eventlet.support.greenlets import GreenletExit

from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
    compute_eta, get_logger, dump_recon_cache, ismount, \
    rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
    get_hub, tpool_reraise, config_auto_int_value, storage_directory
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir
from swift.common.storage_policy import POLICIES, REPL_POLICY


hubs.use_hub(get_hub())


class ObjectReplicator(Daemon):
    """
    Replicate objects.

    Encapsulates most logic and data needed by the object replication process.
    Each call to .replicate() performs one replication pass.  It's up to the
    caller to do this in a loop.
    """

    def __init__(self, conf, logger=None):
        """
        :param conf: configuration object obtained from ConfigParser
        :param logger: logging object
Esempio n. 52
0
 def init_process(self):
     hubs.use_hub()
     super(EventletWorker, self).init_process()
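
This worker hook re-runs hubs.use_hub() when the process initializes after a fork, for the same reason the websocket proxies above do it: the child must not keep using the hub (and epoll descriptor) inherited from its parent. Here is a minimal sketch of that post-fork pattern outside any worker framework; the fork/wait structure is illustrative, not taken from the snippet.

import os

from eventlet import hubs

pid = os.fork()
if pid == 0:
    # Child: drop the hub inherited from the parent so a fresh one,
    # with its own epoll fd, is created on first use.
    hubs.use_hub()
    # ... run the child's event loop here ...
else:
    os.waitpid(pid, 0)
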
Esempio n. 53
0
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # The nova expected behavior is to have token
        # passed to the method GET of the request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # From a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: http://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.NovaException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        if not token:
            # NoVNC uses its own convention of forwarding the token
            # from the request to a cookie header, so we should also
            # check for this behavior
            hcookie = self.headers.get('cookie')
            if hcookie:
                cookie = Cookie.SimpleCookie()
                for hcookie_part in hcookie.split(';'):
                    hcookie_part = hcookie_part.lstrip()
                    try:
                        cookie.load(hcookie_part)
                    except Cookie.CookieError:
                        # NOTE(stgleb): Do not print out cookie content
                        # for security reasons.
                        LOG.warning('Found malformed cookie')
                    else:
                        if 'token' in cookie:
                            token = cookie['token'].value

        ctxt = context.get_admin_context()
        connect_info = self._get_connect_info(ctxt, token)

        # Verify Origin
        expected_origin_hostname = self.headers.get('Host')
        if ':' in expected_origin_hostname:
            e = expected_origin_hostname
            if '[' in e and ']' in e:
                expected_origin_hostname = e.split(']')[0][1:]
            else:
                expected_origin_hostname = e.split(':')[0]
        expected_origin_hostnames = CONF.console.allowed_origins
        expected_origin_hostnames.append(expected_origin_hostname)
        origin_url = self.headers.get('Origin')
        # missing origin header indicates non-browser client which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            # If the console connection was forwarded by a proxy (example:
            # haproxy), the original protocol could be contained in the
            # X-Forwarded-Proto header instead of the Origin header. Prefer the
            # forwarded protocol if it is present.
            forwarded_proto = self.headers.get('X-Forwarded-Proto')
            if forwarded_proto is not None:
                origin_scheme = forwarded_proto
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail=detail)
            if origin_hostname not in expected_origin_hostnames:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail=detail)
            if not self.verify_origin_proto(connect_info, origin_scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail=detail)

        self.msg(_('connect info: %s'), str(connect_info))
        host = connect_info.host
        port = connect_info.port

        # Connect to the target
        self.msg(
            _("connecting to: %(host)s:%(port)s") % {
                'host': host,
                'port': port
            })
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if 'internal_access_path' in connect_info:
            path = connect_info.internal_access_path
            if path:
                tsock.send(
                    encodeutils.safe_encode('CONNECT %s HTTP/1.1\r\n\r\n' %
                                            path))
                end_token = "\r\n\r\n"
                while True:
                    data = tsock.recv(4096, socket.MSG_PEEK)
                    token_loc = data.find(end_token)
                    if token_loc != -1:
                        if data.split("\r\n")[0].find("200") == -1:
                            raise exception.InvalidConnectionInfo()
                        # remove the response from recv buffer
                        tsock.recv(token_loc + len(end_token))
                        break

        if self.server.security_proxy is not None:
            tenant_sock = TenantSock(self)

            try:
                tsock = self.server.security_proxy.connect(tenant_sock, tsock)
            except exception.SecurityProxyNegotiationFailed:
                LOG.exception("Unable to perform security proxying, shutting "
                              "down connection")
                tenant_sock.close()
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                raise

            tenant_sock.finish_up()

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg(
                    _("%(host)s:%(port)s: "
                      "Websocket client or target closed") % {
                          'host': host,
                          'port': port
                      })
            raise
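
The Host-header handling above strips an optional port while still accepting the bracketed IPv6 form. A standalone sketch of just that parsing step; the function name is ours, the logic mirrors the snippet.

def host_header_hostname(host_header):
    """Return the hostname part of a Host header, dropping any port."""
    if ':' in host_header:
        if '[' in host_header and ']' in host_header:
            return host_header.split(']')[0][1:]  # "[::1]:6080" -> "::1"
        return host_header.split(':')[0]          # "host:6080"  -> "host"
    return host_header
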
Esempio n. 54
0
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # The nova expected behavior is to have token
        # passed to the method GET of the request
        query = urlparse.urlparse(self.path).query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        if not token:
            # NoVNC uses its own convention of forwarding the token
            # from the request to a cookie header, so we should also
            # check for this behavior
            hcookie = self.headers.getheader('cookie')
            if hcookie:
                cookie = Cookie.SimpleCookie()
                cookie.load(hcookie)
                if 'token' in cookie:
                    token = cookie['token'].value

        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            raise Exception(_("Invalid Token"))

        # Verify Origin
        expected_origin_hostname = self.headers.getheader('Host')
        if ':' in expected_origin_hostname:
            e = expected_origin_hostname
            expected_origin_hostname = e.split(':')[0]
        origin_url = self.headers.getheader('Origin')
        # missing origin header indicates non-browser client which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail=detail)
            if expected_origin_hostname != origin_hostname:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail=detail)
            if not self.verify_origin_proto(connect_info['console_type'],
                                            origin.scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail=detail)

        self.msg(_('connect info: %s'), str(connect_info))
        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
                                                          'port': port})
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                        connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        raise Exception(_("Invalid Connection Info"))
                    tsock.recv(len(data))
                    break

        instance_id = connect_info.get('instance_uuid', 'None')
        # Start proxying
        try:
            operationlog.info(
                "VNC: host:%s, port:%s, is connecting to vm %s, at %s" % (
                host, port, instance_id, timeutils.utcnow()),
                extra={"type": "operate"})
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                operationlog.info(
                    "VNC: host:%s, port:%s, lost connection with vm %s, at %s"
                    % (host, port, instance_id, timeutils.utcnow()),
                    extra={"type": "operate"})
                self.vmsg(_("%(host)s:%(port)s: Target closed") %
                          {'host': host, 'port': port})
                LOG.audit("%s:%s: Target closed" % (host, port))
            raise
Esempio n. 55
0
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # The nova expected behavior is to have token
        # passed to the method GET of the request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # From a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: http://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.NovaException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        if not token:
            # NoVNC uses its own convention of forwarding the token
            # from the request to a cookie header, so we should also
            # check for this behavior
            hcookie = self.headers.getheader('cookie')
            if hcookie:
                cookie = Cookie.SimpleCookie()
                for hcookie_part in hcookie.split(';'):
                    hcookie_part = hcookie_part.lstrip()
                    try:
                        cookie.load(hcookie_part)
                    except Cookie.CookieError:
                        # NOTE(stgleb): Do not print out cookie content
                        # for security reasons.
                        LOG.warning(_LW('Found malformed cookie'))
                    else:
                        if 'token' in cookie:
                            token = cookie['token'].value

        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)

        if not connect_info:
            raise exception.InvalidToken(token=token)

        # Verify Origin
        expected_origin_hostname = self.headers.getheader('Host')
        if ':' in expected_origin_hostname:
            e = expected_origin_hostname
            if '[' in e and ']' in e:
                expected_origin_hostname = e.split(']')[0][1:]
            else:
                expected_origin_hostname = e.split(':')[0]
        expected_origin_hostnames = CONF.console.allowed_origins
        expected_origin_hostnames.append(expected_origin_hostname)
        origin_url = self.headers.getheader('Origin')
        # missing origin header indicates non-browser client which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail=detail)
            if origin_hostname not in expected_origin_hostnames:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail=detail)
            if not self.verify_origin_proto(connect_info, origin_scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail=detail)

        self.msg(_('connect info: %s'), str(connect_info))
        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
                                                          'port': port})
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                        connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        raise exception.InvalidConnectionInfo()
                    tsock.recv(len(data))
                    break

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg(_("%(host)s:%(port)s: "
                          "Websocket client or target closed") %
                          {'host': host, 'port': port})
            raise
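
The CONNECT handshakes above peek at the target's reply with MSG_PEEK so no console bytes are consumed while waiting for the HTTP response. Note that Esempio n. 53 then drains only the response itself (token_loc + len(end_token)), whereas the older variants drain len(data), which can also discard bytes that arrived after the blank line. A standalone sketch of the safer form; the bytes literals and the RuntimeError are our choices.

import socket


def wait_for_connect_response(sock):
    """Wait for the reply to an HTTP CONNECT, consuming only the response."""
    end = b"\r\n\r\n"
    while True:
        data = sock.recv(4096, socket.MSG_PEEK)  # look without consuming
        idx = data.find(end)
        if idx != -1:
            if data.split(b"\r\n")[0].find(b"200") == -1:
                raise RuntimeError("target refused the CONNECT request")
            sock.recv(idx + len(end))            # drop exactly the response
            return
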
Esempio n. 56
0
 def start():
    use_hub(TornadoHub)
    tornado.ioloop.IOLoop.instance().start()
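
use_hub() accepts a hub class or module as well as a name string, which is how this snippet plugs in a Tornado-backed hub. A minimal sketch of both call forms; TornadoHub stands for whatever custom hub implementation the example defines elsewhere.

from eventlet import hubs

hubs.use_hub('selects')       # built-in hub selected by name
# hubs.use_hub(TornadoHub)    # custom hub passed as a class or module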
Esempio n. 57
0
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
    compute_eta, get_logger, dump_recon_cache, ismount, \
    rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
    get_hub, tpool_reraise, config_auto_int_value, storage_directory
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir
from swift.common.storage_policy import POLICIES, REPL_POLICY

DEFAULT_RSYNC_TIMEOUT = 900

hubs.use_hub(get_hub())


def _do_listdir(partition, replication_cycle):
    return (((partition + replication_cycle) % 10) == 0)


class ObjectReplicator(Daemon):
    """
    Replicate objects.

    Encapsulates most logic and data needed by the object replication process.
    Each call to .replicate() performs one replication pass.  It's up to the
    caller to do this in a loop.
    """
    def __init__(self, conf, logger=None):
Esempio n. 58
0
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established."""
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # The zun expected behavior is to have token
        # passed to the method GET of the request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # From a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: https://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.ZunException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        uuid = urlparse.parse_qs(query).get("uuid", [""]).pop()

        dbapi = db_api._get_dbdriver_instance()
        ctx = context.get_admin_context(all_tenants=True)

        if uuidutils.is_uuid_like(uuid):
            container = dbapi.get_container_by_uuid(ctx, uuid)
        else:
            container = dbapi.get_container_by_name(ctx, uuid)

        if token != container.websocket_token:
            raise exception.InvalidWebsocketToken(token)

        access_url = '%s?token=%s&uuid=%s' % (CONF.websocket_proxy.base_url,
                                              token, uuid)

        # Verify Origin
        expected_origin_hostname = self.headers.get('Host')
        if ':' in expected_origin_hostname:
            e = expected_origin_hostname
            if '[' in e and ']' in e:
                expected_origin_hostname = e.split(']')[0][1:]
            else:
                expected_origin_hostname = e.split(':')[0]
        expected_origin_hostnames = CONF.websocket_proxy.allowed_origins
        expected_origin_hostnames.append(expected_origin_hostname)
        origin_url = self.headers.get('Origin')

        # missing origin header indicates non-browser client which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail)
            if origin_hostname not in expected_origin_hostnames:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail)
            if not self.verify_origin_proto(access_url, origin_scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail)

        if container.websocket_url:
            target_url = container.websocket_url
            escape = "~"
            close_wait = 0.5
            wscls = WebSocketClient(host_url=target_url,
                                    escape=escape,
                                    close_wait=close_wait)
            wscls.connect()
            self.target = wscls
        else:
            raise exception.InvalidWebsocketUrl()

        # Start proxying
        try:
            self.do_proxy(self.target.ws)
        except Exception:
            if self.target.ws:
                self.target.ws.close()
                self.vmsg(_("Websocket client or target closed"))
            raise