Exemple #1
0
def handle(socket, address):
    """Serve one client connection, relaying module-level control events
    (fail-healthcheck / unset / shutdown) to its ConnectionHandler."""
    log.debug('new connection from %r', address)

    from uuss.server.connection import ConnectionHandler
    from gevent import Greenlet

    handler = ConnectionHandler(socket, address, config)

    # Background greenlets that forward each control event to the handler.
    watchers = [
        Greenlet.spawn(connection_watch_for_fail_healthcheck, handler),
        Greenlet.spawn(connection_watch_for_unset_fail_healthcheck, handler),
        Greenlet.spawn(connection_watch_for_shutdown, handler),
    ]

    # The watcher greenlets won't get scheduled until this handler yields,
    # so apply any events that are already set right now.
    if fail_healthcheck_event.wait(0):
        handler.fail_healthcheck()
    if unset_fail_healthcheck_event.wait(0):
        handler.fail_healthcheck(False)
    if shutdown_event.wait(0):
        handler.shutdown()

    try:
        handler.run()
    finally:
        for watcher in watchers:
            watcher.kill()
        del handler
Exemple #2
0
    def __init__(self, config_file='hive.cfg', key='server.key', cert='server.crt'):
        """Load the hive configuration and verify the SSL key/cert pair.

        Exits the process if the key or certificate file is missing;
        raises ConfigNotFound if the configuration file does not exist.
        """
        self.key = key
        self.cert = cert

        self.config = ConfigParser.ConfigParser()

        if not os.path.exists(config_file):
            raise ConfigNotFound('Configuration file could not be found. ({0})'.format(config_file))
        self.config.read(config_file)

        # Both the key and the certificate must be present on disk.
        if not os.path.isfile(self.key) or not os.path.isfile(self.cert):
            gen_cmd = "openssl req -new -newkey rsa:1024 -days 365 -nodes -x509 -keyout server.key -out server.crt"
            gen_cmd += ' && openssl rsa -in server.key -text -out server.key'
            logger.error('No valid key or certificate found, '
                         'a selfsigned cert and key can be generated with the following '
                         'command: "{0}"'.format(gen_cmd))
            sys.exit(1)

        # Inject the authentication mechanism used by all sessions.
        Session.authenticator = Authenticator()

        # Optionally spawn the clock-drift checker greenlet.
        if self.config.getboolean('timecheck', 'Enabled'):
            Greenlet.spawn(self.checktime)
Exemple #3
0
def test_server_closess(server):
    """Closing the server side of the socket must trigger handle_close
    exactly once, with a Close frame as keyword argument."""
    websocket = WebSocket(server_url='ws://0.0.0.0:8001')
    with patch.object(websocket, 'handle_close') as mock_handle:
        websocket.connect(upgrade=False)

        def pump_messages():
            # Drain incoming frames until the connection dies.
            while True:
                try:
                    incoming = websocket.receive()
                except Exception:
                    logger.exception('connection handler exploded')
                    raise
                if incoming:
                    logger.info('got message: %s', incoming)

        Greenlet.spawn(pump_messages)
        gevent.sleep(0.01)  # enough for the upgrade to happen

        # Close the server-side socket of the (single) connected client.
        handler = list(server.clients.values())[0]
        Greenlet.spawn(handler.ws.close)

        # Poll (bounded by a 1s timeout) until the close handler fires.
        with gevent.Timeout(1):
            while mock_handle.call_count != 1:
                gevent.sleep(0.01)

        assert mock_handle.call_count == 1
        assert mock_handle.call_args == call(close_frame=ANY)

        close_frame = mock_handle.call_args[1]['close_frame']
        assert isinstance(close_frame, Close)
Exemple #4
0
    def msg_received(self, msg):
        """Dispatch one protocol message according to the session state machine.

        msg.id 1 = client hello, 4 = quit, 5 = start game, 8 = move.
        Raises RuntimeError on a message that is invalid for the current state.
        """
        # TODO: this will require reimplementing messages in some kind of
        #   FSM DSL, ideally programming lang agnostic
        if msg.id == 1:
            # Idiom fix: `is not` instead of `not ... is`.
            if self.state is not SS.NEW_CLIENT:
                raise RuntimeError
            self.name = msg.name
            self.transport.send(factory(2))
            self.state = SS.GAME_CHALLENGE

        elif self.state is SS.GAME_CHALLENGE:
            if msg.id == 4:
                self.sock.shutdown(2)  # 2 == SHUT_RDWR
            elif msg.id == 5:
                self.world = World(20, 20)
                self.player = self.world.spawn(Player, owner=self)
                # NOTE(review): ticker() is *called* here and its return value
                # is handed to Greenlet.spawn; if the ticker itself is the
                # callable, this should be Greenlet.spawn(self.world.ticker)
                # — confirm against the World implementation.
                Greenlet.spawn(self.world.ticker())

                # Announce the world geometry and enter the game.
                msg = factory(6)
                msg.shape = self.world.shape
                msg.size = self.world.size
                self.transport.send(msg)
                self.state = SS.GAME_IN_PROGRESS
            else:
                raise RuntimeError
        elif self.state is SS.GAME_IN_PROGRESS:
            if msg.id == 8:
                # Coordinates arrive as a space-separated string, e.g. "3 4".
                args = [int(x) for x in msg.coord.split(" ")]
                self.player.move(Coord(*args))
            else:
                raise RuntimeError
Exemple #5
0
    def __init__(self, work_dir, config, key='server.key', cert='server.crt', **kwargs):
        """
            Main class which runs Beeswarm in Honeypot mode.

        :param work_dir: Working directory (usually the current working directory)
        :param config: Beeswarm configuration dictionary, None if no configuration was supplied.
        :param key: Key file used for SSL enabled capabilities
        :param cert: Cert file used for SSL enabled capabilities
        """
        # Bootstrap the data directory / default environment if missing.
        if config is None or not os.path.isdir(os.path.join(work_dir, 'data')):
            Honeypot.prepare_environment(work_dir)

        self.work_dir = work_dir
        self.config = config
        self.key = os.path.join(work_dir, key)
        self.cert = os.path.join(work_dir, cert)
        self._servers = []
        self._server_greenlets = []

        self.honeypot_id = self.config['general']['id']
        Session.honeypot_id = self.honeypot_id

        # write ZMQ keys to files - as expected by pyzmq
        extract_keys(work_dir, config)
        # First run only: generate a self-signed certificate and publish
        # both the key and the certificate over the server relay socket.
        if not (os.path.isfile(os.path.join(work_dir, 'server.key'))):
            cert_info = config['certificate_info']
            # NOTE(review): the true branch re-assigns common_name to itself
            # (a no-op); only the fallback to the detected IP has any effect.
            if cert_info['common_name']:
                cert_info['common_name'] = cert_info['common_name']
            else:
                cert_info['common_name'] = get_most_likely_ip()

            cert, priv_key = create_self_signed_cert(cert_info['country'], cert_info['state'],
                                                     cert_info['organization'], cert_info['locality'],
                                                     cert_info['organization_unit'], cert_info['common_name'])

            cert_path = os.path.join(work_dir, 'server.crt')
            key_path = os.path.join(work_dir, 'server.key')
            with open(cert_path, 'w') as certfile:
                certfile.write(cert)
            with open(key_path, 'w') as keyfile:
                keyfile.write(priv_key)
            send_zmq_push(SocketNames.SERVER_RELAY.value,
                          '{0} {1} {2}'.format(Messages.KEY.value, self.honeypot_id, priv_key))
            send_zmq_push(SocketNames.SERVER_RELAY.value,
                          '{0} {1} {2}'.format(Messages.CERT.value, self.honeypot_id, cert))

        # Optionally discover our external IP via a web service.
        if self.config['general']['fetch_ip']:
            try:
                url = 'http://api.externalip.net/ip'
                req = requests.get(url)
                self.honeypot_ip = req.text
                logger.info('Fetched {0} as external ip for Honeypot.'.format(self.honeypot_ip))
            except (Timeout, ConnectionError) as e:
                logger.warning('Could not fetch public ip: {0}'.format(e))
        else:
            self.honeypot_ip = ''

        # spawning time checker
        if self.config['timecheck']['enabled']:
            Greenlet.spawn(self.check_time)
Exemple #6
0
def handle(sock, addr):
    """Handle one device socket: verify registration, then either run a
    one-shot command or establish a long-lived connection."""
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    deviceId, command, params = unpack.read(sock)
    try:
        Device.objects(deviceId=deviceId).get()
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit/
        # KeyboardInterrupt). Unknown device: reject and drop the connection.
        sock.send("register first!")
        return None
    # One-shot command: dispatch to its handler and return.
    if command in ComandList:
        ComandList[command](sock, deviceId, params)
        return None
    # Establish a long-lived connection.
    elif command == "conn":
        dc = DeviceConnect(sock, addr)
        if deviceId not in DeviceList:
            dt = DeviceTerminal(deviceId)
            # Greenlet.spawn() already starts the greenlet — the original's
            # extra .start() call was a no-op and has been removed.
            Greenlet.spawn(timeLiving, dt)
            DeviceList[deviceId] = dt
        else:
            dt = DeviceList[deviceId]
            if dt.conn:
                dt.conn.stop()
        dt.connect(dc)
        # Background greenlet pumps queued messages out to the device.
        Greenlet.spawn(DeviceSocketSend, dt, dc)
Exemple #7
0
    def reset_normalized(self):
        """Deletes all normalized data from the datastore."""

        logger.info("Initiating database reset - all normalized data will be deleted. (Starting timer)")
        start = time.time()
        # Drop every collection except the system indexes and the raw hpfeed(s)
        # source collections, which are needed to re-run normalization.
        for collection in self.db.collection_names():
            if collection not in ["system.indexes", "hpfeed", "hpfeeds"]:
                logger.warning("Dropping collection: {0}.".format(collection))
                self.db.drop_collection(collection)
        logger.info("All collections dropped. (Elapse: {0})".format(time.time() - start))
        # Dropping indexes first makes the bulk update below much faster.
        logger.info("Dropping indexes before bulk operation.")
        self.db.hpfeed.drop_indexes()
        logger.info("Indexes dropped(Elapse: {0}).".format(time.time() - start))
        logger.info("Resetting normalization flags from hpfeeds collection.")
        # Mark every hpfeed document as un-normalized and clear error state.
        self.db.hpfeed.update(
            {}, {"$set": {"normalized": False}, "$unset": {"last_error": 1, "last_error_timestamp": 1}}, multi=True
        )
        logger.info("Done normalization flags from hpfeeds collection.(Elapse: {0}).".format(time.time() - start))
        logger.info("Recreating indexes.")
        self.ensure_index()
        logger.info("Done recreating indexes (Elapse: {0})".format(time.time() - start))

        logger.info("Full reset done in {0} seconds".format(time.time() - start))

        # This is a one-off job to generate stats for hpfeeds which takes a while.
        Greenlet.spawn(self.rg.do_legacy_hpfeeds)
Exemple #8
0
def main():
    """Start the session greenlet and an auxiliary HTTP server, then block
    serving the WebSocket-capable WSGI RPC server."""
    global session
    session = Session()
    background = [Greenlet.spawn(session.start)]
    aux_http = HTTPServer(("", 8081), SimpleHTTPRequestHandler)
    background.append(Greenlet.spawn(aux_http.serve_forever))
    rpc = WSGIServer(("", 8000), rpc_server, handler_class=WebSocketHandler)
    rpc.serve_forever()
Exemple #9
0
def tracking(response, *args, **kwargs):
    """Fire-and-forget a Google Analytics page view for the current request,
    then pass the response through unchanged."""
    utm_url = track_page_view(request)
    request_env = request.environ
    environ = {
        "HTTP_USER_AGENT": request_env.get("HTTP_USER_AGENT", "unknown"),
        "HTTP_ACCEPT_LANGUAGE": request_env.get("HTTP_ACCEPT_LANGUAGE", ""),
    }
    # Report in the background so the response is not delayed.
    Greenlet.spawn(send_request_to_google_analytics, utm_url, environ)
    return response
Exemple #10
0
def print_dots():
    """This Greenlet prints dots to the console which is useful for making
    sure that other greenlets are properly not blocking."""
    def m():
        # Loops forever; sleep() yields to the gevent hub between dots.
        while True:
            # Fixed: the original line ended with a stray comma (a Python 2
            # `print` habit) that built and discarded a 1-tuple each pass.
            sys.stdout.write(".")
            sys.stdout.flush()
            sleep(.02)
    Greenlet.spawn(m)
Exemple #11
0
def cron(callback, interval):
    """Add new cronjob.
       callback -- the callable to run.
       interval -- timedelta specifying when the cronjob will be called.
    """
    # NOTE(review): the docstring says *interval* is a timedelta, but a
    # timedelta never compares equal to the int 0 on Python 3, so this
    # guard only fires if callers pass a plain number — confirm.
    if interval == 0:
        error = "cron: your timedelta has a too small resolution (< 1s)"
        raise ValueError(error)
    # _cron runs in its own greenlet so this call returns immediately.
    Greenlet.spawn(_cron, callback, interval)
 def _eventCallback(self, event):
     # Callback for raw backend protocol event strings (Python 2 code).
     print event
     # Scheduler events: refresh the upcoming-recordings list immediately.
     if event.startswith('BACKEND_MESSAGE[]:[]SYSTEM_EVENT SCHEDULER_'):
         self.updateUpcoming()

     if event.startswith('BACKEND_MESSAGE[]:[]SYSTEM_EVENT REC_'):
         # sometimes we need to wait a second or so, for recordings to actually start.
         print "Recording backend event. Triggering deferred greenlet."
         # NOTE(review): _deferredRecordingUpdate is a bound method, so it
         # already receives self; passing self again hands it a second
         # argument — confirm the extra argument is intentional.
         Greenlet.spawn(self._deferredRecordingUpdate, self)
Exemple #13
0
    def __call__(self):
        """Run one world tick, then reschedule this callable while _run is set."""
        log.debug("TICK")

        # Plain loops instead of list comprehensions built purely for their
        # side effects (the original allocated and discarded two lists).
        for obj in self.world.objects:
            obj.ticked(False)
        for obj in self.world.objects:
            obj.tick()

        gevent.sleep(1)
        if self._run:
            # Spawn ourselves again to produce the next tick.
            Greenlet.spawn(self)
Exemple #14
0
    def start_serving(self):
        """ Starts services.

        Spawns the session consumer, starts one HiveStreamServer per enabled
        capability (SSL-wrapped when the capability name ends in 's'), drops
        privileges, and blocks while the servers run.
        """

        #will contain HiveStreamServer objects
        self.servers = []
        self.server_greenlets = []
        #will contain Session objects
        self.sessions = {}

        self.public_ip = self.config.get('public_ip', 'public_ip')
        self.fetch_ip = self.config.getboolean('public_ip', 'fetch_public_ip')

        #greenlet to consume the provided sessions
        self.session_consumer = consumer.Consumer(self.sessions, public_ip=self.public_ip, fetch_public_ip=self.fetch_ip)
        Greenlet.spawn(self.session_consumer.start)

        #protocol handlers
        for c in handlerbase.HandlerBase.__subclasses__():

            cap_name = 'cap_' + c.__name__

            if not self.config.has_section(cap_name):
                logger.warning(
                    "Not loading {0} capability because it has no option in configuration file.".format(c.__name__))
                continue
                #skip loading if disabled
            if not self.config.getboolean(cap_name, 'Enabled'):
                continue

            port = self.config.getint(cap_name, 'port')
            #carve out the options for this specific service
            options = list2dict(self.config.items(cap_name))
            cap = c(self.sessions, options)

            try:
                socket = create_socket(('0.0.0.0', port))
                #Convention: All capability names which end in 's' will be wrapped in ssl.
                if cap_name.endswith('s'):
                    server = HiveStreamServer(socket, cap.handle_session,
                                              keyfile=self.key, certfile=self.cert)
                else:
                    server = HiveStreamServer(socket, cap.handle_session)

                self.servers.append(server)
                # BUG FIX: the original did `Greenlet(server.start())`, which
                # called start() immediately and wrapped its None return value
                # in a greenlet that was never run. Start the server here (so
                # bind/listen errors are still caught below) and keep a
                # greenlet alive for as long as the server is serving.
                server.start()
                server_greenlet = Greenlet.spawn(server.serve_forever)
                self.server_greenlets.append(server_greenlet)

            except _socket.error as ex:
                logger.error("Could not start {0} server on port {1}. Error: {2}".format(c.__name__ , port, ex))
            else:
                logger.info('Started {0} capability listening on port {1}'.format(c.__name__, port))

        drop_privileges()

        logger.info("Hive running - see log file (hive.log) for attack events.")
        gevent.joinall(self.server_greenlets)
Exemple #15
0
    def run(self):
        """Install the SIGINT handler, start the update greenlet, and serve
        client connections until the server is stopped."""
        signal.signal(signal.SIGINT, self.signal_handler)

        # Cap concurrent client greenlets at 10000.
        self.pool = Pool(10000)

        Greenlet.spawn(Server.update_all, self)

        address = (self.host, self.port)
        self.server = StreamServer(address, self.handle, spawn=self.pool)
        self.logger.info('Starting FyIRCd on {0}:{1}'.format(self.host, self.port))
        self.server.serve_forever()
Exemple #16
0
def main():
    """Start the session greenlet and an auxiliary HTTP server, then block
    serving SockJS until interrupted."""
    global session
    session = Session()
    background = [Greenlet.spawn(session.start)]
    aux_http = HTTPServer(("", 8081), SimpleHTTPRequestHandler)
    background.append(Greenlet.spawn(aux_http.serve_forever))
    try:
        sockjs = SockJSServer(("", 8000), router, trace=True)
        sockjs.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: tear the SockJS server down cleanly.
        sockjs.kill()
Exemple #17
0
    def process_message(channel, method, properties, body):
        """AMQP callback: decode *body* as JSON, dispatch it to the handler
        in a new greenlet, and acknowledge the delivery."""
        logging.debug("Received message: %s", body)
        try:
            message = json.loads(body)
        except (TypeError, ValueError):
            logging.error("Not JSON-encoded message received: %s", body)
            # BUG FIX: the original fell through here with `message` undefined,
            # so dispatching raised a NameError. Ack the poison message (to
            # avoid an endless redelivery loop) and bail out.
            channel.basic_ack(delivery_tag = method.delivery_tag)
            return

        try:
            Greenlet.spawn(handler.dispatch, message)
        except Exception:
            logging.exception("Unhandled exception: \n Message = %s\n\n", body)

        channel.basic_ack(delivery_tag = method.delivery_tag)
Exemple #18
0
    def __init__(self, config="hive.cfg"):
        """Read hpfeeds connection settings from *config* and start the
        background feed greenlet."""
        parser = ConfigParser()
        parser.read(config)

        section = "log_hpfeed"
        self.host = parser.get(section, "host")
        self.port = parser.getint(section, "port")
        # Credentials are byte strings, as required by the hpfeeds wire protocol.
        self.secret = parser.get(section, "secret").encode('latin1')
        self.chan = parser.get(section, "chan").encode('latin1')
        self.ident = parser.get(section, "ident").encode('latin1').strip()
        self.enabled = True

        # Background greenlet handles authentication and socket errors.
        Greenlet.spawn(self._start)
Exemple #19
0
    def _loop(self):
        """Feed queued games to a fixed pool of playback channels until a
        sentinel (empty candidates or result) arrives."""
        # Pool of CHANNELS channel indices available for playing games.
        self.channels = queue.Queue()
        for index in range(CHANNELS):
            self.channels.put(index)

        self.upcoming = queue.Queue()
        while True:
            candidates, result = self.upcoming.get()
            if not (candidates and result):
                break
            # Blocks until a channel index is free.
            channel = self.channels.get()
            Greenlet.spawn(self._play, channel, candidates, result)
Exemple #20
0
    def start(self):
        """ Starts services.

        Spawns the session consumer, starts one StreamServer per enabled
        capability (SSL-wrapped when the capability name ends in 's'),
        and blocks while the servers run.
        """
        self.servers = []
        self.server_greenlets = []
        #will contain Session objects
        self.sessions = {}

        #greenlet to consume the provided sessions
        self.session_consumer = Consumer(self.sessions, self.honeypot_ip, self.config, self.status, self.work_dir)
        Greenlet.spawn(self.session_consumer.start)

        #protocol handlers
        for c in handlerbase.HandlerBase.__subclasses__():

            cap_name = c.__name__.lower()

            if cap_name not in self.config['capabilities']:
                logger.warning(
                    "Not loading {0} capability because it has no option in configuration file.".format(c.__name__))
                continue
                #skip loading if disabled
            if not self.config['capabilities'][cap_name]['enabled']:
                continue

            port = self.config['capabilities'][cap_name]['port']
            #carve out the options for this specific service
            options = self.config['capabilities'][cap_name]
            cap = c(self.sessions, options, self.users, self.work_dir)

            try:
                #Convention: All capability names which end in 's' will be wrapped in ssl.
                if cap_name.endswith('s'):
                    server = StreamServer(('0.0.0.0', port), cap.handle_session,
                                          keyfile=self.key, certfile=self.cert)
                else:
                    server = StreamServer(('0.0.0.0', port), cap.handle_session)

                self.servers.append(server)
                self.status['enabled_capabilities'].append(cap_name)
                # BUG FIX: `Greenlet(server.start())` invoked start() right
                # away and wrapped its None return in a greenlet that was
                # never run. Start the server here (so socket errors are
                # still caught below) and keep a greenlet alive while the
                # server is serving.
                server.start()
                server_greenlet = Greenlet.spawn(server.serve_forever)
                self.server_greenlets.append(server_greenlet)

            except _socket.error as ex:
                logger.error("Could not start {0} server on port {1}. Error: {2}".format(c.__name__, port, ex))
            else:
                logger.info('Started {0} capability listening on port {1}'.format(c.__name__, port))

        logger.info("Honeypot running - see log file (honeypot.log) for attack events.")

        gevent.joinall(self.server_greenlets)
Exemple #21
0
 def run(self):
     '''Start running the server'''
     self.app.route('/<path:path>')(self.handle)
     # Three launch modes: a forked child process, a gevent greenlet, or
     # blocking in the current process.
     if self.mode == 'fork':
         logger.info('Forking server with %s' % self.server)
         self.pid = os.fork()
         if self.pid == 0:
             # I'm the child process
             self._run()
             exit(0)
         else:
             # Parent: give the child a moment to come up before returning.
             import time
             time.sleep(1)
             logger.info('Server started in %s' % self.pid)
     elif self.mode == 'gevent':
         logger.info('Launching server in greenlet %s' % self.server)
         from gevent import Greenlet
         # NOTE(review): in this mode self.pid holds the Greenlet object,
         # not an OS pid — callers inspecting self.pid must handle both.
         self.pid = Greenlet.spawn(self._run)
         # We actually need to wait a very short time before saying that
         # it's started. I believe that all this does is to yield control
         # from this green thread to the other green thread briefly
         self.pid.join(0.01)
     else:
         # Blocking
         self._run()
Exemple #22
0
    def data_scan(self, args, quiet=False, worker_count=1):
        """
        Invoke cephfs-data-scan with the passed arguments, and return its stdout

        :param worker_count: if greater than 1, multiple workers will be run
                             in parallel and the return value will be None
        """

        workers = []

        for n in range(worker_count):
            if worker_count > 1:
                # data-scan args first token is a command, followed by args to it.
                # insert worker arguments after the command.
                cmd = args[0]
                # Idiom fix: str(x) instead of x.__str__(), range(n) instead
                # of range(0, n).
                worker_args = [cmd] + ["--worker_n", str(n), "--worker_m", str(worker_count)] + args[1:]
            else:
                worker_args = args

            # wargs is bound as a default argument so each greenlet keeps its
            # own copy (avoids the late-binding closure pitfall).
            workers.append(Greenlet.spawn(lambda wargs=worker_args:
                                          self._run_tool("cephfs-data-scan", wargs, None, quiet)))

        # Wait for all workers; get() re-raises any worker exception.
        for w in workers:
            w.get()

        if worker_count == 1:
            return workers[0].value
        else:
            return None
Exemple #23
0
 def __init__(self, cappedCol, fields=None, startFrom=True, channel=None, func=None,
              autoJoin=True, retryOnDeadCursor=False, query=None):
     """Tail a capped MongoDB collection in a greenlet, delivering each
     document via onDoc (overridable through *func*).

     Raises ValueError if *cappedCol* is not a capped collection.
     """
     if not cappedCol.options().get('capped'):
         # Fixed: the original had an unreachable `return` after this raise.
         raise ValueError("collection_is_not_capped")
     #super(SubToCapped, self).__init__(channel if channel else cappedCol.full_name)
     #Greenlet.__init__(self)
     if func:
         self.onDoc = func
     self.collection = cappedCol
     self.isOpLog = cappedCol.full_name == 'local.oplog.rs'
     self.query = query
     #self.query = None if (isOpLog and startFrom) else query #@note: not compatible with
     #_QUERY_OPTIONS['oplog_replay']  see:database error: no ts field in query
     self.fields = fields
     self.retryOnDeadCursor = retryOnDeadCursor
     self.startFrom = startFrom
     self.channel = channel if channel else cappedCol.full_name
     # Cursor/session bookkeeping, filled in by the tailing greenlet.
     self.id_start = None
     self.id_last = None
     self.t_start = datetime.utcnow()
     self.dt_last = None
     self.docs_fetched = 0
     self.docs_skipped = 0
     self.__stop = False
     #self.start()
     self.glet = Greenlet.spawn(self._run)
     if autoJoin:
         self.join()
Exemple #24
0
    def start_server(self, host, port, linked_servers=None):
        """Create and start an ObjectoPlex server on (host, port).

        :param linked_servers: optional list of peer servers to link to.
        :return: the running ObjectoPlex instance.
        """
        # BUG FIX: the original used a mutable default argument
        # (linked_servers=[]); normalize a None sentinel instead.
        if linked_servers is None:
            linked_servers = []
        result = ObjectoPlex((host, port),
                             middlewares=[
                                 PingPongMiddleware(),
                                 LegacySubscriptionMiddleware(),
                                 StatisticsMiddleware(),
                                 ChecksumMiddleware(),
                                 RoutingMiddleware(),
                                 ],
                             linked_servers=linked_servers)
        # Stop cleanly on SIGTERM/SIGINT.
        gevent.signal(signal.SIGTERM, result.stop)
        gevent.signal(signal.SIGINT, result.stop)
        Greenlet.spawn(result.serve_forever)
        # Give the server greenlet a moment to start accepting.
        sleep(0.1)

        return result
Exemple #25
0
 def start(self):
     """ Put job in waiting queue, it will start when dependencies are
     fulfilled and resources are available"""
     # An orphan job with no recorded state short-circuits straight to success.
     if self.state == self.STATE_UNDEF and self.orphan():
         logging.debug("{0} is short circuiting ({1})".format(self.name, self.state))
         self.events[self.STATE_RUNNING].set()
         self.events[self.STATE_SUCCESSFULL].set()
         return True
     # Using STATE_SUCCESSFULL instead of is_successfull because we don't want to skip starting undef jobs
     if self.state == self.STATE_SUCCESSFULL:
         return False
     # Otherwise launch the job body in its own greenlet.
     Greenlet.spawn(self._run)
     return True
  def _proc_loop(self, pipe, num_greenlets):
    # Worker-process main loop (Python 2 code): reads tasks from *pipe*,
    # executes them on a local GreenletExecutor, and writes results back.
    # Maps each task's result object to the task id it belongs to.
    result_to_id = {}
    executor = GreenletExecutor(num_greenlets=num_greenlets)
    # Event to indicate that there are some tasks in flight and so write_loop
    # should start waiting on their results.
    tasks_in_flight = Event()

    def on_kill(signum, frame):
      # Push the shutdown sentinel so read_loop exits cleanly.
      pipe.put(None)
      return

    signal.signal(signal.SIGINT, on_kill)  # When server is terminated.

    def signal_tasks_in_flight():
      if not tasks_in_flight.is_set():
        tasks_in_flight.set()

    def read_loop():
      """
      Reads all tasks sent across the pipe by the parent process and passes
      them to the local GreenletExecutor instance.
      """
      while True:
        task_args = pipe.get()
        if task_args is None:
          # Shutdown sentinel: stop the executor and wake write_loop.
          executor.shutdown()
          signal_tasks_in_flight()
          return
        task = Task(*task_args)
        result_to_id[task.result] = task.id
        executor._submit(task)
        signal_tasks_in_flight()

    read_greenlet = Greenlet.spawn(read_loop)

    def write_loop():
      """
      Waits for results of tasks being executed by the local GreenletExecutor
      and pipes the result back to the parent (callee) process.
      """
      while True:
        if not result_to_id:
          # Wait for some result to be ready.
          tasks_in_flight.clear()
          try:
            # If the reader has exited there will never be more tasks.
            if read_greenlet.ready():
              raise LoopExit
            tasks_in_flight.wait()
            continue
          except LoopExit, e:
            # Echo the shutdown sentinel back to the parent and stop.
            pipe.put(None)
            return
        ready_results = wait(objects=result_to_id.keys(), count=1)
        for result in ready_results:
          try:
            value = result.get()
          except Exception, e:
            # A failed task ships its exception back as the value.
            value = e
          pipe.put((result_to_id[result], result.successful(), value))
          del result_to_id[result]
Exemple #27
0
    def __init__(self, broker):
        """Bind this handler to *broker*'s channel and start its greenlet.

        Raises NotImplementedError when a subclass did not set CHANNEL_NAME.
        """
        # Idiom fix: identity check against None, not equality.
        if self.CHANNEL_NAME is None:
            raise NotImplementedError('Class %s does not set the CHANNEL_NAME to use.' % self.__class__.__name__)

        self.broker = broker
        self.broker.initChannel(self.CHANNEL_NAME)
        # NOTE(review): _handler is a bound method, so it already receives
        # self; passing self again supplies it twice — confirm the extra
        # argument is intentional.
        self.greenlet = Greenlet.spawn(self._handler, self)
    def __init__(self, concurrent = 2, minSecondsBetweenRequests = 0.15, defaultTimeout = None, retryStrategy = Strict(), responsePreprocessor = ResponsePreprocessor()):
        # Validate the pluggable strategy objects up front.
        if not isinstance(retryStrategy, RetryStrategy):
            raise TypeError('retryStrategy must be an instance of RetryStrategy, not %s' % type(retryStrategy))

        if not isinstance(responsePreprocessor, ResponsePreprocessor):
            raise TypeError('responsePreprocessor must be an instance of ResponsePreprocessor, not %s' % type(responsePreprocessor))

        self.session = Session()
        # Pool caps the number of concurrently running request greenlets.
        self.pool = Pool(concurrent)
        self.minSecondsBetweenRequests = minSecondsBetweenRequests
        self.retryStrategy = retryStrategy
        self.responsePreprocessor = responsePreprocessor

        # Mount a shared adapter on both schemes so defaultTimeout applies
        # to every request made through this session.
        self._adapter = _DefaultTimeoutHTTPAdapter()
        self.session.mount('http://', self._adapter)
        self.session.mount('https://', self._adapter)
        self._adapter.defaultTimeout = defaultTimeout

        # Internal queues and signalling for the dispatch greenlet.
        self._requestGroups = 0
        self._requestAdded = Event()
        self._requestQueue = _RequestQueue()
        self._retryQueue = _RetryQueue()

        self._killed = False

        # Track the dispatcher greenlet in the class-level registry.
        Requests._runningRequests.add(Greenlet.spawn(self._run))
def socketio(request):
    """The socket.io view."""
    io = request.environ['socketio']
    redis_sub = redis_client().pubsub()
    user = username(request.user)

    # Subscribe to incoming pubsub messages from redis.
    def subscriber(io):
        redis_sub.subscribe(room_channel())
        redis_client().publish(room_channel(), user + ' connected.')
        while io.connected():
            for message in redis_sub.listen():
                if message['type'] == 'message':
                    io.send(message['data'])
    greenlet = Greenlet.spawn(subscriber, io)

    # Listen to incoming messages from client.
    while io.connected():
        message = io.recv()
        if message:
            redis_client().publish(room_channel(), user + ': ' + message[0])

    # Disconnected. Publish disconnect message and kill subscriber greenlet.
    redis_client().publish(room_channel(), user + ' disconnected')
    # NOTE(review): gevent exposes GreenletExit at module level
    # (gevent.GreenletExit); accessing it as Greenlet.GreenletExit depends
    # on attribute lookup through the greenlet base class — confirm it
    # resolves on the gevent version in use.
    greenlet.throw(Greenlet.GreenletExit)

    return HttpResponse()
Exemple #30
0
def parent(my_id):
    # Spawn NUM_CHILDREN child greenlets and wait for all of them to finish
    # (Python 2 code).
    print "parent start", my_id
    # Random stagger before spawning children.
    time.sleep(random.random())
    greenlets = []
    for i in xrange(NUM_CHILDREN):
        greenlets.append(Greenlet.spawn(child, my_id, i))
    for greenlet in greenlets: greenlet.join()
    print "parent end", my_id
Exemple #31
0
def api_backend(raiden_network, rest_api_port_number):
    """Pytest yield-fixture: run the Raiden REST API server in a background
    greenlet, yield (api_server, rest_api), and kill it on teardown."""
    raiden_api = RaidenAPI(raiden_network[0].raiden)
    rest_api = RestAPI(raiden_api)
    api_server = APIServer(rest_api)
    api_server.flask_app.config['SERVER_NAME'] = 'localhost:{}'.format(
        rest_api_port_number)

    # TODO: Find out why tests fail with debug=True
    server = Greenlet.spawn(
        api_server.run,
        port=rest_api_port_number,
        debug=False,
        use_evalex=False,
    )

    # Fixes flaky test, were requests are done prior to the server initializing
    # the listening socket.
    # https://github.com/raiden-network/raiden/issues/389#issuecomment-305551563
    wait_for_listening_port(rest_api_port_number)

    yield api_server, rest_api

    # Teardown: stop the server greenlet, waiting up to 10s.
    server.kill(block=True, timeout=10)
Exemple #32
0
def api_backend(rest_api_port_number):
    """Pytest yield-fixture: run the REST API server (without a Raiden API
    yet) in a background greenlet and kill it on teardown."""
    # Initializing it without raiden_service.api here since that is a
    # function scope fixture. We will inject it to rest_api object later
    rest_api = RestAPI(None)
    api_server = APIServer(rest_api)
    api_server.flask_app.config['SERVER_NAME'] = 'localhost:{}'.format(rest_api_port_number)

    # TODO: Find out why tests fail with debug=True
    server = Greenlet.spawn(
        api_server.run,
        rest_api_port_number,
        debug=False,
        use_evalex=False,
    )

    # Fixes flaky test, were requests are done prior to the server initializing
    # the listening socket.
    # https://github.com/raiden-network/raiden/issues/389#issuecomment-305551563
    wait_for_listening_port(rest_api_port_number)

    yield api_server, rest_api

    # Teardown: stop the server greenlet, waiting up to 10s.
    server.kill(block=True, timeout=10)
def spanning_threads():
    """Demonstrate three equivalent ways of spawning greenlets and wait
    for all of them to complete."""
    def foo(message, n):
        """Sleep for n seconds, then print the message."""
        gevent.sleep(n)
        print(message)

    greenlets = [
        # Greenlet.spawn creates AND starts a greenlet for foo.
        Greenlet.spawn(foo, "Hello", 1),
        # gevent.spawn is the module-level wrapper doing the same thing.
        gevent.spawn(foo, "I live!", 2),
        # Lambda expressions work just as well as named functions.
        gevent.spawn(lambda x: (x + 1), 2),
    ]

    # Block until every greenlet has finished.
    gevent.joinall(greenlets)
Exemple #34
0
 def __init__(self,
              cappedCol,
              fields=None,
              startFrom=True,
              channel=None,
              func=None,
              autoJoin=True,
              retryOnDeadCursor=False,
              query=None):
     """Tail a capped MongoDB collection in a background greenlet.

     :param cappedCol: pymongo collection to tail; must be capped, since
         tailable cursors only work on capped collections.
     :param fields: optional projection for the tail query.
     :param startFrom: start-position flag used by the tailing loop --
         TODO confirm exact semantics against _run.
     :param channel: pub/sub channel name; defaults to the collection's
         full name.
     :param func: optional per-document callback; bound to self.onDoc.
     :param autoJoin: when True, block until the tailing greenlet exits.
     :param retryOnDeadCursor: retry tailing if the cursor dies.
     :param query: optional filter for tailed documents.
     :raises ValueError: if ``cappedCol`` is not a capped collection.
     """
     if not cappedCol.options().get('capped'):
         # Tailable cursors require a capped collection.
         raise ValueError("collection_is_not_capped")
     if func:
         self.onDoc = func
     self.collection = cappedCol
     self.isOpLog = cappedCol.full_name == 'local.oplog.rs'
     # query is kept as-is even for the oplog: forcing it to None is not
     # compatible with _QUERY_OPTIONS['oplog_replay']
     # ("database error: no ts field in query").
     self.query = query
     self.fields = fields
     self.retryOnDeadCursor = retryOnDeadCursor
     self.startFrom = startFrom
     self.channel = channel if channel else cappedCol.full_name
     self.id_start = None
     self.id_last = None
     self.t_start = datetime.utcnow()
     self.dt_last = None
     self.docs_fetched = 0
     self.docs_skipped = 0
     self.__stop = False
     # Start tailing in a background greenlet.
     self.glet = Greenlet.spawn(self._run)
     if autoJoin:
         self.join()
Exemple #35
0
    def __init__(self,
                 work_dir,
                 config,
                 key='server.key',
                 cert='server.crt',
                 **kwargs):
        """
            Main class which runs Beeswarm in Honeypot mode.

        :param work_dir: Working directory (usually the current working directory)
        :param config: Beeswarm configuration dictionary, None if no configuration was supplied.
        :param key: Key file used for SSL enabled capabilities
        :param cert: Cert file used for SSL enabled capabilities
        """
        import sys

        # The fs package API changed after 0.5.4; refuse to run otherwise.
        # (The original called os.exit, which does not exist and would raise
        # AttributeError; sys.exit is the correct call.)
        if fs.__version__ != '0.5.4':
            sys.exit('the python fs package must be version 0.5.4')

        if config is None or not os.path.isdir(os.path.join(work_dir, 'data')):
            Honeypot.prepare_environment(work_dir)

        self.work_dir = work_dir
        self.config = config
        self.key = os.path.join(work_dir, key)
        self.cert = os.path.join(work_dir, cert)
        self._servers = []
        self._server_greenlets = []

        self.honeypot_id = self.config['general']['id']
        Session.honeypot_id = self.honeypot_id

        # write ZMQ keys to files - as expected by pyzmq
        extract_keys(work_dir, config)
        # First run: generate a self-signed certificate and publish the
        # key/cert to the server relay.
        if not (os.path.isfile(os.path.join(work_dir, 'server.key'))):
            cert_info = config['certificate_info']
            # Fall back to our most likely IP when no common name was set.
            if not cert_info['common_name']:
                cert_info['common_name'] = get_most_likely_ip()

            cert, priv_key = create_self_signed_cert(
                cert_info['country'], cert_info['state'],
                cert_info['organization'], cert_info['locality'],
                cert_info['organization_unit'], cert_info['common_name'])

            cert_path = os.path.join(work_dir, 'server.crt')
            key_path = os.path.join(work_dir, 'server.key')
            with open(cert_path, 'w') as certfile:
                certfile.write(cert)
            with open(key_path, 'w') as keyfile:
                keyfile.write(priv_key)
            send_zmq_push(
                SocketNames.SERVER_RELAY.value,
                '{0} {1} {2}'.format(Messages.KEY.value, self.honeypot_id,
                                     priv_key))
            send_zmq_push(
                SocketNames.SERVER_RELAY.value,
                '{0} {1} {2}'.format(Messages.CERT.value, self.honeypot_id,
                                     cert))

        # Optionally look up our public IP; failure is non-fatal.
        if self.config['general']['fetch_ip']:
            try:
                url = 'http://api.externalip.net/ip'
                req = requests.get(url)
                self.honeypot_ip = req.text
                logger.info('Fetched {0} as external ip for Honeypot.'.format(
                    self.honeypot_ip))
            except (Timeout, ConnectionError) as e:
                logger.warning('Could not fetch public ip: {0}'.format(e))
        else:
            self.honeypot_ip = ''

        # spawning time checker
        if self.config['timecheck']['enabled']:
            Greenlet.spawn(self.check_time)
def handle(environ, start_response):
    """WSGI app that streams the current time.

    The response body is a queue which a background greenlet
    (``current_time``) feeds; the queue itself is returned as the
    iterable WSGI body.
    """
    start_response('200 OK', [('Content-Type', 'text/plain')])
    response_body = queue.Queue()
    Greenlet.spawn(current_time, response_body)
    return response_body
Exemple #37
0
    def run(self):
        """Worker entry point: service the request in-queue and keep the
        Xbox console session alive, publishing results as XboxEvents (and
        some plain strings) on ``self.outq``.

        Spawns ``mainloop`` in a greenlet and blocks until it exits,
        i.e. until a POISON_PILL item is received on ``self.inq``.
        """
        def on_text(payload):
            # Relay console system-text input events to the consumer queue.
            self.outq.put(XboxEvent(EVT_XBOX_SYSTEMTEXT_INPUT_ID, payload))

        def on_connect_request(req: ConnectionRequest):
            # Load cached auth tokens and immediately re-persist them.
            auth_mgr = AuthenticationManager.from_file(TOKENS_FILE)
            auth_mgr.dump(TOKENS_FILE)
            userhash = auth_mgr.userinfo.userhash
            token = auth_mgr.xsts_token.jwt
            # Pick the already-discovered console matching the requested UUID.
            for c in Console.discovered():
                if str(c.uuid) == str(req.console_dict['uuid']):
                    self._console = c
            if self._console is None:
                self.outq.put(
                    XboxEvent(EVT_XBOX_DISCONNECT_ID, "Failed to connect"))
                return
            self._console.add_manager(TextManager)
            self._console.text.on_systemtext_input += on_text
            self._console.protocol.on_timeout += lambda: self._timedout.set()
            try:
                status = self._console.connect(userhash, token)
            except OSError as e:
                self.outq.put(
                    XboxEvent(EVT_XBOX_DISCONNECT_ID,
                              f"Failed to connect {e}"))
                return
            self._console.wait(1)
            self.outq.put(XboxEvent(EVT_XBOX_CONNECT_ID,
                                    self._console.address))

        def mainloop(skip_connection=False):
            # Processes DiscoverRequest / ConnectionRequest / SystemTextSend /
            # DisconnectRequest / POISON_PILL items from inq; once connected,
            # sends periodic acks as a keepalive and auto-reconnects.
            while True:
                if skip_connection:
                    new_item = None
                    try:
                        new_item = self.inq.get_nowait()
                    except:
                        # NOTE(review): bare except treats ANY failure as
                        # "queue empty"; presumably only the queue's Empty
                        # exception is expected here -- confirm.
                        pass
                    if new_item is not None:
                        if isinstance(new_item, DiscoverRequest):
                            try:
                                discovered = Console.discover(
                                    addr=new_item.addr)
                            except OSError as e:
                                self.outq.put(
                                    XboxEvent(EVT_XBOX_DISCOVERYFAILURE_ID, e))
                            for console in discovered:
                                dc = DiscoveredConsole(console)
                                self.outq.put(
                                    XboxEvent(EVT_XBOX_DISCOVERED_CONSOLE_ID,
                                              dc))
                            if len(discovered) < 1:
                                # Nothing found: retry with a raw broadcast
                                # discovery on the underlying protocol.
                                try:
                                    Console.__protocol__.start()
                                    Console.__protocol__._discover(
                                        xbox.sg.factory.discovery(),
                                        xbox.sg.protocol.BROADCAST, 5)
                                except OSError as e:
                                    self.outq.put(
                                        XboxEvent(EVT_XBOX_DISCOVERYFAILURE_ID,
                                                  e))
                            self.inq.task_done()
                        if isinstance(new_item,
                                      ConnectionRequest) or isinstance(
                                          new_item, DiscoveredConsole):
                            on_connect_request(
                                new_item)  # this instantiates self._console
                            self.inq.task_done()
                        if isinstance(new_item, SystemTextSend):
                            if self._console is None or not self._console.connected:
                                error = "Failed to send text. Disconnected!"
                                self.outq.put(error)
                                print(error)
                            else:
                                self._console.send_systemtext_input(
                                    new_item.text)
                                self._console.finish_text_input()
                                self.outq.put(
                                    f"Sent {new_item.text} to console")
                                self.inq.task_done()
                        if isinstance(new_item, DisconnectRequest):
                            if self._console:
                                if self._console.connected:
                                    self._console.protocol._stop_event.set()
                                    self._console.disconnect()
                                    self.inq.task_done()
                                    self.outq.put(
                                        XboxEvent(
                                            EVT_XBOX_DISCONNECT_ID,
                                            self._console.address
                                            if self._console else None))
                                else:
                                    self.inq.task_done()
                                    self.outq.put(
                                        XboxEvent(
                                            EVT_XBOX_DISCONNECT_ID,
                                            self._console.address
                                            if self._console else None))
                            else:
                                self.inq.task_done()
                                self.outq.put(
                                    XboxEvent(
                                        EVT_XBOX_DISCONNECT_ID,
                                        self._console.address
                                        if self._console else None))
                        if new_item == POISON_PILL:
                            # Shutdown: disconnect cleanly and leave the loop.
                            if self._console:
                                if self._console.connected:
                                    self._console.disconnect()
                                    self._console = None
                            self.inq.put("STOP")
                            self.inq.task_done()
                            break
                    gevent.sleep(0)

                if self._console:
                    if self._console.connected:
                        # print("console instantiated. started:", self._console.protocol.started)
                        # print("console status:", self._console.connection_state)
                        # print("Closed?:", self._console.protocol.closed)
                        # print("Timedout?:", self._timedout.is_set())
                        try:
                            ""
                            gevent.sleep(
                                self._console.protocol.HEARTBEAT_INTERVAL)
                            self._console.protocol.ack([], [],
                                                       ServiceChannel.Core,
                                                       need_ack=True)
                        except (OSError, AttributeError) as e:
                            self._console.protocol.on_timeout()
                            self.outq.put(
                                XboxEvent(EVT_XBOX_DISCONNECT_ID,
                                          f"Failed to connect {e}"))
                        finally:
                            # Auto-reconnect when requested and timed out.
                            if self._stay_connected.is_set(
                            ) and self._timedout.is_set():
                                gevent.sleep(10)
                                on_connect_request(
                                    ConnectionRequest(self._console))
                gevent.sleep(0.1)

        event_thread = Greenlet.spawn(mainloop, True)
        # NOTE(review): Greenlet.spawn already starts the greenlet, so this
        # start() call looks redundant -- confirm before removing.
        event_thread.start()
        event_thread.join()
Exemple #38
0
    def start_client(self, cb):
        """Run ``start_client_sync`` in a greenlet, delivering its result
        to ``cb``; returns the spawned greenlet."""
        def _runner():
            cb(self.start_client_sync())

        return Greenlet.spawn(_runner)
def run_stress_worker(server_host_ip, server_port, num_runs, worker_model_real,
                      chunk_str):
    """
    Run the workers loop for the basic stress test. This involves
    creating a set of workers according to the keys in STRESS_KEYS_FOLDER then:

    - registering them with the server
    - get the current global model
    - send an update
    - Get the next global model.

    Parameters
    ----------

    server_host_ip: str
        The ip-address of the host of the server.

    server_port: int
        The port at which the server should listen to

    num_runs: int
        Number of runs of the sending of models etc. to perform

    worker_model_real: bool
        If true, the global model returned is a binary serialized version of
        MobileNetV2 that is used in the plantvillage example.

    chunk_str: str
        String giving the chunk of keys to use.
    """
    import sys

    workers = []
    # Either a realistically sized serialized model or a tiny stand-in blob.
    if worker_model_real:
        model_data = io.BytesIO()
        torch.save(models.mobilenet_v2(pretrained=True), model_data)
        bin_model = model_data.getvalue()
    else:
        bin_model = msgpack.packb("A 'local model update'!!")

    for fn in get_worker_keys_from_chunk(chunk_str):
        workers.append(
            SimpleLPWorker(server_host_ip, server_port,
                           os.path.join(STRESS_KEYS_FOLDER, fn)))

    num_workers = len(workers)
    for i, worker in enumerate(workers):
        logger.info(f'Registering {i} th worker')
        worker.worker.register_worker()

    # get the current global model and check
    for i, worker in enumerate(workers):
        print(
            f"Requesting global model for {worker.worker.worker_id} (no. {i}) "
        )
        worker.global_model_changed_callback(worker.worker.get_global_model())

    # Number of workers whose long poll completed in the current run.
    # Unlocked increment is presumably fine under cooperative greenlets --
    # confirm if worker callbacks ever run on real threads.
    done_count = 0

    def run_wg(gl_worker, num):
        nonlocal done_count
        logger.info(
            f"Starting long poll for {gl_worker.worker.worker_id} (no. {num})")
        gl_worker.global_model_changed_callback(
            gl_worker.worker.get_global_model())
        logger.info(f"Long poll for {gl_worker.worker.worker_id} finished")
        done_count += 1

    try:
        for run_no in range(num_runs):
            logger.info(f"********************** STARTING RUN {run_no + 1}:")
            sleep(5)
            for i, worker in enumerate(workers):
                response = worker.worker.send_model_update(bin_model)
                logger.info(
                    f"Response from server sending model update: {response}")
                logger.info(f"Spawning for worker {i}")
                Greenlet.spawn(run_wg, worker, i)
                # Stagger spawns so the server isn't hit all at once.
                if (i + 1) % 10 == 0:
                    sleep(0.5)

            while done_count < num_workers:
                sleep(1)
                logger.info(
                    f"{done_count} workers have received the global model update - need to get to {num_workers}..."
                )
            done_count = 0
    except Exception:
        # Log the full traceback (print(e) loses it) and exit non-zero so
        # the stress harness notices the failure.
        logger.exception("Stress worker run failed")
        sys.exit(1)
Exemple #40
0
    def _proc_loop(self, pipe, num_greenlets):
        """Child-process loop: execute tasks received over ``pipe`` on a
        local GreenletExecutor and stream results back to the parent.

        ``None`` is the shutdown sentinel in both directions on ``pipe``.

        :param pipe: duplex queue-like channel shared with the parent.
        :param num_greenlets: size of the local greenlet pool.
        """
        result_to_id = {}
        executor = GreenletExecutor(num_greenlets=num_greenlets)
        # Event to indicate that there are some tasks in flight and so write_loop
        # should start waiting on their results.
        tasks_in_flight = Event()

        def on_kill(signum, frame):
            # Push the shutdown sentinel so read_loop winds down cleanly.
            pipe.put(None)
            return

        signal.signal(signal.SIGINT, on_kill)  # When server is terminated.

        def signal_tasks_in_flight():
            if not tasks_in_flight.is_set():
                tasks_in_flight.set()

        def read_loop():
            """
      Reads all tasks sent across the pipe by the parent process and passes
      them to the local GreenletExecutor instance.
      """
            while True:
                task_args = pipe.get()
                if task_args is None:
                    executor.shutdown()
                    signal_tasks_in_flight()
                    return
                task = Task(*task_args)
                result_to_id[task.result] = task.id
                executor._submit(task)
                signal_tasks_in_flight()

        read_greenlet = Greenlet.spawn(read_loop)

        def write_loop():
            """
      Waits for results of tasks being executed by the local GreenletExecutor
      and pipes the result back to the parent (callee) process.
      """
            while True:
                if not result_to_id:
                    # Wait for some result to be ready.
                    tasks_in_flight.clear()
                    try:
                        if read_greenlet.ready():
                            raise LoopExit
                        tasks_in_flight.wait()
                        continue
                    # Python-3 compatible form (was `except LoopExit, e`,
                    # a SyntaxError on py3; the bound name was unused).
                    except LoopExit:
                        # Reader finished and nothing pending: signal the
                        # parent that we are done.
                        pipe.put(None)
                        return
                ready_results = wait(objects=result_to_id.keys(), count=1)
                for result in ready_results:
                    try:
                        value = result.get()
                    except Exception as e:
                        # Ship the exception object back as the result value.
                        value = e
                    pipe.put(
                        (result_to_id[result], result.successful(), value))
                    del result_to_id[result]
Exemple #41
0
    def __init__(self,
                 inputs,
                 outputs,
                 zmq_args=None,
                 datastore="ait.core.db.InfluxDBBackend",
                 **kwargs):
        """
        Params:
            inputs:     names of inbound streams plugin receives data from
            outputs:    names of outbound streams plugin sends its data to
            zmq_args:   dict containing the follow keys:
                            zmq_context
                            zmq_proxy_xsub_url
                            zmq_proxy_xpub_url
                        Defaults to empty dict. Default values
                        assigned during instantiation of parent class.
            datastore:   path to database backend to use
            **kwargs:   (optional) Dependent on requirements of child class.
        """

        super(AITOpenMctPlugin, self).__init__(inputs, outputs, zmq_args,
                                               **kwargs)

        log.info("Running AIT OpenMCT Plugin")

        # Dotted path of the DB backend class; presumably consumed by
        # load_database below -- confirm.
        self._datastore = datastore

        # Initialize state fields
        # Debug state fields
        self._debugEnabled = AITOpenMctPlugin.DEFAULT_DEBUG
        self._debugMimicRepeat = False
        # Port value for the server
        self._servicePort = AITOpenMctPlugin.DEFAULT_PORT
        # Flag indicating if we should create a database connection for historical queries
        self._databaseEnabled = AITOpenMctPlugin.DEFAULT_DATABASE_ENABLED

        # Check for AIT config overrides
        self._check_config()

        # Setup server state
        self._app = bottle.Bottle()
        self._servers = []

        # Queues for AIT events events
        self._tlmQueue = api.GeventDeque(maxlen=100)

        # Load AIT tlm dict and create OpenMCT format of it
        self._aitTlmDict = tlm.getDefaultDict()
        self._mctTlmDict = DictUtils.format_tlmdict_for_openmct(
            self._aitTlmDict)

        # Create lookup from packet-uid to packet def
        self._uidToPktDefMap = DictUtils.create_uid_pkt_map(self._aitTlmDict)

        # Attempt to initialize database, None if no DB
        self._database = self.load_database(**kwargs)

        # Maintains a set of active websocket structs
        self._socket_set = set()

        # Spawn greenlets to poll telemetry
        self.tlm_poll_greenlet = Greenlet.spawn(
            self.poll_telemetry_periodically)

        # Kick off server initialization asynchronously.
        gevent.spawn(self.init)
Exemple #42
0
    return redirect(url_for(onelove.api.endpoint('doc')))


@onelove.socketio.on('connect')
def on_connect():
    """Authenticate an incoming socket.io connection via its JWT token.

    Disconnects the client when the ``token`` query argument is missing or
    fails to decode; otherwise joins the client to a room named after its
    user id.
    """
    from onelove.models import User
    token = request.args.get('token', None)
    request.namespace = '/onelove'
    if token is None:
        disconnect()
        return

    current_identity = None
    try:
        current_identity = onelove.jwt.jwt_decode_callback(token)
    except Exception:
        # Invalid/expired token: drop the client.  (Was a bare except,
        # which also swallowed SystemExit/KeyboardInterrupt.)
        disconnect()

    if current_identity is None:
        disconnect()
        return
    user = User.objects.get(id=current_identity['identity'])
    join_room(str(user.id))


# Spawn the background monitor only in the Werkzeug reloader's child
# process, so it is not started twice under the auto-reloader.
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
    Greenlet.spawn(monitor)

if __name__ == '__main__':
    manager.run()
Exemple #43
0
 def watching(self):
     """Start one watcher greenlet per registered key/handler pair."""
     for watch_key in self.__handers__:
         Greenlet.spawn(self.watch, watch_key, self.__handers__[watch_key])
Exemple #44
0
#!/usr/bin/env python

from gevent import Greenlet
from brubeck.request_handling import Brubeck
from brubeck.templating import load_jinja2_env
import handlers
import os
import sys

# Background poller; presumably loops on a 5-second delay --
# see handlers.PollHandler.loop for the actual semantics.
Greenlet.spawn(handlers.PollHandler.loop, delay=5)

# Brubeck application wiring: the mongrel2 socket pair, URL routes and
# the Jinja2 template loader.
config = {
    'mongrel2_pair': ('ipc://run/mongrel2_send', 'ipc://run/mongrel2_recv'),
    'handler_tuples': [
        (r'^/$', handlers.IndexHandler),
        (r'^/poll$', handlers.PollHandler),
    ],
    'template_loader':
    load_jinja2_env('./templates'),
}

app = Brubeck(**config)
if __name__ == '__main__':
    app.run()
Exemple #45
0
    def __call__(self, client, msg):
        """Dispatch one parsed IRC message for the game server.

        Handles the welcome reply (001), PING keepalives, NAMES replies
        (353), JOIN/PART bookkeeping for competitors and bot channels, and
        PRIVMSG traffic carrying the game protocol (SABOTAGES / VOTES /
        SELECTS plus per-bot process_* messages).

        :param client: IRC client wrapper used to send replies.
        :param msg: parsed IRC message (command, prefix, params).
        """
        if msg.command == '001':
            # Logged in: join the lobby and start the main loop greenlet.
            self.client = client
            client.send_message(message.Join('#resistance'))
            Greenlet.spawn(self._loop)

        elif msg.command == 'PING':
            client.send_message(message.Command(msg.params, 'PONG'))

        elif msg.command == '353':
            if msg.params[2] != '#resistance':
                # When joining specific bot private channels, see if the bot is
                # already there waiting and don't require rejoin.
                waiting = [u.strip('+@') for u in msg.params[3:]]
                for g in self.games:
                    for b in [b for b in g.bots if b.name in waiting]:
                        if b.channel == msg.params[2]:
                            if b._join:
                                b._join.set()
                return
            # Lobby NAMES reply: everyone except ourselves is a competitor.
            self.competitors = [u.strip('+@') for u in msg.params[3:]]
            self.competitors.remove(client.nick)

        elif msg.command == 'JOIN':
            user = msg.prefix.split('!')[0].strip('+@')
            if user == client.nick:
                return
            channel = msg.params[0].lstrip(':')
            if channel != '#resistance':
                for g in self.games:
                    for b in g.bots:
                        if b.channel == channel:
                            if b._join:
                                b._join.set()
                            return
                        if channel in b.channel:
                            # Bot joined shared game channel, used for global chat.
                            return
                print("Not waiting for a player to join this channel.", file=sys.stderr)
            else:
                self.competitors.append(user)

        elif msg.command == 'PART':
            user = msg.prefix.split('!')[0].strip('+@')
            if user == client.nick:
                return
            channel = msg.params[0].lstrip(':')
            if channel == '#resistance':
                self.competitors.remove(user)
                return
            else:
                for g in self.games:
                    for b in g.bots:
                        if b.channel == channel and b._part:
                            # Only leave the channel once the other has left, to avoid
                            # synchronization problems when batch processing games.
                            b._part.set()
                            return

        elif msg.command == 'PRIVMSG':
            # Any human may ask this server to run games with available players.
            channel = msg.params[0].lstrip(':')
            if channel == '#resistance':
                if msg.params[1].lower() == 'play':
                    self.run(' '.join(msg.params[2:]))
                return

            # Connecting bots always self-identify as bot for future reference.
            if len(msg.params) > 1 and msg.params[1] == 'BOT':
                self.identities.append(msg.prefix.split('!')[0])

            for g in self.games:
                user = msg.prefix.split('!')[0].strip('+@')
                if g.channel == channel:
                    # Log all channel chatter to the game transcript file.
                    g.file.write('[%s] ' % user + ' '.join(msg.params[1:])+'\n')
                    g.file.flush()

                # Check if this is a report message about sabotages in
                # games played between humans alone or with bots.
                if g.channel == channel and msg.params[1].upper() == 'SABOTAGES':
                    try:
                        remaining = int(msg.params[2].strip('.,!;'))
                    except ValueError:
                        return

                    for bot in g.bots:
                        if bot._sabotage is not None:
                            r = bool(remaining > 0)
                            bot.send("SABOTAGED %s" % showYesOrNo(r))
                            if bot.spy:
                                bot._sabotage.set(r)
                                remaining -= 1
                            else:
                                bot._sabotage.set(False)
                            bot._sabotage = None

                if g.channel == channel and msg.params[1].upper() == 'VOTES':
                    votes = [parseYesOrNo(v) for v in msg.params[2:]]
                    for bot in g.bots:
                        if bot._vote is not None and bot.name not in self.identities:
                            v = votes.pop(0)
                            bot.send("VOTED %s." % showYesOrNo(v))
                            bot._vote.set(v)

                if g.channel == channel and msg.params[1].upper() == 'SELECTS':
                    for bot in g.bots:
                        if bot._select is not None:
                            bot.send("SELECTED %s" % ' '.join(msg.params[2:]))
                            bot.process_SELECTED(msg.params)

                # Now check if a bot is expecting a message, and pass it along.
                for bot in g.bots:
                    if bot.channel != channel:
                        continue
                    name = 'process_'+msg.params[1].upper()
                    if hasattr(self, name):
                        process = getattr(self, name)
                        process(msg.params)
                    elif hasattr(bot, name):
                        process = getattr(bot, name)
                        process(msg.params)
                    elif bot.expecting:
                        try:
                            bot.expecting(msg.params)
                        except:
                            # Comments can overflow in multiple lines.
                            pass
Exemple #46
0
def main():
    """Spawn ten `test` greenlets and block until all of them finish."""
    # Comprehension replaces the manual append loop; range(10) == range(0, 10).
    threads = [Greenlet.spawn(test, i) for i in range(10)]

    gevent.joinall(threads)
Exemple #47
0
import gevent
from gevent import Greenlet

def foo(message, n):
    """
    Each thread will be passed the message, and n arguments
    in its initialization.
    """
    print(message)
    gevent.sleep(n)
    print(message, "complete...")

# Initialize a new Greenlet instance running the named function
# foo.  spawn() both creates and starts the greenlet.
thread1 = Greenlet.spawn(foo, "Hello", 1)

# Wrapper for creating and running a new Greenlet from the named
# function foo, with the passed arguments
thread2 = gevent.spawn(foo, "I live!", 2)

# Lambda expressions
thread3 = gevent.spawn(lambda x: (x+1), 2)

threads = [thread1, thread2, thread3]

# Block until all threads complete.
gevent.joinall(threads)

Exemple #48
0
 def start(self):
     """(Re)start the run-loop greenlet, stopping any existing one first."""
     self.stop()
     self.loop = Greenlet.spawn(self.run)
     # NOTE(review): Greenlet.spawn already starts the greenlet; this
     # extra start() looks redundant -- confirm before removing.
     self.loop.start()
Exemple #49
0
 def __gblocking__(fun, *args, **kwargs):
     """Run fun(*args, **kwargs) in a fresh greenlet and block for its
     result, re-raising any exception the greenlet raised."""
     worker = Greenlet.spawn(fun, *args, **kwargs)
     return worker.get()
Exemple #50
0
# Example
import gevent
from gevent import Greenlet

def foo(message, n):
    """
    Each thread will be passed the message, and n arguments in its
    initialization.
    """
    gevent.sleep(n)
    print(message)

# Initialize a new Greenlet instance running the named function foo.
# spawn() also starts the greenlet, so no separate start() call is needed.
thread1 = Greenlet.spawn(foo, "Hello", 1)

# Wrapper for creating and running a new Greenlet from the named function
# foo with the passed arguments (note the difference between thread1 and
# thread2: gevent.spawn here vs Greenlet.spawn above).
thread2 = gevent.spawn(foo, "I live!", 2)

# Lambda expressions
thread3 = gevent.spawn(lambda x: (x+1), 2)

threads = [thread1, thread2, thread3]

# Block until all threads complete.
gevent.joinall(threads)
Exemple #51
0
    def __call__(self, client, msg):
        """Dispatch one parsed IRC message (variant of the game-server
        handler that uses assertions and relays raw SABOTAGES counts).

        Handles 001 / PING / 353 / JOIN / PART / PRIVMSG, updating the
        competitor list and forwarding game-protocol messages to bots.

        :param client: IRC client wrapper used to send replies.
        :param msg: parsed IRC message (command, prefix, params).
        """
        if msg.command == '001':
            # Logged in: join the lobby and start the main loop greenlet.
            self.client = client
            client.send_message(message.Join('#resistance'))
            Greenlet.spawn(self._loop)
        elif msg.command == 'PING':
            client.send_message(message.Command(msg.params, 'PONG'))
        elif msg.command == '353':
            if msg.params[2] != '#resistance':
                # When joining specific bot private channels, see if the bot is
                # already there waiting and don't require rejoin.
                waiting = [u.strip('+@') for u in msg.params[3:]]
                for g in self.games:
                    for b in [b for b in g.bots if b.name in waiting]:
                        if b.channel == msg.params[
                                2] and b._join and not b._join.ready():
                            b._join.set()
                return

            # Lobby NAMES reply: everyone except ourselves is a competitor.
            self.competitors = [u.strip('+@') for u in msg.params[3:]]
            self.competitors.remove(client.nick)

        elif msg.command == 'JOIN':
            user = msg.prefix.split('!')[0].strip('+@')
            if user == client.nick:
                return
            channel = msg.params[0].lstrip(':')
            if channel != '#resistance':
                for g in self.games:
                    for b in g.bots:
                        if b.channel == channel and b._join:
                            b._join.set()
                            return
                # NOTE(review): assert is stripped under -O; raising or
                # logging may be safer for this unexpected-join case.
                assert False, "Not waiting for a player to join this channel."
            else:
                self.competitors.append(user)
        elif msg.command == 'PART':
            user = msg.prefix.split('!')[0].strip('+@')
            if user == client.nick:
                return
            channel = msg.params[0].lstrip(':')
            if channel == '#resistance':
                self.competitors.remove(user)
                return
            else:
                for g in self.games:
                    for b in g.bots:
                        if b.channel == channel and b._part:
                            # Only leave the channel once the other has left, to avoid
                            # synchronization problems when batch processing games.
                            b._part.set()
                            return
        elif msg.command == 'PRIVMSG':
            channel = msg.params[0].lstrip(':')
            if channel == '#resistance':
                if msg.params[1] == 'PLAY':
                    self.run(' '.join(msg.params[2:]))
                return
            if msg.params[1] == 'BOT':
                self.identities.append(msg.prefix.split('!')[0])
            for g in self.games:
                # First check if this is a report message about sabotages in
                # games played between humans alone or with bots.
                if g.channel == channel and msg.params[1].upper(
                ) == 'SABOTAGES':
                    remaining = int(msg.params[2].strip('.,!;'))
                    for bot in g.bots:
                        if bot._sabotage is not None:
                            bot.send("SABOTAGES %i" % (remaining))
                            if bot.spy:
                                bot._sabotage.set(bool(remaining > 0))
                                remaining -= 1
                            else:
                                bot._sabotage.set(False)

                # Now check if a bot is expecting a message, and pass it along.
                for bot in g.bots:
                    if bot.channel != channel:
                        continue
                    name = 'process_' + msg.params[1].upper()
                    if name == 'process_COMMENT':
                        pass
                    elif hasattr(bot, name):
                        process = getattr(bot, name)
                        process(msg.params)
                    elif bot.expecting:
                        bot.expecting(msg.params)
Exemple #52
0
 def __enter__(self):
     """Context-manager entry: spawn the UI loop greenlet exactly once.

     Raises error.ProgrammingError when a greenlet is already active.
     (Truthiness, not an `is None` check, is used on purpose: a finished
     gevent Greenlet evaluates falsy, allowing a restart.)
     """
     if self.greenlet:
         raise error.ProgrammingError("UI is already running and can't be started again")
     ui_greenlet = Greenlet.spawn(self.run)
     self.greenlet = ui_greenlet
     return self
Exemple #53
0
def test_worker_persistence():
    """End-to-end test that worker registrations survive a server restart.

    Starts a DCFServer with a pre-loaded public-key file and a JSON-backed
    key store, registers additional workers through the admin API, exercises
    the model-update / global-model round trip, restarts the server, and
    verifies the worker set was restored from 'workers_db.json'. Finally
    deletes workers through the admin API and checks the store shrinks.
    """
    worker_ids = []
    added_workers = []
    worker_updates = {}

    global_model_version = "1"
    os.environ[ADMIN_USERNAME] = 'admin'
    os.environ[ADMIN_PASSWORD] = 'str0ng_s3cr3t'
    admin_auth = ('admin', 'str0ng_s3cr3t')

    # Generate signing key pairs for every worker; keep hex-encoded copies.
    public_keys = []
    private_keys = []
    num_workers = 6
    num_pre_load_workers = 3
    worker_key_file_prefix = 'worker_key_file'
    for n in range(num_workers):
        private_key, public_key = gen_pair(worker_key_file_prefix + f'_{n}')
        private_keys.append(
            private_key.encode(encoder=HexEncoder).decode('utf-8'))
        public_keys.append(
            public_key.encode(encoder=HexEncoder).decode('utf-8'))

    # write the pre-loaded keys to the key list file read at server startup
    worker_key_file = 'worker_public_keys.txt'
    with open(worker_key_file, 'w') as f:
        for public_key in public_keys[0:num_pre_load_workers]:
            f.write(public_key + os.linesep)

    def begin_server(server, server_adapter):
        # Runs in a greenlet; blocks serving requests until shutdown.
        server.start_server(server_adapter)

    def test_register_func_cb(id):
        worker_ids.append(id)

    def test_unregister_func_cb(id):
        worker_ids.remove(id)

    def test_ret_global_model_cb():
        return create_model_dict(msgpack.packb("Pickle dump of a string"),
                                 global_model_version)

    def is_global_model_most_recent(version):
        # Fix: global_model_version is a string, so `int(version) ==
        # global_model_version` was always False; compare as integers.
        return int(version) == int(global_model_version)

    def test_rec_server_update_cb(worker_id, update):
        if worker_id in worker_ids:
            worker_updates[worker_id] = update
            return f"Update received for worker {worker_id[0:WID_LEN]}."
        else:
            return f"Unregistered worker {worker_id[0:WID_LEN]} tried to send an update."

    def get_signed_phrase(private_key, phrase=b'test phrase'):
        return SigningKey(private_key, encoder=HexEncoder).sign(phrase).hex()

    # Start from a clean key store so the pre-load count is deterministic.
    if os.path.exists('workers_db.json'):
        os.remove('workers_db.json')

    server = DCFServer(
        register_worker_callback=test_register_func_cb,
        unregister_worker_callback=test_unregister_func_cb,
        return_global_model_callback=test_ret_global_model_cb,
        is_global_model_most_recent=is_global_model_most_recent,
        receive_worker_update_callback=test_rec_server_update_cb,
        server_mode_safe=True,
        load_last_session_workers=True,
        path_to_keys_db='workers_db.json',
        key_list_file=worker_key_file)

    worker_updates = {}
    worker_ids = []
    added_workers = []
    stoppable_server = StoppableServer(host=get_host_ip(), port=8080)
    server_gl = Greenlet.spawn(begin_server, server, stoppable_server)
    sleep(2)

    # Only the pre-loaded keys should be in the store at this point.
    assert len(server.worker_manager.public_keys_db) == 3
    # Register a set of workers using the admin API and test registration
    for i in range(num_pre_load_workers, num_workers):

        admin_registered_worker = {
            PUBLIC_KEY_STR: public_keys[i],
            REGISTRATION_STATUS_KEY: True
        }
        response = requests.post(
            f"http://{server.server_host_ip}:{server.server_port}/{WORKERS_ROUTE}",
            json=admin_registered_worker,
            auth=admin_auth)

        added_worker_dict = json.loads(response.content.decode('utf-8'))
        idx = i - num_pre_load_workers
        assert len(worker_ids) == idx + 1
        assert worker_ids[idx] == added_worker_dict[WORKER_ID_KEY]
        added_workers.append(added_worker_dict[WORKER_ID_KEY])

    assert len(server.worker_manager.public_keys_db) == 6

    for doc in server.worker_manager.public_keys_db.all():
        assert doc[PUBLIC_KEY_STR] in public_keys

    # Send updates and receive global updates for the registered workers
    # This should succeed
    worker_updates = {}
    for i in range(num_pre_load_workers, num_workers):
        # send updates

        signed_phrase = get_signed_phrase(
            private_keys[i],
            hashlib.sha256(msgpack.packb("Model update!!")).digest())
        response = requests.post(
            f"http://{server.server_host_ip}:{server.server_port}/"
            f"{RECEIVE_WORKER_UPDATE_ROUTE}/{added_workers[i - num_pre_load_workers]}",
            files={
                WORKER_MODEL_UPDATE_KEY:
                zlib.compress(msgpack.packb("Model update!!")),
                SIGNED_PHRASE:
                signed_phrase
            }).content
        assert msgpack.unpackb(worker_updates[worker_ids[
            i - num_pre_load_workers]]) == "Model update!!"
        assert response.decode(
            "UTF-8"
        ) == f"Update received for worker {added_workers[i - num_pre_load_workers][0:WID_LEN]}."

        # receive updates

        challenge_phrase = requests.get(
            f"http://{server.server_host_ip}:{server.server_port}/"
            f"{CHALLENGE_PHRASE_ROUTE}/{added_workers[i - num_pre_load_workers]}"
        ).content
        model_return_binary = requests.post(
            f"http://{server.server_host_ip}:{server.server_port}/{RETURN_GLOBAL_MODEL_ROUTE}",
            json={
                WORKER_ID_KEY: added_workers[i - num_pre_load_workers],
                SIGNED_PHRASE: get_signed_phrase(private_keys[i],
                                                 challenge_phrase),
                LAST_WORKER_MODEL_VERSION: "0"
            }).content
        model_return = msgpack.unpackb(zlib.decompress(model_return_binary))
        assert isinstance(model_return, dict)
        assert model_return[GLOBAL_MODEL_VERSION] == global_model_version
        assert msgpack.unpackb(
            model_return[GLOBAL_MODEL]) == "Pickle dump of a string"

    stoppable_server.shutdown()

    worker_ids = []
    worker_updates = {}

    # Restart the server: it must reload all 6 workers from workers_db.json.
    server = DCFServer(
        register_worker_callback=test_register_func_cb,
        unregister_worker_callback=test_unregister_func_cb,
        return_global_model_callback=test_ret_global_model_cb,
        is_global_model_most_recent=is_global_model_most_recent,
        receive_worker_update_callback=test_rec_server_update_cb,
        server_mode_safe=True,
        load_last_session_workers=True,
        path_to_keys_db='workers_db.json',
        key_list_file=worker_key_file)

    assert len(server.worker_manager.public_keys_db) == 6
    assert len(server.worker_manager.allowed_workers) == 6
    for doc in server.worker_manager.public_keys_db.all():
        assert doc[PUBLIC_KEY_STR] in server.worker_manager.allowed_workers

    stoppable_server = StoppableServer(host=get_host_ip(), port=8080)
    server_gl = Greenlet.spawn(begin_server, server, stoppable_server)
    sleep(2)

    # Delete existing workers and check this works.
    for i in range(num_pre_load_workers):
        response = requests.delete(
            f"http://{server.server_host_ip}:{server.server_port}/{WORKERS_ROUTE}"
            f"/{added_workers[i]}",
            auth=admin_auth)
        message_dict = json.loads(response.content.decode('utf-8'))
        assert SUCCESS_MESSAGE_KEY in message_dict
    assert len(worker_ids) == 0

    assert len(server.worker_manager.public_keys_db) == 3
    assert len(server.worker_manager.allowed_workers) == 3
    for doc in server.worker_manager.public_keys_db.all():
        assert doc[PUBLIC_KEY_STR] in server.worker_manager.allowed_workers

    stoppable_server.shutdown()

    # delete the files
    for n in range(num_workers):
        os.remove(worker_key_file_prefix + f'_{n}')
        os.remove(worker_key_file_prefix + f'_{n}.pub')
    os.remove(worker_key_file)

    os.remove('workers_db.json')
    os.remove('workers_db.json.bak')
Exemple #54
0
import gevent

from gevent import Greenlet


def foo(message, n):
    """Yield control to other greenlets once, then print *message*.

    ``n`` is accepted but unused by the body; presumably kept so the spawn
    calls below can pass a second positional argument — TODO confirm.
    """
    # sleep(0) is a cooperative yield: lets other ready greenlets run first.
    gevent.sleep(0)
    print(message)


# Three concurrent greenlets: two print a message after yielding once,
# one evaluates 2 + 1 (result discarded — demo of spawning a lambda).
thread1 = Greenlet.spawn(foo, 'Hello', 1)
thread2 = gevent.spawn(foo, 'I live', 2)
thread3 = gevent.spawn(lambda x: (x + 1), 2)

threads = [thread1, thread2, thread3]
# Block until every greenlet above has finished.
gevent.joinall(threads)
Exemple #55
0
                continue


def signal_handler(signum, frame):
    """Terminate the process cleanly when SIGINT/SIGTERM is received.

    Matches the (signum, frame) signature the signal module calls handlers
    with; both arguments are ignored — we just raise SystemExit.
    """
    sys.exit()


if __name__ == "__main__":
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    threads = []
    start_time = time.time()
    for i in range(threadNum):
        thrd = Ranker(i)
        threads.append(thrd)
        g = Greenlet.spawn(thrd.run)
        #g.start()
        #thrd.start()

    while True:
        time.sleep(status_interval)
        print "Rankered Count: %d" % cur_rankered_count
        print "Rankered succ: %d,  correct count: %d  rankered failed: %d,  req failed: %d" % (
            cur_rankered_succ, correct_count, cur_rankered_fail, req_failed)
        print "Rankered QPS: %f" % (cur_rankered_succ /
                                    (time.time() - start_time))
        if cur_rankered_succ == 0:
            latency = 0
            recall = 0
            accuracy = 0
        else:
 def start(self, task_list, worker_func, worker_count=4):
     """Spawn ``worker_count`` greenlets running ``worker_func``.

     Each worker receives the task queue and the shared quit flag.
     """
     self.workers.extend(
         Greenlet.spawn(worker_func, task_list.get_queue(), self.quit_workers)
         for _ in range(worker_count))
    def _acquire_partition_ownership(self):
        """Reconcile current partition ownership with the target list.

        For every partition id in [0, self._max_partition): spawn a greenlet
        to acquire the lock for partitions newly present in the target list,
        and cancel in-flight acquisitions / release held locks for partitions
        no longer targeted. Calls ``self._update_cb`` with the new ownership
        list whenever it changes.
        """
        # cleanup any finished greenlets
        self._cleanup_greenlets()

        # this variable will help us decide if we need to call callback
        updated_curr_ownership = False

        # list of partitions for which locks have to be released
        release_lock_list = []

        self._logger.info("known servers: %s" % self._con_hash.get_all_nodes())

        for part in range(0, self._max_partition):
            if (part in self._target_part_ownership_list):
                if (part in self._curr_part_ownership_list):
                    # do nothing, I already have ownership of this partition
                    self._logger.info("No need to acquire ownership of:" +
                                      str(part))
                else:
                    # I need to acquire lock for this partition before I own
                    if (part in list(self._part_lock_task_dict.keys())):
                        try:
                            # get(block=False) re-raises the greenlet's error
                            # or raises if it hasn't finished yet.
                            self._part_lock_task_dict[part].get(block=False)
                        except:
                            # NOTE(review): bare except presumably catches the
                            # "not ready yet" exception from get(block=False)
                            # — confirm against gevent's Greenlet.get.
                            # do nothing there is already a greenlet running to
                            # acquire the lock
                            self._logger.error("Already a greenlet running to"
                                               " acquire:" + str(part))
                            continue

                        # Greenlet died without getting ownership. Cleanup
                        self._logger.error("Cleanup stale greenlet running to"
                                           " acquire:" + str(part))
                        del self._part_lock_task_dict[part]

                    self._logger.error("Starting greenlet running to"
                                       " acquire:" + str(part))
                    # launch the greenlet to acquire the lock
                    g = Greenlet.spawn(self._acquire_lock, part)
                    self._part_lock_task_dict[part] = g

            else:
                # give up ownership of the partition

                # cancel any lock acquisition which is ongoing
                if (part in list(self._part_lock_task_dict.keys())):
                    try:
                        self._part_lock_task_dict[part].get(block=False)
                    except:

                        self._logger.error(
                            "canceling lock acquisition going on \
                            for:" + str(part))
                        # Cancelling the lock should result in killing the gevent
                        self._part_locks[part].cancel()
                        # block until the acquisition greenlet actually exits
                        self._part_lock_task_dict[part].get(block=True)

                    del self._part_lock_task_dict[part]

                if (part in self._curr_part_ownership_list):
                    release_lock_list.append(part)
                    self._curr_part_ownership_list.remove(part)
                    updated_curr_ownership = True
                    self._logger.error("giving up ownership of:" + str(part))

        if (updated_curr_ownership is True):
            # current partition membership was updated call the callback
            self._update_cb(self._curr_part_ownership_list)

        if (len(release_lock_list) != 0):
            # release locks which were acquired
            for part in release_lock_list:
                self._logger.error("release the lock which was acquired:" + \
                        str(part))
                try:
                    self._part_locks[part].release()
                    self._logger.error("fully gave up ownership of:" +
                                       str(part))
                except:
                    # best-effort release: the lock may already be gone
                    pass
Exemple #58
0
from gevent import Greenlet


def thrFunc(s):
    print "%s start.\n" % s
    gevent.sleep(2)
    print "%s end.\n" % s


gThr = Greenlet(thrFunc, "startjoin1")
gThr.start()
gThr.join()
gThr = Greenlet(thrFunc, "startjoin2")
gThr.start()
gThr.join()

print "+++++++++"
gThr = Greenlet(thrFunc, "start1")
gThr.start()
gThr = Greenlet(thrFunc, "start2")
gThr.start()
gThr = Greenlet(thrFunc, "start3")
gThr.start()
gThr = Greenlet(thrFunc, "start4")
gThr.start()

print "+++++++++"
gThr1 = Greenlet.spawn(thrFunc, "spawn1")
gThr2 = Greenlet.spawn(thrFunc, "spawn2")
gThr2.join()
Exemple #59
0
    def on_subscribe(self, msg):
        """
        subscribe to a channel

        The channels for this app are one of:

            connected users
            the current collection of interest
            any model currently displayed
        """
        def subscriber(io, topic):
            """
            Subscribe to incoming pubsub messages from redis.

            This will run in a greenlet, and blocks waiting for publish
            messages from other redis clients. One source for the publish
            events is a bridge to Django's signals - see colorpicks.publisher.

            When a message is received on the redis channel, it emits an event
            to backbone over socketio
            """
            redis_sub = self.redis.pubsub()
            redis_sub.subscribe(topic)

            while io.socket.connected:
                # TODO the nesting dictionaries with the key 'data'
                # is highly problematic now - need to fix
                for message in redis_sub.listen():
                    if message['type'] != 'message':
                        # We are only interested in 'message' events on the channel
                        # dlog( 'rejecting ', message)
                        continue
                    dlog(id(self), ' pubsub ', message)

                    # redis pubsub data is always a string
                    data = json.loads(message['data'])

                    if data['action'] == 'unsub':
                        redis_sub.unsubscribe(topic)
                        # ends the greenlet
                        return

                    colorid = str(data['data']['id'])

                    chan = message['channel']

                    if (data['action'] == 'delete'
                            and colorid == self.colorid):
                        # don't delete yourself, this can happen when you
                        # are watching the 'similar' channel, and you change
                        # your color
                        continue

                    if (data['action'] in ['update', 'create']
                            and self.show_only_connected_users
                            and not self.redis.sismember(
                                'connected_users', colorid)):
                        # a color of a non-connected user has been updated
                        # (perhaps through admin) and we are only watching for
                        # connected users - so do nothing
                        # NOTE(review): this `return` ends the subscriber
                        # greenlet entirely; a `continue` (as in the delete
                        # case above) may have been intended - confirm
                        return

                    if chan == 'connected_users':
                        # we want to see if a connecting user is part of
                        # the current collection
                        if (self.redis.sismember(self.collection, colorid)
                                and self.show_only_connected_users):
                            # emit as if this were just added to the collection
                            chan = self.collection
                        else:
                            # don't emit a create event
                            continue

                    if not chan.startswith('color/'):
                        # again - hack to manage collection state on self here
                        chan = 'all'

                    dlog(id(self), 'emitting: ', chan, data['action'],
                         data['data'])

                    io.emit(chan + ":" + data['action'], data['data'])

                    if data['action'] == 'delete':
                        # send an extra delete event for the model also
                        # as the backbone collection seems buggy
                        io.emit('color/{}:delete'.format(data['data']['id']),
                                data['data'])
                        io.emit(chan + ":" + data['action'],
                                list(data['data']))

        # we could filter our own ID out, so we don't subscribe to
        # ourselves. It would depend on whether you want to allow changes
        # made through other avenues to be reflected
        # if you don't filter, that means there is no way to avoid
        # getting your own round tripped updates - which defeats some of the
        # point of the client side MVC

        url = msg['url']
        if url not in self.subscribers:
            # one subscriber greenlet per channel url
            greenlet = Greenlet.spawn(subscriber, self, url)
            # stash this greenlet in a dictionary in order
            # to kill/unsub later on
            self.subscribers[url] = greenlet
        else:
            # already subscribed to this channel - nothing to do
            pass
Exemple #60
0
        'xg',
        'rb',
        'hg',
        'mlxy',
        'tg',
        'om',
        'hx',
    ]
    # 照片分类
    photo_thread = [1, 2]
    # 线程计数器
    photo_number = -1
    # 下载图片计数器,最大50
    # index_number = 530273
    # 页面计数器,最小530273,最大544527
    file = '../photo/'
    # 图片的保存地址
    thread1 = Greenlet.spawn(photo_download, photo_thread[0], 530273,
                             photo_number, 0)
    # 从命名中创建,并运行新的Greenlet的包装器
    # 函数photo_download,带有传递的参数
    thread2 = gevent.spawn(photo_download, photo_thread[1], 533836,
                           photo_number, 0)
    # 两个thread运行,一个从530273页面开始爬取,另一个从537400页面开始爬取
    # 537400 - 530273 = 7127
    # 7127 / 2 = 3564
    # 3564 + 530273 = 533836
    threads = [thread1, thread2]
    # 阻止所有线程完成
    gevent.joinall(threads)