Example #1

class _socketobject(object):

    def __init__(self, family=None, type=None, proto=None):
        # FIXME verify these are supported
        self.family = family
        self.type = type
        self.proto = proto

        self.blocking = True
        self.timeout = None
        self.channel = None
        self.bind_addr = None
        self.selectors = CopyOnWriteArrayList()

        if self.type == SOCK_DGRAM:
            self.socket_type = DATAGRAM_SOCKET
            self.connected = False
            self.incoming = LinkedBlockingQueue()  # list of read buffers
            self.incoming_head = None  # allows msg buffers to be broken up
            self.python_inbound_handler = None
            self.can_write = True
        else:
            self.socket_type = UNKNOWN_SOCKET

    def _register_selector(self, selector):
        self.selectors.addIfAbsent(selector)

    def _unregister_selector(self, selector):
        return self.selectors.remove(selector)

    def _notify_selectors(self):
        for selector in self.selectors:
            selector.notify()

    def _handle_channel_future(self, future, reason):
        # All differences between nonblocking and blocking with optional
        # timeouts are managed by this method.

        # All sockets can be selected on, regardless of blocking/nonblocking

        def workaround_jython_bug_for_bound_methods(_):
            self._notify_selectors()

        future.addListener(workaround_jython_bug_for_bound_methods)

        if self.blocking:
            if self.timeout is None:
                return future.sync()
            else:
                future.await(self.timeout * _TO_NANOSECONDS,
                             TimeUnit.NANOSECONDS)
                return future
        else:
            return future

    def setblocking(self, mode):
        self.blocking = mode

    def settimeout(self, timeout):
        # CPython semantics: None blocks forever, 0 is nonblocking,
        # and a positive value blocks with a timeout
        if timeout is None:
            self.blocking, self.timeout = True, None
        elif timeout == 0:
            self.blocking = False
        else:
            self.blocking, self.timeout = True, timeout

    def bind(self, address):
        # Netty 4 supports binding a socket to multiple addresses;
        # apparently this is not the case for C API sockets

        self.bind_addr = address


    # CLIENT METHODS
    # Calling connect/connect_ex means this is a client socket; these
    # in turn use _connect, which uses Bootstrap, not ServerBootstrap

    def _init_client_mode(self, channel=None):
        # this is client socket specific 
        self.socket_type = CLIENT_SOCKET
        self.incoming = LinkedBlockingQueue()  # list of read buffers
        self.incoming_head = None  # allows msg buffers to be broken up
        self.python_inbound_handler = None
        self.can_write = True
        self.connect_handlers = []
        self.peer_closed = False
        self.connected = False
        if channel:
            self.channel = channel
            self.python_inbound_handler = PythonInboundHandler(self)
            self.connect_handlers = [self.python_inbound_handler]
            self.connected = True

    def _connect(self, addr):
        print "Begin _connect"
        self._init_client_mode()
        self.connected = True
        self.python_inbound_handler = PythonInboundHandler(self)
        bootstrap = Bootstrap().group(NIO_GROUP).channel(NioSocketChannel)
        # add any options

        # FIXME really this is just for SSL handling
        if self.connect_handlers:
            for handler in self.connect_handlers:
                print "Adding connect handler", handler
                bootstrap.handler(handler)
        else:
            print "Adding read adapter", self.python_inbound_handler
            bootstrap.handler(self.python_inbound_handler)
        
        # FIXME also support any options here

        def completed(f):
            self._notify_selectors()
            print "Connection future - connection completed", f
        
        host, port = addr
        future = bootstrap.connect(host, port)
        future.addListener(completed)
        self._handle_channel_future(future, "connect")
        self.channel = future.channel()
        print "Completed _connect on {}".format(self)

    def _post_connect(self):
        # Post-connect step is necessary to handle SSL setup,
        # otherwise the read adapter can race in seeing encrypted
        # messages from the peer
        if self.connect_handlers:
            print "Adding read adapter", self.python_inbound_handler
            self.channel.pipeline().addLast(self.python_inbound_handler)
        
        def peer_closed(x):
            print "Peer closed channel {} {}".format(self, x)
            self.incoming.put(_PEER_CLOSED)
            self._notify_selectors()

        self.channel.closeFuture().addListener(peer_closed)

    def connect(self, addr):
        # Unwrapped sockets can immediately perform the post-connect step
        self._connect(addr)
        self._post_connect()
        print "Completed connect {} to {}".format(self, addr)

    def connect_ex(self, addr):
        self.connect(addr)
        if self.blocking:
            return errno.EISCONN
        else:
            return errno.EINPROGRESS


    # SERVER METHODS
    # Calling listen means this is a server socket

    def listen(self, backlog):
        self.socket_type = SERVER_SOCKET

        b = ServerBootstrap()
        b.group(NIO_GROUP)
        b.channel(NioServerSocketChannel)
        b.option(ChannelOption.SO_BACKLOG, backlog)
        # FIXME pass through child options from self; note that C API sockets
        # do not distinguish parent and child options
        # EXAMPLE - b.childOption(ChannelOption.SO_KEEPALIVE, True)

        # FIXME per http://stackoverflow.com/questions/9774023/netty-throttling-accept-on-boss-thread,
        # should set a parentHandler to ensure throttling to avoid denial of service attacks against this layer;
        # it's up to using Python code to do this, but at the very least there should be some sort of blocking
        # to ensure we don't exceed the desired backlog in this chunk of code;
        # right now, the assumption is that an ArrayBlockingQueue of sufficient size should suffice instead
        self.client_queue = ArrayBlockingQueue(backlog)

        # FIXME this should queue up sockets that are wrapped accordingly;
        # in particular they should be wrapped SSLSocket objects (inheriting SSLEngine settings) 
        b.childHandler(ClientSocketHandler(self))

        # bind returns a ChannelFuture but, regardless of blocking/nonblocking, return immediately
        b.bind(_get_inet_addr(self.bind_addr))

    def accept(self):
        s = self.client_queue.take()
        return s, s.getpeername()


    # DATAGRAM METHODS
    
    # needs to implicitly bind to 0 if not specified

    def _datagram_connect(self):
        # FIXME raise exception if not of the right family
        if not self.connected:
            print "Connecting datagram socket to", self.bind_addr
            self.connected = True
            self.python_inbound_handler = PythonInboundHandler(self)
            bootstrap = Bootstrap().group(NIO_GROUP).channel(NioDatagramChannel)
            bootstrap.handler(self.python_inbound_handler)
            # add any options
            # such as .option(ChannelOption.SO_BROADCAST, True)
            future = bootstrap.bind(_get_inet_addr(self.bind_addr))
            self._handle_channel_future(future, "bind")
            self.channel = future.channel()
            print "Completed _datagram_connect on {}".format(self)

    def sendto(self, string, arg1, arg2=None):
        # Unfortunate overloading
        if arg2 is not None:
            flags = arg1
            address = arg2
        else:
            flags = None
            address = arg1

        print "Sending data", string
        self._datagram_connect()
        # need a helper function to select the proper address;
        # it should take into account AF_INET vs AF_INET6
        packet = DatagramPacket(Unpooled.wrappedBuffer(string),
                                _get_inet_addr(address))
        future = self.channel.writeAndFlush(packet)
        self._handle_channel_future(future, "sendto")
        return len(string)


    # GENERAL METHODS
                                             
    def close(self):
        future = self.channel.close()
        self._handle_channel_future(future, "close")

    def shutdown(self, how):
        if how & SHUT_RD:
            try:
                self.channel.pipeline().remove(self.python_inbound_handler)
            except NoSuchElementException:
                pass  # already removed, can safely ignore (presumably)
        if how & SHUT_WR:
            self.can_write = False
            
    def _readable(self):
        if self.socket_type == CLIENT_SOCKET:
            return ((self.incoming_head is not None and self.incoming_head.readableBytes()) or
                    self.incoming.peek())
        elif self.socket_type == SERVER_SOCKET:
            return bool(self.client_queue.peek())
        else:
            return False

    def _writable(self):
        return self.channel.isActive() and self.channel.isWritable()

    def send(self, data):
        data = str(data)  # FIXME temporary fix if data is of type buffer
        print "Sending data <<<{}>>>".format(data)
        if not self.can_write:
            raise Exception("Cannot write to closed socket")  # FIXME use actual exception
        future = self.channel.writeAndFlush(Unpooled.wrappedBuffer(data))
        self._handle_channel_future(future, "send")
        # FIXME are we sure we are going to be able to send this much data, especially async?
        return len(data)
    
    sendall = send   # see note above!

    def _get_incoming_msg(self):
        if self.incoming_head is None:
            if self.blocking:
                if self.timeout is None:
                    self.incoming_head = self.incoming.take()
                else:
                    self.incoming_head = self.incoming.poll(self.timeout * _TO_NANOSECONDS, TimeUnit.NANOSECONDS)
            else:
                self.incoming_head = self.incoming.poll()  # Could be None

        # Only return _PEER_CLOSED once
        msg = self.incoming_head
        if msg is _PEER_CLOSED:
            self.incoming_head = None
        return msg

    def recv(self, bufsize, flags=0):
        # For obvious reasons, concurrent reads on the same socket
        # have to be locked; I don't believe it is the job of recv to
        # do this; in particular this is the policy of SocketChannel,
        # which underlies Netty's support for such channels.
        msg = self._get_incoming_msg()
        if msg is None:
            return None
        elif msg is _PEER_CLOSED:
            return ""
        msg_length = msg.readableBytes()
        buf = jarray.zeros(min(msg_length, bufsize), "b")
        msg.readBytes(buf)
        if msg.readableBytes() == 0:
            msg.release()  # return msg ByteBuf back to Netty's pool
            self.incoming_head = None
        return buf.tostring()

    def recvfrom(self, bufsize, flags=0):
        # FIXME refactor common code from recv
        self._datagram_connect()
        packet = self._get_incoming_msg()
        if packet is None:
            return None
        elif packet is _PEER_CLOSED:
            return ""
        msg = packet.content()
        msg_length = msg.readableBytes()
        buf = jarray.zeros(min(msg_length, bufsize), "b")
        msg.readBytes(buf)
        remote_addr = packet.sender()  # may not be available on non-datagram channels
        sender = remote_addr.getHostString(), remote_addr.getPort()
        if msg.readableBytes() == 0:
            packet.release()  # return msg ByteBuf back to Netty's pool
            self.incoming_head = None
        return buf.tostring(), sender

    def fileno(self):
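        # no real file descriptor to hand out; return the socket object itself
        # so the select() emulation (see _register_selector) can key on it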
        return self

    def getsockopt(self, level, option):
        return 0

    def getpeername(self):
        remote_addr = self.channel.remoteAddress()
        return remote_addr.getHostString(), remote_addr.getPort()

    def _unlatch(self):
        pass  # no-op once mutated from ChildSocket to normal _socketobject
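
The listen()/accept() pair above uses ArrayBlockingQueue(backlog) as a bounded handoff between Netty's event loop and user code: the child handler enqueues each accepted connection, and accept() blocks on take(). Below is a minimal sketch of that handoff; the producer/consumer names are hypothetical, since ClientSocketHandler itself is not shown in the example.

from java.util.concurrent import ArrayBlockingQueue

client_queue = ArrayBlockingQueue(5)  # bounded to the requested backlog

def on_child_channel_active(child_sock):
    # offer() returns False instead of blocking when the queue is full,
    # which is one way to shed connections beyond the desired backlog
    if not client_queue.offer(child_sock):
        child_sock.close()

def accept_one():
    sock = client_queue.take()  # blocks until a connection has been queued
    return sock, sock.getpeername()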
Example #2

class greenlet(object):

    def __init__(self, run=None, parent=None):
        self.run = run
        if parent is ROOT:
            self.parent = None
        elif parent is not None:
            self.parent = parent
        else:
            parent = getcurrent()
            print "Setting parent", parent
            self.parent = parent
            self._frame = self._mailbox = None
            self._thread = threading.current_thread() # temp FIXME
            print "Set the parent {} {}".format(self, self.parent)

        # Top user frame of this greenlet; per the normal C
        # implementation of greenlet, this is only available during
        # the execution of the greenlet - not when it's not running
        self._frame = None

        # Mailbox is used in this code to highlight that it's a specialized
        # queue of length 1, used for the specific synchronization model of
        # greenlet.switch
        self._mailbox = ArrayBlockingQueue(1)

        # Set up thread for the actual emulation. This could be a
        # lightweight thread, such as provided by Quasar
        if self.parent is None:
            # Special case root greenlets
            self._thread = threading.current_thread()
        else:
            self._thread = threading.Thread(target=self._wrapper)
            self._thread.setDaemon(True)  # greenlets don't block exit; FIXME apparently daemon=True doesn't yet work on Jython
            self._thread.start()  # the wrapper will immediately block on its mailbox

        print "Initialized greenlet {}".format(self)


    def __str__(self):
        if self.parent is None:
            parent_id = None
        else:
            parent_id = "{:#x}".format(id(self.parent))
        return "<greenlet id={:#x}, parent={}, frame={}, mailbox={}, thread={} daemon={}>".format(
            id(self), parent_id, self._frame, self._mailbox, self._thread.name, self._thread.isDaemon())

    __repr__ = __str__

    def _propagate(self, *args, **kwargs):
        print "In switch to parent for {} from {}".format(self, context._current)
        self._mailbox.add(GreenletArgs(args, kwargs))

    def switch(self, *args, **kwargs):
        # Using add ensures that we will quickly fail if multiple greenlets
        # switch to the same one. Should not happen in actual greenlets,
        # and presumably the user-directed scheduling of switch should ensure
        # the same for this emulation
        print "In switch for {} from {}".format(self, context._current)
        self._mailbox.add(GreenletArgs(args, kwargs))
        self._frame = sys._getframe(1)  # the caller's frame
        try:
            print "Waiting on mailbox from switched away greenlet {}".format(context._current)
            result = _handle_result(context._current._mailbox.take())
            print "Completed waiting on mailbox from switched away greenlet {} result={} thread={}".format(context._current, result,  threading.current_thread())
            return result
        finally:
            self._frame = None

    def throw(self, *args):
        if len(args) == 0:
            self._mailbox.add(GreenletException(GreenletExit(), None, None))
        else:
            self._mailbox.add(GreenletException(
                args[0], args[1] if len(args) > 1 else None, args[2]  if len(args) > 2 else None))
        self._frame = sys._getframe(1)  # the caller's frame
        try:
            return _handle_result(context._current._mailbox.take())
        finally:
            self._frame = None

    @property
    def dead(self):
        return not self._thread.is_alive()

    @property
    def gr_frame(self):
        return self._frame

    def __nonzero__(self):
        # NB: defining this method makes tests more interesting than usual;
        # always compare with `greenlet is None` rather than relying on `if greenlet`!
        return self._thread.is_alive() and not hasattr(self, "run")
 
    def _wrapper(self):
        # Now that this thread is started, we need to be prepared to
        # be immediately switched to it on a subsequent scheduling
        # (all user directed of course)
        context._current = self
        args, kwargs = _handle_result(self._mailbox.take(), applicable=True)

        # per the greenlet docs, the run attribute must be deleted
        # once the greenlet starts running
        run = self.run
        del self.run

        result = None  # ensure result is defined even if there is no run target
        if run:
            print "Running greenlet thread {} self={} run={} args={} kwargs={}".format(self._thread.name, self, run, args, kwargs)
            result = run(*args, **kwargs)
            print "Completed greenlet thread {}".format(self._thread.name)

        # Switch up the parent hierarchy
        if self.parent is not None:
            print "Switching to parent={} result={} from context={}".format(self.parent, result, context._current)
            self.parent._propagate(result)
        print "Completed greenlet {}".format(self)

    def __del__(self):
        self.throw()
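
The mailbox above is the heart of this emulation: each greenlet owns an ArrayBlockingQueue of capacity 1, so switch() is a rendezvous in which add() fails fast if a second value is pushed before the target consumes the first, while take() parks the emulating thread until another greenlet switches back. A standalone sketch of that handoff between two plain threads follows; the names are illustrative, not from the example.

from java.util.concurrent import ArrayBlockingQueue
import threading

mailbox = ArrayBlockingQueue(1)  # capacity 1: at most one pending switch

def worker():
    value = mailbox.take()  # parks this thread until a value is "switched" in
    print "worker got", value

t = threading.Thread(target=worker)
t.setDaemon(True)
t.start()
mailbox.add("hello")  # add() raises IllegalStateException if the slot is occupied
t.join()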
Example #3

class Sim(ApplicationSession):

    @inlineCallbacks
    def onJoin(self, details):
        print("session attached")
        results = []
        res = yield self.register(self)
        results.extend(res)

        for res in results:
            if isinstance(res, Failure):
                print("Failed to register procedure: {}".format(res.value))
            else:
                print("registration ID {}: {}".format(res.id, res.procedure))

    @wamp.register(u'digs.sim.topology')
    def topology(self):
        self.configure()
        print('topology called\n')
        dataCenter = self.simulator.getDatacenter()
        dataCenterSpecificationPayload = json.dumps(dataCenterToJSON(dataCenter), ensure_ascii=False).encode('utf8')
        return dataCenterSpecificationPayload

    def configure(self):
        self.dataCenterBuilder = SimulatorBuilder("configs/DC_Logic.xml")
        self.simulatorPOD = self.dataCenterBuilder.build()
        self.environment = Environment()
        self.partialResults = ArrayBlockingQueue(5)
        self.simulator = Simulator(self.simulatorPOD, self.environment, self.partialResults)
        # All racks contain the same number of chassis, and all chassis have the
        # same number of servers. This is a limitation; unless it becomes a
        # requirement, it will remain as it is.
        self.racks = self.simulator.getDatacenter().getRacks()
        self.chassis = self.racks.toArray()[0].getChassis()
        self.servers = self.chassis.toArray()[0].getServers()

    @wamp.register(u'digs.sim.execute')
    def execute(self):
        print('Execute called')
        self.simulatorThread = Thread(target=self.simulator.run)
        self.simulatorThread.start()
        self.partialResultsThread = Thread(target=self.partialResultsPublisher)
        self.partialResultsThread.start()

    def partialResultsPublisher(self):
        counter = 0
        amountOfDataToBeSent = 0
        bundle = list()
        partial = PartialResults(len(self.racks), len(self.chassis), len(self.servers))
        messageCounter = 0
        while True:
            partialResult = self.partialResults.poll(50, TimeUnit.MILLISECONDS)
            if partialResult is not None:

                counter = 0
                amountOfDataToBeSent += 1
                if amountOfDataToBeSent == 10:
                    amountOfDataToBeSent = 0
                    racksStats = {'racksStats' : partial.toJSON(partialResult.getRacksStats()) }
                    bundle.append(racksStats)

                if len(bundle) == 7:
                    payload = json.dumps({'results': bundle}, ensure_ascii=False, separators=(',', ':')).encode('utf8')
                    #print('Payload size {0}'.format(len(payload)))
                    reactor.callFromThread(self.publish, u'digs.sim.partialResult', payload)
                    messageCounter += 1
                    del bundle[:]
            else:
                counter += 1
            if counter > 50:
                break
        print('Simulation ended, publish called {0} times'.format(messageCounter))

    @wamp.register(u'digs.sim.results')
    def results(self):
        print('Results called')
        self.partialResultsThread.join()
        self.simulatorThread.join()
        results = SimulationResults(self.simulator)
        print('Results collected')
        return {'Total energy Consumption' : results.getTotalPowerConsumption(),
                'LocalTime' : results.getLocalTime(),
                'Mean Power Consumption' : results.getMeanPowerConsumption(),
                'Over RED' : results.getOverRedTemperatureNumber(),
                'Messages' : {
                    '# of Messages DC to sys' : results.getNumberOfMessagesFromDataCenterToSystem(),
                    '# of Messages sys to nodes' : results.getNumberOfMessagesFromSystemToNodes()
                }
        }
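
partialResultsPublisher above leans on the timed poll() overload: poll(50, TimeUnit.MILLISECONDS) returns None when nothing arrives within the window, and the loop counts consecutive empty polls to decide that the simulator has finished. Here is that pattern in isolation, as a rough sketch with the producer thread and the processing step omitted.

from java.util.concurrent import ArrayBlockingQueue, TimeUnit

queue = ArrayBlockingQueue(5)
idle_polls = 0
while idle_polls <= 50:
    item = queue.poll(50, TimeUnit.MILLISECONDS)  # None on a 50 ms timeout
    if item is not None:
        idle_polls = 0  # data arrived, reset the idle counter
        # ... process item here ...
    else:
        idle_polls += 1
# about 50 * 50 ms = 2.5 s of silence ends the loop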