Example 1
    def _send_loop(self):
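        """Keep sending while the congestion window has room, preferring
        retransmissions over new packets from the flow."""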
        if self.is_done:
            return
        # NOTE: this sets a large fixed window on every call, overriding the
        # AIMD updates made in src_recv() and timeout(); it looks like a
        # debugging override.
        self.cwnd = 100

        # Get next packet, send it
        while len(self.in_flight) + 1 <= self.cwnd:
            # What packet?
            if len(self.retransmit_q) > 0:
                p = self.retransmit_q.pop()
                if self.id == PARAMS.flow_print:
                    vprint("flow : %s retransmit" % p)
            else:
                try:
                    p = next(self.packets)
                except StopIteration:
                    # no more packets!
                    break

            # Check it's not gotten acked...
            if self.is_acked(p.seq_num):
                continue

            if self.id == PARAMS.flow_print:
                vprint("flow : %s sent, cwnd: %s/%.1f" % (p, len(self.in_flight)+1, self.cwnd))

            self.in_flight.add(p.seq_num)
            p.sent_ms = R.time
            self.src_send_q.enq(p)
            #vprint(self, len(self.in_flight))

            # Setup the timeout
            R.call_in(self.rto, self.timeout, p, rto = self.rto)
Example 2
    def del_flow(flow_id):
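        """Mark flow `flow_id` as done: log it, drop it from FLOWS, and stop
        the run once every flow has finished (when all flows arrive at start)."""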
        global FLOWS, N_DONE
        vprint("%s done!" % (FLOWS[flow_id]))
        flow = FLOWS[flow_id]
        flow.end = R.time
        LOG.log_flow_done(FLOWS[flow_id])

        N_DONE[0] += 1
        del FLOWS[flow_id]

        if PARAMS.arrive_at_start:
            if N_DONE[0] == N_FLOWS[0]:
                R.stop()
Example 3
 def enq(self, packet):
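     """Enqueue `packet`, dropping it if the queue's byte-size cap would be
     exceeded, then try to transmit."""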
     if packet.flow_id == PARAMS.flow_print:
         vprint("nic  : %s enq  %s" % (packet, self))
     if self.queue_size_max is not None and \
             self.q_size_B + packet.size_B > self.queue_size_max:
         if packet.flow_id == PARAMS.flow_print:
             vprint("%s dropped, full queue %s" % (packet, self))
         return
     self._queue.appendleft(packet)
     self.q_size_B += packet.size_B
     # A 0-delay call (R.call_in(0, self._send)) would defer sending until the
     # caller is done, making enq behave consistently regardless of the current
     # queue size; here the send is triggered immediately instead.
     #R.call_in(0, self._send)
     self._send()
Example 4
        def recv(packet):
            """Actually receives packets for `port_id`"""

            if not self.enabled:
                assert False,\
                        "@%.3f%s: %s drop from :%s" % (R.time, self, packet, port_id)

            # Forward to destination
            dst_id = self.dests[port_id]

            if packet.flow_id == PARAMS.flow_print:
                vprint("sw % 2d: %s recv %s -> %s" %
                       (self.id, packet, self, dst_id))

            self.tx[dst_id].enq(packet)

            self.packets_by_port[port_id] += 1
Example 5
    def _send(self, ports=None):
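        """For each free port, pick a packet for that port's destination from
        the first (highest) priority class, either via a registered pull
        function or from the per-destination buffer, and transmit it."""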
        #vprint("%s: _send()" % self)

        if ports is None:
            ports = list(self.available_ports)

        #vprint("%s: available ports: %s" % (self, self.available_ports))
        for priority_i in range(1):
            for free_port in ports:
                port_type = get_port_type(free_port)
                dst = self.ports_dst[free_port]
                if dst is None:
                    continue
                port_dst = dst.id
                buffers_type = self.buffers_dst_type[port_dst]

                priority_type = self.priorities[port_type][priority_i]
                buf = buffers_type[priority_type]
                sz = self.buffers_dst_type_sizes[port_dst][priority_type]
                # assert len(buf) == sz, "%s: buffer[%s][%s] size %s, recorded %s" % (self, port_dst, priority_type, len(buf), sz)

                if False and self.id == 32:
                    vprint("%s:   :%s (%s) considering %s/%s (%d)..." % (
                        self, free_port, port_type, port_dst, priority_type, sz
                        #end = ""
                    ))

                pkt = None
                if priority_type in self.pull_fns:
                    # Eventually should all be here, for now, not all implemented...
                    pkt = self.pull_fns[priority_type](port_id=free_port,
                                                       dst_tor_id=port_dst)
                elif sz > 0:
                    #vprint(" has packets!")
                    pkt = buf.popleft()
                    self.buffers_dst_type_sizes[port_dst][pkt.tag] -= 1

                if pkt is not None and free_port in self.available_ports:
                    pkt.intended_dest = port_dst
                    if pkt.flow_id == PARAMS.flow_print:  # or self.id == 16:
                        vprint("%s: sending %s on :%s -> %s" %
                               (self, pkt, free_port, port_dst))
                    self.ports_tx[free_port].enq(pkt)
                    self.available_ports.remove(free_port)
                    pkt_tor_dst = self.dst_to_tor[pkt.dst_id]
Example 6
    def timeout(self, packet, rto = 0):
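        """Handle a retransmission timeout for `packet`: multiplicatively
        decrease the window (at most once per RTT) and queue the packet for
        retransmission."""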
        if self.is_done:
            return

        if packet.seq_num in self.in_flight:
            if self.id == PARAMS.flow_print:
                vprint("flow : %s \033[0;31mtimeout after %.3f\033[0;00m" % (
                    packet, rto))
            self.in_flight.remove(packet.seq_num)

            if R.time > self.timeout_lock:
                if self.id == PARAMS.flow_print:
                    vprint("flow : %s \033[0;31m MD!!\033[0;00m" % packet)
                self.cwnd = max(1, self.cwnd/2)
                self.sthresh = self.cwnd
                self.timeout_lock = R.time + self.rtt_ms

            self.retransmit_q.appendleft(packet.copy())
            self._send_loop()
Example 7
    def request_matching(self, tor, dst_id):
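        """Try to reserve an up-link from `tor` and a down-link to `dst_id`;
        record the matching and return True if both are free."""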
        #assert self.available_up[tor.id]
        if not self.available_up[tor.id]:
            return False

        # Make sure the connection can be established
        if not self.available_dn[dst_id]:
            return False

        vprint("%s: %s req -> %s" % (self, tor, dst_id))

        # Mark both endpoints as taken
        self.available_up[tor.id] = False
        self.available_dn[dst_id] = False

        self.dests[tor.id]  = dst_id
        self.starts[tor.id] = R.time + 15  # ...

        return True
Example 8
    def recv(self, packet):
        """For reaveiving packets from the outside"""
        #vprint("%s received at %s" % (packet, self))
        flow_id = packet.flow_id
        if flow_id == PARAMS.flow_print:
            vprint("srvr : %s recv on %s" % (packet, self))

        if packet.final_dst != self.id:
            packet.dst_id = packet.final_dst
            packet.src_id = self.id
            self.uplink.enq(packet)
            return

        if flow_id in self.flows:
            if flow_id == PARAMS.flow_print:
                vprint("srvr : %s recv on %s" % (packet, self))
            self.flows[flow_id](packet)
        else:
            # This is okay: the flow may already be over and these are
            # straggler packets arriving late
            pass
Example 9
    def _send(self):
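        """Dequeue one packet and transmit it: re-enable the queue after the
        serialization delay, and deliver the packet after serialization plus
        propagation delay."""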
        # Currently sending something, or paused, or no packets to send
        if not self._enabled or self._paused:
            return
        if len(self._queue) == 0:
            if self.empty_callback is not None:
                self.empty_callback()
            return

        # Disable
        self._enabled = False

        # Get packet and compute tx time
        pkt = self._queue.pop()
        self.q_size_B -= pkt.size_B
        tx_delay = pkt.size_B * self.ms_per_byte

        if pkt.flow_id == PARAMS.flow_print:
            vprint("queue: %s sent %s tx %.6f lat %.6f" %
                   (pkt, self, tx_delay, self.prop_delay))
        R.call_in(tx_delay, self._enable)
        R.call_in(self.prop_delay + tx_delay, self.dst_recv, pkt)
Example 10
    def src_recv(self, packet):
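        """Process an ACK: update the RTT estimate, grow the congestion window
        (slow start below sthresh, congestion avoidance above), and resume the
        send loop."""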
        #assert ack_packet.is_ack
        if self.is_done:
            return

        if self.id == PARAMS.flow_print:
            vprint("flow : %s acked" % (packet))

        # Mark the ack
        self.process_ack(packet.seq_num)

        # Done! woot
        if self.n_acked == self.size_packets:
            self._done()
            return

        # Update rtt estimate
        rtt_sample = R.time - packet.sent_ms
        rtt_err = rtt_sample - self.rtt_ms
        self.rtt_ms     += self.alpha * rtt_err
        self.rtt_dev_ms += self.beta  * (abs(rtt_err) - self.rtt_dev_ms)
        if self.id == PARAMS.flow_print:
            vprint("flow : rtt/timeout: %.3f/%.3f" % (rtt_sample, self.rto))


        # Remove from in-flight if necessary
        if packet.seq_num in self.in_flight:
            self.in_flight.remove(packet.seq_num)
            if self.cwnd < self.sthresh:
                self.cwnd += 1
            else:
                self.cwnd += 1/self.cwnd
            if self.id == PARAMS.flow_print:
                vprint("flow : cwnd", self.cwnd)

        self._send_loop()
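
The snippets above use self.rto (to arm the retransmission timer and in the debug print) without showing how it is derived. A minimal sketch, assuming the flow follows the usual Jacobson/Karels rule of smoothed RTT plus a multiple of the RTT deviation; the property itself, the factor of 4, and the min_rto_ms floor are assumptions, not taken from the source:

    @property
    def rto(self):
        # Hypothetical sketch: timeout = smoothed RTT + 4 * mean deviation,
        # floored at an assumed minimum (min_rto_ms is not from the source).
        return max(self.min_rto_ms, self.rtt_ms + 4 * self.rtt_dev_ms)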
Example 11
            http://www.eng.utah.edu/~cs5340/project/project.pdf"""
    parser = argparse.ArgumentParser(
        description='Coreference Resolution Engine', epilog=pinfo)
        
    # Get required and optional arguments
    parser.add_argument("listfile", help="File containing file path strings")
    parser.add_argument("responsedir", help="Path to output directory")
    parser.add_argument("-v", "--verbose", help="increase output verbosity",
                        action="store_true")
    parser.add_argument("-t", "--test", help="run doctests only",
                        action="store_true")
    parser.add_argument('-H', '--host', default='127.0.0.1',
                        help='Host running the Stanford CoreNLP server')
    args = parser.parse_args()

    # If the verbose flag is set, create the global function vprint, which
    # prints to stdout only in verbose mode
    import helpers
    helpers.mk_verbose_printer(args.verbose)
    
    # Now that vprint is created, we can import the rest of the modules
    from helpers import vprint
    from data import mk_parses
    from rulealgs import apply_rules

    if args.test:
        helpers.run_doctests()
    else:
        result = main(args)
        vprint(strerror(result))
        exit(result)
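
For context, helpers.mk_verbose_printer itself is not shown in these examples. A minimal sketch of what the comment above describes, installing a module-level vprint that only prints when the verbose flag is set (the exact implementation is an assumption):

def mk_verbose_printer(verbose):
    """Install a global vprint that is a no-op unless verbose is set (sketch)."""
    global vprint
    if verbose:
        def vprint(*args, **kwargs):
            print(*args, **kwargs)
    else:
        def vprint(*args, **kwargs):
            pass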
Example 12
    def recv(self, packet):
        """Receives packets for `port_id`"""

        if packet.flow_id == PARAMS.flow_print:
            vprint("%s: %s recv" % (self, packet))

        # Sanity check
        if packet.intended_dest is not None:
            assert packet.intended_dest == self.id, \
                "@%.3f %s received %s, was intended for %s" % (R.time, self, packet, packet.intended_dest)

        # Update hop count
        packet.hop_count += 1
        assert packet.hop_count < 50, "Hop count >= 50? %s" % packet

        # Deliver locally
        if packet.dst_id in self.local_dests:
            if packet.flow_id == PARAMS.flow_print:
                vprint("%s: %s Local destination" % (self, packet))

            next_port_id = self.local_dests[packet.dst_id]
            self.ports_tx[next_port_id].enq(packet)
        else:
            packet._tor_arrival = R.time
            next_tor_id = self.dst_to_tor[packet.dst_id]

            dst_tag = ToRSwitch.packet_tag(packet.tag)

            # CACHE handling
            if packet.src_id in self.local_dests and dst_tag == "cache" and next_tor_id not in self.will_have_cache_to:
                for port_id in cache_ports:
                    if self.ports_dst[port_id] is None:
                        if self.switches[port_id].request_matching(
                                self, next_tor_id):
                            # Stops us from requesting this again
                            self.will_have_cache_to.add(next_tor_id)
                            R.call_in(15, self.activate_cache_link, port_id,
                                      next_tor_id)
                            FLOWS[packet.flow_id].add_callback_done(
                                self.deactivate_cache_link(next_tor_id))
                            break

            # If we don't have a cache yet, make it rotor
            if dst_tag == "cache" and next_tor_id not in self.have_cache_to:
                dst_tag = "rotor"

            # TODO can just enqueue right here?
            #if dst_tag == "cache":
            #vprint("%s %s going to cache" % (self, packet))

            # ROTOR requires some handling...
            # ...adapt our capacity on rx
            if dst_tag == "rotor":
                self.capacity[next_tor_id] -= 1

                # ... if indirect, put it in higher queue...
                if packet.src_id not in self.local_dests:
                    if packet.flow_id == PARAMS.flow_print:
                        vprint("%s: %s is old indirect" % (self, packet))
                    dst_tag = "rotor-old"
                else:
                    self.nonempty_rotor_dst.add(next_tor_id)

            self.buffers_dst_type[next_tor_id][dst_tag].append(packet)
            self.buffers_dst_type_sizes[next_tor_id][dst_tag] += 1

            # debug print
            if packet.flow_id == PARAMS.flow_print:
                vprint("%s: %s Outer destination %s/%s (%d)" %
                       (self, packet, next_tor_id, dst_tag,
                        len(self.buffers_dst_type[next_tor_id][dst_tag])))

            # trigger send loop
            buf = self.buffers_dst_type[next_tor_id][dst_tag]
            sz = self.buffers_dst_type_sizes[next_tor_id][dst_tag]
            #assert len(buf) == sz, "%s: recv buffer[%s][%s] size %s, recorded %s" % (self, next_tor_id, dst_tag, len(buf), sz)
            self._send()
Example 13
 def deactivate(flow_id):
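     # `self` and `tor_dst_id` are captured from the enclosing scope
     # (presumably deactivate_cache_link); this closure is what gets
     # registered as the flow-done callback, see add_callback_done in Example 12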
     vprint("%s: release cache link to %s" % (self, tor_dst_id))
     # TODO mechanism to allow multiple flows to the same dst to use the same cache link
     self.have_cache_to.discard(tor_dst_id)
     self.will_have_cache_to.discard(tor_dst_id)
Example 14
 def activate_cache_link(self, port_id, dst_tor_id):
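     """Point `port_id` at the destination ToR, mark the cache link as up, and
     kick the send loop."""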
     if self.id == 26:
         vprint("%s: activate :%d -> %s" % (self, port_id, dst_tor_id))
     self.ports_dst[port_id] = self.tors[dst_tor_id]
     self.have_cache_to.add(dst_tor_id)
     self._send()
Example 15
def mk_fparse(filename, pserver):
    """Parses input to get list of paragraphs with sentence structure
        and a dictionary of noun phrases contained in the COREF tags

    Args:
        filename: string, path to crf file
        pserver: jsonrpc.ServerProxy, stanford corenlp server for parsing

    Returns:
        tuple, (list_stanford_sent_parses, dict_file_corefs, dict_file_synsets)

    """
    parses = []
    
    try:
        with open(filename) as f:
            vprint('OPEN: %s' % filename)
            xml = f.read()
    except IOError:
        print(strerror(EIO))
        print("ERROR: Could not open %s" % filename)
        return (parses, get_tagged_corefs(''), get_synsets({}))

    # remove unwanted characters from xml
    vprint('\tPARSE: Parsing file: %s' % filename)
    # parse_tries = 0
    #     while parse_tries < 5:
    #         try:
    #             t = loads(pserver.parse(_normalize_sentence(_remove_tags(xml))))
    #             parse_tries = 0
    #             break
    #         except jsonrpc.RPCTimeoutError:
    #             vprint('\tERROR: RPCTimeoutError - retrying')
    #             parse_tries += 3
    #         except jsonrpc.RPCTransportError:
    #             vprint('\tERROR: RPCTransportError - retrying')
    #             data = _normalize_sentence(_remove_tags(xml))
    #             sentences = [sent for part in data.split('\n\n')
    #                          for sent in sent_tokenize(part)]
    #             try:
    #                 xml1 = data[:data.find(sentences[len(sentences)/3])]
    #                 xml2 = data[data.find(sentences[len(sentences)/3+1]):data.find(sentences[2*len(sentences)/3])]
    #                 xml3 = data[data.find(sentences[2*len(sentences)/3+1]):]
    #                 t1 = loads(pserver.parse(xml1))
    #                 t2 = loads(pserver.parse(xml2))
    #                 t3 = loads(pserver.parse(xml3))
    #                 t = dict(t1.items() + t2.items() + t3.items())
    #                 parse_tries = 0
    #                 break
    #             except Exception:
    #                 parse_tries = -1
    #                 break
    #             parse_tries += 1
    #     if parse_tries != 0:
    #         vprint('\tFATAL: RPCTransportError - skipping')
    
    sentences = [sent for part in xml.split('\n\n')
                 for sent in sent_tokenize(part)]
    vprint('\tPARSE: Parsing sentences: %s' % filename)
    for sent in sentences:
        sent_corefs = get_tagged_corefs(sent, ordered=True)
        # remove unwanted characters from xml
        sent = _normalize_sentence(_remove_tags(sent))
        parse_tries = 0
        while parse_tries < 5:
            try:
                sparse = loads(pserver.parse(sent))
                parse_tries = 0
                break
            except jsonrpc.RPCTransportError:
                vprint('\tERROR: RPCTransportError - retrying')
                parse_tries += 1
        if parse_tries != 0:
            vprint('\tFATAL: RPCTransportError - skipping')
            continue

        pparse = _process_parse(sparse, sent_corefs)
        if pparse:
            parses.append(pparse)

    pos_tags = {}
    for parse in parses:
        for word, attr in parse[1]:
            tags = pos_tags.get(word, set())
            tags.add(attr['PartOfSpeech'])
            pos_tags[word] = tags
        
    return parses, get_tagged_corefs(xml), get_synsets(pos_tags)
Example 16
 def add_flow(self, flow, receiver):
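     """Register `receiver` as the handler for `flow`'s packets at this server
     and hook up the flow-done callback."""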
     if flow.id == PARAMS.flow_print:
         vprint("server: flow %s installed at %s" % (flow, self))
     self.flows[flow.id] = receiver
     flow.add_callback_done(self.flow_done)