Пример #1
0
def check_valley_free(g, path, log=None):
  """Verify the valley-free routing property along a node path.

  Translates graph node ids to ASNs (via ``g.node[hop]['asn']``) and walks
  consecutive AS pairs, looking each relationship up in Redis. Once the
  path has gone "down" a provider->customer edge, any later
  customer->provider edge violates the property.

  :g: networkx graph whose nodes carry an 'asn' attribute
  :path: iterable of node ids forming the path
  :log: optional file-like object for diagnostic messages
  :returns: tuple (errct, hopct) -- hops with unknown relationship vs.
            total inter-AS hops examined
  :raises ValleyFreeError: if the path is not valley-free
  """
  r = connection.Redis()

  if r.get(dbkeys.AS.status('peering_data')) != "True":
    sys.stderr.write("Attempted to check valley-free property, "
                     "but have no peering data.\n")
    return (0, 1)  # IF we have no peering data. It's irrelevant.

  if len(path) == 0:
    return (0, 1)

  went_down = False
  errct = 0.0
  hopct = 0.0
  asn_path = [g.node[hop]['asn']
              for hop in path
              if g.node[hop]['asn'] != "N/A"]

  for as1, as2 in pairwise(asn_path):

    hopct += 1
    if as1 == as2:
      continue

    # BUG FIX: r is a Redis instance, not a factory; the old code called
    # r().hget(...), which raised TypeError on the first lookup.
    relationship = r.hget(dbkeys.AS.relationship(as1), as2)

    if not relationship:
      # Let's try the other side.
      relationship = r.hget(dbkeys.AS.relationship(as2), as1)
      if not relationship:
        if log:
          log.write("No relationship for %s <-> %s\n" % (as1, as2))
        errct += 1
        continue
      else:
        # We need to swap -1 and 1 for this side
        # since it's the opposite perspective.
        # If we swap a 2 to a -2, who cares.
        # BUG FIX: Redis returns strings; "1" * -1 evaluated to "" and the
        # comparisons below could never match. Negate as an int instead.
        relationship = -int(relationship)
    else:
      # BUG FIX: convert the stored string so == 1 / == -1 can match.
      relationship = int(relationship)

    if relationship == 1:
      went_down = True

    elif relationship == -1:  # AS1 is a customer of AS2
      if went_down:
        raise ValleyFreeError()

  return (errct, hopct)
  def identify_ixps(self, as_path):
    """Identify the IXPs and MetaIXPs that occur along a given AS path.

    :as_path: A whitespace-separated string of AS numbers, or None
    :returns: A tuple of the form (ixps, metaixps), which are sets
    """
    # BUG FIX: the docstring promises sets, but the None case previously
    # returned a pair of lists. Return empty sets for a consistent type.
    if as_path is None:
      return (set(), set())

    path_ixps = set()

    # self.ixps maps (asn, asn) hop pairs to sets of IXP identifiers.
    for pair in pairwise(as_path.split()):
      if pair in self.ixps:
        path_ixps |= self.ixps[pair]

    path_metaixps = set(self.lookup_metaixp(ixp) for ixp in path_ixps)

    return (path_ixps, path_metaixps)
Пример #3
0
def thread_shortest_path(graphpath, sp_key, type_key,
                         node_key, path_key, used_key):
    """Worker: compute latency-weighted shortest paths for queued targets.

    Pops target nodes from the Redis set *sp_key* until it is empty. For
    each target, runs single-source Dijkstra over the graph loaded from
    *graphpath*, keeps only paths leading to relay/client/dest nodes
    (looked up in the *type_key* hash), rebuilds any path that is not
    valley-free, then records the surviving hops in *path_key* and the
    surviving nodes in *used_key*. Exits the process when done.

    :raises ValleyFreeError: if a replacement valley-free path cannot
        be constructed for some (target, path_target) pair
    """
    global thread_graph

    log_out = redirect_output()

    # BUG FIX: the {2} placeholder is labelled node_key, but the old call
    # passed used_key in that position, mislabelling the log output.
    log_out.write("Spawned with sp_key = {0}; type_key = {1}; "
                  "node_key={2}; path_key={3}\n"
                  .format(sp_key, type_key, node_key, path_key))
    log_out.flush()

    r = connection.Redis()

    log_out.write("Reading graph...")
    log_out.flush()

    thread_graph = nx.read_graphml(graphpath)

    log_out.write(" Complete\n")
    log_out.flush()

    target = r.spop(sp_key)
    while target:
        used_nodes = set()
        used_paths = set()
        log_out.write("Obtaining shortest paths for %s... " % target)
        log_out.flush()

        paths = nx.single_source_dijkstra_path(
            thread_graph, target, weight='latency')

        for path_target, path in paths.iteritems():
            # We only care for relays, clients and destinations
            if r.hget(type_key, path_target) not in ('relay', 'client',
                                                     'dest'):
                continue

            try:
                errors, total = check_valley_free(
                    thread_graph, path, log=log_out)

            except ValleyFreeError:
                # If it's not valley free, we rebuild a new path
                log_out.write("Path from %s to %s is not valley-free. "
                              "Building a new one... "
                              % (target, path_target))

                try:
                    path, time_taken = valley_free_path(
                        thread_graph, target, path_target)

                    log_out.write(" finished in %s seconds. New Path: %s "
                                  "[%0.0f/%0.0f (%0.2f%%) of links had no "
                                  "information]\n"
                                  % (Color.wrap(time_taken, Color.OKBLUE),
                                     str(path), path.errct, len(path) - 1,
                                     float(path.errct) / float(len(path) - 1)))

                except ValleyFreeError:
                    log_out.write(
                        Color.warn(
                            "Couldn't produce valley-free path from %s to %s\n"
                            % (target, path_target)))
                    raise

            # Store the links and nodes from this path so that we keep them.
            # BUG FIX: this bookkeeping used to live in the try's `else`
            # clause, so a path successfully rebuilt by valley_free_path()
            # was never recorded.
            used_nodes.update(path)
            for hop in pairwise(path):
                # Add each hop to our set of paths
                used_paths.add(hop)

        # Guard: redis-py sadd() raises when called with no members.
        if used_paths:
            r.sadd(path_key, *used_paths)
        if used_nodes:
            r.sadd(used_key, *used_nodes)
        log_out.write("Done\n")
        log_out.flush()
        target = r.spop(sp_key)

    log_out.write("Exiting ")
    log_out.flush()
    log_out.close()
    sys.exit(0)
  def collapse_degree_two(self, protected=()):
    """Iteratively collapse degree-2 nodes out of the topology.

    A node with exactly two neighbours is removed and its neighbours
    joined directly, provided all three nodes share the same ASN and a
    common country. Delay profiles of the two incident links are combined
    (pairwise sums, decile-transformed) and cached under a
    ``graph:collapsed:*`` Redis key. Passes repeat until no node can be
    collapsed.

    :protected: iterable of node ids that must never be collapsed
        (BUG FIX: default changed from the mutable ``[]`` to an immutable
        tuple; it is only used for membership tests, so behavior is
        unchanged)
    """
    log.info("Cleaning up collapse dbkeys...")
    r = connection.Redis()
    p = r.pipeline()
    # Drop stale cached delay profiles from previous runs.
    for key in r.keys("graph:collapsed:*"):
      p.delete(key)
    write_failed(p.execute())

    pass_ctr = 0
    collapsable = True
    ignoreable = set()  # nodes proven non-collapsable; skipped in later passes
    clogout = open('collapse.log', 'w')
    while collapsable:
      pass_ctr += 1
      sys.stderr.write("\n")
      collapsable = False
      degree2nodes = filter(
          lambda val: (len(val[1]) == 2 and val[0] not in ignoreable),
          self.iteritems())

      counter = 0
      n = 0
      deferred = 0
      collapsed = set()
      timer = ProgressTimer(len(degree2nodes))

      for node, connections in degree2nodes:

        # Progress line every 50 nodes (and on the final node).
        if n % 50 == 0 or n == timer.total - 1:
          timer.tick(50)
          sys.stderr.write(
              "{0}Pass {1}: {2} {3}".format(
                  Color.NEWL, pass_ctr,
                  Color.wrapformat(
                      "[{0} processed, {1} collapsed, {2} deferred]",
                      Color.HEADER, n, counter, deferred
                  ),
                  Color.wrapformat(
                      "[eta: {0}]",
                      Color.OKGREEN, timer.eta()
                  ))
          )

        n += 1

        # Collapse requires node + both neighbours to share one ASN and
        # overlapping country sets.
        asns = [r.get(dbkeys.POP.asn(x)) for x in connections | set([node])]
        countries = [r.smembers(dbkeys.POP.countries(x))
                     for x in connections | set([node])]

        same_asn = reduce(lambda x, y: x if x == y else False, asns)
        same_country = True
        for x, y in pairwise(countries):
          if x & y != x:
            same_country = False

        if (same_asn is False or same_country is False or node in protected):
          ignoreable.update(connections | set([node]))
          continue

        # Defer any node touching something already collapsed this pass.
        if len(collapsed & (connections | set([node]))) != 0:
          deferred += 1
          continue

        collapsed.update(connections | set([node]))
        side1 = connections.pop()
        side2 = connections.pop()
        connections.update(set([side1, side2]))

        # NOTE(review): eval() on Redis payloads is unsafe if the store can
        # hold untrusted data -- consider ast.literal_eval. Left as-is here.
        try:
          side1_delays = decile_transform(
              [float(delay)
               for edge in r.smembers(dbkeys.Link.interlink(node, side1))
               for delay in r.smembers(dbkeys.delay_key(*eval(edge)))])
        # BUG FIX: bare `except:` also trapped SystemExit/KeyboardInterrupt.
        except Exception:
          # No raw measurements; fall back to a previously collapsed profile.
          side1_delays = eval(r.get("graph:collapsed:%s" %
                              (dbkeys.Link.interlink(node, side1))))
        try:
          side2_delays = decile_transform(
              [float(delay)
               for edge in r.smembers(dbkeys.Link.interlink(node, side2))
               for delay in r.smembers(dbkeys.delay_key(*eval(edge)))])
        except Exception:
          side2_delays = eval(r.get("graph:collapsed:%s" %
                              (dbkeys.Link.interlink(node, side2))))

        # Combine the two sides: sum every pair of decile samples.
        combined_delays = [s1 + s2
                           for s1 in side1_delays
                           for s2 in side2_delays]

        r.set('graph:collapsed:%s'
              % (dbkeys.Link.interlink(*list(connections))),
              decile_transform(combined_delays))

        clogout.write("Collapsed %s <-> %s <-> %s\n" %
                      (side1, node, side2))

        collapsable = True

        # Rewire: drop the node and connect its two neighbours directly.
        del self[node]
        self[side1].add(side2)
        self[side2].add(side1)
        self[side1].remove(node)
        self[side2].remove(node)

        counter += 1

    clogout.close()