def handle_same_pop_link(link):
  """ Handle links which should belong to the same PoP

  a. Neither has a PoP assigned
    - Assign both the same pop and set links:intra
  b. One side has a PoP assigned
    - Assign the other one the existing PoP and add links:intra
  c. Both sides have a PoP assigned
    - add to delayed_job:popjoins
  """
  r = connection.Redis()

  ip1, ip2 = link.split(":")[2:]

  with r.pipeline() as pipe:
    try:
      pipe.watch(dbkeys.ip_key(ip1))
      pipe.watch(dbkeys.ip_key(ip2))
      pop1 = dbkeys.get_pop(ip1, pipe=pipe)
      pop2 = dbkeys.get_pop(ip2, pipe=pipe)
      pipe.multi()

      if pop1 is None and pop2 is None:
        pop1 = dbkeys.setpopnumber(dbkeys.mutex_popnum(), ip1, pipe=pipe)
        pipe.hset(dbkeys.ip_key(ip2), 'pop', pop1)
        pipe.sadd(dbkeys.POP.members(pop1), ip2)

        store_link(r, (ip1, ip2), pop1, pipe=pipe)
      elif pop1 is not None and pop2 is not None:
        if not r.sismember('delayed_job:popjoins:known', (pop1, pop2)):
          pipe.lpush("delayed_job:popjoins", (pop1, pop2))
          pipe.sadd('delayed_job:popjoins:known', (pop1, pop2))
      else:
        if pop1 is None:
          knownpop = pop2
          pipe.hset(dbkeys.ip_key(ip1), 'pop', knownpop)
          pipe.sadd(dbkeys.POP.members(knownpop), ip1)
        else:
          knownpop = pop1
          pipe.hset(dbkeys.ip_key(ip2), 'pop', knownpop)
          pipe.sadd(dbkeys.POP.members(knownpop), ip2)
        store_link(r, (ip1, ip2), knownpop, pipe=pipe)

      pipe.execute()
      return True
    except redis.WatchError:
      return False
    finally:
      pipe.reset()
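# Usage sketch (hypothetical): handle_same_pop_link() returns False when the
# optimistic WATCH on either IP key fails, so a caller can simply retry a few
# times before deferring the link. The retry count below is an assumption.
def retry_same_pop_link(link, attempts=3):
  for _ in range(attempts):
    if handle_same_pop_link(link):
      return True
  return False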
def _assign_pops(unassigned_list_key, failed_list_key,
                 no_add_processed=False):
  """ Assign all of the IP addresses found in the redis list
  :unassigned_list_key:. If any fail, put them in the redis list
  :failed_list_key.

  Store processed links in 'delayed_job:processed_links' unless
  :no_add_processed: is False
  """

  r = connection.Redis()

  while r.llen(unassigned_list_key) > 0:
    try:
      if no_add_processed:
        link = r.rpop(unassigned_list_key)
      else:
        link = r.rpoplpush(unassigned_list_key, "delayed_job:processed_links")
      if link is None:
        return
      ip1, ip2 = link.split(":")[2:]
      cross_as = different_as(r, dbkeys.ip_key(ip1), dbkeys.ip_key(ip2))
      cross_24 = different_24(r, ip1, ip2)

      if cross_as is None:
        # One side of this link has no AS assigned; skip it.
        continue

      log.info("link: %s, delay: %s" % (link, dbkeys.get_delay(link)))
      if dbkeys.get_delay(link) > 2.5 or cross_as or cross_24:
        success = handle_cross_pop_link(link)
      else:
        success = handle_same_pop_link(link)

      log.info("Assigning PoPs. Remaining: [{0}]. "
               "Deferred for join: [{1}]".format(
                   Color.wrap(r.llen(unassigned_list_key), Color.OKBLUE),
                   Color.wrap(r.llen('delayed_job:popjoins'), Color.HEADER)))

      if not success:
        assert_pops_ok(r, ip1, ip2)
        r.lpush(failed_list_key, link)

    except DataError as e:
      log.error("Fatal Error - Resetting: %s" % e)
      # A bare object() cannot take attributes; build a minimal args-like
      # namespace (stdlib argparse) carrying the single flag assign_pops needs.
      args = argparse.Namespace(reset=True)
      return assign_pops(args)
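# Hypothetical sketch of the different_24() helper used above; it is not part
# of this listing, but its name and call site suggest it only needs to compare
# the /24 prefixes (first three octets) of two IPv4 dotted quads.
def different_24_sketch(r, ip1, ip2):
  # The redis client argument is kept for signature compatibility only.
  return ip1.split(".")[:3] != ip2.split(".")[:3]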
def load_link_pairs(newpairs, geoipdb=None):
  global lua_push_unique

  r = connection.Redis()

  if lua_push_unique is None:
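    # Lua script: enqueue the per-link delay key onto
    # 'delayed_job:unassigned_links' only the first time it is seen, then
    # record the measured delay (ARGV[1]) in the key's set, atomically.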
    lua_push_unique = r.register_script("""
      local exists
      exists = redis.call("EXISTS", KEYS[1])
      if exists == 0 then
        redis.call("LPUSH", "delayed_job:unassigned_links", KEYS[1])
      end
      redis.call("SADD", KEYS[1], ARGV[1])
      return redis.status_reply("OK")
    """)

  with r.pipeline() as pipe:

    for link in newpairs:
      if link[0] == link[1]:
        raise Exception("Self-link: both endpoints are %s" % link[0])

      lua_push_unique(
          keys=[dbkeys.delay_key(link[0], link[1])],
          args=[link[2]],
          client=pipe)

      pipe.sadd('iplist', *link[:2])
      for ip, asn in itertools.izip(link[:2], geoipdb.lookup_ips(link[:2])):
        pipe.hmset(dbkeys.ip_key(ip), {'asn': asn})

    pipe.execute()
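# Usage sketch: load_link_pairs() expects (ip1, ip2, delay) tuples plus a
# GeoIP wrapper exposing lookup_ips(). The stub class and addresses below are
# hypothetical and only illustrate the expected interface.
class _FakeGeoIP(object):
  def lookup_ips(self, ips):
    return [64512 for _ in ips]  # placeholder ASN for every address

def load_example_pairs():
  pairs = [("192.0.2.1", "198.51.100.7", 1.8),
           ("192.0.2.1", "203.0.113.9", 12.4)]
  load_link_pairs(pairs, geoipdb=_FakeGeoIP())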
def load_attr_data(args):
  r = connection.Redis()
  try:
    keys = None
    i = 0
    with open(args.attr_file) as f:
      for i, line in enumerate(f):
        fields = line.split()
        if len(fields) == 0:
          continue
        if i == 0:
          if fields[0] == "#":
            keys = fields[1:]
            continue
        ip = fields[0]
        if keys:
          vals = dict([pair for pair in zip(keys, fields[1:])
                       if pair[0] != 'pop'])
        else:
          vals = dict(zip(fields[1::2], fields[2::2]))
          if 'pop' in vals:
            del vals['pop']

        r.hmset(dbkeys.ip_key(ip), vals)
        r.sadd('iplist', ip)
        if i % 10000 == 0:
          log.info("Set values for %d" % i)

  except IOError as e:
    log.error("Error: %s" % e)
    raise SilentExit
  except Exception as e:
    log.warn("\nError parsing line [%s]: %s\n" % (line.strip(), e))
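# Hypothetical example of the two attribute-file layouts load_attr_data()
# accepts; field names other than 'pop' and 'asn' are assumptions.
#
#   With a '#' header line (column names apply to every row):
#     # asn country
#     192.0.2.1 64512 US
#
#   Without a header (alternating key/value pairs after the IP):
#     192.0.2.1 asn 64512 country US
#
# A 'pop' column is dropped in both layouts, since PoP assignment is managed
# by the handlers in this module.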
def handle_cross_pop_link(link):
  """ Handle a situation where the two IPs on either end of a
  link should be in different PoPs.

  - Neither has a PoP assigned
    - Assign two new PoPs, and create links:inter
  - One side has a PoP assigned
    - Assign one new PoP and create links:inter
  - Both sides have a PoP assigned
    - Add the link to links:inter
  """
  r = connection.Redis()

  ip1, ip2 = link.split(":")[2:]

  with r.pipeline() as pipe:
    try:
      pipe.watch(dbkeys.ip_key(ip1))
      pipe.watch(dbkeys.ip_key(ip2))
      pop1 = dbkeys.get_pop(ip1, pipe=pipe)
      pop2 = dbkeys.get_pop(ip2, pipe=pipe)
      pipe.multi()

      if pop1 is None and pop2 is None:
        pop1 = dbkeys.setpopnumber(dbkeys.mutex_popnum(), ip1, pipe=pipe)
        pop2 = dbkeys.setpopnumber(dbkeys.mutex_popnum(), ip2, pipe=pipe)

      elif pop1 is not None and pop2 is not None:
        pass
      else:
        if pop1 is None:
          pop1 = dbkeys.setpopnumber(dbkeys.mutex_popnum(), ip1, pipe=pipe)
        else:
          pop2 = dbkeys.setpopnumber(dbkeys.mutex_popnum(), ip2, pipe=pipe)
      store_link(r, (ip1, ip2), pop1, pop2, pipe=pipe)

      pipe.execute()
      return True
    except redis.WatchError:
      return False
    finally:
      pipe.reset()
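# Hypothetical sketch of the store_link() helper both handlers rely on; the
# real implementation is not shown in this listing. The key accessors mirror
# the dbkeys.Link / dbkeys.POP calls in join_pops() below, and members are
# stored as repr()'d tuples because join_pops() reads them back with eval().
def store_link_sketch(r, ips, pop1, pop2=None, pipe=None, multi=False):
  client = pipe if pipe is not None else r
  pairs = ips if multi else [ips]
  for pair in pairs:
    if pop2 is None or pop1 == pop2:
      client.sadd(dbkeys.Link.intralink(pop1), repr(tuple(pair)))
    else:
      client.sadd(dbkeys.Link.interlink(pop1, pop2), repr(tuple(pair)))
      client.sadd(dbkeys.POP.neighbors(pop1), pop2)
      client.sadd(dbkeys.POP.neighbors(pop2), pop1)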
def cleanup(args):
  """
  Clean all of the pop and link related information out of the database.
  """
  r = connection.Redis(structures.ConnectionInfo(**args.redis))

  log.info("Removing IP pop data (may take a while)... ")
  ips = r.smembers('iplist')
  p = r.pipeline()
  for ip in ips:
    p.hdel(dbkeys.ip_key(ip), 'pop')
  result = p.execute()
  del ips
  log.info(Color.wrapformat("[{0} removed]",
                            Color.OKBLUE,
                            len(filter(None, result))))

  log.info("Removing PoP link data (may take a while)... ")
  linkkeys = r.keys("links:*")
  p = r.pipeline()
  for key in linkkeys:
    p.delete(key)
  result = p.execute()
  write_failed(result)
  del linkkeys

  log.info("Removing pop keys... ")
  popkeys = r.keys("pop:*")
  p = r.pipeline()
  for key in popkeys:
    p.delete(key)
  result = p.execute()
  write_failed(result)
  del popkeys

  log.info("Removing asn keys... ")
  popkeys = r.keys("asn:*")
  p = r.pipeline()
  for key in popkeys:
    p.delete(key)
  result = p.execute()
  write_failed(result)
  del popkeys

  if args.ip_links:
    log.info("Removing ip links... ")
    p = r.pipeline()
    p.delete(dbkeys.Link.unassigned())
    p.delete(dbkeys.Link.unassigned_fails())
    p.delete("delayed_job:processed_links")
    iplinkkeys = r.keys("ip:links:*")
    for key in iplinkkeys:
      p.delete(key)
    result = p.execute()
    write_failed(result)

  pipelined_delete(r,
                   'poplist',
                   'join:history',
                   'delayed_job:popjoins',
                   'delayed_job:popjoins:known',
                   'popincr',
                   'mutex:popjoin:init')
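# Hypothetical sketch of the pipelined_delete() helper called above; it is not
# part of this listing, but the call site implies a simple batched DEL.
def pipelined_delete_sketch(r, *keys):
  pipe = r.pipeline()
  for key in keys:
    pipe.delete(key)
  return pipe.execute()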
def join_pops(r, newpop, oldpop):
  """
  Join oldpop to newpop.
  """

  if newpop == oldpop:
    return

  if not r.sismember(dbkeys.POP.list(), newpop):
    raise Exception("%s is not in the poplist" % newpop)

  members = r.smembers(dbkeys.POP.members(oldpop))
  popas = r.get(dbkeys.POP.asn(oldpop))
  interlinks = r.smembers(dbkeys.POP.neighbors(oldpop))

  pipe = r.pipeline()
  for connected_pop in interlinks:
    if connected_pop == newpop:
      # What used to be an inter link from oldpop -> newpop
      # needs to become an intra link
      intralinkdata = r.smembers(dbkeys.Link.interlink(connected_pop,
                                 oldpop))
      store_link(r, map(eval, intralinkdata),
                 newpop, pipe=pipe, multi=True)

    else:
      # The inter link between connected -> oldpop needs to be
      # redirected to point at newpop
      interlinkdata = r.smembers(dbkeys.Link.interlink(connected_pop, oldpop))

      if len(interlinkdata) == 0:
        raise IndexError("Link between {0} and {1} has no links".format(
                         connected_pop, oldpop))

      store_link(r, map(eval, interlinkdata),
                 newpop, connected_pop, pipe=pipe, multi=True)

    pipe.delete(dbkeys.Link.interlink(connected_pop, oldpop))
    pipe.srem(dbkeys.POP.neighbors(connected_pop), oldpop)

  #Move every intra link that used to be in oldpop to newpop
  store_link(r, map(eval, r.smembers(dbkeys.Link.intralink(oldpop))),
             newpop, pipe=pipe, multi=True)

  # Merge the country sets rather than overwrite newpop's with oldpop's.
  pipe.sunionstore(dbkeys.POP.countries(newpop),
                   dbkeys.POP.countries(newpop), dbkeys.POP.countries(oldpop))

  # Update the pop value for every member of oldpop, and move it to newpop
  for member in members:
    pipe.hset(dbkeys.ip_key(member), 'pop', newpop)
    pipe.smove(dbkeys.POP.members(oldpop), dbkeys.POP.members(newpop), member)

  # Clean up oldpop
  pipe.delete(dbkeys.POP.members(oldpop))
  pipe.delete(dbkeys.POP.countries(oldpop))
  pipe.delete(dbkeys.POP.neighbors(oldpop))
  pipe.delete(dbkeys.Link.intralink(oldpop))
  pipe.srem(dbkeys.ASN.pops(popas), oldpop)
  pipe.delete(dbkeys.POP.asn(oldpop))
  pipe.srem(dbkeys.POP.list(), oldpop)

  # Mark it as joined
  pipe.set(dbkeys.POP.joined(oldpop), newpop)
  pipe.rpush("join:history", "%s => %s" % (oldpop, newpop))
  pipe.execute()
  return (newpop, oldpop)
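# Usage sketch (assumption): the (pop1, pop2) pairs that handle_same_pop_link()
# queues on 'delayed_job:popjoins' are drained later and applied with
# join_pops(). They were pushed as Python tuples, so they come back in string
# form and are parsed here with eval(), matching the style used for link data
# above.
def drain_popjoins(r):
  while True:
    item = r.rpop("delayed_job:popjoins")
    if item is None:
      break
    pop1, pop2 = eval(item)
    join_pops(r, pop1, pop2)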