Example #1
def node_remove(request, node):
  """
  Removes a node after displaying a confirmation page.
  """
  if not node.is_current_owner(request):
    raise Http404
  
  if request.method == 'POST':
    # Generate node removed event and remove node
    Event.create_event(node, EventCode.NodeRemoved, '', EventSource.NodeDatabase,
                       data = 'Removed by: %s' % request.user.username)
    node.delete()
    return HttpResponseRedirect(reverse("my_nodes"))
  
  return render_to_response('nodes/remove.html',
    { 'node' : node },
    context_instance = RequestContext(request)
  )
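
The view above receives an already-resolved node object rather than a primary key, which implies a URL-layer wrapper performs the lookup. A minimal sketch of such a wrapper, assuming a hypothetical node_argument decorator and a 'node_pk' URL keyword (both names are assumptions, not part of the original code):

def node_argument(view):
  """
  Hypothetical decorator: resolves the 'node_pk' URL keyword argument into
  a Node instance and raises Http404 when no such node exists.
  """
  def wrapper(request, node_pk, *args, **kwargs):
    try:
      node = Node.objects.get(pk = node_pk)
    except Node.DoesNotExist:
      raise Http404
    return view(request, node, *args, **kwargs)
  return wrapper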
Example #2
def check_network_status():
  """
  Performs the network status check.
  """
  # Initialize the state of nodes and subnets, remove out-of-date AP clients and graph items
  Node.objects.all().update(visible = False)
  Subnet.objects.all().update(visible = False)
  Link.objects.all().update(visible = False)
  APClient.objects.filter(last_update__lt = datetime.now() - timedelta(minutes = 11)).delete()

  # Reset some states
  NodeWarning.objects.all().update(source = EventSource.Monitor, dirty = False)
  Node.objects.all().update(warnings = False, conflicting_subnets = False)

  # Fetch routing tables from OLSR
  try:
    nodes, hna = wifi_utils.get_tables(settings.MONITOR_OLSR_HOST)
  except TypeError:
    logging.error("Unable to fetch routing tables from '%s'!" % settings.MONITOR_OLSR_HOST)
    return

  # Ping nodes present in the database and visible in OLSR
  dbNodes = {}
  nodesToPing = []
  for nodeIp in nodes.keys():
    try:
      # Try to get the node from the database
      n = Node.get_exclusive(ip = nodeIp)
      n.visible = True
      n.peers = len(nodes[nodeIp].links)

      # If we have succeeded, add to list (if not invalid)
      if not n.is_invalid():
        if n.awaiting_renumber:
          # Reset any status from awaiting renumber to invalid
          for notice in n.renumber_notices.all():
            try:
              rn = Node.objects.get(ip = notice.original_ip)
              if rn.status == NodeStatus.AwaitingRenumber:
                rn.status = NodeStatus.Invalid
                rn.node_type = NodeType.Unknown
                rn.awaiting_renumber = False
                rn.save()
            except Node.DoesNotExist:
              pass
            
            notice.delete()
          
          n.awaiting_renumber = False
          n.save()
        
        nodesToPing.append(nodeIp)
      else:
        n.last_seen = datetime.now()
        n.peers = len(nodes[nodeIp].links)
        
        # Create a warning since node is not registered
        NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor)
        n.save()
      
      dbNodes[nodeIp] = n
    except Node.DoesNotExist:
      # Node does not exist, create an invalid entry for it
      n = Node(ip = nodeIp, status = NodeStatus.Invalid, last_seen = datetime.now())
      n.visible = True
      n.node_type = NodeType.Unknown
      n.peers = len(nodes[nodeIp].links)
      
      # Check if there are any renumber notices for this IP address
      try:
        notice = RenumberNotice.objects.get(original_ip = nodeIp)
        n.status = NodeStatus.AwaitingRenumber
        n.node_type = notice.node.node_type
        n.awaiting_renumber = True
      except RenumberNotice.DoesNotExist:
        pass
      
      n.save(force_insert = True)
      dbNodes[nodeIp] = n

      # Create an event and append a warning since an unknown node has appeared
      NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor)
      Event.create_event(n, EventCode.UnknownNodeAppeared, '', EventSource.Monitor)
  
  # Add a warning to all nodes that have been stuck in renumbering state for over a week
  for node in Node.objects.filter(renumber_notices__renumbered_at__lt = datetime.now() - timedelta(days = 7)):
    NodeWarning.create(node, WarningCode.LongRenumber, EventSource.Monitor)
    node.save()
  
  # Mark invisible nodes as down
  for node in Node.objects.exclude(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber)):
    oldStatus = node.status

    if node.ip not in dbNodes:
      if node.status == NodeStatus.New:
        node.status = NodeStatus.Pending
      elif node.status != NodeStatus.Pending:
        node.status = NodeStatus.Down
      node.save()

    if oldStatus in (NodeStatus.Up, NodeStatus.Visible, NodeStatus.Duped) and node.status == NodeStatus.Down:
      Event.create_event(node, EventCode.NodeDown, '', EventSource.Monitor)
      
      # Invalidate uptime credit for this node
      node.uptime_last = None
      node.save()
  
  # Generate timestamp and snapshot identifier
  timestamp = datetime.now()
  snapshot_id = int(time.time())
  
  # Setup all node peerings
  for nodeIp, node in nodes.iteritems():
    n = dbNodes[nodeIp]
    n.redundancy_link = False
    links = []
    
    # Find old VPN server peers
    old_vpn_peers = set([p.dst for p in n.get_peers().filter(dst__vpn_server = True)])

    for peerIp, lq, ilq, etx, vtime in node.links:
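      # Each OLSR link reports link quality (lq), inverse link quality (ilq),
      # expected transmission count (etx) and a validity time (vtime).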
      try:
        l = Link.objects.get(src = n, dst = dbNodes[peerIp])
      except Link.DoesNotExist:
        l = Link(src = n, dst = dbNodes[peerIp])
      
      l.lq = float(lq)
      l.ilq = float(ilq)
      l.etx = float(etx)
      l.vtime = vtime
      l.visible = True
      l.save()
      links.append(l)
      
      # Check if any of the peers has never peered with us before
      if n.is_adjacency_important() and l.dst.is_adjacency_important() and not n.peer_history.filter(pk = l.dst.pk).count():
        n.peer_history.add(l.dst)
        Event.create_event(n, EventCode.AdjacencyEstablished, '', EventSource.Monitor,
                           data = 'Peer node: %s' % l.dst, aggregate = False)
        Event.create_event(l.dst, EventCode.AdjacencyEstablished, '', EventSource.Monitor,
                           data = 'Peer node: %s' % n, aggregate = False)

      # Check if we have a peering with any VPN servers
      if l.dst.vpn_server:
        n.redundancy_link = True
    
    if not n.is_invalid():
      # Determine new VPN server peers
      new_vpn_peers = set([p.dst for p in n.get_peers().filter(visible = True, dst__vpn_server = True)])
      
      if old_vpn_peers != new_vpn_peers:
        for p in old_vpn_peers:
          if p not in new_vpn_peers:
            # Redundancy loss has occurred
            Event.create_event(n, EventCode.RedundancyLoss, '', EventSource.Monitor,
                               data = 'VPN server: %s' % p)
        
        for p in new_vpn_peers:
          if p not in old_vpn_peers:
            # Redundancy restoration has occurred
            Event.create_event(n, EventCode.RedundancyRestored, '', EventSource.Monitor,
                               data = 'VPN server: %s' % p)
      
      # Issue a warning when node requires peering but has none
      if n.redundancy_req and not n.redundancy_link:
        NodeWarning.create(n, WarningCode.NoRedundancy, EventSource.Monitor)
    
    n.save()
    
    # Archive topology information
    data_archive.record_topology_entry(snapshot_id, timestamp, n, links)

  # Update valid subnet status in the database
  for nodeIp, subnets in hna.iteritems():
    if nodeIp not in dbNodes:
      continue

    for subnet in subnets:
      subnet, cidr = subnet.split("/")
      
      try:
        s = Subnet.objects.get(node__ip = nodeIp, subnet = subnet, cidr = int(cidr))
        s.last_seen = datetime.now()
        s.visible = True
      except Subnet.DoesNotExist:
        s = Subnet(node = dbNodes[nodeIp], subnet = subnet, cidr = int(cidr), last_seen = datetime.now())
        s.visible = True
        s.allocated = False
      
      # Save previous subnet status for later use
      old_status = s.status
      
      # Set status according to allocation flag
      if s.allocated:
        s.status = SubnetStatus.AnnouncedOk
      else:
        s.status = SubnetStatus.NotAllocated
      
      # Check if this is a more specific prefix announce for an allocated prefix
      if s.is_more_specific() and not s.allocated:
        s.status = SubnetStatus.Subset
      
      # Check if this is a hijack
      try:
        origin = Subnet.objects.ip_filter(
          # Subnet overlaps with another one
          ip_subnet__contains = '%s/%s' % (subnet, cidr)
        ).exclude(
          # Of another node (= filter all subnets belonging to current node)
          node = s.node
        ).get(
          # That is allocated and visible
          allocated = True,
          visible = True
        )
        s.status = SubnetStatus.Hijacked
      except Subnet.DoesNotExist:
        pass
      
      # Generate an event if status has changed
      if old_status != s.status and s.status == SubnetStatus.Hijacked:
        Event.create_event(s.node, EventCode.SubnetHijacked, '', EventSource.Monitor,
                           data = 'Subnet: %s/%s\n  Allocated to: %s' % (s.subnet, s.cidr, origin.node))
      
      # Flag node entry with warnings flag for unregistered announces
      if not s.is_properly_announced():
        if s.node.border_router and not s.is_from_known_pool():
          # TODO when we have peering announce registration this should first check if
          #      the subnet is registered as a peering
          s.status = SubnetStatus.Peering
        
        if not s.node.border_router or s.status == SubnetStatus.Hijacked or s.is_from_known_pool():
          # Add a warning message for unregistered announced subnets
          NodeWarning.create(s.node, WarningCode.UnregisteredAnnounce, EventSource.Monitor)
          s.node.save()
      
      s.save()
      
      # Detect subnets that cause conflicts and raise warning flags for all involved
      # nodes
      if s.is_conflicting():
        NodeWarning.create(s.node, WarningCode.AnnounceConflict, EventSource.Monitor)
        s.node.conflicting_subnets = True
        s.node.save()
        
        for cs in s.get_conflicting_subnets():
          NodeWarning.create(cs.node, WarningCode.AnnounceConflict, EventSource.Monitor)
          cs.node.conflicting_subnets = True
          cs.node.save()
  
  # Remove subnets that were hijacked but are not visible anymore
  for s in Subnet.objects.filter(status = SubnetStatus.Hijacked, visible = False):
    Event.create_event(s.node, EventCode.SubnetRestored, '', EventSource.Monitor, data = 'Subnet: %s/%s' % (s.subnet, s.cidr))
    s.delete()
  
  # Remove (or change their status) subnets that are not visible
  Subnet.objects.filter(allocated = False, visible = False).delete()
  Subnet.objects.filter(allocated = True, visible = False).update(status = SubnetStatus.NotAnnounced)
  
  for subnet in Subnet.objects.filter(status = SubnetStatus.NotAnnounced, node__visible = True):
    NodeWarning.create(subnet.node, WarningCode.OwnNotAnnounced, EventSource.Monitor)
    subnet.node.save()
  
  # Remove invisible unknown nodes
  for node in Node.objects.filter(status = NodeStatus.Invalid, visible = False).all():
    # Create an event since an unknown node has disappeared
    Event.create_event(node, EventCode.UnknownNodeDisappeared, '', EventSource.Monitor)
  
  Node.objects.filter(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber), visible = False).delete()
  
  # Remove invisible links
  Link.objects.filter(visible = False).delete()
  
  # Add nodes to topology map and generate output
  if not getattr(settings, 'MONITOR_DISABLE_GRAPHS', None):
    # Only generate topology when graphing is not disabled
    topology = DotTopologyPlotter()
    for node in dbNodes.values():
      topology.addNode(node)
    topology.save(os.path.join(settings.GRAPH_DIR, 'network_topology.png'), os.path.join(settings.GRAPH_DIR, 'network_topology.dot'))

  # Ping the nodes to prepare information for later node processing
  varsize_results = {}
  results, dupes = wifi_utils.ping_hosts(10, nodesToPing)
  for packet_size in (100, 500, 1000, 1480):
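    # ping_hosts is assumed to take an ICMP payload size here; subtracting the
    # 8-byte ICMP header makes the probe match the nominal packet size.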
    r, d = wifi_utils.ping_hosts(10, nodesToPing, packet_size - 8)
    for node_ip in nodesToPing:
      varsize_results.setdefault(node_ip, []).append(r[node_ip][3] if node_ip in r else None)
  
  if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None):
    # Multiprocessing is disabled (the MONITOR_DISABLE_MULTIPROCESSING option is usually
    # used for debugging purposes where a single process is preferred)
    for node_ip in nodesToPing:
      process_node(node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip))
    
    # Commit the transaction here since we do everything in the same session
    transaction.commit()
  else:
    # We MUST commit the current transaction here, because we will be processing
    # some transactions in parallel and must ensure that this transaction that has
    # modified the nodes is committed. Otherwise this will deadlock!
    transaction.commit()
    
    worker_results = []
    for node_ip in nodesToPing:
      worker_results.append(
        WORKER_POOL.apply_async(process_node, (node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip)))
      )
    
    # Wait for all workers to finish processing
    objects = {}
    for result in worker_results:
      try:
        k, v = result.get()
        objects[k] = v
      except Exception:
        logging.warning(format_exc())
    
    # When GC debugging is enabled make some additional computations
    if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
      global _MAX_GC_OBJCOUNT
      objcount = sum(objects.values())
      
      if '_MAX_GC_OBJCOUNT' not in globals():
        _MAX_GC_OBJCOUNT = objcount
      
      logging.debug("GC object count: %d %s" % (objcount, "!M" if objcount > _MAX_GC_OBJCOUNT else ""))
      _MAX_GC_OBJCOUNT = max(_MAX_GC_OBJCOUNT, objcount)
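
check_network_status hands per-node processing to WORKER_POOL.apply_async, a call signature that matches multiprocessing.Pool. A minimal sketch of how such a pool might be created at module load, assuming a hypothetical MONITOR_WORKERS setting for the pool size:

import multiprocessing

# Assumption: worker count comes from an illustrative MONITOR_WORKERS setting,
# falling back to the number of available CPUs.
WORKER_POOL = multiprocessing.Pool(
  processes = getattr(settings, 'MONITOR_WORKERS', multiprocessing.cpu_count())
)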
Example #3
def process_node(node_ip, ping_results, is_duped, peers, varsize_results):
  """
  Processes a single node.

  @param node_ip: Node's IP address
  @param ping_results: Results obtained from ICMP ECHO tests
  @param is_duped: True if duplicate echos received
  @param peers: Peering info from routing daemon
  @param varsize_results: Results of ICMP ECHO tests with variable payloads
  """
  transaction.set_dirty()
  
  try:
    n = Node.get_exclusive(ip = node_ip)
  except Node.DoesNotExist:
    # This might happen when we were in the middle of a renumbering and
    # did not yet have access to the node. Then after the node has been
    # renumbered we gain access, but the IP has been changed. In this
    # case we must ignore processing of this node.
    return
  
  grapher = graphs.Grapher(n)
  oldStatus = n.status
  old_last_seen = n.last_seen

  # Determine node status
  if ping_results is not None:
    n.status = NodeStatus.Up
    n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results
    
    # Add RTT graph
    grapher.add_graph(GraphType.RTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max)

    # Add uptime credit
    if n.uptime_last:
      n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds
    
    n.uptime_last = datetime.now()
  else:
    n.status = NodeStatus.Visible
  
  # Measure packet loss with different packet sizes and generate a graph
  if ping_results is not None and varsize_results is not None:
    losses = [n.pkt_loss] + varsize_results
    grapher.add_graph(GraphType.PacketLoss, 'Packet Loss', 'packetloss', *losses)
  
  if is_duped:
    n.status = NodeStatus.Duped
    NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor)

  # Generate status change events
  if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible):
    if oldStatus in (NodeStatus.New, NodeStatus.Pending):
      n.first_seen = datetime.now()
      if n.node_type == NodeType.Wireless:
        generate_new_node_tweet(n)

    Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor)
  elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped:
    Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor)
  
  # Add olsr peer count graph
  grapher.add_graph(GraphType.OlsrPeers, 'Routing Peers', 'olsrpeers', n.peers)

  # Add LQ/ILQ/ETX graphs
  if n.peers > 0:
    etx_avg = lq_avg = ilq_avg = 0.0
    for peer in n.get_peers():
      lq_avg += float(peer.lq)
      ilq_avg += float(peer.ilq)
      etx_avg += float(peer.etx)
    
    lq_graph = grapher.add_graph(GraphType.LQ, 'Average Link Quality', 'lq', ilq_avg / n.peers, lq_avg / n.peers)
    etx_graph = grapher.add_graph(GraphType.ETX, 'Average ETX', 'etx', etx_avg / n.peers)

    for peer in n.get_peers():
      # Link quality
      grapher.add_graph(
        GraphType.LQ,
        'Link Quality to {0}'.format(peer.dst),
        'lq_peer_{0}'.format(peer.dst.pk),
        peer.ilq,
        peer.lq,
        name = peer.dst.ip,
        parent = lq_graph
      )
      
      # ETX
      grapher.add_graph(
        GraphType.ETX,
        'ETX to {0}'.format(peer.dst),
        'etx_peer_{0}'.format(peer.dst.pk),
        peer.etx,
        name = peer.dst.ip,
        parent = etx_graph
      )

  n.last_seen = datetime.now()
  
  # Attempt to fetch data from nodewatcher
  info = nodewatcher.fetch_node_info(node_ip)
  
  # XXX This is an ugly hack for server-type nodes, but it will be fixed by modularization
  #     rewrite anyway, so no need to make it nice
  if n.node_type == NodeType.Server and info is not None and 'iface' in info:
    try:
      # Record interface traffic statistics for all interfaces
      for iid, iface in info['iface'].iteritems():
        grapher.add_graph(
          GraphType.Traffic,
          'Traffic - {0}'.format(iid),
          'traffic_{0}'.format(iid),
          iface['up'],
          iface['down'],
          name = iid
        )
    except Exception:
      pass
    
    info = None
  
  # Check if we have fetched nodewatcher data
  if info is not None and 'general' in info:
    try:
      oldUptime = n.uptime or 0
      oldChannel = n.channel or 0
      oldVersion = n.firmware_version
      n.firmware_version = info['general']['version']
      n.local_time = safe_date_convert(info['general']['local_time'])
      n.bssid = info['wifi']['bssid']
      n.essid = info['wifi']['essid']
      n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency'])
      n.clients = 0
      n.uptime = safe_uptime_convert(info['general']['uptime'])
      
      # Treat missing firmware version file as NULL version
      if n.firmware_version == "missing":
        n.firmware_version = None
      
      # Validate BSSID and ESSID
      if n.bssid != "02:CA:FF:EE:BA:BE":
        NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor)
      
      try:
        if n.essid != n.configured_essid:
          NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor)
      except Project.DoesNotExist:
        pass
      
      if 'uuid' in info['general']:
        n.reported_uuid = info['general']['uuid']
        if n.reported_uuid and n.reported_uuid != n.uuid:
          NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor)

      if oldVersion != n.firmware_version:
        Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n  New version: %s' % (oldVersion, n.firmware_version))

      if oldUptime > n.uptime:
        Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n  New uptime: %s' % (oldUptime, n.uptime))
        
        # Setup reboot mode for further graphs as we now know the node has
        # been rebooted
        grapher.enable_reboot_mode(n.uptime, old_last_seen)

      if oldChannel != n.channel and oldChannel != 0:
        Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n  New channel: %s' % (oldChannel, n.channel))
      
      try:
        if n.channel != n.profile.channel:
          NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor)
      except Profile.DoesNotExist:
        pass

      if n.has_time_sync_problems():
        NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor)

      if 'errors' in info['wifi']:
        error_count = safe_int_convert(info['wifi']['errors'])
        if error_count != n.wifi_error_count and error_count > 0:
          Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n  New count: %s' % (n.wifi_error_count, error_count))
        
        n.wifi_error_count = error_count
      
      if 'net' in info:
        loss_count = safe_int_convert(info['net']['losses']) if 'losses' in info['net'] else 0
        if loss_count != n.loss_count and loss_count > 1:
          Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n  New count: %s' % (n.loss_count, loss_count))
        
        n.loss_count = loss_count
        
        # Check VPN configuration 
        if 'vpn' in info['net']:
          n.vpn_mac = info['net']['vpn']['mac'] or None
          try:
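            # Normalize the reported upload limit to kbit: values such as
            # '512Kbit' keep the number as-is, while plain bit values are
            # divided by 1000 (interpretation assumed from the suffix handling).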
            offset = -3
            unit = 1000
            if 'Kbit' in info['net']['vpn']['upload_limit']:
              offset = -4
              unit = 1
            
            upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit
          except TypeError:
            upload_limit = None
          
          if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf:
            NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor)
          
          try:
            if upload_limit != n.profile.vpn_egress_limit:
              NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor)
          except Profile.DoesNotExist:
            pass
      
      # Parse nodogsplash client information
      oldNdsStatus = n.captive_portal_status
      if 'nds' in info:
        if 'down' in info['nds'] and info['nds']['down'] == '1':
          n.captive_portal_status = False
          
          # Create a node warning when captive portal is down and the node has it
          # selected in its image generator profile
          try:
            if n.project.captive_portal and n.has_client_subnet():
              NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor)
          except (Project.DoesNotExist, Profile.DoesNotExist):
            pass
        else:
          n.captive_portal_status = True

          for cid, client in info['nds'].iteritems():
            if not cid.startswith('client'):
              continue

            try:
              c = APClient.objects.get(node = n, ip = client['ip'])
            except APClient.DoesNotExist:
              c = APClient(node = n)
              n.clients_so_far += 1
            
            n.clients += 1
            c.ip = client['ip']
            c.connected_at = safe_date_convert(client['added_at'])
            c.uploaded = safe_int_convert(client['up'])
            c.downloaded = safe_int_convert(client['down'])
            c.last_update = datetime.now()
            c.save()
      else:
        n.captive_portal_status = True
      
      # Check for captive portal status change
      if n.has_client_subnet():
        if oldNdsStatus and not n.captive_portal_status:
          Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor)
        elif not oldNdsStatus and n.captive_portal_status:
          Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor)

      # Generate a graph for number of wifi cells
      if 'cells' in info['wifi']:
        grapher.add_graph(GraphType.WifiCells, 'Nearby WiFi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0)

      # Update node's MAC address on wifi iface
      if 'mac' in info['wifi']:
        n.wifi_mac = info['wifi']['mac']
      
      # Update node's RTS and fragmentation thresholds
      if 'rts' in info['wifi'] and 'frag' in info['wifi']:
        n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347
        n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347
      
      # Check node's multicast rate
      if 'mcast_rate' in info['wifi']:
        rate = safe_int_convert(info['wifi']['mcast_rate'])
        if rate != 5500:
          NodeWarning.create(n, WarningCode.McastRateMismatch, EventSource.Monitor)
      
      # Check node's wifi bitrate, level and noise
      if 'signal' in info['wifi']:
        bitrate = safe_int_convert(info['wifi']['bitrate'])
        signal = safe_dbm_convert(info['wifi']['signal'])
        noise = safe_dbm_convert(info['wifi']['noise'])
        snr = float(signal) - float(noise)
        
        grapher.add_graph(GraphType.WifiBitrate, 'WiFi Bitrate', 'wifibitrate', bitrate)
        grapher.add_graph(GraphType.WifiSignalNoise, 'WiFi Signal/Noise', 'wifisignalnoise', signal, noise)
        grapher.add_graph(GraphType.WifiSNR, 'WiFi Signal/Noise Ratio', 'wifisnr', snr)
      
      # Check for IP shortage
      wifi_subnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True)
      if wifi_subnet and n.clients > max(0, ipcalc.Network(wifi_subnet[0].subnet, wifi_subnet[0].cidr).size() - 4):
        Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n  Clients: %s' % (wifi_subnet[0], n.clients))
        NodeWarning.create(n, WarningCode.IPShortage, EventSource.Monitor)
      
      # Fetch DHCP leases when available
      lease_count = 0
      if 'dhcp' in info:
        per_subnet_counts = {}
        
        for cid, client in info['dhcp'].iteritems():
          if not cid.startswith('client'):
            continue
          
          # Determine which subnet this thing belongs to
          client_subnet = n.subnet_set.ip_filter(ip_subnet__contains = client['ip'])
          if client_subnet:
            client_subnet = client_subnet[0]
            per_subnet_counts[client_subnet] = per_subnet_counts.get(client_subnet, 0) + 1
          else:
            # TODO Subnet is not announced by this node - potential problem, but ignore for now
            pass
          
          lease_count += 1
        
        # Check for IP shortage
        for client_subnet, count in per_subnet_counts.iteritems():
          if count > ipcalc.Network(client_subnet.subnet, client_subnet.cidr).size() - 4:
            Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: {0}\n  Leases: {1}'.format(client_subnet, count))
            NodeWarning.create(n, WarningCode.IPShortage, EventSource.Monitor)
      
      # Generate a graph for number of clients
      if 'nds' in info or lease_count > 0:
        grapher.add_graph(GraphType.Clients, 'Connected Clients', 'clients', n.clients, lease_count)
      
      # Record interface traffic statistics for all interfaces
      for iid, iface in info['iface'].iteritems():
        if iid not in ('wifi0', 'wmaster0'):
          # Check mappings for known wifi interfaces so we can handle hardware changes while
          # the node is up and not generate useless intermediate graphs
          try:
            if n.profile:
              iface_wifi = n.profile.template.iface_wifi
              if Template.objects.filter(iface_wifi = iid).count() >= 1:
                iid = iface_wifi
          except Profile.DoesNotExist:
            pass
          
          grapher.add_graph(GraphType.Traffic, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down'], name = iid)
      
      # Generate load average statistics
      if 'loadavg' in info['general']:
        n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg'])
        grapher.add_graph(GraphType.LoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min)
        grapher.add_graph(GraphType.NumProc, 'Number of Processes', 'numproc', n.numproc)

      # Generate free memory statistics
      if 'memfree' in info['general']:
        n.memfree = safe_int_convert(info['general']['memfree'])
        buffers = safe_int_convert(info['general'].get('buffers', 0))
        cached = safe_int_convert(info['general'].get('cached', 0))
        grapher.add_graph(GraphType.MemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached)

      # Generate solar statistics when available
      if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]):
        states = {
          'boost'       : 1,
          'equalize'    : 2,
          'absorption'  : 3,
          'float'       : 4
        }
        
        for key, value in info['solar'].iteritems():
          if not value.strip():
            info['solar'][key] = None
        
        grapher.add_graph(GraphType.Solar, 'Solar Monitor', 'solar',
          info['solar']['batvoltage'],
          info['solar']['solvoltage'],
          info['solar']['charge'],
          states.get(info['solar']['state']),
          info['solar']['load']
        )
      
      # Generate statistics for environmental data
      if 'environment' in info:
        for key, value in info['environment'].iteritems():
          if not key.startswith('sensor'):
            continue
          if 'temp' in value:
            temp = safe_float_convert(value['temp'])
            serial = value['serial']
            grapher.add_graph(GraphType.Temperature, 'Temperature ({0})'.format(serial), 'temp_{0}'.format(serial), temp, name = serial)

      # XXX UGLY HACK: Some random voltage reports
      if 'voltage' in info:
        serial = info['voltage']['serial']
        voltages = [safe_float_convert(info['voltage'][x].strip()) for x in '1234']
        multipliers = [safe_int_convert(info['voltage']['%sm' % x].strip()) for x in '1234']
        results = []
        for voltage, multiplier in zip(voltages, multipliers):
          if voltage is not None:
            results.append(voltage * multiplier)
          else:
            results.append(None)
        
        grapher.add_graph(GraphType.Voltage, 'Voltage ({0})'.format(serial), 'volt_{0}'.format(serial), *results, name = serial)

      # Check for installed package versions (every hour)
      try:
        last_pkg_update = n.installedpackage_set.all()[0].last_update
      except IndexError:
        last_pkg_update = None

      if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1):
        packages = nodewatcher.fetch_installed_packages(n.ip) or {}

        # Remove removed packages and update existing package versions
        for package in n.installedpackage_set.all():
          if package.name not in packages:
            package.delete()
          else:
            package.version = packages[package.name]
            package.last_update = datetime.now()
            package.save()
            del packages[package.name]

        # Add added packages
        for packageName, version in packages.iteritems():
          package = InstalledPackage(node = n)
          package.name = packageName
          package.version = version
          package.last_update = datetime.now()
          package.save()
      
      # Check if all selected optional packages are present in package listing
      try:
        missing_packages = []
        for package in n.profile.optional_packages.all():
          for pname in package.name.split():
            if n.installedpackage_set.filter(name = pname).count() == 0:
              missing_packages.append(pname)
        
        if missing_packages:
          NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages)))
      except Profile.DoesNotExist:
        pass
      
      # Check if DNS works
      if 'dns' in info:
        old_dns_works = n.dns_works
        n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0'
        if not n.dns_works:
          NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor)

        if old_dns_works != n.dns_works:
          # Generate a proper event when the state changes
          if n.dns_works:
            Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor)
          else:
            Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor)
    except Exception:
      logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip))
      logging.warning(format_exc())
      NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor)

  n.save()
  
  # When GC debugging is enabled perform some more work
  if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
    gc.collect()
    return os.getpid(), len(gc.get_objects())
  
  return None, None
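
process_node leans on a family of safe_*_convert helpers; judging from uses such as safe_int_convert(info['wifi']['rts']) or 2347, they return None when conversion fails. A plausible sketch of that contract for the integer variant (the original helper is not shown in these examples):

def safe_int_convert(value):
  """
  Converts a value to an integer, returning None when the value is missing
  or malformed (sketch; assumed behavior).
  """
  try:
    return int(value)
  except (ValueError, TypeError):
    return None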
Example #4
def check_events():
  """
  Check events that need resend.
  """
  transaction.set_dirty()
  Event.post_events_that_need_resend()
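
check_network_status and check_events are periodic tasks; the scheduler itself is not part of these examples. An illustrative driver loop, with an assumed five-minute interval:

import time

def monitor_loop(poll_interval = 300):
  """
  Illustrative only: repeatedly runs the monitoring passes; the real
  scheduling mechanism is not shown in these examples.
  """
  while True:
    check_network_status()
    check_events()
    time.sleep(poll_interval)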
Example #5
  def save(self, node, user):
    """
    Completes node data update.
    """
    self.requires_firmware_update = False
    ip = self.cleaned_data.get('ip')
    oldName = node.name
    oldProject = node.project
    
    # Update node metadata
    node.name = self.cleaned_data.get('name').lower()
    node.owner = self.cleaned_data.get('owner')
    node.location = self.cleaned_data.get('location')
    node.geo_lat = self.cleaned_data.get('geo_lat')
    node.geo_long = self.cleaned_data.get('geo_long')
    node.ant_external = self.cleaned_data.get('ant_external')
    node.ant_polarization = self.cleaned_data.get('ant_polarization')
    node.ant_type = self.cleaned_data.get('ant_type')
    node.project = self.cleaned_data.get('project')
    node.node_type = self.cleaned_data.get('node_type')
    node.notes = self.cleaned_data.get('notes')
    node.url = self.cleaned_data.get('url')
    node.redundancy_req = self.cleaned_data.get('redundancy_req')

    if user.is_staff:
      node.system_node = self.cleaned_data.get('system_node')
      node.vpn_server = self.cleaned_data.get('vpn_server')
    
    if user.is_staff or getattr(settings, 'NONSTAFF_BORDER_ROUTERS', False):
      node.border_router = self.cleaned_data.get('border_router')
    
    node.save()

    # Update node traffic control policy
    tc_ingress = self.cleaned_data.get('tc_ingress')
    if tc_ingress:
      Policy.set_policy(node, node.vpn_mac_conf, PolicyAction.Shape, tc_ingress, PolicyFamily.Ethernet)
    else:
      try:
        node.gw_policy.get(addr = node.vpn_mac_conf, family = PolicyFamily.Ethernet).delete()
      except Policy.DoesNotExist:
        pass

    # Update DNS records on name changes
    if oldName != node.name or oldProject != node.project:
      Record.update_for_node(node, old_name = oldName, old_project = oldProject)

      # Generate node renamed event
      if oldName != node.name:
        Event.create_event(node, EventCode.NodeRenamed, '', EventSource.NodeDatabase, data = 'Old name: %s\n  New name: %s' % (oldName, node.name))

    # Update node profile for image generator
    try:
      profile = node.profile
    except Profile.DoesNotExist:
      profile = None

    if self.cleaned_data.get('template'):
      if not profile:
        profile = Profile(node = node, template = self.cleaned_data.get('template'))
      
      # Handle potential hardware changes
      new_template = self.cleaned_data.get('template')
      if profile.template != new_template:
        # Rename traffic graphs to preserve history
        node.rename_graphs(GraphType.Traffic, profile.template.iface_wifi, new_template.iface_wifi)
      
      def set_and_check(**kwargs):
        for key, value in kwargs.iteritems():
          field = getattr(profile, key)
          meta = profile._meta.get_field(key)
          prep = meta.get_db_prep_value(value)
          
          if isinstance(meta, models.ManyToManyField):
            if set([m.pk for m in field.all()]) != set([m.pk for m in value]):
              self.requires_firmware_update = True
          elif field != prep:
            self.requires_firmware_update = True
          
          setattr(profile, key, value)
      
      if not self.cleaned_data.get('channel'):
        set_and_check(channel = node.project.channel)
      else:
        set_and_check(channel = self.cleaned_data.get('channel'))
      
      set_and_check(
        template = self.cleaned_data.get('template'),
        root_pass = self.cleaned_data.get('root_pass'),
        use_vpn = self.cleaned_data.get('use_vpn'),
        antenna = self.cleaned_data.get('ant_conn') or 0,
        lan_bridge = self.cleaned_data.get('lan_bridge') or False,
        wan_dhcp = self.cleaned_data.get('wan_dhcp'),
        wan_ip = self.cleaned_data.get('wan_ip'),
        wan_cidr = self.cleaned_data.get('wan_cidr'),
        wan_gw = self.cleaned_data.get('wan_gw')
      )
      
      if self.cleaned_data.get('tc_egress'):
        set_and_check(vpn_egress_limit = self.cleaned_data.get('tc_egress').bandwidth)
      else:
        set_and_check(vpn_egress_limit = None)
 
      profile.save()

      set_and_check(optional_packages = self.cleaned_data.get('optional_packages'))
      profile.save()
    elif profile and (settings.IMAGE_GENERATOR_ENABLED or settings.DEBUG):
      profile.delete()
    
    # Register the node name
    NodeNames(name = node.name, node = node).save()
    
    return node
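
This save method takes the target node and the acting user explicitly, so a view would call it after form validation. A hypothetical usage sketch (the UpdateNodeForm name is an assumption):

form = UpdateNodeForm(request.POST)
if form.is_valid():
  node = form.save(node, request.user)
  if form.requires_firmware_update:
    # The caller may now prompt the user to regenerate the firmware image.
    pass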
Example #6
  def save(self, user):
    """
    Completes node registration.
    """
    ip = self.cleaned_data.get('ip')
    project = self.cleaned_data.get('project')
    pool = self.cleaned_data.get('pool')
    prefix_len = self.cleaned_data.get('prefix_len')
    if prefix_len == 0:
      prefix_len = None
    subnet = None

    if not ip:
      # Assign a new IP address from the selected pool (if no IP address selected)
      node = Node()
      fresh_subnet = pool.allocate_subnet(prefix_len)
      net = ipcalc.Network(fresh_subnet.network, fresh_subnet.cidr)
      node.ip = str(net.host_first())
      
      # Create a new subnet for this node or use an existing one if available
      subnet = Subnet(node = node, subnet = fresh_subnet.network, cidr = fresh_subnet.cidr)
      subnet.allocated = True
      subnet.allocated_at = datetime.now()
      subnet.status = SubnetStatus.NotAnnounced
    else:
      # When prefix is not available we should use /32
      if prefix_len is None:
        prefix_len = 32
      
      net = ipcalc.Network(ip, prefix_len)
      sub_ip = str(net.network())
      
      # Check if this node already exists
      try:
        node = Node.objects.get(ip = str(net.host_first()))
      except Node.DoesNotExist:
        node = Node(ip = str(net.host_first()))
      
      # Reserve existing IP in the pool
      pool.reserve_subnet(sub_ip, prefix_len)
      try:
        subnet = Subnet.objects.get(node = node, subnet = sub_ip, cidr = prefix_len)
        subnet.status = SubnetStatus.AnnouncedOk
      except Subnet.DoesNotExist:
        subnet = Subnet(node = node, subnet = sub_ip, cidr = prefix_len)
        subnet.status = SubnetStatus.NotAnnounced
      
      subnet.allocated = True
      subnet.allocated_at = datetime.now()

    # Update node metadata
    node.name = self.cleaned_data.get('name').lower()
    node.project = project
    node.owner = user
    node.location = self.cleaned_data.get('location')
    node.geo_lat = self.cleaned_data.get('geo_lat')
    node.geo_long = self.cleaned_data.get('geo_long')
    node.ant_external = self.cleaned_data.get('ant_external')
    node.ant_polarization = self.cleaned_data.get('ant_polarization')
    node.ant_type = self.cleaned_data.get('ant_type')
    node.node_type = self.cleaned_data.get('node_type')
    node.notes = self.cleaned_data.get('notes')
    node.url = self.cleaned_data.get('url')
    node.redundancy_req = self.cleaned_data.get('redundancy_req')
    node.warnings = False

    for i in xrange(10):
      try:
        mac = gen_mac_address()
        Node.objects.get(vpn_mac_conf = mac)
      except Node.DoesNotExist: 
        node.vpn_mac_conf = mac
        break
    else:
      raise Exception("unable to generate unique MAC")

    if user.is_staff:
      node.system_node = self.cleaned_data.get('system_node')
      node.vpn_server = self.cleaned_data.get('vpn_server')
    
    if user.is_staff or getattr(settings, 'NONSTAFF_BORDER_ROUTERS', False):
      node.border_router = self.cleaned_data.get('border_router') 
    
    node.status = NodeStatus.New
    node.save()

    # Create node traffic control policy
    tc_ingress = self.cleaned_data.get('tc_ingress')
    if tc_ingress:
      Policy.set_policy(node, node.vpn_mac_conf, PolicyAction.Shape, tc_ingress, PolicyFamily.Ethernet)
    
    # Create node profile for image generator
    if self.cleaned_data.get('template'):
      profile = Profile(node = node, template = self.cleaned_data.get('template'))
      if self.cleaned_data.get('channel') in ('', "0", None):
        profile.channel = node.project.channel
      else:
        profile.channel = self.cleaned_data.get('channel')
      profile.root_pass = self.cleaned_data.get('root_pass')
      profile.use_vpn = self.cleaned_data.get('use_vpn')
      profile.antenna = self.cleaned_data.get('ant_conn') or 0
      profile.lan_bridge = self.cleaned_data.get('lan_bridge') or False
      profile.wan_dhcp = self.cleaned_data.get('wan_dhcp')
      profile.wan_ip = self.cleaned_data.get('wan_ip')
      profile.wan_cidr = self.cleaned_data.get('wan_cidr')
      profile.wan_gw = self.cleaned_data.get('wan_gw')

      if self.cleaned_data.get('tc_egress'):
        profile.vpn_egress_limit = self.cleaned_data.get('tc_egress').bandwidth

      profile.save()

      profile.optional_packages = self.cleaned_data.get('optional_packages')
      profile.save()

    if subnet:
      subnet.node = node
      subnet.save()

    # Update DNS entries
    Record.update_for_node(node)
    
    # Register the node name
    NodeNames(name = node.name, node = node).save()

    # Generate node added event
    Event.create_event(node, EventCode.NodeAdded, '', EventSource.NodeDatabase,
                       data = 'Maintainer: %s' % node.owner.username)

    self.node = node
    return node
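
Registration draws candidate VPN MAC addresses from gen_mac_address() and retries up to ten times until an unused one is found. One plausible implementation, assuming locally-administered unicast addresses are wanted (the 0x02 prefix is an assumption):

import random

def gen_mac_address():
  """
  Generates a random MAC address; the leading 0x02 octet sets the
  locally-administered bit and keeps the multicast bit clear (assumed).
  """
  octets = [0x02] + [random.randint(0x00, 0xff) for _ in xrange(5)]
  return ':'.join('%02x' % octet for octet in octets)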
Example #7
 def save(self):
   """
   Performs the actual renumbering.
   """
   # We must ensure exclusive access during node updates as otherwise this might happen
   # in the middle of a monitor update and this would cause unwanted consequences
   self.__node.ensure_exclusive_access()
   
   # Determine what subnet primary IP belonged to
   primary = self.__node.get_primary_subnet()
   renumber_primary = False
   old_router_id = self.__node.ip
   
   # Renumber subnets first
   for subnet in queryset_by_ip(self.__node.subnet_set.filter(allocated = True), 'ip_subnet')[:]:
     action = int(self.cleaned_data.get('subnet_%s' % subnet.pk))
     prefix_len = int(self.cleaned_data.get('prefix_%s' % subnet.pk) or 27)
     manual_ip = self.cleaned_data.get('manual_%s' % subnet.pk)
     
     if action == RenumberAction.Keep:
       pass
     elif action == RenumberAction.Remove:
       subnet.delete()
     else:
       # This means we should renumber to some other pool
       pool = Pool.objects.get(pk = action)
       if manual_ip:
         new_subnet = pool.reserve_subnet(manual_ip, prefix_len)
       else:
         new_subnet = pool.allocate_subnet(prefix_len)
       
       # If the old subnet was the source of the node's primary IP, remember that
       save_primary = (not renumber_primary and primary and primary[0] == subnet)
       
       # Remove old subnet and create a new one; it is deleted here so the old allocation
       # is returned to the pool and all status info is reset
       subnet.delete()
       
       s = Subnet(node = self.__node, subnet = new_subnet.network, cidr = new_subnet.cidr)
       s.allocated = True
       s.allocated_at = datetime.now()
       s.status = SubnetStatus.NotAnnounced
       s.description = subnet.description
       s.gen_iface_type = subnet.gen_iface_type
       s.gen_dhcp = subnet.gen_dhcp
       s.save()
       
       if save_primary:
         primary = s
         renumber_primary = True
   
   # The subnets have now been renumbered; check if we need to renumber the primary IP
   router_id_changed = False
   if renumber_primary:
     net = ipcalc.Network(primary.subnet, primary.cidr)
     self.__node.ip = str(net.host_first())
     router_id_changed = True
   
   # Remove conflicting invalid nodes (another node with the IP we just renumbered to)
   existing_nodes = Node.objects.filter(ip = self.__node.ip, status = NodeStatus.Invalid)
   if existing_nodes.count() > 0:
     self.warning_or_continue(_("There is an existing but unregistered node with the same primary IP address you are currently renumbering to! If you continue with this operation, this invalid node will be replaced."))
   existing_nodes.delete()
   
   # Node has been renumbered, reset monitoring status as this node is obviously not
   # visible right after renumbering.
   if router_id_changed:
     # Update node's DNS record
     Record.update_for_node(self.__node)
     
     if not self.__node.is_pending():
       self.__node.status = NodeStatus.Down
       self.__node.peers = 0
       Link.objects.filter(src = self.__node).delete()
       Link.objects.filter(dst = self.__node).delete()
       self.__node.subnet_set.filter(allocated = False).delete()
       self.__node.subnet_set.all().update(status = SubnetStatus.NotAnnounced)
       
       # Setup a node renumbered notice (if one doesn't exist yet)
       try:
         notice = RenumberNotice.objects.get(node = self.__node)
         
         if notice.original_ip == self.__node.ip:
           notice.delete()
           self.__node.awaiting_renumber = False
         else:
           self.__node.awaiting_renumber = True
       except RenumberNotice.DoesNotExist:
         notice = RenumberNotice(node = self.__node)
         notice.original_ip = old_router_id
         notice.renumbered_at = datetime.now()
         notice.save()
         self.__node.awaiting_renumber = True
   
   self.__node.save()
   
   # Generate node renumbered event
   Event.create_event(self.__node, EventCode.NodeRenumbered, '', EventSource.NodeDatabase,
                      data = 'Old address: %s' % old_router_id)
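
The per-subnet action parsed above is an integer: sentinel values select keep or remove, and any other value is treated as the primary key of the target Pool. A sketch of what RenumberAction might look like under that reading (the exact constants are assumptions and must not collide with real Pool primary keys):

class RenumberAction:
  """
  Sentinel actions for subnet renumbering; values other than these are
  interpreted as the primary key of the pool to renumber into.
  """
  Keep = -1
  Remove = -2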