Example #1
File: misc.py Project: kristopolous/DRR
def base_stats():
  # Reports base-level statistical information about the health of the server.
  # This is used for the /stats and /heartbeat calls.
  try:
    # for some reason this can lead to a memory error
    load = [float(unit) for unit in os.popen("/usr/bin/uptime | awk -F : ' { print $NF } '").read().split(', ')]

  except:
    load = 0

  uptime = TS.uptime()
  return {
    'human-uptime': "%dd %02d:%02d:%02d" % ( uptime / TS.ONE_DAY_SECOND, (uptime / TS.ONE_HOUR_SECOND) % 24, (uptime / 60) % 60, uptime % 60 ),
    'human-now': TS.ts_to_name(),
    'computer-uptime': uptime,
    'computer-now': time.time(),
    'last-recorded': float(DB.get('last_recorded', use_cache=False) or 0),
    'hits': DB.run('select sum(value) from kv where key like "%hit%"').fetchone()[0],
    'version': __version__,
    'uuid': config['uuid'],
    'next-prune': int(last_prune - (TS.unixtime('prune') - prune_duration)), 
    'load': load,
    'files': [m.path for m in psutil.Process().open_files()],
    'connections': len(psutil.Process().connections()),
    'memory': [
      # Current memory footprint in MB
      psutil.Process(os.getpid()).memory_info().rss / (1024.0 * 1024), 
      
      # Maximum lifetime memory footprint in MB
      resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0
    ],
    'threads': [ thread.name for thread in threading.enumerate() ],
    'disk': cloud.size('.') / (1024.0 ** 3)
  }
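
The 'human-uptime' field above packs the whole seconds-to-days/hours/minutes/seconds conversion into a single format expression. A minimal sketch of the same arithmetic pulled out into a standalone helper (the function name and the 86400/3600 constants are mine, standing in for TS.ONE_DAY_SECOND and TS.ONE_HOUR_SECOND):

def human_uptime(seconds):
  # e.g. 93784 seconds -> "1d 02:03:04"
  days = int(seconds // 86400)        # TS.ONE_DAY_SECOND
  hours = int(seconds // 3600) % 24   # TS.ONE_HOUR_SECOND
  minutes = int(seconds // 60) % 60
  return "%dd %02d:%02d:%02d" % (days, hours, minutes, int(seconds) % 60)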
Example #3
File: cloud.py Project: kristopolous/DRR
def prune(reindex=False, force=False):
  import lib.misc as misc 
  # Gets rid of files older than archive - cloud stores things if relevant. 

  # Now when the child calls it, it won't hit the network for every prune.
  process = Thread(name='Prune:%s' % (TS.ts_to_name(), ), target=prune_process, args=(reindex, force))
  process.start()
  return process
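
Since prune() returns the Thread it starts rather than waiting on it, a caller that needs the cleanup finished before continuing could simply join it. A usage sketch, not taken from the project:

p = prune(force=True)
p.join()  # block until prune_process has finished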
Example #4
def stitch_and_slice(file_list, start_minute, duration_minute):
  # Given a file_list in a directory and a duration, this function will seek out
  # adjacent files if necessary and serialize them accordingly, and then return the
  # file name of an audio slice that is the combination of them.
  #from multiprocessing import Process
  from threading import Thread
  slice_process = Thread(name='stitch-slice-%s' % (TS.ts_to_name(), ), target=stitch_and_slice_process, args=(file_list, start_minute, duration_minute))
  slice_process.daemon = True
  slice_process.start()
Example #5
File: cloud.py Project: WaiveCar/DRR
def prune(reindex=False, force=False):
    import lib.misc as misc
    # Gets rid of files older than archive - cloud stores things if relevant.

    # Now when the child calls it, it won't hit the network for every prune.
    process = Thread(name='Prune:%s' % (TS.ts_to_name(), ),
                     target=prune_process,
                     args=(reindex, force))
    process.start()
    return process
Example #6
File: audio.py Project: WaiveCar/DRR
def stitch_and_slice(file_list, start_minute, duration_minute):
    # Given a file_list in a directory and a duration, this function will seek out
    # adjacent files if necessary and serialize them accordingly, and then return the
    # file name of an audio slice that is the combination of them.
    #from multiprocessing import Process
    from threading import Thread
    slice_process = Thread(name='stitch-slice-%s' % (TS.ts_to_name(), ),
                           target=stitch_and_slice_process,
                           args=(file_list, start_minute, duration_minute))
    slice_process.daemon = True
    slice_process.start()
Example #7
    def download_start(file_name):
        """ Starts a process that manages the downloading of a stream. """
        global g_download_pid

        g_download_pid += 1

        #
        # There may be a multi-second lapse time from the naming of the file to
        # the actual start of the download so we should err on that side by putting it
        # in the future by some margin
        #
        file_name = '%s/%s-%s.mp3' % (
            misc.DIR_STREAMS, callsign,
            TS.ts_to_name(TS.now(offset_sec=misc.PROCESS_DELAY / 2)))
        logging.info('Starting download #%d (%s). Next up in %ds' %
                     (g_download_pid, file_name, cascade_margin))

        process = Thread(
            target=stream_download,
            name='Download-%d:%s' % (g_download_pid, TS.ts_to_name()),
            args=(callsign, misc.config['stream'], g_download_pid, file_name))
        process.daemon = True
        process.start()
        return [file_name, process]
Example #8
def stream_name(list_in, absolute_start_minute, duration_minute, relative_start_minute=None):
  import lib.misc as misc
  # Get the stream name from list and start minute over a given duration. 
  duration_sec = duration_minute * 60.0

  # The start_minute above is in absolute terms, not those relative to the file.
  if not relative_start_minute:
    relative_start_minute = absolute_start_minute - list_in[0]['start_minute']

  first_file = list_in[0]['name']
  info = stream_info(first_file)
  ts = TS.ts_to_name(info['start_date'] + timedelta(minutes=relative_start_minute))
  
  # print '--offset', unix_time, start_minute, list_in[0]['start_minute'] , list_in
  fname = "%s/%s-%s_%d.mp3" % (misc.DIR_SLICES, info['callsign'], ts, duration_minute)
  return fname
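
The absolute-versus-relative start minute above is easiest to see with concrete numbers. A worked sketch of that offset arithmetic (the values are made up):

# The first archived file in the list begins at absolute minute 120.
list_in = [{'name': 'first.mp3', 'start_minute': 120}]
absolute_start_minute = 135  # the caller wants audio starting at absolute minute 135

# So the slice begins 15 minutes into that first file, and the timestamp baked
# into fname is info['start_date'] + timedelta(minutes=15).
relative_start_minute = absolute_start_minute - list_in[0]['start_minute']  # -> 15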
Example #9
File: indy_server.py Project: EQ4/DRR
  def download_start(file_name):
    """ Starts a process that manages the downloading of a stream. """
    global g_download_pid

    g_download_pid += 1
    logging.info('Starting cascaded downloader #%d. Next up in %ds' % (g_download_pid, cascade_margin))

    #
    # There may be a multi-second lapse time from the naming of the file to
    # the actual start of the download so we should err on that side by putting it
    # in the future by some margin
    #
    file_name = '%s/%s-%s.mp3' % (misc.DIR_STREAMS, callsign, TS.ts_to_name(TS.now(offset_sec=misc.PROCESS_DELAY / 2)))
    process = Process(target=stream_download, args=(callsign, misc.config['stream'], g_download_pid, file_name))
    process.start()
    return [file_name, process]
Example #10
  def at(start, duration_string='1hr'):
    """
    Sends a stream using a human-readable (and human-writable) definition
    at the start time.  This uses the dateutil.parser library, so strings
    such as "Monday 2pm" are accepted.

    Because the space (0x20) is such a pain in HTTP, you can use "_",
    "-" or "+" to signify it.  For instance,

        /at/monday_2pm/1hr

    Will work fine
    """
    dt = TS.str_to_time(start)
    duration_min = TS.duration_parse(duration_string)
    endpoint = '%s-%s_%d.mp3' % (misc.config['callsign'], TS.ts_to_name(dt), duration_min)
    return send_stream(endpoint, download_name=endpoint)
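
Tracing the docstring's own example through the code makes the flow clearer. A hedged sketch of one request, assuming TS.duration_parse('1hr') yields 60 minutes (the TS helpers' internals are not shown in these excerpts):

# GET /at/monday_2pm/1hr
#   start        = 'monday_2pm'                   ("_" stands in for the space)
#   dt           = TS.str_to_time('monday_2pm')   # parsed as "Monday 2pm"
#   duration_min = TS.duration_parse('1hr')       # assumed -> 60
#   endpoint     = "<callsign>-<TS.ts_to_name(dt)>_60.mp3"
#   return         send_stream(endpoint, download_name=endpoint)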
Example #11
File: cloud.py Project: kristopolous/DRR
def rename():
  import lib.misc as misc 
  all_files = glob('%s/*.mp3' % misc.DIR_STREAMS)
  count = 0
  total = 0
  for fname in all_files:
    (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(fname)
    oldts = os.path.getctime(fname)
    newts = TS.ts_to_name(atime + TS.get_offset() * 60)
    newname = "%s/%s-%s.mp3" % (misc.DIR_STREAMS, misc.config['callsign'], newts)

    if not os.path.exists(newname):
      count += 1.0
      os.rename(fname, newname)
      
    total += 1.0

  return "%f" % (count / total)
Example #12
File: server.py Project: EQ4/DRR
    def at(start, duration_string='1hr'):
        """
    Sends a stream using a human-readable (and human-writable) definition
    at the start time.  This uses the dateutil.parser library, so strings
    such as "Monday 2pm" are accepted.

    Because the space (0x20) is such a pain in HTTP, you can use "_",
    "-" or "+" to signify it.  For instance,

        /at/monday_2pm/1hr

    Will work fine
    """
        dt = TS.str_to_time(start)
        duration_min = TS.duration_parse(duration_string)
        endpoint = '%s-%s_%d.mp3' % (misc.config['callsign'],
                                     TS.ts_to_name(dt), duration_min)
        return send_stream(endpoint, download_name=endpoint)
Example #13
File: cloud.py Project: WaiveCar/DRR
def rename():
    import lib.misc as misc
    all_files = glob('%s/*.mp3' % misc.DIR_STREAMS)
    count = 0
    total = 0
    for fname in all_files:
        (mode, ino, dev, nlink, uid, gid, size, atime, mtime,
         ctime) = os.stat(fname)
        oldts = os.path.getctime(fname)
        newts = TS.ts_to_name(atime + TS.get_offset() * 60)
        newname = "%s/%s-%s.mp3" % (misc.DIR_STREAMS, misc.config['callsign'],
                                    newts)

        if not os.path.exists(newname):
            count += 1.0
            os.rename(fname, newname)

        total += 1.0

    return "%f" % (count / total)
Example #14
File: audio.py Project: WaiveCar/DRR
def stream_name(list_in,
                absolute_start_minute,
                duration_minute,
                relative_start_minute=None):
    import lib.misc as misc
    # Get the stream name from list and start minute over a given duration.
    duration_sec = duration_minute * 60.0

    # The start_minute above is in absolute terms, not those relative to the file.
    if not relative_start_minute:
        relative_start_minute = absolute_start_minute - list_in[0][
            'start_minute']

    first_file = list_in[0]['name']
    info = stream_info(first_file)
    ts = TS.ts_to_name(info['start_date'] +
                       timedelta(minutes=relative_start_minute))

    # print '--offset', unix_time, start_minute, list_in[0]['start_minute'] , list_in
    fname = "%s/%s-%s_%d.mp3" % (misc.DIR_SLICES, info['callsign'], ts,
                                 duration_minute)
    return fname
Example #15
def stream_manager():
  global g_download_kill_pid
  import random

  # Manager process which makes sure that the
  # streams are running appropriately.
  callsign = misc.config['callsign']

  #
  # AAC bitrate is some non-trivial thing that even ffprobe doesn't
  # do a great job at. This solution looks at number of bits that
  # transit over the wire given a duration of time, and then uses
  # that to compute the bitrate, since in practice, that's what
  # bitrate effectively means, and why it's such an important metric.
  #
  # This is to compute a format agnostic bitrate
  # (see heartbeat for more information)
  #
  has_bitrate = DB.get('bitrate')
  if has_bitrate and int(has_bitrate) == 0:
    has_bitrate = False

  first_time = 0
  total_bytes = 0
  normalize_delay = 6
  cycle_count = 0

  cascade_time = misc.config['cascade_time']
  cascade_buffer = misc.config['cascade_buffer']
  cascade_margin = cascade_time - cascade_buffer

  last_prune = 0
  last_success = 0
  last_heartbeat = None
  
  change_state = None
  SHUTDOWN = 1
  RESTART = 2
  shutdown_time = None
  misc.download_ipc = Queue()

  # Number of seconds to be cycling
  cycle_time = misc.config['cycle_time']

  process = None
  process_next = None

  # The manager will be the one that starts this.
  #server.manager(misc.config)
  webserver = Thread(target=server.manager, name='Webserver', args=(misc.config,))
  webserver.start()

  file_name = None

  # A wrapper function to start a download process
  def download_start(file_name):
    """ Starts a process that manages the downloading of a stream. """
    global g_download_pid

    g_download_pid += 1

    #
    # There may be a multi-second lapse time from the naming of the file to
    # the actual start of the download so we should err on that side by putting it
    # in the future by some margin
    #
    file_name = '%s/%s-%s.mp3' % (misc.DIR_STREAMS, callsign, TS.ts_to_name(TS.now(offset_sec=misc.PROCESS_DELAY / 2)))
    logging.info('Starting download #%d (%s). Next up in %ds' % (g_download_pid, file_name, cascade_margin))

    process = Thread(target=stream_download, name='Download-%d:%s' % (g_download_pid, TS.ts_to_name()), args=(callsign, misc.config['stream'], g_download_pid, file_name))
    process.daemon = True
    process.start()
    return [file_name, process]


  # see https://github.com/kristopolous/DRR/issues/91:
  # Randomize prune to offload disk peaks
  prune_duration = misc.config['prune_every'] * (1.10 - random.random() / 5.0)
  misc.prune_duration = prune_duration

  last_heartbeat_tid = -1
  while True:
    #
    # We cycle this to off for every run. By the time we go through the queue, so long
    # as we aren't supposed to be shutting down, this should be toggled to true.
    #
    if last_prune < (TS.unixtime('prune') - prune_duration):
      prune_duration = misc.config['prune_every'] * (1.10 - random.random() / 5.0)
      misc.prune_duration = prune_duration
      # We just assume it can do its business in under a day
      prune = cloud.prune()
      last_prune = TS.unixtime('prune')
      misc.last_prune = last_prune

    # Increment the amount of time this has been running
    if cycle_count % 30 == 0:
      # we only do these things occasionally, they 
      # are either not very important or are not
      # expected to change that often
      TS.get_offset()

    cycle_count += 1

    lr_set = False
    expired_heartbeat = last_heartbeat and time.time() - last_heartbeat > cycle_time * 2

    while not misc.queue.empty():
      what, value = misc.queue.get(False)

      # The curl process discovered a new stream to be
      # used instead.
      if what == 'stream':
        misc.config['stream'] = value
        logging.info("Using %s as the stream now" % value)
        # We expire our heartbeat in order to force a new stream
        # to start
        expired_heartbeat = True

      elif what == 'db-debug':
        DB.debug()

      elif what == 'shutdown':
        change_state = SHUTDOWN

      elif what == 'restart':
        logging.info(DB.get('runcount', use_cache=False))
        cwd = os.getcwd()
        os.chdir(misc.PROCESS_PATH)
        Popen(sys.argv)
        os.chdir(cwd)

        change_state = RESTART

        # Try to record for another restart_overlap seconds - make sure that
        # we don't perpetually put this in the future due to some bug.
        if not shutdown_time:
          shutdown_time = TS.unixtime('dl') + misc.config['restart_overlap']
          logging.info("Restart requested ... shutting down download at %s" % TS.ts_to_name(shutdown_time, with_seconds=True))

          #misc.shutdown_real(do_restart=False)
          #misc.download_ipc.put(('shutdown_time', shutdown_time))

          while True:
            time.sleep(5)
            with open(misc.PIDFILE_MANAGER, 'r') as f:
              manager_pid = f.read()

            #print manager_pid, os.getpid(), manager_pid == os.getpid()
            #logging.info(DB.get('runcount', use_cache=False))
            #logging.info(('ps axf | grep [%c]%s | grep python | wc -l' % (misc.config['callsign'][0], misc.config['callsign'][1:]) ).read().strip())
            ps_out = int(os.popen('ps axf | grep [%c]%s | grep python | wc -l' % (misc.config['callsign'][0], misc.config['callsign'][1:]) ).read().strip())

            if ps_out > 1: 
              logging.info("Found %d potential candidates (need at least 2)" % ps_out)
              # This makes it a restricted soft shutdown
              misc.shutdown_real(do_restart=True)
              misc.download_ipc.put(('shutdown_time', shutdown_time))
              break

            else:
              Popen(sys.argv)
              logging.warn("Couldn't find a replacement process ... not going anywhere.");

      elif what == 'heartbeat':
        if not lr_set:
          lr_set = True
          last_heartbeat = time.time()
          last_heartbeat_tid = value[1]

          if last_heartbeat_tid < g_download_kill_pid:
            logging.warn("hb: Got a heartbeat for #%d but everything below #%d should be gone!" % (last_heartbeat_tid, g_download_kill_pid))

          DB.set('last_recorded', time.time())

        if not has_bitrate: 
          margin = 60

          # Keep track of the first time this stream started (this is where our total
          # byte count is derived from)
          if not first_time: 
            first_time = value[0]

          #
          # Otherwise we give a large (in computer time) margin of time to confidently
          # guess the bitrate.  I didn't do great at stats in college, but in my experiments,
          # the estimation falls within 98% of the destination.  I'm pretty sure it's really
          # unlikely this will come out erroneous, but I really can't do the math, it's probably
          # a T value, but I don't know. Anyway, whatevs.
          #
          # The normalize_delay here is for both he-aac+ streams which need to put in some frames
          # before the quantizing pushes itself up and for other stations which sometimes put a canned
          # message at the beginning of the stream, like "Live streaming supported by ..."
          #
          # When we discount the first half-dozen seconds as not being part of the total, we get a
          # stabilizing convergence far quicker.
          #
          elif (value[0] - first_time > normalize_delay):
            # If we haven't determined this stream's bitrate (which we use to estimate
            # the amount of content in a given archived stream), then we compute it
            # here instead of asking the parameters of a given block and then presuming.
            total_bytes += value[2]

            # We still give it a time period after the normalizing delay in order to build enough
            # samples to make a solid guess at what this number should be.
            if (value[0] - first_time > (normalize_delay + margin)):
              # We take the total bytes and average them over the elapsed time (excluding the normalize_delay).
              est = total_bytes / (value[0] - first_time - normalize_delay)

              # est is in bytes/sec; dividing by 1000 gives KB/sec and multiplying by 8
              # converts to kilobits/sec, snapping the estimate to the nearest 8 kbps step.
              bitrate = int( round (est / 1000) * 8 )
              #print("Estimated bitrate:%d total:%d est:%d denom:%d" % (bitrate, total_bytes, est, value[0] - first_time - normalize_delay) )
              if bitrate > 0:
                DB.set('bitrate', bitrate)
                has_bitrate = DB.get('bitrate')

    #if last_heartbeat:
    #  logging.info("%d heartbeat %d" % (last_heartbeat, last_heartbeat_tid))

    # Check for our management process
    if not misc.manager_is_running():
      logging.info("Manager isn't running");
      change_state = SHUTDOWN

    # we get here if we should NOT be recording.  So we make sure we aren't.
    if change_state == SHUTDOWN or (change_state == RESTART and TS.unixtime('dl') > shutdown_time):
      misc.shutdown_real()

    else:
      if not process and not change_state:
        logging.info("Failed to find downloader, starting new one")
        file_name, process = download_start(file_name)
        last_success = TS.unixtime('dl')

      # If we've hit the time when we ought to cascade
      # If our last_success stream was more than cascade_time - cascade_buffer
      # then we start our process_next
      elif TS.unixtime('dl') - last_success > cascade_margin or expired_heartbeat:
        #logging.info("heartbeat expired %s %s %d %d %d" % (type(process_next), type(process), last_success, cascade_time, TS.unixtime('dl')))

        # And we haven't created the next process yet, then we start it now.
        if not process_next:
          logging.info("Failed to find downloader, starting new one")
          file_name, process_next = download_start(file_name)

      
      # If there is still no process then we should definitely bail.
      if not process:
        misc.shutdown_real()

    #
    # This needs to be on the outside loop in case we are doing a cascade
    # outside of a full mode. In this case, we will need to shut things down
    #
    # If we are past the cascade_time and we have a process_next, then
    # we should shutdown our previous process and move the pointers around.
    #
    if not change_state and (expired_heartbeat or (TS.unixtime('dl') - last_success > cascade_time and process)):
      g_download_kill_pid += 1
      #process.terminate()

      # If the process_next is running then we move our last_success forward to the present
      last_success = TS.unixtime('dl')

      # we rename our process_next AS OUR process
      process = process_next

      # and then clear out the old process_next pointer
      process_next = None

    time.sleep(cycle_time)
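
The bitrate estimate in the heartbeat branch above is just bytes over elapsed time, snapped to an 8 kbps step. A worked sketch of that arithmetic with made-up numbers:

# Suppose 1,440,000 bytes arrive in the 90 seconds after the normalize_delay window
# (past the 60-second margin that gates the estimate):
#   est     = 1440000 / 90                  # -> 16000 bytes/sec
#   bitrate = int(round(16000 / 1000) * 8)  # -> 16 * 8 = 128 kbps, stored via DB.set('bitrate', ...)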
Example #16
File: indy_server.py Project: EQ4/DRR
def stream_manager():
    import random

    # Manager process which makes sure that the
    # streams are running appropriately.
    callsign = misc.config['callsign']

    #
    # AAC bitrate is some non-trivial thing that even ffprobe doesn't
    # do a great job at. This solution looks at number of bits that
    # transit over the wire given a duration of time, and then uses
    # that to compute the bitrate, since in practice, that's what
    # bitrate effectively means, and why it's such an important metric.
    #
    # This is to compute a format agnostic bitrate
    # (see heartbeat for more information)
    #
    has_bitrate = DB.get('bitrate')
    first_time = 0
    total_bytes = 0
    normalize_delay = 6

    cascade_time = misc.config['cascadetime']
    cascade_buffer = misc.config['cascadebuffer']
    cascade_margin = cascade_time - cascade_buffer

    last_prune = 0
    last_success = 0

    change_state = None
    SHUTDOWN = 1
    RESTART = 2
    shutdown_time = None
    misc.download_ipc = Queue()

    # Number of seconds to be cycling
    cycle_time = misc.config['cycletime']

    process = None
    process_next = None

    # The manager will be the one that starts this.
    misc.pid_map['webserver'] = Process(target=server.manager,
                                        args=(misc.config, ))
    misc.pid_map['webserver'].start()

    file_name = None

    # A wrapper function to start a download process
    def download_start(file_name):
        """ Starts a process that manages the downloading of a stream. """
        global g_download_pid

        g_download_pid += 1
        logging.info('Starting cascaded downloader #%d. Next up in %ds' %
                     (g_download_pid, cascade_margin))

        #
        # There may be a multi-second lapse time from the naming of the file to
        # the actual start of the download so we should err on that side by putting it
        # in the future by some margin
        #
        file_name = '%s/%s-%s.mp3' % (
            misc.DIR_STREAMS, callsign,
            TS.ts_to_name(TS.now(offset_sec=misc.PROCESS_DELAY / 2)))
        process = Process(target=stream_download,
                          args=(callsign, misc.config['stream'],
                                g_download_pid, file_name))
        process.start()
        return [file_name, process]

    # see https://github.com/kristopolous/DRR/issues/91:
    # Randomize prune to offload disk peaks
    prune_duration = misc.config['pruneevery'] + (1 / 8.0 -
                                                  random.random() / 4.0)

    while True:
        #
        # We cycle this to off for every run. By the time we go through the queue, so long
        # as we aren't supposed to be shutting down, this should be toggled to true.
        #
        flag = False

        if last_prune < (TS.unixtime('prune') -
                         TS.ONE_DAY_SECOND * prune_duration):
            prune_duration = misc.config['pruneevery'] + (
                1 / 8.0 - random.random() / 4.0)
            # We just assume it can do its business in under a day
            misc.pid_map['prune'] = cloud.prune()
            last_prune = TS.unixtime('prune')

        TS.get_offset()

        lr_set = False
        while not misc.queue.empty():
            flag = True
            what, value = misc.queue.get(False)

            # The curl process discovered a new stream to be
            # used instead.
            if what == 'stream':
                misc.config['stream'] = value
                logging.info("Using %s as the stream now" % value)
                # We deliberately don't set the flag here, so the old
                # process gets shut down and a new one starts on the new stream

            elif what == 'db-debug':
                DB.debug()

            elif what == 'shutdown':
                change_state = SHUTDOWN

            elif what == 'restart':
                logging.info(DB.get('runcount', use_cache=False))
                cwd = os.getcwd()
                os.chdir(misc.PROCESS_PATH)
                Popen(sys.argv)
                os.chdir(cwd)

                change_state = RESTART

                # Try to record for another restart_overlap seconds - make sure that
                # we don't perpetually put this in the future due to some bug.
                if not shutdown_time:
                    shutdown_time = TS.unixtime(
                        'dl') + misc.config['restart_overlap']
                    logging.info(
                        "Restart requested ... shutting down downloader at %s"
                        % TS.ts_to_name(shutdown_time, with_seconds=True))

                    while True:
                        time.sleep(20)
                        #logging.info(DB.get('runcount', use_cache=False))
                        logging.info(
                            ('ps axf | grep [%c]%s | grep python | wc -l' %
                             (misc.config['callsign'][0],
                              misc.config['callsign'][1:])).read().strip())
                        ps_out = int(
                            os.popen(
                                'ps axf | grep [%c]%s | grep python | wc -l' %
                                (misc.config['callsign'][0],
                                 misc.config['callsign'][1:])).read().strip())

                        if ps_out > 1:
                            logging.info(
                                "Found %d potential candidates (need at least 2)"
                                % ps_out)
                            # This makes it a restricted soft shutdown
                            misc.shutdown_real(do_restart=True)
                            misc.download_ipc.put(
                                ('shutdown_time', shutdown_time))
                            break

                        else:
                            Popen(sys.argv)
                            logging.warn(
                                "Couldn't find a replacement process ... not going anywhere."
                            )

            elif what == 'heartbeat':
                if not lr_set and value[1] > 100:
                    lr_set = True
                    DB.set('last_recorded', time.time())

                if not has_bitrate:

                    # Keep track of the first time this stream started (this is where our total
                    # byte count is derived from)
                    if not first_time:
                        first_time = value[0]

                    #
                    # Otherwise we give a large (in computer time) margin of time to confidently
                    # guess the bitrate.  I didn't do great at stats in college, but in my experiments,
                    # the estimation falls within 98% of the destination.  I'm pretty sure it's really
                    # unlikely this will come out erroneous, but I really can't do the math, it's probably
                    # a T value, but I don't know. Anyway, whatevs.
                    #
                    # The normalize_delay here is for both he-aac+ streams which need to put in some frames
                    # before the quantizing pushes itself up and for other stations which sometimes put a canned
                    # message at the beginning of the stream, like "Live streaming supported by ..."
                    #
                    # When we discount the first half-dozen seconds as not being part of the total, we get a
                    # stabilizing convergence far quicker.
                    #
                    elif (value[0] - first_time > normalize_delay):
                        # If we haven't determined this stream's bitrate (which we use to estimate
                        # the amount of content in a given archived stream), then we compute it
                        # here instead of asking the parameters of a given block and then presuming.
                        total_bytes += value[1]

                        # We still give it a time period after the normalizing delay in order to build enough
                        # samples to make a solid guess at what this number should be.
                        if (value[0] - first_time > (normalize_delay + 60)):
                            # We take the total bytes and average them over the elapsed time (excluding the normalize_delay).
                            est = total_bytes / (value[0] - first_time -
                                                 normalize_delay)

                            # est is in bytes/sec; dividing by 1000 gives KB/sec and multiplying by 8
                            # converts to kilobits/sec, snapping the estimate to the nearest 8 kbps step.
                            bitrate = int(round(est / 1000) * 8)
                            DB.set('bitrate', bitrate)

        # Check for our management process
        if not misc.manager_is_running():
            logging.info("Manager isn't running")
            change_state = SHUTDOWN

        # The only way for the bool to be toggled off is if we are not in full-mode ...
        # we get here if we should NOT be recording.  So we make sure we aren't.
        if change_state == SHUTDOWN or (change_state == RESTART
                                        and TS.unixtime('dl') > shutdown_time):
            process = my_process_shutdown(process)
            process_next = my_process_shutdown(process_next)
            misc.shutdown_real()

        else:
            # Didn't respond in cycle_time seconds so kill it
            if not flag:
                process = my_process_shutdown(process)

            if not process and not change_state:
                file_name, process = download_start(file_name)
                last_success = TS.unixtime('dl')

            # If we've hit the time when we ought to cascade
            elif TS.unixtime('dl') - last_success > cascade_margin:

                # And we haven't created the next process yet, then we start it now.
                if not process_next:
                    file_name, process_next = download_start(file_name)

            # If our last_success stream was more than cascade_time - cascade_buffer
            # then we start our process_next

            # If there is still no process then we should definitely bail.
            if not process:
                misc.shutdown_real()

        #
        # This needs to be on the outside loop in case we are doing a cascade
        # outside of a full mode. In this case, we will need to shut things down
        #
        # If we are past the cascade_time and we have a process_next, then
        # we should shutdown our previous process and move the pointers around.
        #
        if not change_state and TS.unixtime(
                'dl') - last_success > cascade_time and process:
            logging.info("Stopping cascaded downloader")
            process.terminate()

            # If the process_next is running then we move our last_success forward to the present
            last_success = TS.unixtime('dl')

            # we rename our process_next AS OUR process
            process = process_next

            # and then clear out the old process_next pointer
            process_next = None

        # Increment the amount of time this has been running
        DB.incr('uptime', cycle_time)

        time.sleep(cycle_time)
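
Both versions of stream_manager above rely on the same cascade timing: a fresh downloader is started cascade_buffer seconds before the running one is due to be retired, so consecutive recordings overlap rather than leaving a gap. A worked sketch with assumed config values (not taken from the project):

# cascade_time   = 900        # seconds each downloader is meant to run (assumed)
# cascade_buffer = 60         # seconds of deliberate overlap (assumed)
# cascade_margin = 900 - 60   # -> 840
#
# t = last_success + 840s: unixtime('dl') - last_success > cascade_margin, so process_next is started
# t = last_success + 900s: unixtime('dl') - last_success > cascade_time, so the old downloader is retired,
#                          process = process_next, and last_success is reset to the present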