Example #1
File: cloud.py Project: WaiveCar/DRR
def register_stream_list(reindex=False):
    import lib.misc as misc
    # Find the local streams and make sure they are all registered in the sqlite3 database.
    #
    # Get the existing streams as a set
    #
    # If we are asked to re-index (due to trying to fix a bug) then we ignore what we have
    # and just go ahead and do everything.
    #
    if reindex:
        all_registered = set([])

    else:
        all_registered = set(DB.all('streams', ['name']))

    # There should be a smarter way to do this ... you'd think. We should also
    # be more faithfully giving things extensions since it's not 100% mp3
    all_files = set(glob('%s/*.mp3' % misc.DIR_STREAMS))

    diff = all_files.difference(all_registered)

    # This is a list of files we haven't scanned yet...
    if not diff: return True

    # This basically means we could still be writing
    # this file.
    #
    # We take the cascade time and then buffer it by a minute, just
    # to be sure.
    #
    # If the creation time is after this cutoff then we skip the file and
    # register it later.
    cutoff = time.mktime((
        datetime.now() -
        timedelta(minutes=1, seconds=misc.config['cascade_time'])).timetuple())

    for fname in diff:
        if len(fname) == 0 or os.path.getctime(fname) > cutoff:
            continue

        info = stream_info(fname)
        if not info:
            continue

        DB.register_stream(info)

        if not misc.manager_is_running():
            logging.info("Manager is gone, shutting down")
            raise Exception()
Example #2
File: cloud.py Project: kristopolous/DRR
def register_stream_list(reindex=False):
  import lib.misc as misc 
  # Find the local streams and make sure they are all registered in the sqlite3 database. 
  #
  # Get the existing streams as a set
  #
  # If we are asked to re-index (due to trying to fix a bug) then we ignore what we have
  # and just go ahead and do everything.
  #
  if reindex:
    all_registered = set([])

  else: 
    all_registered = set(DB.all('streams', ['name']))

  # There should be a smarter way to do this ... you'd think. We should also
  # be more faithfully giving things extensions since it's not 100% mp3
  all_files = set(glob('%s/*.mp3' % misc.DIR_STREAMS))
 
  diff = all_files.difference(all_registered)

  # This is a list of files we haven't scanned yet...
  if not diff: return True

  # This basically means we could still be writing
  # this file.
  #
  # We take the cascade time and then buffer it by a minute, just
  # to be sure.
  # 
  # If the creation time is after this cutoff then we skip the file and
  # register it later.
  cutoff = time.mktime((datetime.now() - timedelta(minutes=1, seconds=misc.config['cascade_time'])).timetuple())

  for fname in diff:
    if len(fname) == 0 or os.path.getctime(fname) > cutoff:
      continue

    info = stream_info(fname)
    if not info:
      continue

    DB.register_stream(info)

    if not misc.manager_is_running():
      logging.info("Manager is gone, shutting down")
      raise Exception()
Example #3
File: indy_server.py Project: EQ4/DRR
    def cback(data):

        if not misc.params['shutdown_time']:
            if not misc.download_ipc.empty():
                what, value = misc.download_ipc.get(False)
                if what == 'shutdown_time':
                    misc.params['shutdown_time'] = value

        elif TS.unixtime('dl') > misc.params['shutdown_time']:
            sys.exit(0)

        if misc.params['isFirst'] == True:
            misc.params['isFirst'] = False

            if len(data) < 800:
                if re.match('https?://', data):
                    # If we are getting a redirect then we don't mind, we
                    # just put it in the stream and then we leave
                    misc.queue.put(('stream', data.strip()))
                    return True

                # A pls style playlist
                elif re.findall(r'File\d', data, re.M):
                    logging.info('Found a pls, using the File1 parameter')
                    matches = re.findall(r'File1=(.*)\n', data, re.M)
                    misc.queue.put(('stream', matches[0].strip()))
                    return True

        # This provides a reliable way to determine bitrate.  We look at how much
        # data we've received between two time periods
        misc.queue.put(('heartbeat', (TS.unixtime('hb'), len(data))))

        if not nl['stream']:
            try:
                nl['stream'] = open(file_name, 'w')

            except Exception as exc:
                logging.critical(
                    "Unable to open %s. Can't record. Must exit." % file_name)
                sys.exit(-1)

        nl['stream'].write(data)

        if not misc.manager_is_running():
            misc.shutdown()
Example #4
File: indy_server.py Project: EQ4/DRR
  def cback(data): 

    if not misc.params['shutdown_time']:
      if not misc.download_ipc.empty():
        what, value = misc.download_ipc.get(False)
        if what == 'shutdown_time':
          misc.params['shutdown_time'] = value

    elif TS.unixtime('dl') > misc.params['shutdown_time']:
      sys.exit(0)

    if misc.params['isFirst'] == True:
      misc.params['isFirst'] = False

      if len(data) < 800:
        if re.match('https?://', data):
          # If we are getting a redirect then we don't mind, we
          # just put it in the stream and then we leave
          misc.queue.put(('stream', data.strip()))
          return True

        # A pls style playlist
        elif re.findall(r'File\d', data, re.M):
          logging.info('Found a pls, using the File1 parameter')
          matches = re.findall(r'File1=(.*)\n', data, re.M)
          misc.queue.put(('stream', matches[0].strip()))
          return True

    # This provides a reliable way to determine bitrate.  We look at how much 
    # data we've received between two time periods
    misc.queue.put(('heartbeat', (TS.unixtime('hb'), len(data))))

    if not nl['stream']:
      try:
        nl['stream'] = open(file_name, 'w')

      except Exception as exc:
        logging.critical("Unable to open %s. Can't record. Must exit." % file_name)
        sys.exit(-1)

    nl['stream'].write(data)

    if not misc.manager_is_running():
      misc.shutdown()
Example #5
  def cback(data): 
    global g_download_kill_pid

    """
      if len(data):
        catchall('download', json.dumps([g_download_kill_pid, nl['pid'], len(data)]))
      else:
        catchall('download', json.dumps([g_download_kill_pid, 'no data']))
    """
    # print nl['pid'], g_download_kill_pid
    if nl['pid'] <= g_download_kill_pid or not data:
      logging.info("Stopping download #%d" % nl['pid'])
      return False

    # misc.params can fail based on a shutdown sequence.
    if misc is None or misc.params is None or not misc.manager_is_running():
      # if misc is not None:
      #  misc.shutdown()
      return False

    elif not misc.params['shutdown_time']:
      if not misc.download_ipc.empty():
        what, value = misc.download_ipc.get(False)
        if what == 'shutdown_time':
          misc.params['shutdown_time'] = value

    elif TS.unixtime('dl') > misc.params['shutdown_time']:
      raise TypeError("Download Stop")

    if misc.params['isFirst'] == True:
      misc.params['isFirst'] = False

      if len(data) < 800:
        try:
          data_string = data.decode('utf-8')
        
          if re.match('https?://', data_string):
            # If we are getting a redirect then we don't mind, we
            # just put it in the stream and then we leave
            misc.queue.put(('stream', data_string.strip()))
            return False

          # A pls style playlist
          elif re.findall(r'File\d', data_string, re.M):
            logging.info('%d: Found a pls, using the File1 parameter' % (nl['pid'], ))
            matches = re.findall(r'File1=(.*)\n', data_string, re.M)
            misc.queue.put(('stream', matches[0].strip()))
            return False

        # If it gets here it's binary ... I guess that's fine.
        except Exception:
          pass

    # This provides a reliable way to determine bitrate.  We look at how much 
    # data we've received between two time periods
    misc.queue.put(('heartbeat', (TS.unixtime('hb'), nl['pid'], len(data))))

    if not nl['stream']:
      try:
        nl['stream'] = open(file_name, 'wb')

      except Exception as exc:
        logging.critical("%d: Unable to open %s. Can't record. Must exit." % (nl['pid'], file_name))
        return False

    nl['stream'].write(data)
Example #6
def stream_manager():
  global g_download_kill_pid
  import random

  # Manager process which makes sure that the
  # streams are running appropriately.
  callsign = misc.config['callsign']

  #
  # AAC bitrate is some non-trivial thing that even ffprobe doesn't
  # do a great job at. This solution looks at number of bits that
  # transit over the wire given a duration of time, and then uses
  # that to compute the bitrate, since in practice, that's what
  # bitrate effectively means, and why it's such an important metric.
  #
  # This is to compute a format agnostic bitrate
  # (see heartbeat for more information)
  #
  has_bitrate = DB.get('bitrate')
  if has_bitrate and int(has_bitrate) == 0:
    has_bitrate = False

  first_time = 0
  total_bytes = 0
  normalize_delay = 6
  cycle_count = 0

  cascade_time = misc.config['cascade_time']
  cascade_buffer = misc.config['cascade_buffer']
  cascade_margin = cascade_time - cascade_buffer

  last_prune = 0
  last_success = 0
  last_heartbeat = None
  
  change_state = None
  SHUTDOWN = 1
  RESTART = 2
  shutdown_time = None
  misc.download_ipc = Queue()

  # Number of seconds to be cycling
  cycle_time = misc.config['cycle_time']

  process = None
  process_next = None

  # The manager will be the one that starts this.
  #server.manager(misc.config)
  webserver = Thread(target=server.manager, name='Webserver', args=(misc.config,))
  webserver.start()

  file_name = None

  # A wrapper function to start a download process
  def download_start(file_name):
    """ Starts a process that manages the downloading of a stream. """
    global g_download_pid

    g_download_pid += 1

    #
    # There may be a multi-second lapse time from the naming of the file to
    # the actual start of the download so we should err on that side by putting it
    # in the future by some margin
    #
    file_name = '%s/%s-%s.mp3' % (misc.DIR_STREAMS, callsign, TS.ts_to_name(TS.now(offset_sec=misc.PROCESS_DELAY / 2)))
    logging.info('Starting download #%d (%s). Next up in %ds' % (g_download_pid, file_name, cascade_margin))

    process = Thread(target=stream_download, name='Download-%d:%s' % (g_download_pid, TS.ts_to_name()), args=(callsign, misc.config['stream'], g_download_pid, file_name))
    process.daemon = True
    process.start()
    return [file_name, process]


  # see https://github.com/kristopolous/DRR/issues/91:
  # Randomize prune to offload disk peaks
  prune_duration = misc.config['prune_every'] * (1.10 - random.random() / 5.0)
  misc.prune_duration = prune_duration

  last_heartbeat_tid = -1
  while True:
    #
    # We cycle this to off for every run. By the time we go through the queue, so long
    # as we aren't supposed to be shutting down, this should be toggled to true.
    #
    if last_prune < (TS.unixtime('prune') - prune_duration):
      prune_duration = misc.config['prune_every'] * (1.10 - random.random() / 5.0)
      misc.prune_duration = prune_duration
      # We just assume it can do its business in under a day
      prune = cloud.prune()
      last_prune = TS.unixtime('prune')
      misc.last_prune = last_prune

    # Increment the amount of time this has been running
    if cycle_count % 30 == 0:
      # we only do these things occasionally, they 
      # are either not very important or are not
      # expected to change that often
      TS.get_offset()

    cycle_count += 1

    lr_set = False
    expired_heartbeat = last_heartbeat and time.time() - last_heartbeat > cycle_time * 2

    while not misc.queue.empty():
      what, value = misc.queue.get(False)

      # The curl process discovered a new stream to be
      # used instead.
      if what == 'stream':
        misc.config['stream'] = value
        logging.info("Using %s as the stream now" % value)
        # We expire our heartbeat in order to force a new stream
        # to start
        expired_heartbeat = True

      elif what == 'db-debug':
        DB.debug()

      elif what == 'shutdown':
        change_state = SHUTDOWN

      elif what == 'restart':
        logging.info(DB.get('runcount', use_cache=False))
        cwd = os.getcwd()
        os.chdir(misc.PROCESS_PATH)
        Popen(sys.argv)
        os.chdir(cwd)

        change_state = RESTART

        # Try to record for another restart_overlap seconds - make sure that
        # we don't perpetually put this in the future due to some bug.
        if not shutdown_time:
          shutdown_time = TS.unixtime('dl') + misc.config['restart_overlap']
          logging.info("Restart requested ... shutting down download at %s" % TS.ts_to_name(shutdown_time, with_seconds=True))

          #misc.shutdown_real(do_restart=False)
          #misc.download_ipc.put(('shutdown_time', shutdown_time))

          while True:
            time.sleep(5)
            with open(misc.PIDFILE_MANAGER, 'r') as f:
              manager_pid = f.read()

            #print manager_pid, os.getpid(), manager_pid == os.getpid()
            #logging.info(DB.get('runcount', use_cache=False))
            #logging.info(('ps axf | grep [%c]%s | grep python | wc -l' % (misc.config['callsign'][0], misc.config['callsign'][1:]) ).read().strip())
            ps_out = int(os.popen('ps axf | grep [%c]%s | grep python | wc -l' % (misc.config['callsign'][0], misc.config['callsign'][1:]) ).read().strip())

            if ps_out > 1: 
              logging.info("Found %d potential candidates (need at least 2)" % ps_out)
              # This makes it a restricted soft shutdown
              misc.shutdown_real(do_restart=True)
              misc.download_ipc.put(('shutdown_time', shutdown_time))
              break

            else:
              Popen(sys.argv)
              logging.warn("Couldn't find a replacement process ... not going anywhere.")

      elif what == 'heartbeat':
        if not lr_set:
          lr_set = True
          last_heartbeat = time.time()
          last_heartbeat_tid = value[1]

          if last_heartbeat_tid < g_download_kill_pid:
            logging.warn("hb: Got a heartbeat for #%d but everything below #%d should be gone!" % (last_heartbeat_tid, g_download_kill_pid))

          DB.set('last_recorded', time.time())

        if not has_bitrate: 
          margin = 60

          # Keep track of the first time this stream started (this is where our total
          # byte count is derived from)
          if not first_time: 
            first_time = value[0]

          #
          # Otherwise we give a large (in computer time) margin of time to confidently
          # guess the bitrate.  I didn't do great at stats in college, but in my experiments,
          # the estimation falls within 98% of the destination.  I'm pretty sure it's really
          # unlikely this will come out erroneous, but I really can't do the math, it's probably
          # a T value, but I don't know. Anyway, whatevs.
          #
          # The normalize_delay here is for both he-aac+ streams which need to put in some frames
          # before the quantizing pushes itself up and for other stations which sometimes put a canned
          # message at the beginning of the stream, like "Live streaming supported by ..."
          #
          # When we discount the first half-dozen seconds as not being part of the total, we get a
          # stabilizing convergence far quicker.
          #
          elif (value[0] - first_time > normalize_delay):
            # If we haven't determined this stream's bitrate (which we use to estimate 
            # the amount of content is in a given archived stream), then we compute it 
            # here instead of asking the parameters of a given block and then presuming.
            total_bytes += value[2]

            # We still give it a time period after the normalizing delay in order to build enough
            # samples to make a solid guess at what this number should be.
            if (value[0] - first_time > (normalize_delay + margin)):
              # We take the total bytes and average them over the elapsed time past the normalizing delay.
              est = total_bytes / (value[0] - first_time - normalize_delay)

              # We round the estimate to the nearest KB/s and then multiply by 8
              # to express the bitrate in Kb/s.
              bitrate = int( round (est / 1000) * 8 )
              #print("Estimated bitrate:%d total:%d est:%d denom:%d" % (bitrate, total_bytes, est, value[0] - first_time - normalize_delay) )
              if bitrate > 0:
                DB.set('bitrate', bitrate)
                has_bitrate = DB.get('bitrate')

    #if last_heartbeat:
    #  logging.info("%d heartbeat %d" % (last_heartbeat, last_heartbeat_tid))

    # Check for our management process
    if not misc.manager_is_running():
      logging.info("Manager isn't running")
      change_state = SHUTDOWN

    # we get here if we should NOT be recording.  So we make sure we aren't.
    if change_state == SHUTDOWN or (change_state == RESTART and TS.unixtime('dl') > shutdown_time):
      misc.shutdown_real()

    else:
      if not process and not change_state:
        logging.info("Failed to find downloader, starting new one")
        file_name, process = download_start(file_name)
        last_success = TS.unixtime('dl')

      # If we've hit the time when we ought to cascade
      # If our last_success stream was more than cascade_time - cascade_buffer
      # then we start our process_next
      elif TS.unixtime('dl') - last_success > cascade_margin or expired_heartbeat:
        #logging.info("heartbeat expired %s %s %d %d %d" % (type(process_next), type(process), last_success, cascade_time, TS.unixtime('dl')))

        # And we haven't created the next process yet, then we start it now.
        if not process_next:
          logging.info("Starting the next cascaded downloader")
          file_name, process_next = download_start(file_name)

      
      # If there is still no process then we should definitely bail.
      if not process:
        misc.shutdown_real()

    #
    # This needs to be on the outside loop in case we are doing a cascade
    # outside of a full mode. In this case, we will need to shut things down
    #
    # If we are past the cascade_time and we have a process_next, then
    # we should shutdown our previous process and move the pointers around.
    #
    if not change_state and (expired_heartbeat or (TS.unixtime('dl') - last_success > cascade_time and process)):
      g_download_kill_pid += 1
      #process.terminate()

      # If the process_next is running then we move our last_success forward to the present
      last_success = TS.unixtime('dl')

      # we rename our process_next AS OUR process
      process = process_next

      # and then clear out the old process_next pointer
      process_next = None

    time.sleep(cycle_time)
Example #7
parser.add_argument("--daemon", action='store_true',  help="Run as daemon")
args = parser.parse_args()
if args.daemon:
  Popen( [x for x in sys.argv if x != '--daemon'] )
  sys.exit(0)

def new_sys_exit(value):
  pdb.set_trace()
  old_sys_exit(value)

old_sys_exit = sys.exit

if args.debug:
  import pdb
  sys.exit = new_sys_exit

read_config(args.config)      
del(read_config)

SP.setproctitle("%s-indycast" % misc.config['callsign'])
pid = os.getpid()

# This is the pid that should be killed to shut the system
# down.
misc.manager_is_running(pid)
with open(misc.PIDFILE_MANAGER, 'w+') as f:
  f.write(str(pid))

stream_manager()
Example #8
File: cloud.py Project: kristopolous/DRR
def prune_process(reindex=False, force=False):
  import lib.misc as misc 
  # This is internal; call prune() instead. prune() prepares this normally
  # blocking process so that it can be run asynchronously.
  # If another prune is already running then we just bail.
  if not misc.lockMap['prune'].acquire(False) and not force:
    logging.warn("Tried to run another prune whilst one is running. Aborting")
    return True

  # If we are the first process then we need to make sure that the webserver is up before
  # we do this to check to see if we are official
  time.sleep(2)

  #pid = misc.change_proc_name("%s-cleanup" % misc.config['callsign'])
  # We want to run the am_i_official here since it could block on a DNS lookup
  misc.am_i_official()

  try:
    register_stream_list(reindex)

  except Exception as e:
    logging.info("Wasn't able to register streams: %s" % e)
    misc.lockMap['prune'].release()
    return None

  archive_duration = misc.config['cloud_archive']
  cutoff = TS.unixtime('prune') - archive_duration

  # Remove all slices older than 4 hours.
  slice_cutoff = TS.unixtime('prune') - 0.1667 * TS.ONE_DAY_SECOND

  cloud_cutoff = None
  if misc.config['cloud']:
    cloud_cutoff = TS.unixtime('prune') - misc.config['disk_archive']

  # Put thingies into the cloud.
  count_cloud = 0
  count_delete = 0

  for file_name in glob('*/*.mp3'):
    #
    # Depending on many factors this could be running for hours
    # or even days.  We want to make sure this isn't a blarrrghhh
    # zombie process or worse yet, still running and competing with
    # other instances of itself.
    #
    if not misc.manager_is_running():
      misc.lockMap['prune'].release()
      return None

    if not os.path.exists(file_name):
      continue 

    ctime = os.path.getctime(file_name)

    # print "Looking at ", file_name, ctime, cutoff, archive_duration,  misc.config['archive'], misc.am_i_official()
    # We observe the rules set up in the config.
    if cloud_cutoff:
      logging.debug("%s cloud:%d ctime:%d slice:%d cutoff:%d ctime-cloud:%d ctime-slice:%d" %(file_name, cloud_cutoff, ctime, slice_cutoff, cutoff, ctime-cloud_cutoff, ctime-slice_cutoff ))
    if (file_name.startswith('slices') and ctime < slice_cutoff) or ctime < cutoff:
      logging.debug("Prune[remove]: %s (ctime)" % file_name)
      os.unlink(file_name)
      count_delete += 1 

    # We want to make sure we aren't archiving the slices
    elif cloud_cutoff and ctime < cloud_cutoff and not file_name.startswith('slice'):
      logging.debug("Prune[cloud]: %s" % file_name)

      # <s>Only unlink the file if I can successfully put it into the cloud.</s>
      #
      # Actually, policy change: 
      #   We should dump the file regardless because otherwise we would smash the disk
      #   AS HAS HAPPENED MULTIPLE TIMES
      #
      # Then you have an irrelevant past build up a forced discarding of the desired 
      # future ... just like with life itself.
      #
      res = put(file_name)

      if misc.am_i_official():
        try:
          os.unlink(file_name)

          # This is only a self-reporting system... we can use our success code for
          # our honesty here.
          if res:
            count_cloud += 1 

        except Exception as e:
          logging.debug("Prune[cloud]: Couldn't remove {}: {}".format(file_name, e))

  for file_name in glob('%s/*.gz' % misc.DIR_BACKUPS):
    ctime = os.path.getctime(file_name)

    # We observe the rules set up in the config.
    if ctime < cutoff:
      logging.debug("Prune: %s" % file_name)
      os.unlink(file_name)
      count_delete += 1 

  # Don't do this f*****g shit at all because f**k this so hard.
  #logging.info('select name, id from streams where end_unix < date("now", "-%d seconds") or (end_minute - start_minute < 0.05 and start_unix < date("now", "%d seconds"))' % (archive_duration, TS.get_offset() * 60 - 1200))

  unlink_list = DB.run('select name, id from streams where end_unix < date("now", "-%d seconds")' % (archive_duration)).fetchall()

  for file_name_tuple in unlink_list:
    file_name = str(file_name_tuple[0])
    id = file_name_tuple[1]

    #logging.debug("Prune[remove]: %s (unlink list)" % file_name)
    # If there's a cloud account at all then we need to unlink the 
    # equivalent mp3 file
    if cloud_cutoff and misc.am_i_official():
      # unlink() here is cloud.py's own helper, which removes the cloud copy.
      unlink(file_name)

      # After we remove these streams then we delete them from the db.
      DB.run('delete from streams where id = %d' % id)

    # now only after we've deleted from the cloud can we delete the local file
    if os.path.exists(file_name):
      os.unlink(file_name)
      count_delete += 1


  logging.info("Deleted %d files and put %d on the cloud." % (count_delete, count_cloud))
  misc.lockMap['prune'].release()
Example #9
File: indy_server.py Project: EQ4/DRR
def stream_manager():
    import random

    # Manager process which makes sure that the
    # streams are running appropriately.
    callsign = misc.config['callsign']

    #
    # AAC bitrate is some non-trivial thing that even ffprobe doesn't
    # do a great job at. This solution looks at number of bits that
    # transit over the wire given a duration of time, and then uses
    # that to compute the bitrate, since in practice, that's what
    # bitrate effectively means, and why it's such an important metric.
    #
    # This is to compute a format agnostic bitrate
    # (see heartbeat for more information)
    #
    has_bitrate = DB.get('bitrate')
    first_time = 0
    total_bytes = 0
    normalize_delay = 6

    cascade_time = misc.config['cascadetime']
    cascade_buffer = misc.config['cascadebuffer']
    cascade_margin = cascade_time - cascade_buffer

    last_prune = 0
    last_success = 0

    change_state = None
    SHUTDOWN = 1
    RESTART = 2
    shutdown_time = None
    misc.download_ipc = Queue()

    # Number of seconds to be cycling
    cycle_time = misc.config['cycletime']

    process = None
    process_next = None

    # The manager will be the one that starts this.
    misc.pid_map['webserver'] = Process(target=server.manager,
                                        args=(misc.config, ))
    misc.pid_map['webserver'].start()

    file_name = None

    # A wrapper function to start a download process
    def download_start(file_name):
        """ Starts a process that manages the downloading of a stream. """
        global g_download_pid

        g_download_pid += 1
        logging.info('Starting cascaded downloader #%d. Next up in %ds' %
                     (g_download_pid, cascade_margin))

        #
        # There may be a multi-second lapse time from the naming of the file to
        # the actual start of the download so we should err on that side by putting it
        # in the future by some margin
        #
        file_name = '%s/%s-%s.mp3' % (
            misc.DIR_STREAMS, callsign,
            TS.ts_to_name(TS.now(offset_sec=misc.PROCESS_DELAY / 2)))
        process = Process(target=stream_download,
                          args=(callsign, misc.config['stream'],
                                g_download_pid, file_name))
        process.start()
        return [file_name, process]

    # see https://github.com/kristopolous/DRR/issues/91:
    # Randomize prune to offload disk peaks
    prune_duration = misc.config['pruneevery'] + (1 / 8.0 -
                                                  random.random() / 4.0)

    while True:
        #
        # We cycle this to off for every run. By the time we go through the queue, so long
        # as we aren't supposed to be shutting down, this should be toggled to true.
        #
        flag = False

        if last_prune < (TS.unixtime('prune') -
                         TS.ONE_DAY_SECOND * prune_duration):
            prune_duration = misc.config['pruneevery'] + (
                1 / 8.0 - random.random() / 4.0)
            # We just assume it can do its business in under a day
            misc.pid_map['prune'] = cloud.prune()
            last_prune = TS.unixtime('prune')

        TS.get_offset()

        lr_set = False
        while not misc.queue.empty():
            flag = True
            what, value = misc.queue.get(False)

            # The curl process discovered a new stream to be
            # used instead.
            if what == 'stream':
                misc.config['stream'] = value
                logging.info("Using %s as the stream now" % value)
                # We now don't toggle to flag in order to shutdown the
                # old process and start a new one

            elif what == 'db-debug':
                DB.debug()

            elif what == 'shutdown':
                change_state = SHUTDOWN

            elif what == 'restart':
                logging.info(DB.get('runcount', use_cache=False))
                cwd = os.getcwd()
                os.chdir(misc.PROCESS_PATH)
                Popen(sys.argv)
                os.chdir(cwd)

                change_state = RESTART

                # Try to record for another restart_overlap seconds - make sure that
                # we don't perpetually put this in the future due to some bug.
                if not shutdown_time:
                    shutdown_time = TS.unixtime(
                        'dl') + misc.config['restart_overlap']
                    logging.info(
                        "Restart requested ... shutting down downloader at %s"
                        % TS.ts_to_name(shutdown_time, with_seconds=True))

                    while True:
                        time.sleep(20)
                        #logging.info(DB.get('runcount', use_cache=False))
                        ps_out = int(
                            os.popen(
                                'ps axf | grep [%c]%s | grep python | wc -l' %
                                (misc.config['callsign'][0],
                                 misc.config['callsign'][1:])).read().strip())

                        if ps_out > 1:
                            logging.info(
                                "Found %d potential candidates (need at least 2)"
                                % ps_out)
                            # This makes it a restricted soft shutdown
                            misc.shutdown_real(do_restart=True)
                            misc.download_ipc.put(
                                ('shutdown_time', shutdown_time))
                            break

                        else:
                            Popen(sys.argv)
                            logging.warn(
                                "Couldn't find a replacement process ... not going anywhere."
                            )

            elif what == 'heartbeat':
                if not lr_set and value[1] > 100:
                    lr_set = True
                    DB.set('last_recorded', time.time())

                if not has_bitrate:

                    # Keep track of the first time this stream started (this is where our total
                    # byte count is derived from)
                    if not first_time:
                        first_time = value[0]

                    #
                    # Otherwise we give a large (in computer time) margin of time to confidently
                    # guess the bitrate.  I didn't do great at stats in college, but in my experiments,
                    # the estimation falls within 98% of the destination.  I'm pretty sure it's really
                    # unlikely this will come out erroneous, but I really can't do the math, it's probably
                    # a T value, but I don't know. Anyway, whatevs.
                    #
                    # The normalize_delay here is for both he-aac+ streams which need to put in some frames
                    # before the quantizing pushes itself up and for other stations which sometimes put a canned
                    # message at the beginning of the stream, like "Live streaming supported by ..."
                    #
                    # When we discount the first half-dozen seconds as not being part of the total, we get a
                    # stabilizing convergence far quicker.
                    #
                    elif (value[0] - first_time > normalize_delay):
                        # If we haven't determined this stream's bitrate (which we use to estimate
                        # the amount of content is in a given archived stream), then we compute it
                        # here instead of asking the parameters of a given block and then presuming.
                        total_bytes += value[1]

                        # We still give it a time period after the normalizing delay in order to build enough
                        # samples to make a solid guess at what this number should be.
                        if (value[0] - first_time > (normalize_delay + 60)):
                            # We take the total bytes and average them over the elapsed time past the normalizing delay.
                            est = total_bytes / (value[0] - first_time -
                                                 normalize_delay)

                            # We round the estimate to the nearest KB/s and then
                            # multiply by 8 to express the bitrate in Kb/s.
                            bitrate = int(round(est / 1000) * 8)
                            DB.set('bitrate', bitrate)

        # Check for our management process
        if not misc.manager_is_running():
            logging.info("Manager isn't running")
            change_state = SHUTDOWN

        # The only way for the bool to be toggled off is if we are not in full-mode ...
        # we get here if we should NOT be recording.  So we make sure we aren't.
        if change_state == SHUTDOWN or (change_state == RESTART
                                        and TS.unixtime('dl') > shutdown_time):
            process = my_process_shutdown(process)
            process_next = my_process_shutdown(process_next)
            misc.shutdown_real()

        else:
            # Didn't respond in cycle_time seconds so kill it
            if not flag:
                process = my_process_shutdown(process)

            if not process and not change_state:
                file_name, process = download_start(file_name)
                last_success = TS.unixtime('dl')

            # If we've hit the time when we ought to cascade
            elif TS.unixtime('dl') - last_success > cascade_margin:

                # And we haven't created the next process yet, then we start it now.
                if not process_next:
                    file_name, process_next = download_start(file_name)

            # If our last_success stream was more than cascade_time - cascade_buffer
            # then we start our process_next

            # If there is still no process then we should definitely bail.
            if not process:
                misc.shutdown_real()

        #
        # This needs to be on the outside loop in case we are doing a cascade
        # outside of a full mode. In this case, we will need to shut things down
        #
        # If we are past the cascade_time and we have a process_next, then
        # we should shutdown our previous process and move the pointers around.
        #
        if not change_state and TS.unixtime(
                'dl') - last_success > cascade_time and process:
            logging.info("Stopping cascaded downloader")
            process.terminate()

            # If the process_next is running then we move our last_success forward to the present
            last_success = TS.unixtime('dl')

            # we rename our process_next AS OUR process
            process = process_next

            # and then clear out the old process_next pointer
            process_next = None

        # Increment the amount of time this has been running
        DB.incr('uptime', cycle_time)

        time.sleep(cycle_time)
Example #10
File: indy_server.py Project: EQ4/DRR
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-c",
            "--config",
            default="./indy_config.txt",
            help="Configuration file (default ./indy_config.txt)")
        parser.add_argument('--version',
                            action='version',
                            version='indycast %s :: Aug 2015' %
                            misc.__version__)
        parser.add_argument("--daemon",
                            action='store_true',
                            help="run as daemon")
        args = parser.parse_args()
        if args.daemon:
            Popen(filter(lambda x: x != '--daemon', sys.argv))
            sys.exit(0)

        read_config(args.config)
        del (read_config)

        pid = misc.change_proc_name("%s-manager" % misc.config['callsign'])

        # This is the pid that should be killed to shut the system
        # down.
        misc.manager_is_running(pid)
        with open(misc.PIDFILE_MANAGER, 'w+') as f:
            f.write(str(pid))

        stream_manager()
Example #11
File: cloud.py Project: WaiveCar/DRR
def prune_process(reindex=False, force=False):
    import lib.misc as misc
    # This is internal; call prune() instead. prune() prepares this normally
    # blocking process so that it can be run asynchronously.
    # If another prune is already running then we just bail.
    if not misc.lockMap['prune'].acquire(False) and not force:
        logging.warn(
            "Tried to run another prune whilst one is running. Aborting")
        return True

    # If we are the first process then we need to make sure that the webserver is up before
    # we do this to check to see if we are official
    time.sleep(2)

    #pid = misc.change_proc_name("%s-cleanup" % misc.config['callsign'])
    # We want to run the am_i_official here since it could block on a DNS lookup
    misc.am_i_official()

    try:
        register_stream_list(reindex)

    except Exception as e:
        logging.info("Wasn't able to register streams: %s" % e)
        misc.lockMap['prune'].release()
        return None

    archive_duration = misc.config['cloud_archive']
    cutoff = TS.unixtime('prune') - archive_duration

    # Remove all slices older than 4 hours.
    slice_cutoff = TS.unixtime('prune') - 0.1667 * TS.ONE_DAY_SECOND

    cloud_cutoff = None
    if misc.config['cloud']:
        cloud_cutoff = TS.unixtime('prune') - misc.config['disk_archive']

    # Put thingies into the cloud.
    count_cloud = 0
    count_delete = 0

    for file_name in glob('*/*.mp3'):
        #
        # Depending on many factors this could be running for hours
        # or even days.  We want to make sure this isn't a blarrrghhh
        # zombie process or worse yet, still running and competing with
        # other instances of itself.
        #
        if not misc.manager_is_running():
            misc.lockMap['prune'].release()
            return None

        if not os.path.exists(file_name):
            continue

        ctime = os.path.getctime(file_name)

        # print "Looking at ", file_name, ctime, cutoff, archive_duration,  misc.config['archive'], misc.am_i_official()
        # We observe the rules set up in the config.
        if cloud_cutoff:
            logging.debug(
                "%s cloud:%d ctime:%d slice:%d cutoff:%d ctime-cloud:%d ctime-slice:%d"
                % (file_name, cloud_cutoff, ctime, slice_cutoff, cutoff,
                   ctime - cloud_cutoff, ctime - slice_cutoff))
        if (file_name.startswith('slices') and ctime < slice_cutoff) or ctime < cutoff:
            logging.debug("Prune[remove]: %s (ctime)" % file_name)
            os.unlink(file_name)
            count_delete += 1

        # We want to make sure we aren't archiving the slices
        elif cloud_cutoff and ctime < cloud_cutoff and not file_name.startswith(
                'slice'):
            logging.debug("Prune[cloud]: %s" % file_name)

            # <s>Only unlink the file if I can successfully put it into the cloud.</s>
            #
            # Actually, policy change:
            #   We should dump the file regardless because otherwise we would smash the disk
            #   AS HAS HAPPENED MULTIPLE TIMES
            #
            # Then you have an irrelevant past build up a forced discarding of the desired
            # future ... just like with life itself.
            #
            res = put(file_name)

            if misc.am_i_official():
                try:
                    os.unlink(file_name)

                    # This is only a self-reporting system... we can use our success code for
                    # our honesty here.
                    if res:
                        count_cloud += 1

                except Exception as e:
                    logging.debug(
                        "Prune[cloud]: Couldn't remove {}: {}".format(
                            file_name, e))

    for file_name in glob('%s/*.gz' % misc.DIR_BACKUPS):
        ctime = os.path.getctime(file_name)

        # We observe the rules set up in the config.
        if ctime < cutoff:
            logging.debug("Prune: %s" % file_name)
            os.unlink(file_name)
            count_delete += 1

    # Don't do this f*****g shit at all because f**k this so hard.
    #logging.info('select name, id from streams where end_unix < date("now", "-%d seconds") or (end_minute - start_minute < 0.05 and start_unix < date("now", "%d seconds"))' % (archive_duration, TS.get_offset() * 60 - 1200))

    unlink_list = DB.run(
        'select name, id from streams where end_unix < date("now", "-%d seconds")'
        % (archive_duration)).fetchall()

    for file_name_tuple in unlink_list:
        file_name = str(file_name_tuple[0])
        id = file_name_tuple[1]

        #logging.debug("Prune[remove]: %s (unlink list)" % file_name)
        # If there's a cloud account at all then we need to unlink the
        # equivalent mp3 file
        if cloud_cutoff and misc.am_i_official():
            # unlink() here is cloud.py's own helper, which removes the cloud copy.
            unlink(file_name)

            # After we remove these streams then we delete them from the db.
            DB.run('delete from streams where id = %d' % id)

        # now only after we've deleted from the cloud can we delete the local file
        if os.path.exists(file_name):
            os.unlink(file_name)
            count_delete += 1

    logging.info("Deleted %d files and put %d on the cloud." %
                 (count_delete, count_cloud))
    misc.lockMap['prune'].release()
Example #12
    def cback(data):
        global g_download_kill_pid
        """
      if len(data):
        catchall('download', json.dumps([g_download_kill_pid, nl['pid'], len(data)]))
      else:
        catchall('download', json.dumps([g_download_kill_pid, 'no data']))
    """
        # print nl['pid'], g_download_kill_pid
        if nl['pid'] <= g_download_kill_pid or not data:
            logging.info("Stopping download #%d" % nl['pid'])
            return False

        # misc.params can fail based on a shutdown sequence.
        if misc is None or misc.params is None or not misc.manager_is_running():
            # if misc is not None:
            #  misc.shutdown()
            return False

        elif not misc.params['shutdown_time']:
            if not misc.download_ipc.empty():
                what, value = misc.download_ipc.get(False)
                if what == 'shutdown_time':
                    misc.params['shutdown_time'] = value

        elif TS.unixtime('dl') > misc.params['shutdown_time']:
            raise TypeError("Download Stop")

        if misc.params['isFirst'] == True:
            misc.params['isFirst'] = False

            if len(data) < 800:
                try:
                    data_string = data.decode('utf-8')

                    if re.match('https?://', data_string):
                        # If we are getting a redirect then we don't mind, we
                        # just put it in the stream and then we leave
                        misc.queue.put(('stream', data_string.strip()))
                        return False

                    # A pls style playlist
                    elif re.findall(r'File\d', data_string, re.M):
                        logging.info(
                            '%d: Found a pls, using the File1 parameter' %
                            (nl['pid'], ))
                        matches = re.findall(r'File1=(.*)\n', data_string, re.M)
                        misc.queue.put(('stream', matches[0].strip()))
                        return False

                # If it gets here it's binary ... I guess that's fine.
                except Exception:
                    pass

        # This provides a reliable way to determine bitrate.  We look at how much
        # data we've received between two time periods
        misc.queue.put(
            ('heartbeat', (TS.unixtime('hb'), nl['pid'], len(data))))

        if not nl['stream']:
            try:
                nl['stream'] = open(file_name, 'wb')

            except Exception as exc:
                logging.critical(
                    "%d: Unable to open %s. Can't record. Must exit." %
                    (nl['pid'], file_name))
                return False

        nl['stream'].write(data)