Example #1
args = parser.parse_args()

if args.config and not os.path.exists(args.config) and args.station == 'all':
  args.station = args.config

config_list = []

# This permits just an inquiry like server_query -c kcrw -k version
if not args.query:
  args.query = "stats" if args.key else "heartbeat"

for station_config in glob('../server/configs/*txt'):
  Config = configparser.ConfigParser()
  Config.read(station_config)
  config = misc.config_section_map('Main', Config)
  if config['callsign'] == 'test':
    continue
  config_list.append(config)

# retrieve a list of the active stations
if args.station == 'all':
  all_stations = config_list

else:
  specific_station = True
  all_stations = []
  station_list = args.station.split(',')

  for config in config_list:
    if config['callsign'] in station_list:
      all_stations.append(config)
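
Every snippet on this page leans on misc.config_section_map('Main', Config) to flatten one ConfigParser section into a plain dict. The helper itself is not listed here; a minimal sketch of what it presumably does (this is an assumption, the real implementation ships in DRR's misc module) looks like this:

def config_section_map(section, Config):
  # Collapse one section of a ConfigParser into a plain dict, so callers can
  # write config['callsign'] instead of Config.get('Main', 'callsign').
  # Hypothetical sketch -- not DRR's actual helper.
  return {option: Config.get(section, option) for option in Config.options(section)}

ConfigParser lower-cases option names by default, which is why lookups like config['callsign'] work regardless of how the station file capitalizes them.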
Example #2
def read_config(config):
  import configparser
  # Reads a configuration file. 
  # Currently documented at https://github.com/kristopolous/DRR/wiki/Join-the-Federation
  Config = configparser.ConfigParser()
  Config.read(config)
  misc.config = misc.config_section_map('Main', Config)
  misc.PROCESS_PATH = os.path.dirname(os.path.realpath(__file__))
  
  defaults = {
    # The log level to be put into the indycast.log file.
    'loglevel': 'DEBUG',

    #
    # The relative, or absolute directory to put things in
    # The default goes into the home directory to try to avoid a situation
    # where we can't read or write something on default startup - also we keep
    # it out of a dot directory intentionally so that we don't fill up a home
    # directory in some hidden path - that's really dumb.
    #
    'storage': "%s/radio" % os.path.expanduser('~'),

    # The time to prolong a download to make sure that 
    # a restart or upgrade is seamless, in seconds.
    'restart_overlap': 15,

    # The TCP port to run the server on
    'port': 5000,

    # The interval (in seconds) between checks to see if our stream is running
    'cycle_time': 7,

    # The time to start a stream BEFORE the lapse of the cascade-time
    'cascade_buffer': 15,

    # The time between cascaded streams
    'cascade_time': '15m',

    # Cloud credentials (ec2, azure etc)
    'cloud': None,

    #
    # When to get things off local disk and store to the cloud
    # This means that after this many days data is sent remote and then 
    # retained for `cloud_archive`.  This makes the entire user-experience
    # a bit slower of course, and has an incurred throughput cost - but
    # it does save pricey VPS disk space which seems to come at an unusual
    # premium.
    #
    'disk_archive': '1.20d',

    # The duration (in days) to retain things in the cloud archive.
    'cloud_archive': '14d',
    
    # Run the pruning every this many days (float)
    'prune_every': '0.5d'
  }

  for k, v in list(defaults.items()):
    if k not in misc.config:
      misc.config[k] = v

    misc.config[k] = unit_convert_to_sec(misc.config[k])

  misc.config['port'] = int(misc.config['port'])

  # In case someone is specifying ~/radio 
  misc.config['storage'] = os.path.expanduser(misc.config['storage'])
  misc.config['_private'] = {}

  if misc.config['cloud']:
    misc.config['cloud'] = os.path.expanduser(misc.config['cloud'])

    if os.path.exists(misc.config['cloud']):
      # If there's a cloud configuration file then we read that too
      cloud_config = configparser.ConfigParser()
      cloud_config.read(misc.config['cloud'])

      # Things stored in the _private directory don't get reported back in a status
      # query.
      #
      # see https://github.com/kristopolous/DRR/issues/73 for what this is about.
      misc.config['_private']['azure'] = misc.config_section_map('Azure', cloud_config)

  if not os.path.isdir(misc.config['storage']):
    try:
      # If I can't do this, that's fine.
      os.mkdir(misc.config['storage'])

    except Exception as exc:
      # We make it from the current directory
      misc.config['storage'] = defaults['storage']

      if not os.path.isdir(misc.config['storage']):
        os.mkdir(misc.config['storage'])

  # Go to the callsign level in order to store multiple station feeds on a single
  # server in a single parent directory without forcing the user to decide what goes
  # where.
  misc.config['storage'] += '/%s/' % misc.config['callsign']
  misc.config['storage'] = re.sub('/+', '/', misc.config['storage'])

  if not os.path.isdir(misc.config['storage']):
    os.mkdir(misc.config['storage'])

  # We have a few sub directories for storing things
  for subdir in [misc.DIR_STREAMS, misc.DIR_SLICES, misc.DIR_BACKUPS]:
    if not os.path.isdir(misc.config['storage'] + subdir):
      os.mkdir(misc.config['storage'] + subdir)

  # Now we try to do all this stuff again
  if os.path.isdir(misc.config['storage']):
    #
    # There's a bug after we chdir, where the multiprocessing is trying to grab the same 
    # invocation as the initial argv[0] ... so we need to make sure that if a user did 
    # ./blah this will be maintained.
    #
    if not os.path.isfile(misc.config['storage'] + __file__):
      os.symlink(os.path.abspath(__file__), misc.config['storage'] + __file__)

    conf_path = misc.config['storage'] + "config"
    if os.path.exists(conf_path):
      os.unlink(conf_path)

    os.symlink(os.path.abspath(config), conf_path)
    os.chdir(misc.config['storage'])

  else:
    logging.warning("Can't find %s. Using current directory." % misc.config['storage'])

  misc.PIDFILE_MANAGER = '%s/%s' % (os.getcwd(), 'pid-manager')
  # If there is an existing pid-manager, that means that 
  # there is probably another version running.
  if os.path.isfile(misc.PIDFILE_MANAGER):
    with open(misc.PIDFILE_MANAGER, 'r') as f:
      oldserver = f.readline()

      try:  
        logging.info("Replacing our old image")
        os.kill(int(oldserver), signal.SIGUSR1)
        # We give it a few seconds to shut everything down
        # before trying to proceed
        time.sleep(misc.PROCESS_DELAY / 2)

      except:
        pass
   
  # From https://docs.python.org/2/howto/logging.html
  numeric_level = getattr(logging, misc.config['loglevel'].upper(), None)
  if not isinstance(numeric_level, int):
    raise ValueError('Invalid log level: %s' % misc.config['loglevel'])

  logger = logging.getLogger()
  formatter = logging.Formatter(str(os.getpid()) + ':%(asctime)s:%(message)s', '%m%d_%H%M_%S')
  misc.handler = RotatingFileHandler('indycast.log', maxBytes=5000000, backupCount=2)
  misc.handler.setFormatter(formatter)
  misc.handler.setLevel(numeric_level)
  logger.setLevel(numeric_level)
  logger.addHandler(misc.handler)

  # Increment the number of times this has been run so we can track the stability of remote 
  # servers and instances.
  DB.upgrade()
  del(DB.upgrade)
  DB.incr('runcount')

  # This is how we discover if we are the official server or not.
  # Look at the /uuid endpoint to see how this magic works.
  misc.config['uuid'] = os.popen('uuidgen').read().strip()

  signal.signal(signal.SIGINT, misc.shutdown_handler)
  signal.signal(signal.SIGUSR1, misc.shutdown_handler)
  signal.signal(signal.SIGHUP, misc.do_nothing)
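
The defaults above mix plain numbers with suffixed durations such as '15m', '14d' and '1.20d', and the final loop runs every value through unit_convert_to_sec. The converter itself is part of the project and is not shown here; a rough sketch of the behaviour those defaults imply (the pass-through of non-duration values like the log level is an assumption) might be:

def unit_convert_to_sec(value):
  # Interpret '30s', '15m', '2h' or '1.20d' as a number of seconds; leave
  # anything that is not a suffixed duration (log levels, paths, None,
  # plain ints) untouched.  Hypothetical sketch, not DRR's actual helper.
  units = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
  if isinstance(value, str) and len(value) > 1 and value[-1] in units:
    try:
      return float(value[:-1]) * units[value[-1]]
    except ValueError:
      pass
  return value

Under that reading, 'cascade_time': '15m' becomes 900 and 'prune_every': '0.5d' becomes 43200.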
Example #3
def read_config(config):
    import ConfigParser
    # Reads a configuration file.
    # Currently documented at https://github.com/kristopolous/DRR/wiki/Join-the-Federation
    Config = ConfigParser.ConfigParser()
    Config.read(config)
    misc.config = misc.config_section_map('Main', Config)
    misc.PROCESS_PATH = os.path.dirname(os.path.realpath(__file__))

    defaults = {
        # The log level to be put into the indycast.log file.
        'loglevel': 'DEBUG',

        #
        # The relative, or absolute directory to put things in
        # The default goes into the home directory to try to avoid a situation
        # where we can't read or write something on default startup - also we keep
        # it out of a dot directory intentionally so that we don't fill up a home
        # directory in some hidden path - that's really dumb.
        #
        'storage': "%s/radio" % os.path.expanduser('~'),

        # The time (in days) after which an intent to record expires
        'expireafter': 45,

        # The time to prolong a download to make sure that
        # a restart or upgrade is seamless, in seconds.
        'restart_overlap': 15,

        # The TCP port to run the server on
        'port': 5000,

        # The (day) duration we should be archiving things.
        'archivedays': 28,

        # The interval (in seconds) between checks to see if our stream is running
        'cycletime': 7,

        # The (second) time to start a stream BEFORE the lapse of the cascade-time
        'cascadebuffer': 15,

        # The (second) time between cascaded streams
        'cascadetime': 60 * 15,

        # Cloud credentials (ec2, azure etc)
        'cloud': None,

        #
        # When to get things off local disk and store to the cloud
        # This means that after this many days data is sent remote and then
        # retained for `archivedays`.  This makes the entire user-experience
        # a bit slower of course, and has an incurred throughput cost - but
        # it does save pricey VPS disk space which seems to come at an unusual
        # premium.
        #
        'cloudarchive': 1.20,

        # Run the pruning every this many days (float)
        'pruneevery': 0.5
    }

    for k, v in defaults.items():
        if k not in misc.config:
            misc.config[k] = v
        else:
            if type(v) is int: misc.config[k] = int(misc.config[k])
            elif type(v) is long: misc.config[k] = long(misc.config[k])
            elif type(v) is float: misc.config[k] = float(misc.config[k])

    # In case someone is specifying ~/radio
    misc.config['storage'] = os.path.expanduser(misc.config['storage'])
    misc.config['_private'] = {}

    if misc.config['cloud']:
        misc.config['cloud'] = os.path.expanduser(misc.config['cloud'])

        if os.path.exists(misc.config['cloud']):
            # If there's a cloud configuration file then we read that too
            cloud_config = ConfigParser.ConfigParser()
            cloud_config.read(misc.config['cloud'])

            # Things stored in the _private directory don't get reported back in a status
            # query.
            #
            # see https://github.com/kristopolous/DRR/issues/73 for what this is about.
            misc.config['_private']['azure'] = misc.config_section_map(
                'Azure', cloud_config)

    if not os.path.isdir(misc.config['storage']):
        try:
            # If I can't do this, that's fine.
            os.mkdir(misc.config['storage'])

        except Exception as exc:
            # We make it from the current directory
            misc.config['storage'] = defaults['storage']

            if not os.path.isdir(misc.config['storage']):
                os.mkdir(misc.config['storage'])

    # Go to the callsign level in order to store multiple station feeds on a single
    # server in a single parent directory without forcing the user to decide what goes
    # where.
    misc.config['storage'] += '/%s/' % misc.config['callsign']
    misc.config['storage'] = re.sub('/+', '/', misc.config['storage'])

    if not os.path.isdir(misc.config['storage']):
        os.mkdir(misc.config['storage'])

    # We have a few sub directories for storing things
    for subdir in [misc.DIR_STREAMS, misc.DIR_SLICES, misc.DIR_BACKUPS]:
        if not os.path.isdir(misc.config['storage'] + subdir):
            os.mkdir(misc.config['storage'] + subdir)

    # Now we try to do all this stuff again
    if os.path.isdir(misc.config['storage']):
        #
        # There's a bug after we chdir, where the multiprocessing is trying to grab the same
        # invocation as the initial argv[0] ... so we need to make sure that if a user did
        # ./blah this will be maintained.
        #
        if not os.path.isfile(misc.config['storage'] + __file__):
            os.symlink(os.path.abspath(__file__),
                       misc.config['storage'] + __file__)

        os.chdir(misc.config['storage'])

    else:
        logging.warning("Can't find %s. Using current directory." %
                        misc.config['storage'])

    misc.PIDFILE_MANAGER = '%s/%s' % (os.getcwd(), 'pid-manager')
    # If there is an existing pid-manager, that means that
    # there is probably another version running.
    if os.path.isfile(misc.PIDFILE_MANAGER):
        with open(misc.PIDFILE_MANAGER, 'r') as f:
            oldserver = f.readline()

            try:
                logging.info("Replacing our old image")
                os.kill(int(oldserver), signal.SIGUSR1)
                # We give it a few seconds to shut everything down
                # before trying to proceed
                time.sleep(misc.PROCESS_DELAY / 2)

            except:
                pass

    # From https://docs.python.org/2/howto/logging.html
    numeric_level = getattr(logging, misc.config['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % misc.config['loglevel'])

    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s',
                                  '%Y%m%d_%H%M_%S')
    handler = RotatingFileHandler('indycast.log',
                                  maxBytes=2000000,
                                  backupCount=5)
    handler.setFormatter(formatter)
    handler.setLevel(numeric_level)
    logger.setLevel(numeric_level)
    logger.addHandler(handler)

    # Increment the number of times this has been run so we can track the stability of remote
    # servers and instances.
    DB.upgrade()
    del (DB.upgrade)
    DB.incr('runcount')

    # This is how we discover if we are the official server or not.
    # Look at the /uuid endpoint to see how this magic works.
    misc.config['uuid'] = os.popen('uuidgen').read().strip()

    signal.signal(signal.SIGINT, misc.shutdown_handler)
    signal.signal(signal.SIGUSR1, misc.shutdown_handler)
    signal.signal(signal.SIGHUP, misc.do_nothing)
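
Both versions of read_config expect a station file with a [Main] section whose options are merged over the defaults dict; only callsign is read unconditionally by the code above (the full option set is documented on the wiki linked in the comments). A minimal example file can be exercised like this:

import configparser

sample = """
[Main]
callsign = kcrw
loglevel = INFO
port = 5000
"""

Config = configparser.ConfigParser()
Config.read_string(sample)       # read_config() uses Config.read(path) on a real file
print(dict(Config['Main']))      # {'callsign': 'kcrw', 'loglevel': 'INFO', 'port': '5000'}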
Example #4
parser.add_argument("-p", "--put", help="put a file into the cloud")
parser.add_argument("-d", "--rm", help="remove a file from the cloud")
args = parser.parse_args()

if args.config is None:
  print("Define the cloud configuration location with the CLOUD_CFG environment variable or with the -c option")
  sys.exit(-1)

if args.station == 'all':
  args.station = re.sub(r'\.txt', '', ','.join([os.path.basename(path) for path in glob('../server/configs/*txt')]))

station_list = args.station.split(',')

cloud_config = configparser.ConfigParser()
cloud_config.read(args.config)
config = {'azure': misc.config_section_map('Azure', cloud_config)}

blob_service, container = cloud.connect(config)

if args.get:
  print("Getting")
  for name in args.get.split(','):
    print(" ↓ %s" % name)
    res = cloud.download(name, name, config=config)
    if not res:
      sys.stderr.write("%s\n" % name)
      fail("Couldn't download %s" % name)

elif args.put:
  print("Putting")
  for name in args.put.split(','):
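
The --station all branch above builds a comma-separated callsign list by stripping the .txt suffix from every config filename. An equivalent way to express that expansion without relying on re.sub (a sketch, not the project's code) is:

import os
from glob import glob

# Same result as the re.sub/','.join one-liner above: one callsign per config file.
callsigns = [os.path.splitext(os.path.basename(path))[0]
             for path in glob('../server/configs/*txt')]
station = ','.join(callsigns)   # e.g. 'kcrw,...' -- callsigns depend on the configs present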
Example #5
File: cloud.py Project: EQ4/DRR
parser.add_argument("-c", "--config", default=cfg, help="cloud credential file to use")
parser.add_argument("-q", "--query", default="size", help="query to send to the cloud (list, size, unlink)")
args = parser.parse_args()

if args.config is None:
  print "Define the cloud configuration location with the CLOUD_CFG environment variable or with the -c option"
  sys.exit(-1)

if args.station == 'all':
  args.station = re.sub(r'\.txt', '', ','.join([os.path.basename(path) for path in glob('../server/configs/*txt')]))

station_list = args.station.split(',')

cloud_config = ConfigParser.ConfigParser()
cloud_config.read(args.config)
config = {'azure': misc.config_section_map('Azure', cloud_config)}

blob_service, container = cloud.connect(config)

if args.query == 'size':
  get_size(station_list, blob_service)

elif args.query == 'list':
  get_files(station_list, blob_service)

elif args.query == 'unlink':
  print "Reading files to unlink from stdin"

  for line in sys.stdin:
    line = line.strip()
    print "Removing %s" % line
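
Both cloud tools read a separate credential file and pass its [Azure] section through misc.config_section_map, so cloud.connect receives config['azure'] as a plain dict. The option names inside that section are not shown on this page; the ones below are placeholders, not DRR's documented format (Python 3 syntax, as in Example #4):

import configparser

sample = """
[Azure]
; placeholder option names -- consult the DRR wiki for the real ones
account_name = myaccount
account_key = notarealkey==
"""

cloud_config = configparser.ConfigParser()
cloud_config.read_string(sample)
config = {'azure': dict(cloud_config['Azure'])}
print(config['azure'])   # {'account_name': 'myaccount', 'account_key': 'notarealkey=='}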
Example #6
args = parser.parse_args()

if args.config and not os.path.exists(args.config) and args.station == 'all':
    args.station = args.config

config_list = []

# This permits just an inquiry like server_query -c kcrw -k version
if not args.query:
    args.query = "stats" if args.key else "heartbeat"

for station_config in glob('../server/configs/*txt'):
    Config = ConfigParser.ConfigParser()
    Config.read(station_config)
    config = misc.config_section_map('Main', Config)
    config_list.append(config)

# retrieve a list of the active stations
if args.station == 'all':
    all_stations = config_list

else:
    all_stations = []
    station_list = args.station.split(',')

    for config in config_list:
        if config['callsign'] in station_list:
            all_stations.append(config)

if args.list: