Example #1
File: common.py Project: bzero/JARR
def reqparse_args(self, req=None, strict=False, default=True, args=None):
    """
    strict: bool
        if True, a 400 error is raised when the request contains args
        that are not declared in the parser
    default: bool
        if False, args absent from the request JSON are skipped, so
        their parser defaults are not returned
    args: dict
        the args to parse; if None, self.attrs will be used
    """
    parser = reqparse.RequestParser()
    for attr_name, attrs in (args or self.attrs).items():
        if attrs.pop('force_default', False):
            parser.add_argument(attr_name, location='json', **attrs)
        elif not default and (not request.json
                              or attr_name not in request.json):
            continue
        else:
            parser.add_argument(attr_name, location='json', **attrs)
    parsed = parser.parse_args(strict=strict) if req is None \
        else parser.parse_args(req, strict=strict)
    for field in self.to_date:
        if parsed.get(field):
            try:
                parsed[field] = dateutil.parser.parse(parsed[field])
            except Exception:
                logger.exception('failed to parse %r', parsed[field])
    return parsed
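The final loop coerces date-like strings with dateutil; that step is easy to try in isolation (a standalone sketch, sample strings invented):

import dateutil.parser

for raw in ('2020-01-31', '31 Jan 2020 10:30', 'not a date'):
    try:
        print(dateutil.parser.parse(raw))
    except (ValueError, OverflowError):
        print('failed to parse %r' % raw)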
Example #2
def do_authorize(self, cr, uid, ids, context=None):
    account = self.browse(cr, uid, ids[0])

    FLOW = client.flow_from_clientsecrets(
        account.secrets_path,
        scope=[
            'https://www.googleapis.com/auth/calendar',
            'https://www.googleapis.com/auth/calendar.readonly',
            'https://www.google.com/m8/feeds',
        ],
        message=tools.message_if_missing(account.secrets_path))

    storage = file.Storage(account.credential_path)
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            parents=[tools.argparser])
        if not account.use_local_browser:
            flags = parser.parse_args(['--noauth_local_webserver'])
        else:
            flags = parser.parse_args([])
        credentials = tools.run_flow(FLOW, storage, flags)

    raise osv.except_osv(_('Done.'), _('Please verify that your credential file has been created or updated.'))
Example #3
def main():
    colorama.init()
    
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--endpoint', default='', help="Set a "
                        "filter on matches to retrieve (current, today, tomorrow)")
    parser.add_argument('-c', '--country', help="Filter matches to a "
                        "specific country code.")
    parser.add_argument('-g', '--group', help="Filter matches to a "
                        "specific group.")
    args = parser.parse_args()

    endpoint = 'matches/' + args.endpoint
    
    if args.country:
        endpoint = 'matches/country?fifa_code=%(country)s' % {
            "country": args.country.upper()
        }
    elif args.group:
        endpoint = 'group_results'
        group_id = int(args.group)
        for match in fetch(endpoint):
            if match.get('group_id') == group_id:
                print(group_list(match))
        return

    for match in fetch(endpoint):
        print(prettify(match))
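fetch and prettify are helpers not shown in this example; a minimal sketch of fetch, assuming the script talks to a JSON-over-HTTP API (the BASE_URL below is an assumption, not taken from the source):

import requests

BASE_URL = 'https://example.org/api/'  # assumed API root, not from the source

def fetch(endpoint):
    # GET the endpoint and decode the JSON body into a list of match dicts
    response = requests.get(BASE_URL + endpoint)
    response.raise_for_status()
    return response.json()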
Example #4
def main():
  parser = argparse.ArgumentParser(description='Add NextAction labels to Todoist.')
  parser.add_argument('--api_token', required=True, help='Your API key')
  args = parser.parse_args()
  logging.basicConfig(level=logging.INFO)
  response = GetResponse(args.api_token)
  initial_data = response.read()
  logging.debug("Got initial data: %s", initial_data)
  initial_data = json.loads(initial_data)
  a = TodoistData(initial_data)
  while True:
    mods = a.GetProjectMods()
    if len(mods) == 0:
      time.sleep(5)
    else:
      logging.info("* Modifications necessary - skipping sleep cycle.")
    logging.info("** Beginning sync")
    sync_state = a.GetSyncState()
    changed_data = DoSyncAndGetUpdated(args.api_token, mods, sync_state).read()
    logging.debug("Got sync data %s", changed_data)
    changed_data = json.loads(changed_data)
    logging.info("* Updating model after receiving sync data")
    a.UpdateChangedData(changed_data)
    logging.info("* Finished updating model")
    logging.info("** Finished sync")
Example #5
def main():
    parser = argparse.ArgumentParser(description='Load incidents data (v3)')
    parser.add_argument('incidents_csv_dir', help='Path to directory containing incidents CSVs')
    parser.add_argument('--schema-path', help='Path to JSON file defining schema',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'incident_schema_v3.json'))
    parser.add_argument('--api-url', help='API host / path to target for loading data',
                        default='http://localhost:7000/api')
    parser.add_argument('--authz', help='Authorization header')
    args = parser.parse_args()

    headers = None

    if args.authz:
        headers = {'Authorization': args.authz}

    # Do the work
    schema_id = create_schema(args.schema_path, args.api_url, headers)
    logger.info("Loading data")
    count = 1

    # Load all files in the directory, ordered by file size
    files = sorted(glob.glob(args.incidents_csv_dir + '/*.csv'), key=os.path.getsize)
    logger.info("Files to process: {}".format(files))

    for csv_file in files:
        logger.info("Loading file: {}".format(csv_file))
        for record in extract(csv_file):
            if count % 100 == 0:
                logger.info("{0} (file {1} of {2})".format(
                    count, files.index(csv_file) + 1, len(files)))
            load(transform(record, schema_id), args.api_url, headers)
            count += 1
    logger.info('Loading complete')
Example #6
def parse_args(description=__doc__):
    parser = ArgumentParser(description=description)
    parser.add_argument(
        '-d', metavar='DIR', required=True,
        help='Log dir'
    )
    return parser.parse_args()
Example #7
def parse_args():
    parser = argparse.ArgumentParser(description='Cleans up stale chronos jobs.')
    parser.add_argument('-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
                        default=chronos_tools.DEFAULT_SOA_DIR,
                        help="define a different soa config directory")
    args = parser.parse_args()
    return args
Example #8
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'input_files',
        nargs='+',
        type=pathlib.Path,
    )
    parser.add_argument(
        'counts_dataset_dir',
        type=pathlib.Path,
    )
    parser.add_argument(
        '--counts-period-start',
        required=True,
        type=parse_cmdline_date,
    )
    parser.add_argument(
        '--counts-period-end',
        required=True,
        type=parse_cmdline_date,
    )
    parser.add_argument(
        'db_url',
        type=urllib.parse.urlparse,
        help='Database connection URL',
    )
    parser.add_argument(
        'output_dir',
        type=pathlib.Path,
    )
    return parser.parse_args()
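parse_cmdline_date is used as an argparse type= callable but is not defined in the example; a plausible sketch (the YYYY-MM-DD format is an assumption):

import argparse
import datetime

def parse_cmdline_date(value):
    # argparse type= callables take the raw string and return the parsed value
    try:
        return datetime.datetime.strptime(value, '%Y-%m-%d')
    except ValueError:
        raise argparse.ArgumentTypeError('expected YYYY-MM-DD, got %r' % value)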
Example #9
def main(argv):
    global args
    global dbh

    strava_access_token = None

    parser = argparse.ArgumentParser(description='Zwift Strava ID retrieval')
    parser.add_argument('-v', '--verbose', action='store_true',
            help='Verbose output')
    parser.add_argument('--dont-check-certificates', action='store_false',
            dest='verifyCert', default=True)
    parser.add_argument('-u', '--user', help='Zwift user name')
    parser.add_argument('-D', '--mysql_database', help='mysql database (overrides --database)', required=True)
    parser.add_argument('-H', '--mysql_host', help='mysql host')
    parser.add_argument('-U', '--mysql_user', help='mysql user')
    parser.add_argument('-P', '--mysql_password', help='mysql password')
    args = parser.parse_args()

    if args.user:
        password = getpass.getpass("Password for %s? " % args.user)
    else:
        creds_file = 'zwift_creds.json'
        with open(creds_file) as f:
            try:
                cred = json.load(f)
            except ValueError, se:
                sys.exit('"%s": %s' % (creds_file, se))
        args.user = cred['user']
        password = cred['pass']
        strava_access_token = cred['strava_access_token']
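The shape of zwift_creds.json is implied by the keys read above; a sketch that writes such a file (values are placeholders):

import json

creds = {
    'user': 'rider@example.com',        # placeholder values
    'pass': 'secret',
    'strava_access_token': 'token',
}
with open('zwift_creds.json', 'w') as f:
    json.dump(creds, f)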
Example #10
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--force", help=u"Overwrite existing flyers", action="store_true")
    parser.add_argument("-o", "--output", help="Output folder", action="store", default="woechentliche Flyer")
    parser.add_argument("-t", "--template", help="Template file", action="store", default="vorlage.svg")
    parser.add_argument("Program", type=argparse.FileType("r"), help="JSON file with the current program")
    return parser.parse_args()
Example #11
def main():
  parser = argparse.ArgumentParser(description='Add NextAction labels to Todoist.')
  parser.add_argument('--api_token', required=True, help='Your API key')
  parser.add_argument('--use_priority', required=False,
      action="store_true", help='Use priority 1 rather than a label to indicate the next actions.')
  global args
  args = parser.parse_args()
  logging.basicConfig(level=logging.DEBUG)
  response = GetResponse(args.api_token)
  initial_data = response.read()
  logging.debug("Got initial data: %s", initial_data)
  initial_data = json.loads(initial_data)
  a = TodoistData(initial_data)
  while True:
    try:   
      mods = a.GetProjectMods()
      if len(mods) == 0:
        time.sleep(5)
      else:
        logging.info("* Modifications necessary - skipping sleep cycle.")
      #mods2 = a.GetProjectMods2()
      logging.info("** Beginning sync")
      sync_state = a.GetSyncState()
      changed_data = DoSyncAndGetUpdated(args.api_token, mods, sync_state).read()
      logging.debug("Got sync data %s", changed_data)
      changed_data = json.loads(changed_data)
      logging.info("* Updating model after receiving sync data")
      a.UpdateChangedData(changed_data)
      logging.info("* Finished updating model")
      logging.info("** Finished sync")
    except Exception:
      print "Network error, try again.."
Example #12
def main(args):
    global CONF, SECT
    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--config", dest="config", default='config/config.ini', help="Location of config file")
    parser.add_option("-s", "--section", dest="section", default='ipyno', help="Config section to use")
    parser.add_option("-t", "--section", dest="test", default='status', help="test a daemon function: delete|cleanup|boot|status")
    
    (opts, args) = parser.parse_args()
    if not ((len(args) == 1) and (args[0] in ['stop','start','restart','foreground'])):
        sys.stderr.write(usage)
        return 2

    config = get_config(opts.config)
    daemon = IpyDaemon(config=config, section=opts.section)
    if 'start' == args[0]:
        daemon.start()
    elif 'stop' == args[0]:
        daemon.stop()
    elif 'restart' == args[0]:
        daemon.restart()
    elif 'foreground' == args[0]:
        daemon.foreground(now=opts.test)
    else:
        sys.stderr.write("[error] unknown command '%s'\n"%args[0])
        return 2
    return 0
Example #13
def cmd(send, msg, args):
    """Reports the difference between now and some specified time.
    Syntax: {command} <time>
    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('date', nargs='*', action=arguments.DateParser)
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if not cmdargs.date:
        send("Time until when?")
        return
    delta = dateutil.relativedelta.relativedelta(cmdargs.date, datetime.datetime.now())
    diff = "%s is " % cmdargs.date.strftime("%x")
    if delta.years:
        diff += "%d years " % (delta.years)
    if delta.months:
        diff += "%d months " % (delta.months)
    if delta.days:
        diff += "%d days " % (delta.days)
    if delta.hours:
        diff += "%d hours " % (delta.hours)
    if delta.minutes:
        diff += "%d minutes " % (delta.minutes)
    if delta.seconds:
        diff += "%d seconds " % (delta.seconds)
    diff += "away"
    send(diff)
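A quick illustration of the normalized relativedelta fields the reply is built from (dates invented):

import datetime
import dateutil.relativedelta

now = datetime.datetime(2024, 1, 1)
then = datetime.datetime(2024, 3, 15, 6, 30)
delta = dateutil.relativedelta.relativedelta(then, now)
print(delta.months, delta.days, delta.hours, delta.minutes)  # 2 14 6 30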
Example #14
def main(argv):
  # Parse the command-line flags.
  flags = parser.parse_args(argv[1:])

  level = getattr(logging, flags.logging_level)
  logger.setLevel(logging.DEBUG)
  h1 = logging.StreamHandler(sys.stdout)
  h1.setLevel(level)
  logger.addHandler(h1)

  fh = logging.FileHandler(os.path.join(os.path.dirname(__file__), 'gcalcron.log'))
  fh.setLevel(logging.DEBUG)
  logger.addHandler(fh)

  try:
    g = GCalCron()
    gCalAdapter = GCalAdapter(g.getCalendarId(), flags)
    g.gCalAdapter = gCalAdapter
    if flags.std:
        useStandardCommands = True

    if flags.reset:
      g.reset_settings()
    else:
      g.sync_gcal_to_cron()
      logger.info('Sync succeeded')
  except Exception:
    logging.exception('Sync failed')
Example #15
def main():
    """
    Main function that parses arguments, updates fsurf job info, and
    emails the user about job completion

    :return: None, always exits with code 0
    """
    parser = argparse.ArgumentParser(description="Update fsurf job info and "
                                                 "email user about job "
                                                 "completion")
    # version info
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    # Arguments for workflow outcome
    parser.add_argument('--success', dest='success',
                        action='store_true',
                        help='Workflow completed successfully')
    parser.add_argument('--failure', dest='success',
                        action='store_false',
                        help='Workflow completed with errors')
    # Arguments identifying workflow
    parser.add_argument('--id', dest='workflow_id',
                        action='store', help='Pegasus workflow id to use')

    args = parser.parse_args(sys.argv[1:])
    if args.success:
        process_results(args.workflow_id, success=True)
    else:
        process_results(args.workflow_id, success=False)

    sys.exit(0)
Example #16
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-i', dest='msgfile',
                        help='input file', required=True)
    parser.add_argument('-d', dest='debug', action='store_true',
                        help='debug (no hashing)')
    parser.add_argument('-r', dest='fulldata', action='store_true',
                        help='includes raw data of attachments')
    parser.add_argument('-w', dest='whitelist_ip',
                        help='whitelist IPv4 or IPv6 ip from parsing; comma-separated list of IPs, no spaces !')
    parser.add_argument('-f', dest='whitelist_email',
                        help='whitelist an email in routing headers "For"; comma-separated list of e-mail addresses, no spaces !')
    parser.add_argument('-b', dest='byhostentry',
                        help='collect the smtp injector IP using the "by" "host" in routing headers; comma-separated list of IPs, no spaces !')

    options = parser.parse_args()

    msgfile = options.msgfile
    full = options.debug
    fulldata = options.fulldata
    pconf = {}

    if options.whitelist_ip is not None:
        pconf['whiteip'] = options.whitelist_ip.split(',')

    if options.whitelist_email is not None:
        pconf['whitefor'] = options.whitelist_email.split(',')

    if options.byhostentry is not None:
        pconf['byhostentry'] = options.byhostentry.split(',')

    m = decode_email(msgfile, full, fulldata, pconf)
    print(json.dumps(m, default=json_serial))
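json_serial is supplied as the json.dumps default= hook but not shown; a common sketch, assuming its job is handling datetime values that json can't serialize natively:

import datetime

def json_serial(obj):
    # json.dumps calls this for objects it cannot serialize itself
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError('Type %s not serializable' % type(obj))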
Example #17
def get_args(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--max-container-age',
        type=timedelta_type,
        help="Maximum age for a container. Containers older than this age "
             "will be removed. Age can be specified in any pytimeparse "
             "supported format.")
    parser.add_argument(
        '--max-image-age',
        type=timedelta_type,
        help="Maxium age for an image. Images older than this age will be "
             "removed. Age can be specified in any pytimeparse supported "
             "format.")
    parser.add_argument(
        '--dry-run', action="store_true",
        help="Only log actions, don't remove anything.")
    parser.add_argument(
        '-t', '--timeout', type=int, default=60,
        help="HTTP timeout in seconds for making docker API calls.")
    parser.add_argument(
        '--exclude-image',
        action='append',
        help="Never remove images with this tag.")
    parser.add_argument(
        '--exclude-image-file',
        type=argparse.FileType('r'),
        help="Path to a file which contains a list of images to exclude, one "
             "image tag per line.")

    return parser.parse_args(args=args)
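timedelta_type is referenced but not shown; a plausible sketch built on pytimeparse, matching the help text's promise of pytimeparse-supported formats (this is an assumption about the real helper):

import argparse
import datetime
import pytimeparse

def timedelta_type(value):
    # pytimeparse.parse returns the number of seconds, or None on failure
    seconds = pytimeparse.parse(value)
    if seconds is None:
        raise argparse.ArgumentTypeError('invalid age: %r' % value)
    return datetime.timedelta(seconds=seconds)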
Example #18
def main():
    parser = scripts.get_base_parser("Poll Fulfillment Client", path="/services/query_example/")
    parser.add_argument("--collection", dest="collection", default="default_queryable", help="Data Collection that this Fulfillment request applies to. Defaults to 'default_queryable'.")
    parser.add_argument("--result_id", dest="result_id", required=True, help="The result_id being requested.")
    parser.add_argument("--result_part_number", dest="result_part_number", default=1, help="The part number being requested. Defaults to '1'.")

    args = parser.parse_args()

    poll_fulf_req = tm11.PollFulfillmentRequest(message_id=tm11.generate_message_id(),
                              collection_name=args.collection,
                              result_id=args.result_id,
                              result_part_number=args.result_part_number)
    
    print "Request:\n"
    if args.xml_output is False:
        print poll_fulf_req.to_text()
    else:
        print poll_fulf_req.to_xml(pretty_print=True)
    
    client = scripts.create_client(args)
    resp = client.callTaxiiService2(args.host, args.path, t.VID_TAXII_XML_11, poll_fulf_req.to_xml(pretty_print=True), args.port)
    r = t.get_message_from_http_response(resp, '0')
    
    print "Response:\n"
    if args.xml_output is False:
        print r.to_text()
    else:
        print r.to_xml(pretty_print=True)
Example #19
def get_params():
    ''' Get params definition from ElasticOcean and from all the backends '''

    parser = get_params_parser()
    args = parser.parse_args()

    return args
Example #20
File: drift.py Project: copt/edx
def parsecommandline():
    usage = """usage: %prog [options]

Generate graphs comparing branches in an edx repo checked out closeby."""

    parser = OptionParser(usage=usage)

    parser.add_option("-d", dest="repodir", default=DEFAULT_REPO_DIR,
            help="relative path to the edx-platform repo " + \
                    "(default \"%s\")" % DEFAULT_REPO_DIR)

    parser.add_option("-f", dest="frombranch", default=DEFAULT_FROM_BRANCH,
            help="branch comparing from, do not include \"origin\" " + \
                    "(default \"%s\")" % DEFAULT_FROM_BRANCH)
    parser.add_option("-t", dest="tobranch", default=DEFAULT_TO_BRANCH,
            help="branch comparing to, do not include \"origin\" " + \
                    "(default \"%s\")" % DEFAULT_TO_BRANCH)
    
    parser.add_option("-s", dest="startdate", default=DEFAULT_START_DATE,
            help="date to begin with (default \"%s\")" % DEFAULT_START_DATE)
    parser.add_option("-e", dest="enddate", default=None,
            help="date to end with (default today)")
    
    parser.add_option("-g", dest="diff_filename", default=DEFAULT_DIFF_FILENAME,
            help="file for diff graph relative to repo dir " +\
                    "(default \"%s\")" % DEFAULT_DIFF_FILENAME)
    parser.add_option("-a", dest="age_filename", default=DEFAULT_AGE_FILENAME,
            help="relative to repo dir (default \"%s\")" % DEFAULT_AGE_FILENAME)

    (options, args) = parser.parse_args()
    return options
Example #21
def make_default_options():
  """Helper function for creating default options for runner."""
  parser = argparse.ArgumentParser()
  GitRunner.add_parser_args(parser, {'github_disable_upstream_push': True})
  parser.add_argument('--output_dir',
                      default=os.path.join('/tmp', 'gittest.%d' % os.getpid()))
  return parser.parse_args([])
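Passing an explicit empty list to parse_args([]) stops argparse from reading sys.argv, so the helper returns a namespace holding only the declared defaults, which is what makes it safe to call from tests.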
Example #22
def main(argv):
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('InputFile', type=str, nargs='+', help="use - for stdin")
    parser.add_argument('--timebinsize', type=str, default='1h')
    parser.add_argument('--timecol', type=str, default=None)
    parser.add_argument('--datefmt', type=str, default=None)
    parser.add_argument('--spname', type=str, default='src')
    parser.add_argument('--levels', type=int, default=25)
    parser.add_argument('--latcol', type=str, default='Latitude')
    parser.add_argument('--loncol', type=str, default='Longitude')
    parser.add_argument('--catcol', type=str, default=None)
    parser.add_argument('--countcol', type=str, default=None)
    parser.add_argument('--sep', type=str, default=',')
    parser.add_argument('--ncheader', type=str, default=None)
    parser.add_argument('--header', type=str, default=None)
    parser.add_argument('--offset', type=str, default=None)
    parser.add_argument('--port', type=str, default='29512')
    args = parser.parse_args()

    if 'NANOCUBE_WEB' not in os.environ:
        os.environ['NANOCUBE_WEB'] = '../web'

    if 'NANOCUBE_BIN' not in os.environ:
        os.environ['NANOCUBE_BIN'] = '../src'

    ncinput = NanocubeInput(args)
Example #23
def parse_args():
    parser = ArgumentParser(usage="Usage: %(prog)s [options] <url> <site_id>")

    # Database options
    group = parser.add_argument_group('Database options')
    group.add_argument('-u', '--user', dest='db_user',
                       help='Database user name',
                       default='root')
    group.add_argument('-p', '--password', dest='db_password',
                       help='Database user password',
                       default='')
    group.add_argument('-d', dest='db_name', required=True,
                       help='Name of the database where data will be stored')
    group.add_argument('--host', dest='db_hostname',
                       help='Name of the host where the database server is running',
                       default='localhost')
    group.add_argument('--port', dest='db_port',
                       help='Port of the host where the database server is running',
                       default='3306')

    # Piwik options
    group = parser.add_argument_group('Piwik options')
    group.add_argument('--start-date', dest='start_date', required=True)
    group.add_argument('--end-date', dest='end_date', default='today')
    group.add_argument('--key', dest='key', required=True,
                       help='Piwik auth key')

    # Positional arguments
    parser.add_argument('url', help='Piwik server URL')
    parser.add_argument('site_id', help='Identifier of the site')

    # Parse arguments
    args = parser.parse_args()

    return args
Example #24
    def post(self, lecture_id):
        lecture = Lecture.query.filter(Lecture.id == lecture_id).first()

        if not lecture:
            abort(404, message="Lecture {} does not exist".format(lecture_id))

        parser = reqparse.RequestParser()
        parser.add_argument('challenge', required=True, type=float)
        parser.add_argument('interest', required=True, type=float)
        parser.add_argument('time', required=True, type=str)
        args = parser.parse_args()

        challenge = args.challenge
        interest = args.interest

        if not (0.0 <= challenge <= 1.0):
            abort(400, message="Challenge must be in range [0,1]")

        if not (0.0 <= interest <= 1.0):
            abort(400, message="Interest must be in range [0,1]")

        try:
            time = dateutil.parser.parse(args.time)
        except ValueError as e:
            abort(400, message="Time could not be parsed: {}".format(e))

        user_id = g.client_id

        engagement = Engagement(challenge, interest, time, user_id, lecture)
        db.session.add(engagement)
        db.session.flush()

        return {
            'id': engagement.id
        }
Example #25
def main():
    parser = parse()
    (options, args) = parser.parse_args()

    if len(args) != 3:
        if len(args) == 0:
            parser.error("Asana API Key is required")
        if len(args) == 1:
            parser.error("Github username is required")
        if len(args) == 2:
            parser.error("Github password is required")
        exit(1)

    asana_api = asana.AsanaAPI(args[0], debug=False)
    project_id = get_project_id_from_asana(asana_api, options)
    if not project_id:
        exit(1)

    github_api = Github(args[1], args[2])
    git_repo = get_repo_from_github(github_api, options)
    if not git_repo:
        exit(1)

    migrate_asana_to_github(asana_api, project_id, git_repo, options)

    exit(0)
Example #26
File: process.py Project: embr/wmf
def parse_args():
    """
    support output file selection
    max number of lines for debugging
    log level
    """
    parser = argparse.ArgumentParser(description='Process a collection of squid logs and write certain extracted metrics to file')
#    parser.add_argument('squid_files', metavar='squid_file', nargs='+', type=str, help='squid files to be processed')
    parser.add_argument('-m', '--max', dest='max_lines', type=int, default=None, help='number of lines to process in total (from all files)')
    parser.add_argument('-l', '--log', dest='log_level', choices=LEVELS.keys(), default='INFO', help='log level')
    parser.add_argument('--start', dest='start_date', type=dateutil.parser.parse, default=(date.today().replace(day=1)).strftime(DATE_FORMAT), help='start date in squid time')
    parser.add_argument('--end', dest='end_date', type=dateutil.parser.parse, default=(date.today() - timedelta(days=2)).strftime(DATE_FORMAT), help='end date in squid time')
    parser.add_argument('-d', '--datadir', dest='datadir', type=str, default='/a/squid/archive/zero', help='the top-level directory from which to recursively descend in search of files which match the provider name')
    parser.add_argument('providers', metavar='PROVIDER_IDENTIFIER', nargs='*', type=str, default=DEFAULT_PROVIDERS, help='list of provider identifiers used in squid log file names')
    parser.add_argument('--name_format', dest='name_format', type=str, default='%s.log-%s.gz', help='a printf style format string which is formatted with the tuple: (provider_name, date representation)')

    args = parser.parse_args()
    # do pre-processing
    args.__dict__['squid_files'] = get_files(args)
    
    pprint.pprint(args.__dict__)
    log.getLogger().setLevel(LEVELS[args.log_level])
    return args
Example #27
def run():
    import optparse

    parser = optparse.OptionParser()
    parser.add_option("-s", "--server", type="str", default=1)
    parser.add_option("-b", "--batch", type="int", default=10)
    parser.add_option("-c", "--max-connections", type="int", default=3)
    parser.add_option("-d", "--days", type="int", default=7)
    parser.add_option("-l", "--loglevel", default="info")

    options, args = parser.parse_args()
    if not args:
        parser.error("specify a group")
        return 1

    logging.basicConfig(level=getattr(logging, options.loglevel.upper()))

    if options.server.isdigit():
        server = Server.objects.get(pk=int(options.server))
    else:
        server = options.server

    logging.info("[+] using server %s" % (server,))
    scanner = Scanner(server, options.max_connections)
    scanner.start()
    scanner.index(args[0], days=options.days, batch=options.batch)
    scanner.run()
Example #28
def main():
    parser = OptionParser()
    parser.add_option("-c", "--config", dest="config",
                      help="Configuration file")
    options = parser.parse_args()[0]
    if not options.config:
        parser.error('Configuration file is required')

    config = ConfigParser()
    try:
        config.read(options.config)
    except Exception:
        parser.error("Could not open configuration file")

    def got_message(*args, **kwargs):
        receive_message(config, *args, **kwargs)

    verbosity = {True: log.DEBUG, False: log.WARN}
    log.basicConfig(
        format='%(asctime)s %(message)s',
        level=verbosity[config.getboolean('shipit-notifier', 'verbose')]
    )

    # Adjust applabel when wanting to run shipit on multiple machines
    pulse = consumers.BuildConsumer(applabel='shipit-notifier', ssl=False)
    pulse.configure(topic='build.#.finished',
                    durable=True, callback=got_message)

    log.info('listening for pulse messages')
    pulse.listen()
Example #29
def main():
    log.addHandler(logging.StreamHandler(sys.stderr))
    api_log.addHandler(logging.StreamHandler(sys.stderr))

    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Display debug messages')
    subparsers = parser.add_subparsers(help='sub-command help')

    parser1 = subparsers.add_parser('upload')
    parser1.add_argument('filename', help='Timesheet csv')
    parser1.add_argument('--dry-run', action='store_true',
                         help='Preview changes')
    parser1.set_defaults(cmd='upload')

    parser2 = subparsers.add_parser('download')
    parser2.add_argument('date', help='List entries for specified week')
    parser2.set_defaults(cmd='download')

    parser3 = subparsers.add_parser('lookups')
    parser3.add_argument('kind', choices=['customer', 'activity'],
                         help='Download specified lookups')
    parser3.set_defaults(cmd='lookups')

    args = parser.parse_args()

    log_level = logging.DEBUG if args.verbose else logging.INFO
    log.setLevel(log_level)
    api_log.setLevel(log_level)

    run(args)
Example #30
def parse_options():
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("-i", "--import", dest="import_opml", help="import opml FILE", metavar="FILE")
    parser.add_option("-d", "--dump", dest="dump_planet", help="dump planet", action="store_true", default=False)
    parser.add_option(
        "-c", "--cache", dest="dump_planet_cache", help="dump planet's cache", action="store_true", default=False
    )

    (options, args) = parser.parse_args()

    if len(args) >= 1:
        global planets
        planets.extend(args)

    if options.dump_planet_cache:
        for p in planets:
            curr = Planet(direc=p)
            print curr.dump_cache()

    if options.dump_planet:
        for p in planets:
            curr = Planet(direc=p)
            print curr.dump()

    if options.import_opml:
        for p in planets:
            curr = Planet(direc=p)
            curr.import_opml(options.import_opml)
Example #31
if __name__ == "__main__":
    print("analyse_event.py.main()")
    parser = argparse.ArgumentParser(description='analyse event')
    parser.add_argument('--config',
                        default="credentials.json",
                        help='name of json file containing api login token')
    parser.add_argument('--event',
                        default=None,
                        help='ID Number of the event to analyse')
    parser.add_argument('--list',
                        action="store_true",
                        help='List all events in the database')
    parser.add_argument(
        '--test',
        help='Address of Device running OpenSeizureDetector Ap for Testing')
    argsNamespace = parser.parse_args()
    args = vars(argsNamespace)
    print(args)

    #analyse_event(configFname=args['config'])

    analyser = EventAnalyser(configFname=args['config'])

    if args['event'] is not None:
        if args['test'] is not None:
            print("Running Event Number %d on test server %s" %
                  (int(args['event']), args['test']))
            analyser.testEvent(int(args['event']), args['test'])
        else:
            print("Analysing Event Number %d" % int(args['event']))
            analyser.analyseEvent(int(args['event']))
Example #32
    def group_parser():
        parser = reqparse.RequestParser()
        parser.add_argument("group_ids", type=int, location='args', action='append')

        return parser.parse_args()
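With location='args' and action='append', repeated query parameters accumulate into a list: a request like GET /groups?group_ids=1&group_ids=2 (a hypothetical route) would parse to {'group_ids': [1, 2]} under Flask-RESTful's reqparse.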
Example #33
def main(input_pickle_path):

  '''
  Entry point when executed from command line. Performs simple word analysis
  on a pickle dump of emails.

  Args:
    input_pickle_path:  path of pickle dump of emails

  Returns:
    correspondence:     see `analyze`
    author_words:       see `analyze`
  '''

  with open(input_pickle_path, 'rb') as fp:
    emails = pickle.load(fp)

  return analyze(emails)


if __name__ == '__main__':

  import argparse

  parser = argparse.ArgumentParser()
  parser.add_argument('input_pickle_path')

  args = parser.parse_args()

  main(args.input_pickle_path)
Example #34
def main(argv):
    """a docstring for main, really?"""
    parser = argparse.ArgumentParser(description="Summarize JIRA info.")
    parser.add_argument(
        "--no-scrape",
        action="store_true",
        help="Don't re-run the scraper, just read the current states.json file"
    )
    parser.add_argument(
        "--since",
        metavar="DAYS",
        type=int,
        default=0,
        help="Only consider unresolved PRs & PRs closed in the past DAYS days")
    parser.add_argument("--debug",
                        action="store_true",
                        help="Show debugging messages")
    parser.add_argument("--pretty",
                        action="store_true",
                        help="Pretty print output")
    parser.add_argument(
        "--average",
        action="store_true",
        help="Print out the average time spent in each of 4 states")
    parser.add_argument(
        "--median",
        action="store_true",
        help="Print out the median time spent in each of 4 states")
    parser.add_argument(
        "--percentile",
        type=float,
        help="Print out the qth percentile of all tickets in each state")
    parser.add_argument(
        "--std-dev",
        action="store_true",
        help="Print out the standard deviation across the data")
    parser.add_argument("--max",
                        action="store_true",
                        help="Show the maximum time in the series")
    parser.add_argument("--min",
                        action="store_true",
                        help="Show the minimum time in the series")
    parser.add_argument("--all",
                        action="store_true",
                        help="Show all statistics")

    args = parser.parse_args(argv[1:])

    # Parse out what functions we want to gather for this report
    functions = []

    if args.average or args.all:
        functions.append((avg_time_spent, 'Average'))

    if args.median or args.all:
        median_time_spent = make_percentile(50)
        functions.append((median_time_spent, 'Median'))

    if args.percentile or args.all:
        pnum = args.percentile or 95
        pfunc = make_percentile(pnum)
        functions.append((pfunc, '{} Percentile'.format(pnum)))

    if args.std_dev or args.all:
        functions.append((std_dev, 'Std Deviation'))

    if args.max or args.all:
        functions.append((lambda lst: max(lst), 'Max'))

    if args.min or args.all:
        functions.append((lambda lst: min(lst), 'Min'))

    if len(functions) == 0:
        print(
            "Alert: No statistical functions specified. Please use '--help' to see which are available, or use '--all' to run all."
        )
        return

    # Scrape jira unless told otherwise
    if not args.no_scrape:
        scrape_jira()

    # Parse states.json into times list
    tickets = parse_jira_info(args.debug, args.pretty)
    # Gets {'list name': list}
    ticket_lists = get_time_lists(tickets, args.since)
    for list_name, time_spent in ticket_lists.iteritems():
        print("-" * 40)
        num_tix = len(time_spent)
        print("Statistics for '{}', over {} tickets".format(
            list_name, num_tix))
        print("-" * 40)
        get_stats(time_spent, functions, args.pretty)
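make_percentile is used above but not defined here; a minimal nearest-rank sketch (whether the original interpolates between ranks is unknown):

def make_percentile(q):
    # returns a function computing the q-th percentile of a list of numbers
    def percentile(values):
        ordered = sorted(values)
        index = int(round((q / 100.0) * (len(ordered) - 1)))
        return ordered[index]
    return percentile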
Example #35
def main(argv):
  parser = argparse.ArgumentParser(description="Download raw events from the Unity Analytics server.")
  parser.add_argument('url', nargs='?', default='')
  parser.add_argument('-v', '--version', action='store_const', const=True, help='Retrieve version info for this file.')
  parser.add_argument('-b', '--verbose', action='store_const', const=True, help='Output more informative errors.')
  parser.add_argument('-o', '--output', default='', help='Set an output path for results.')
  parser.add_argument('-f', '--first', help='UNIX timestamp for trimming input.')
  parser.add_argument('-l', '--last', help='UNIX timestamp for trimming input.')
  parser.add_argument('-s', '--appStart', action='store_const', const=True, help='Include appStart events.')
  parser.add_argument('-r', '--appRunning', action='store_const', const=True, help='Include appRunning events.')
  parser.add_argument('-c', '--custom', action='store_const', const=True, help='Include custom events.')
  parser.add_argument('-t', '--transaction', action='store_const', const=True, help='Include transaction events.')
  parser.add_argument('-u', '--userInfo', action='store_const', const=True, help='Include userInfo events.')
  parser.add_argument('-d', '--deviceInfo', action='store_const', const=True, help='Include deviceInfo events.')
  args = vars(parser.parse_args())
  
  # argparse handles -h/--help itself, so only the version flag needs a check
  if args['version']:
    version_info()
    sys.exit()

  try:
    # now by default
    end_date = datetime.datetime.utcnow() if not args['last'] else dateutil.parser.parse(args['last'], fuzzy=False)
  except (ValueError, OverflowError):
    print 'Provided end date could not be parsed. Format should be YYYY-MM-DD.'
    if args['verbose']:
      print sys.exc_info()[0]
    sys.exit()

  try:
    # subtract 5 days by default
    start_date = end_date - datetime.timedelta(days=5) if not args['first'] else dateutil.parser.parse(args['first'], fuzzy=False)
  except (ValueError, OverflowError):
    print 'Provided start date could not be parsed. Format should be YYYY-MM-DD.'
    if args['verbose']:
      print sys.exc_info()[0]
    sys.exit()

  url = args['url']
  
  # by default, we'll include all. If a flag(s) was selected, use it
  flags = []
  for e in all_events:
    if args[e]: flags.append(e)
  if len(flags) == 0:
    flags = all_events

  # if first arg isn't a url
  if 'http' not in url:
    parser.print_help()
    sys.exit(2)
  elif len(url) > 0:
    print 'Loading batch manifest'
    manifest_json = load_and_parse(url, args['verbose'])
    
    found_items = 0
    for manifest_item in manifest_json:
      # filter dates outside of range
      date = dateutil.parser.parse(manifest_item["generated_at"]).replace(tzinfo=None)
      if date < start_date:
        continue
      elif date > end_date:
        continue

      found_items += 1
      print 'Retrieving manifest item from: ' + manifest_item["url"]
      batches_json = load_and_parse(manifest_item["url"], args['verbose'])
      batch_id = batches_json["batchid"]
      for batch in batches_json["data"]:
        bUrl = batch["url"]
        for event_type in flags:
          if event_type in bUrl:
            output_file_name = args['output'] + batch_id + "_" + event_type + ".txt"
            try:
              # finally, load the actual file from S3
              print 'Downloading ' + output_file_name
              urlretrieve(bUrl, output_file_name)
            except HTTPError as e:
              print 'The server couldn\'t download the file.'
              print 'Error code: ', e.code
              if args['verbose']:
                print e
              sys.exit()
            except URLError as e:
              print 'When downloading, we failed to reach a server.'
              print 'Reason: ', e.reason
              if args['verbose']:
                print e
              sys.exit()
            else:
              print 'TSV file downloaded successfully'

    if found_items == 0:
      print 'No data found within specified dates. By default, this script downloads the last five days of data. Use -f (--first) and -l (--last) to specify a date range.'
  else:
    print 'get_raw_events.py requires that you specify a URL as the first argument.\nThis URL may be obtained by going to your project settings on the Unity Analytics website.\n\n'
    parser.print_help()
    sys.exit(2)
Example #36
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "asset",
        type=str,
        nargs=1,
        help=
        "symbol of cryptoasset or fiat currency (i.e. BTC/LTC/ETH or EUR/USD)")
    parser.add_argument("date", type=str, nargs='?', help="date (YYYY-MM-DD)")
    parser.add_argument(
        "-v",
        "--version",
        action='version',
        version='%(prog)s {version}'.format(version=__version__))
    parser.add_argument("-d",
                        "--debug",
                        action='store_true',
                        help="enabled debug logging")
    parser.add_argument("-q",
                        "--quantity",
                        type=Decimal,
                        help="quantity to price")
    parser.add_argument("-nc",
                        "--nocache",
                        action='store_true',
                        help="bypass cache for historical data")

    config.args = parser.parse_args()

    if config.args.debug:
        log.setLevel(logging.DEBUG)
        config.output_config(parser.prog)

    value_asset = ValueAsset()
    asset = config.args.asset[0]
    timestamp = None

    if asset == config.CCY:
        return

    if config.args.date:
        timestamp = dateutil.parser.parse(config.args.date)
        timestamp = timestamp.replace(tzinfo=config.TZ_LOCAL)
        price_ccy, name, data_source = value_asset.get_historical_price(
            asset, timestamp)
    else:
        price_ccy, name, data_source = value_asset.get_latest_price(asset)

    if price_ccy is not None:
        log.info("1 %s=%s%s %s via %s (%s)", asset, config.sym(),
                 '{:0,.2f}'.format(price_ccy), config.CCY, data_source, name)
        if config.args.quantity:
            quantity = Decimal(config.args.quantity)
            log.info("%s %s=%s%s %s", '{:0,f}'.format(quantity.normalize()),
                     asset, config.sym(),
                     '{:0,.2f}'.format(quantity * price_ccy), config.CCY)
    else:
        if name is not None:
            if timestamp:
                log.warning("Price for %s at %s is not available", asset,
                            timestamp.strftime('%Y-%m-%d'))
            else:
                log.warning("Current price for %s is not available", asset)
        else:
            log.warning("Prices for %s are not supported", asset)
Example #37
#TODO for python3: 
# * fix NameError: name 'basestring' is not defined in voc.tools.dict_to_schedule_xml()

tz = pytz.timezone('Europe/Amsterdam')
time_stamp_offset = -3600  # Workaround until the MediaWiki server is fixed

parser = optparse.OptionParser()
parser.add_option('--online', action="store_true", dest="online", default=False)
parser.add_option('--show-assembly-warnings', action="store_true", dest="show_assembly_warnings", default=False)
parser.add_option('--fail', action="store_true", dest="exit_when_exception_occours", default=False)
parser.add_option('--git', action="store_true", dest="git", default=False)
parser.add_option('--debug', action="store_true", dest="debug", default=False)


options, args = parser.parse_args()
local = False
use_offline_frab_schedules = False
only_workshops = False

if __name__ == '__main__':
    congress_nr = 35
    year = str(1983 + congress_nr)
    xc3 = "{x}C3".format(x=congress_nr)

    wiki_url = 'https://events.ccc.de/congress/{year}/wiki'.format(year=year)

    output_dir = "/srv/www/" + xc3
    secondary_output_dir = "./" + xc3

    if len(sys.argv) == 2:
Example #38
                      help="Path to data file. (default: %default)",
                      dest="inputFile",
                      default=inputFileNameLocal)
    parser.add_option("--outputFile",
                      help="Output file. Results will be written to this file."
                      " (default: %default)",
                      dest="outputFile",
                      default=outputFileName)
    parser.add_option(
        "--max",
        default=100.0,
        type=float,
        help="Maximum number for the value field. [default: %default]")
    parser.add_option(
        "--min",
        default=0.0,
        type=float,
        help="Minimum number for the value field. [default: %default]")
    parser.add_option(
        "--resolution",
        default=None,
        type=float,
        help=
        "Resolution for the value field (overrides min and max). [default: %default]"
    )

    options, args = parser.parse_args(sys.argv[1:])

    # Run it
    runAnomaly(options)
Example #39
def main():
    colorama.init()
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%s v%s' % (parser.prog, __version__))

    if sys.version_info[:2] >= (3, 7):
        subparsers = parser.add_subparsers(dest='command', required=True)
    else:
        subparsers = parser.add_subparsers(dest='command')

    parser_latest = subparsers.add_parser(
        CMD_LATEST,
        help="get the latest price of an asset",
        description="Get the latest [asset] price (in GBP). "
        "If no data source [-ds] is given, "
        "the same data source(s) as "
        "'bittytax' are used.")
    parser_latest.add_argument('asset',
                               type=str,
                               nargs=1,
                               help="symbol of cryptoasset or fiat currency "
                               "(i.e. BTC/LTC/ETH or EUR/USD)")
    parser_latest.add_argument('quantity',
                               type=validate_quantity,
                               nargs='?',
                               help="quantity to price (optional)")
    parser_latest.add_argument(
        '-ds',
        choices=datasource_choices(upper=True) + ['ALL'],
        metavar='{' + ', '.join(datasource_choices()) + '} or ALL',
        dest='datasource',
        type=str.upper,
        help="specify the data source to use, or all")
    parser_latest.add_argument('-d',
                               '--debug',
                               action='store_true',
                               help="enable debug logging")

    parser_history = subparsers.add_parser(
        CMD_HISTORY,
        help="get the historical price of an asset",
        description="Get the historic [asset] price (in GBP) "
        "for the [date] specified. "
        "If no data source [-ds] is given, "
        "the same data source(s) as "
        "'bittytax' are used.")
    parser_history.add_argument('asset',
                                type=str,
                                nargs=1,
                                help="symbol of cryptoasset or fiat currency "
                                "(i.e. BTC/LTC/ETH or EUR/USD)")
    parser_history.add_argument('date',
                                type=validate_date,
                                nargs=1,
                                help="date (YYYY-MM-DD or DD/MM/YYYY)")
    parser_history.add_argument('quantity',
                                type=validate_quantity,
                                nargs='?',
                                help="quantity to price (optional)")
    parser_history.add_argument(
        '-ds',
        choices=datasource_choices(upper=True) + ['ALL'],
        metavar='{' + ', '.join(datasource_choices()) + '} or ALL',
        dest='datasource',
        type=str.upper,
        help="specify the data source to use, or all")
    parser_history.add_argument('-nc',
                                '--nocache',
                                action='store_true',
                                help="bypass data cache")
    parser_history.add_argument('-d',
                                '--debug',
                                action='store_true',
                                help="enable debug logging")

    parser_list = subparsers.add_parser(
        CMD_LIST,
        help="list all assets",
        description='List all assets, or filter by [asset].')
    parser_list.add_argument('asset',
                             type=str,
                             nargs='?',
                             help="symbol of cryptoasset or fiat currency "
                             "(i.e. BTC/LTC/ETH or EUR/USD)")
    parser_list.add_argument('-d',
                             '--debug',
                             action='store_true',
                             help="enable debug logging")

    config.args = parser.parse_args()

    if config.args.debug:
        print("%s%s v%s" % (Fore.YELLOW, parser.prog, __version__))
        print("%spython: v%s" % (Fore.GREEN, platform.python_version()))
        print("%ssystem: %s, release: %s" %
              (Fore.GREEN, platform.system(), platform.release()))
        config.output_config()

    if config.args.command in (CMD_LATEST, CMD_HISTORY):
        symbol = config.args.asset[0]
        asset = price = False

        if config.args.datasource == 'ALL':
            data_sources = datasource_choices(upper=True)
        else:
            data_sources = [config.args.datasource]

        for ds in data_sources:
            value_asset = ValueAsset(price_tool=True, data_source=ds)
            try:
                if config.args.command == CMD_HISTORY:
                    price_ccy, name, _ = value_asset.get_historical_price(
                        symbol, config.args.date[0])
                else:
                    price_ccy, name, _ = value_asset.get_latest_price(symbol)
            except UnexpectedDataSourceError as e:
                parser.exit("%sERROR%s %s" %
                            (Back.RED + Fore.BLACK, Back.RESET + Fore.RED, e))

            if price_ccy is not None:
                output_price(price_ccy)
                price = True

            if name is not None:
                asset = True

        if not asset:
            parser.exit(
                "%sWARNING%s Prices for %s are not supported" %
                (Back.YELLOW + Fore.BLACK, Back.RESET + Fore.YELLOW, symbol))

        if not price:
            if config.args.command == CMD_HISTORY:
                parser.exit(
                    "%sWARNING%s Price for %s on %s is not available" %
                    (Back.YELLOW + Fore.BLACK, Back.RESET + Fore.YELLOW,
                     symbol, config.args.date[0].strftime('%Y-%m-%d')))
            else:
                parser.exit(
                    "%sWARNING%s Current price for %s is not available" %
                    (Back.YELLOW + Fore.BLACK, Back.RESET + Fore.YELLOW,
                     symbol))
    elif config.args.command == CMD_LIST:
        asset_data = AssetData()

        if config.args.asset:
            assets = asset_data.get_asset(config.args.asset)
            if assets:
                output_assets(assets)
            else:
                parser.exit("%sWARNING%s Asset %s not found" %
                            (Back.YELLOW + Fore.BLACK,
                             Back.RESET + Fore.YELLOW, config.args.asset))
        else:
            assets = asset_data.all_assets()
            output_assets(assets)
Example #40
def parse_command_line(argv):
    me = os.path.basename(argv[0])
    format_from_argv0, argv0_from, argv0_to = argv0_to_format(me)

    parser = argparse.ArgumentParser(
        description='Convert between TOML, MessagePack, YAML, and JSON.')

    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument('input',
                             nargs='?',
                             default='-',
                             help='input file')
    input_group.add_argument('-i',
                             '--input',
                             dest='input_flag',
                             metavar='input',
                             default=None,
                             help='input file')

    output_group = parser.add_mutually_exclusive_group()
    output_group.add_argument('output',
                              nargs='?',
                              default='-',
                              help='output file')
    output_group.add_argument('-o',
                              '--output',
                              dest='output_flag',
                              metavar='output',
                              default=None,
                              help='output file')

    if not format_from_argv0:
        parser.add_argument('--if',
                            '-if',
                            '--input-format',
                            dest='input_format',
                            help="input format",
                            choices=FORMATS)
        parser.add_argument('--of',
                            '-of',
                            '--output-format',
                            dest='output_format',
                            help="output format",
                            choices=FORMATS)

    if not format_from_argv0 or argv0_to == 'json':
        parser.add_argument('--indent-json',
                            dest='indent_json',
                            metavar='n',
                            type=int,
                            default=None,
                            help='indent JSON output')

    if not format_from_argv0 or argv0_to == 'yaml':
        parser.add_argument('--yaml-style',
                            dest='yaml_style',
                            default=None,
                            help='YAML formatting style',
                            choices=['', '\'', '"', '|', '>'])

    parser.add_argument('--wrap',
                        dest='wrap',
                        metavar='key',
                        default=None,
                        help='wrap the data in a map type with the given key')
    parser.add_argument('--unwrap',
                        dest='unwrap',
                        metavar='key',
                        default=None,
                        help='only output the data stored under the given key')
    parser.add_argument('-p',
                        '--preserve-key-order',
                        dest='ordered',
                        action='store_true',
                        help='preserve the order of dictionary/mapping keys')
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version=__version__)

    args = parser.parse_args(args=argv[1:])

    # Use the positional input and output arguments.
    if args.input_flag is not None:
        args.input = args.input_flag

    if args.output_flag is not None:
        args.output = args.output_flag

    # Determine the implicit input and output format if possible.
    if format_from_argv0:
        args.input_format = argv0_from
        args.output_format = argv0_to

        if argv0_to != 'json':
            args.__dict__['indent_json'] = None
        if argv0_to != 'yaml':
            args.__dict__['yaml_style'] = None
    else:
        if args.input_format is None:
            args.input_format = extension_to_format(args.input)
            if args.input_format is None:
                parser.error('Need an explicit input format')

        if args.output_format is None:
            args.output_format = extension_to_format(args.output)
            if args.output_format is None:
                parser.error('Need an explicit output format')

    # Wrap yaml_style.
    args.__dict__['yaml_options'] = {'default_style': args.yaml_style}
    del args.__dict__['yaml_style']

    return args
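
A note on the pattern above: the mutually exclusive group pairs a positional argument with an equivalent flag, and the post-parse block reconciles the two. A minimal, self-contained sketch of that idiom (names here are illustrative, not from the original project):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
# A positional with nargs='?' is not required, so argparse allows it
# inside a mutually exclusive group next to the flag form.
group.add_argument('input', nargs='?', default='-', help='input file')
group.add_argument('-i', '--input', dest='input_flag', default=None,
                   help='input file')

args = parser.parse_args(['-i', 'data.json'])
# The flag form wins when given; otherwise the positional (or '-') is used.
if args.input_flag is not None:
    args.input = args.input_flag
print(args.input)  # data.json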
Example #41
    )

    parser.add_option("-d",
                      "--datacentre-host",
                      dest="datacentre",
                      default="localhost",
                      help="the machine that the datacentre(mongodb) is on")

    parser.add_option("-k",
                      "--datacentre-port",
                      dest="datacentre_port",
                      type="int",
                      default="62345",
                      help="the port that the datacentre(mongodb) is on")

    (options, args) = parser.parse_args()

    while True:
        try:
            create_sumary_file(options.datacentre,
                               options.datacentre_port,
                               options.jsonfile,
                               start_time=datetime.datetime.strptime(
                                   'Nov 25 2013  09:40AM', '%b %d %Y %I:%M%p'))

            if options.hostname is not None:
                upload_summary_scp(options.path, options.hostname,
                                   options.username, options.password,
                                   options.jsonfile)
            print "File done."
        except Exception, e:
Example #42
def main():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='test_type')
    # Host-side test args.
    host_cmd_parser = subparsers.add_parser(
        'host-cmd',
        help='Runs a host-side test. Pass the host-side command to run after '
        '"--". If --use-vm is passed, hostname and port for the device '
        'will be 127.0.0.1:9222.')
    host_cmd_parser.set_defaults(func=host_cmd)
    host_cmd_parser.add_argument(
        '--deploy-chrome',
        action='store_true',
        help=
        'Will deploy a locally built ash-chrome binary to the device before '
        'running the host-cmd.')

    # GTest args.
    # TODO(bpastene): Rename 'vm-test' arg to 'gtest'.
    gtest_parser = subparsers.add_parser('vm-test',
                                         help='Runs a device-side gtest.')
    gtest_parser.set_defaults(func=device_test)
    gtest_parser.add_argument(
        '--test-exe',
        type=str,
        required=True,
        help='Path to test executable to run inside the device.')

    # GTest args. Some are passed down to the test binary in the device. Others
    # are parsed here since they might need tweaking or special handling.
    gtest_parser.add_argument(
        '--test-launcher-summary-output',
        type=str,
        help='When set, will pass the same option down to the test and retrieve '
        'its result file at the specified location.')
    gtest_parser.add_argument(
        '--stop-ui',
        action='store_true',
        help='Will stop the UI service in the device before running the test.')
    gtest_parser.add_argument(
        '--trace-dir',
        type=str,
        help='When set, will pass down to the test to generate the trace and '
        'retrieve the trace files to the specified location.')
    gtest_parser.add_argument(
        '--env-var',
        nargs=2,
        action='append',
        default=[],
        help='Env var to set on the device for the duration of the test. '
        'Expected format is "--env-var SOME_VAR_NAME some_var_value". Specify '
        'multiple times for more than one var.')

    # Tast test args.
    # pylint: disable=line-too-long
    tast_test_parser = subparsers.add_parser(
        'tast',
        help='Runs a device-side set of Tast tests. For more details, see: '
        'https://chromium.googlesource.com/chromiumos/platform/tast/+/master/docs/running_tests.md'
    )
    tast_test_parser.set_defaults(func=device_test)
    tast_test_parser.add_argument(
        '--suite-name',
        type=str,
        required=True,
        help='Name to apply to the set of Tast tests to run. This has no effect '
        'on what is executed, but is used mainly for test results reporting '
        'and tracking (eg: flakiness dashboard).')
    tast_test_parser.add_argument(
        '--test-launcher-summary-output',
        type=str,
        help='Generates a simple GTest-style JSON result file for the test run.'
    )
    # TODO(bpastene): Change all uses of "--conditional" to use "--attr-expr".
    tast_test_parser.add_argument(
        '--conditional',
        '--attr-expr',
        type=str,
        dest='conditional',
        help='A boolean expression whose matching tests will run '
        '(eg: ("dep:chrome")).')
    tast_test_parser.add_argument(
        '--strip-chrome',
        action='store_true',
        help='Strips symbols from ash-chrome before deploying to the device.')
    tast_test_parser.add_argument(
        '--deploy-lacros',
        action='store_true',
        help='Deploy a lacros-chrome instead of ash-chrome.')
    tast_test_parser.add_argument(
        '--tast-var',
        action='append',
        dest='tast_vars',
        help='Runtime variables for Tast tests, and the format are expected to '
        'be "key=value" pairs.')
    tast_test_parser.add_argument(
        '--test',
        '-t',
        action='append',
        dest='tests',
        help='A Tast test to run in the device (eg: "ui.ChromeLogin").')
    tast_test_parser.add_argument(
        '--gtest_filter',
        type=str,
        help="Similar to GTest's arg of the same name, this will filter out the "
        "specified tests from the Tast run. However, due to the nature of Tast's "
        'cmd-line API, this will overwrite the value(s) of "--test" above.')

    add_common_args(gtest_parser, tast_test_parser, host_cmd_parser)

    args = sys.argv[1:]
    unknown_args = []
    # If a '--' is present in the args, treat everything to the right of it as
    # args to the test and everything to the left as args to this test runner.
    # Otherwise treat all known args as args to this test runner and all unknown
    # args as test args.
    if '--' in args:
        unknown_args = args[args.index('--') + 1:]
        args = args[0:args.index('--')]
    if unknown_args:
        args = parser.parse_args(args=args)
    else:
        args, unknown_args = parser.parse_known_args()

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)

    if not args.use_vm and not args.device:
        # If we're not running on a VM, but haven't specified a hostname, assume
        # we're on a lab bot and are trying to run a test on a lab DUT. See if the
        # magic lab DUT hostname resolves to anything. (It will in the lab and will
        # not on dev machines.)
        try:
            socket.getaddrinfo(LAB_DUT_HOSTNAME, None)
        except socket.gaierror:
            logging.error('The default DUT hostname of %s is unreachable.',
                          LAB_DUT_HOSTNAME)
            return 1

    args.cros_cache = os.path.abspath(args.cros_cache)
    return args.func(args, unknown_args)
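
The '--' handling above is a reusable trick for wrapper scripts that forward unknown flags to another program. A self-contained sketch (the helper name is mine, not from the original):

def split_at_double_dash(argv):
    # Everything right of '--' is for the wrapped command,
    # everything left of it is for this runner.
    if '--' in argv:
        i = argv.index('--')
        return argv[:i], argv[i + 1:]
    return argv, []

runner_argv, test_argv = split_at_double_dash(['vm-test', '--', '--gtest_repeat=3'])
print(runner_argv)  # ['vm-test']
print(test_argv)    # ['--gtest_repeat=3']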
Example #43
def main():
    parser = argparse.ArgumentParser(
        description='Ingest stats from gitlab project.',
        epilog=
        "To ingest gitlab stats for pipelines and merge requests updated up to 2 days old from now, run 'ingest 2d'"
    )
    parser.add_argument(
        'delta',
        metavar='delta',
        type=str,
        help=
        'delta in [NNd][NNh][NNm] format, to define the max age of updates to fetch'
    )
    parser.add_argument('--verbose',
                        action='store_true',
                        help="verbose output")
    parser.add_argument('--fetch-only',
                        action='store_true',
                        help="do not ingest in Elasticsearch")
    parser.add_argument('--no-pipelines',
                        action='store_true',
                        help="do not fetch pipelines")
    parser.add_argument('--no-merge-requests',
                        action='store_true',
                        help="do not fetch merge requests")
    parser.add_argument('--dump-es-docs',
                        action='store_true',
                        help="dump documets before sending to Elasticsearch")
    parser.add_argument('--dump-config',
                        action='store_true',
                        help="dump config before starting")
    parser.add_argument('--gitlab-url',
                        default=os.getenv('GITLAB_URL', 'https://gitlab.com'),
                        help="gitlab site url")
    parser.add_argument('--gitlab-token',
                        default=os.getenv('GITLAB_TOKEN'),
                        help="gitlab private token")
    parser.add_argument('--gitlab-project-id',
                        default=os.getenv('GITLAB_PROJECT_ID'),
                        help="gitlab project id")
    parser.add_argument('--es-hosts',
                        default=os.getenv('ES_HOSTS', 'localhost'),
                        help="Elasticsearch hosts")
    parser.add_argument('--check-missing-intervals',
                        action='store_true',
                        help="check for non-ingested intervals")

    global args
    args = parser.parse_args()

    gitlab_token = args.gitlab_token
    args.gitlab_token = f"{gitlab_token[:2]}...{gitlab_token[-2:]}"
    if args.dump_config:
        print(f"Config: {vars(args)}", file=sys.stderr)

    delta = parse_time_delta(args.delta)
    if not delta:
        print("Wrong delta format.", file=sys.stderr)
        exit(1)

    if not args.fetch_only:
        es_hosts = [x.strip() for x in args.es_hosts.split()]
        global es
        es = Elasticsearch(es_hosts)
        while not es.ping():
            print("Waiting for elasticsearch...", file=sys.stderr)
            sleep(1)

    if not args.no_pipelines or not args.no_merge_requests:
        print(f"Fetching project {args.gitlab_project_id} ...",
              file=sys.stderr)
        gl = gitlab.Gitlab(args.gitlab_url, private_token=gitlab_token)
        project = gl.projects.get(args.gitlab_project_id)

    ts = datetime.now(timezone.utc)
    start_from = ts - delta
    end_till = ts
    print(
        f"Requested interval: {start_from.isoformat()} - {end_till.isoformat()} ({delta})",
        file=sys.stderr)

    if not args.no_pipelines:
        with ingestion('pipelines', delta, start_from, end_till) as i:
            print("Fetching pipelines...", file=sys.stderr)
            pipelines = project.pipelines.list(as_list=False,
                                               order_by='updated_at',
                                               sort='desc')
            process_pipelines(pipelines, start_from)

    if not args.no_merge_requests:
        with ingestion('mergerequests', delta, start_from, end_till) as i:
            print("Fetching merge requests...", file=sys.stderr)
            merge_requests = project.mergerequests.list(as_list=False,
                                                        order_by='updated_at',
                                                        sort='desc')
            process_merge_requests(merge_requests, start_from)

    if args.check_missing_intervals:
        print("Checking for missing intervals...", file=sys.stderr)
        if check_missing_intervals(delta, start_from, end_till):
            exit(1)
        else:
            exit(0)
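
parse_time_delta is called above but not shown in the snippet. A plausible sketch of a parser for the [NNd][NNh][NNm] format described in the help text (hypothetical, not the project's actual implementation):

import re
from datetime import timedelta

def parse_time_delta(spec):
    # Accepts e.g. '2d', '1d12h', '90m'; returns None on bad input,
    # which the caller above reports as "Wrong delta format."
    m = re.fullmatch(r'(?:(\d+)d)?(?:(\d+)h)?(?:(\d+)m)?', spec)
    if not m or not any(m.groups()):
        return None
    days, hours, minutes = (int(g) if g else 0 for g in m.groups())
    return timedelta(days=days, hours=hours, minutes=minutes)

print(parse_time_delta('2d'))     # 2 days, 0:00:00
print(parse_time_delta('1d12h'))  # 1 day, 12:00:00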
Example #44
def main():
    qp = {}

    def add_qp(option, opt_str, value, parser):
        if option.dest == 'query':
            try:
                (p, v) = value.split('=', 1)
                qp[p] = v

            except ValueError:
                raise optparse.OptionValueError("%s expects parameter=value"
                                                % opt_str)

        else:
            qp[option.dest] = value

    parser = optparse.OptionParser(
        usage="Usage: %prog [-h|--help] [OPTIONS] -o file",
        version="%prog " + VERSION,
        add_help_option=False)

    parser.set_defaults(
        url="http://geofon.gfz-potsdam.de/eidaws/routing/1/",
        timeout=600,
        retries=10,
        retry_wait=60,
        threads=5)

    parser.add_option("-h", "--help", action="store_true", default=False,
                      help="show help message and exit")

    parser.add_option("-l", "--longhelp", action="store_true", default=False,
                      help="show extended help message and exit")

    parser.add_option("-v", "--verbose", action="store_true", default=False,
                      help="verbose mode")

    parser.add_option("-u", "--url", type="string",
                      help="URL of routing service (default %default)")

    parser.add_option("-y", "--service", type="string", action="callback",
                      callback=add_qp,
                      help="target service (default dataselect)")

    parser.add_option("-N", "--network", type="string", action="callback",
                      callback=add_qp,
                      help="network code or pattern")

    parser.add_option("-S", "--station", type="string", action="callback",
                      callback=add_qp,
                      help="station code or pattern")

    parser.add_option("-L", "--location", type="string", action="callback",
                      callback=add_qp,
                      help="location code or pattern")

    parser.add_option("-C", "--channel", type="string", action="callback",
                      callback=add_qp,
                      help="channel code or pattern")

    parser.add_option("-s", "--starttime", type="string", action="callback",
                      callback=add_qp,
                      help="start time")

    parser.add_option("-e", "--endtime", type="string", action="callback",
                      callback=add_qp,
                      help="end time")

    parser.add_option("-q", "--query", type="string", action="callback",
                      callback=add_qp, metavar="PARAMETER=VALUE",
                      help="additional query parameter")

    parser.add_option("-t", "--timeout", type="int",
                      help="request timeout in seconds (default %default)")

    parser.add_option("-r", "--retries", type="int",
                      help="number of retries (default %default)")

    parser.add_option("-w", "--retry-wait", type="int",
                      help="seconds to wait before each retry "
                           "(default %default)")

    parser.add_option("-n", "--threads", type="int",
                      help="maximum number of download threads "
                           "(default %default)")

    parser.add_option("-c", "--credentials-file", type="string",
                      help="URL,user,password file (CSV format) for queryauth")

    parser.add_option("-a", "--auth-file", type="string",
                      help="file that contains the auth token")

    parser.add_option("-p", "--post-file", type="string",
                      help="request file in FDSNWS POST format")

    parser.add_option("-f", "--arclink-file", type="string",
                      help="request file in ArcLink format")

    parser.add_option("-b", "--breqfast-file", type="string",
                      help="request file in breq_fast format")

    parser.add_option("-o", "--output-file", type="string",
                      help="file where downloaded data is written")

    parser.add_option("-z", "--no-citation", action="store_true", default=False,
                      help="suppress network citation info")

    parser.add_option("-Z", "--no-check", action="store_true", default=False,
                      help="suppress checking received routes and data")

    (options, args) = parser.parse_args()

    if options.help:
        print(__doc__.split("Usage Examples", 1)[0], end="")
        parser.print_help()
        return 0

    if options.longhelp:
        print(__doc__)
        parser.print_help()
        return 0

    if args or not options.output_file:
        parser.print_usage(sys.stderr)
        return 1

    if bool(options.post_file) + bool(options.arclink_file) + \
            bool(options.breqfast_file) > 1:
        msg("only one of (--post-file, --arclink-file, --breqfast-file) "
            "can be used")
        return 1

    try:
        cred = {}
        authdata = None
        postdata = None
        chans_to_check = set()

        if options.credentials_file:
            with open(options.credentials_file) as fd:
                try:
                    for (url, user, passwd) in csv.reader(fd):
                        cred[url] = (user, passwd)

                except (ValueError, csv.Error):
                    raise Error("error parsing %s" % options.credentials_file)

                except UnicodeDecodeError:
                    raise Error("invalid unicode character found in %s"
                                % options.credentials_file)

        if options.auth_file:
            with open(options.auth_file, 'rb') as fd:
                authdata = fd.read()

        else:
            try:
                with open(DEFAULT_TOKEN_LOCATION, 'rb') as fd:
                    authdata = fd.read()
                    options.auth_file = DEFAULT_TOKEN_LOCATION

            except IOError:
                pass

        if authdata:
            msg("using token in %s:" % options.auth_file, options.verbose)

            try:
                proc = subprocess.Popen(['gpg', '--decrypt'],
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)

                out, err = proc.communicate(authdata)

                if not out:
                    if isinstance(err, bytes):
                        err = err.decode('utf-8')

                    msg(err)
                    return 1

                if isinstance(out, bytes):
                    out = out.decode('utf-8')

                msg(out, options.verbose)

            except OSError as e:
                msg(str(e))

        if options.post_file:
            try:
                with open(options.post_file) as fd:
                    postdata = fd.read()

            except UnicodeDecodeError:
                raise Error("invalid unicode character found in %s"
                            % options.post_file)

        else:
            parser = None

            if options.arclink_file:
                parser = ArclinkParser()

                try:
                    parser.parse(options.arclink_file)

                except UnicodeDecodeError:
                    raise Error("invalid unicode character found in %s"
                                % options.arclink_file)

            elif options.breqfast_file:
                parser = BreqParser()

                try:
                    parser.parse(options.breqfast_file)

                except UnicodeDecodeError:
                    raise Error("invalid unicode character found in %s"
                                % options.breqfast_file)

            if parser is not None:
                if parser.failstr:
                    msg(parser.failstr)
                    return 1

                postdata = parser.postdata

        if not options.no_check:
            if postdata:
                for line in postdata.splitlines():
                    nslc = line.split()[:4]
                    if nslc[2] == '--':
                        nslc[2] = ''
                    chans_to_check.add('.'.join(nslc))

            else:
                net = qp.get('network', '*')
                sta = qp.get('station', '*')
                loc = qp.get('location', '*')
                cha = qp.get('channel', '*')

                for n in net.split(','):
                    for s in sta.split(','):
                        for l in loc.split(','):
                            for c in cha.split(','):
                                if l == '--':
                                    l = ''
                                chans_to_check.add('.'.join((n, s, l, c)))

        url = RoutingURL(urlparse.urlparse(options.url), qp)
        dest = open(options.output_file, 'wb')

        nets = route(url, cred, authdata, postdata, dest, chans_to_check,
                     options.timeout, options.retries, options.retry_wait,
                     options.threads, options.verbose)

        if nets and not options.no_citation:
            msg("retrieving network citation info", options.verbose)
            get_citation(nets, options)

        else:
            msg("", options.verbose)

        msg("In case of problems with your request, plese use the contact "
            "form at\n\n"
            "    http://www.orfeus-eu.org/organization/contact/form/"
            "?recipient=EIDA\n", options.verbose)

    except (IOError, Error) as e:
        msg(str(e))
        return 1

    return 0
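
The add_qp callback above funnels several options into one shared dict of query parameters. Stripped to its essentials, the optparse callback pattern looks like this sketch (not the original module):

import optparse

qp = {}

def add_qp(option, opt_str, value, parser):
    # optparse hands callbacks the Option object, the literal flag string,
    # the converted value, and the parser itself.
    qp[option.dest] = value

parser = optparse.OptionParser()
parser.add_option('-N', '--network', type='string', action='callback',
                  callback=add_qp)
parser.add_option('-S', '--station', type='string', action='callback',
                  callback=add_qp)

parser.parse_args(['-N', 'GE', '-S', 'APE'])
print(qp)  # {'network': 'GE', 'station': 'APE'}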
Example #45
0
def main():
    # Arguments parsing
    parser = argparse.ArgumentParser("All arguments are optional and read from config.ini when not passed.")
    parser.add_argument("-d", "--debug", action="count", default=0, help="Increase debugging level")
    parser.add_argument("-c", "--config", default='config.ini', help="Configuration file")
    parser.add_argument("-s", "--start-date", default="", help="Start date for sync in YYYY-MM-DD format")
    parser.add_argument("-e", "--end-date", default="", help="End data for sync in YYYY-MM-DD format")
    parser.add_argument("-g", "--google-creds", default="auth/google.json", help="Google credentials file")
    parser.add_argument("-f", "--fitbit-creds", default="auth/fitbit.json", help="Fitbit credentials file")
    parser.add_argument("-v", "--version", help="Fitbit-GoogleFit migration tool version", action="store_true")
    args = parser.parse_args()

    # Show version information if required
    if args.version:
        print('         fitbit-googlefit version {}'.format(VERSION))
        print('')

    # Reading configuration from config file
    config = configparser.ConfigParser()
    config.read(args.config)
    params = config['params']

    # Init objects
    helper = Helper(args.fitbit_creds, args.google_creds)
    convertor = Convertor(args.google_creds, None)
    fitbitClient, googleClient = helper.GetFitbitClient(), helper.GetGoogleClient()
    remote = Remote(fitbitClient, googleClient, convertor, helper)

    # Get user's time zone info from Fitbit -- since Fitbit time stamps are not epoch and stored in user's timezone.
    userProfile = remote.ReadFromFitbit(fitbitClient.user_profile_get)
    tzinfo = dateutil.tz.gettz(userProfile['user']['timezone'])
    convertor.UpdateTimezone(tzinfo)

    # setup Google Fit data sources for each data type supported
    for dataType in ['steps', 'distance', 'weight', 'heart_rate', 'calories', 'activity', 'body_fat', 'sleep']:
        remote.CreateGoogleFitDataSource(dataType)

    # Decide the start and end dates of sync
    start_date_str = args.start_date if args.start_date != '' else params.get('start_date')
    end_date_str = args.end_date if args.end_date != '' else params.get('end_date')
    start_date = convertor.parseHumanReadableDate(start_date_str)
    end_date = convertor.parseHumanReadableDate(end_date_str)

    # Start syncing data for the given range
    for single_date in convertor.daterange(start_date, end_date):
        date_stamp = single_date.strftime(DATE_FORMAT)
        print('------------------------------   {}  -------------------------'.format(date_stamp))

        # ----------------------------------     steps      ------------------------
        if params.getboolean('sync_steps'):
            remote.SyncFitbitToGoogleFit('steps', date_stamp)

        # ----------------------------------     distance   ------------------------
        if params.getboolean('sync_distance'):
            remote.SyncFitbitToGoogleFit('distance', date_stamp)

        # ----------------------------------     heart rate ------------------------
        if params.getboolean('sync_heartrate'):
            remote.SyncFitbitToGoogleFit('heart_rate', date_stamp)

        # ----------------------------------     weight     ------------------------
        if params.getboolean('sync_weight'):
            remote.SyncFitbitToGoogleFit('weight', date_stamp)

        # ----------------------------------     body fat   ------------------------
        if params.getboolean('sync_body_fat'):
            remote.SyncFitbitToGoogleFit('body_fat', date_stamp)

        # ----------------------------------     calories   ------------------------
        if params.getboolean('sync_calories'):
            remote.SyncFitbitToGoogleFit('calories', date_stamp)

        # ----------------------------------     sleep   ------------------------
        if params.getboolean('sync_sleep'):
            remote.SyncFitbitToGoogleFit('sleep', date_stamp)

        print('')

    # ----------------------------------  activity logs  ------------------------
    if params.getboolean('sync_activities'):
        remote.SyncFitbitActivitiesToGoogleFit(start_date=start_date)
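
Each sync step above is gated by a boolean read from config.ini through params.getboolean(). A minimal sketch of that configparser behavior:

import configparser

config = configparser.ConfigParser()
config.read_string('[params]\nsync_steps = yes\nsync_sleep = no\n')
params = config['params']

# getboolean() understands yes/no, true/false, on/off and 1/0.
print(params.getboolean('sync_steps'))  # True
print(params.getboolean('sync_sleep'))  # False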
Example #46
def parse_command_line_arguments(logger):
    """
    Parse command line arguments received, if any
    Print example if invalid arguments are passed

    :param logger:  the logger
    :return:        config_filename passed as argument if any, else DEFAULT_CONFIG_FILENAME
                    export_formats passed as argument if any, else 'pdf'
                    list_export_profiles if passed as argument, else None
                    do_loop False if passed as argument, else True
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        help='config file to use, defaults to ' +
                        DEFAULT_CONFIG_FILENAME)
    parser.add_argument('--format',
                        nargs='*',
                        help='formats to download, valid options are pdf, '
                        'json, docx, csv, media, web-report-link, actions, sql')
    parser.add_argument(
        '--list_export_profiles',
        nargs='*',
        help='display all export profiles, or restrict to specific'
        ' template_id if supplied as additional argument')
    parser.add_argument('--loop',
                        nargs='*',
                        help='execute continuously until interrupted')
    parser.add_argument(
        '--setup',
        action='store_true',
        help='Automatically create new directory containing the '
        'necessary config file. '
        'Directory will be named iAuditor Audit Exports, and will be placed in your current directory'
    )
    args = parser.parse_args()

    config_filename = DEFAULT_CONFIG_FILENAME

    if args.setup:
        initial_setup(logger)
        exit()

    if args.config is not None:
        if os.path.isfile(args.config):
            config_filename = args.config
            logger.debug(config_filename + ' passed as config argument')
        else:
            logger.error(config_filename + ' is not a valid config file')
            sys.exit(1)

    export_formats = ['pdf']
    if args.format is not None and len(args.format) > 0:
        valid_export_formats = [
            'json', 'docx', 'pdf', 'csv', 'media', 'web-report-link',
            'actions', 'sql'
        ]
        export_formats = []
        for option in args.format:
            if option not in valid_export_formats:
                print(
                    '{0} is not a valid export format. Valid options are pdf, json, docx, csv, '
                    'web-report-link, media, actions, or sql.'.format(option))
                logger.info(
                    'invalid export format argument: {0}'.format(option))
            else:
                export_formats.append(option)

    loop_enabled = args.loop is not None

    return config_filename, export_formats, args.list_export_profiles, loop_enabled
Example #47
    def group_action_parser(self):

        parser = reqparse.RequestParser()
        parser.add_argument("action_name", type=str, location='args', default=None)

        return parser.parse_args()
Example #48
def main():
    # Configure logging in the library
    logging.basicConfig()
    logger = logging.getLogger(sentinel5dl.__name__)
    logger.setLevel(logging.INFO)

    parser = argparse.ArgumentParser(
        description='Search for and download Sentinel-5P data files',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=f'AVAILABLE PRODUCTS\n{PRODUCTS_STR}'
    )

    # type= can use a callable, use that for most of this
    parser.add_argument(
        '--polygon',
        type=is_polygon,
        help='''Polygon defining an area by a set of coordinates.
            Example: 30.1 10.0, 40.0 40.1, 20 40, 10 20, 30.1 10.0'''
    )

    parser.add_argument(
        '--product',
        choices=PRODUCTS,
        metavar='PRODUCT',
        default='L2__CO____',
        help='Type of product to search for'
    )

    parser.add_argument(
        '--level',
        choices=PROCESSING_LEVELS,
        default='L2',
        help='Data processing level'
    )

    parser.add_argument(
        '--mode',
        choices=PROCESSING_MODES,
        help='Data processing mode'
    )

    parser.add_argument(
        '--begin-ts',
        default='2019-09-01T00:00:00.000Z',
        type=dateutil.parser.parse,
        help='''Timestamp specifying the earliest sensing date.
            Example: 2019-09-01T00:00:00.000Z'''
    )

    parser.add_argument(
        '--end-ts',
        default='2019-09-17T23:59:59.999Z',
        type=dateutil.parser.parse,
        help='''Timestamp specifying the latest sensing date.
            Example: 2019-09-17T23:59:59.999Z'''
    )

    parser.add_argument(
        '--use-certifi',
        action='store_true',
        help='''If a Certificate Authority (CA) bundle is not already supplied
            by your operating system, certifi provides an easy way of
            providing a cabundle.'''
    )

    parser.add_argument(
        '--worker',
        type=int,
        default=1,
        help='Number of parallel downloads',
    )

    parser.add_argument(
        'download_dir',
        metavar='download-dir',
        help='Download directory'
    )

    args = parser.parse_args()

    # Provide a Certificate Authority (CA) bundle
    if args.use_certifi:
        sentinel5dl.ca_info = certifi.where()

    # Search for Sentinel-5 products
    result = search(
        polygon=args.polygon,
        begin_ts=args.begin_ts,
        end_ts=args.end_ts,
        product=args.product,
        processing_level=args.level,
        processing_mode=args.mode
    )

    # Download found products to the download directory with number of workers
    with multiprocessing.Pool(args.worker) as p:
        p.starmap(download, map(
            lambda product: ((product,), args.download_dir),
            result.get('products')))
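
is_polygon is not included in the snippet; argparse accepts any callable for type= that converts the raw string or raises an error. A hypothetical validator in that spirit:

import argparse

def is_polygon(value):
    # Parse 'x1 y1, x2 y2, ...' and require a closed ring.
    points = [p.split() for p in value.split(',')]
    if len(points) < 4 or points[0] != points[-1]:
        raise argparse.ArgumentTypeError(
            'polygon must be closed (first point == last point)')
    return [(float(x), float(y)) for x, y in points]

parser = argparse.ArgumentParser()
parser.add_argument('--polygon', type=is_polygon)
args = parser.parse_args(
    ['--polygon', '30.1 10.0, 40.0 40.1, 20 40, 10 20, 30.1 10.0'])
print(args.polygon[0])  # (30.1, 10.0)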
Example #49
                    action='store_true',
                    help='run in prod?')
parser.add_argument('-r',
                    '--num_results',
                    dest='num_results',
                    type=int,
                    default=200,
                    help='number of results to return per query')
parser.add_argument('--port',
                    dest='port',
                    type=int,
                    default=5000,
                    help='port to serve on')

if __name__ == "__main__":
    args = parser.parse_args()
    print(args)
    # start
    if args.prod:
        # run on Tornado instead, since running raw Flask in prod is not recommended
        print('starting tornado!')
        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado.log import enable_pretty_logging
        enable_pretty_logging()
        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(args.port)
        IOLoop.instance().start()
    else:
        print('starting flask!')
Example #50
def parse_args():
    parser = argparse.ArgumentParser('Artifact costs')
    parser.add_argument('groupid')
    return parser.parse_args()
Example #51
def parse_command_line():
    parser = optparse.OptionParser()
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="prints debug output and additional detail.")
    parser.add_option("-d",
                      "--debug",
                      action="store_true",
                      dest="debug",
                      default=False,
                      help="run in debug mode and not service context.")
    parser.add_option(
        "-b",
        "--bash",
        action="store_true",
        dest="schedule",
        default=False,
        help="create schedule file for bash shell based command / control.")
    parser.add_option("-m",
                      "--mask",
                      dest="el_mask",
                      type=float,
                      default=0.0,
                      help="mask all passes below the provided elevation.")
    parser.add_option("-c",
                      "--config",
                      dest="config",
                      default='config/beacons.ini',
                      help="Use configuration file <config>.")
    parser.add_option("-f",
                      "--foreground",
                      action="store_true",
                      dest="foreground",
                      help="Execute schedule in foreground.")
    parser.add_option(
        "-s",
        "--starttime",
        dest="starttime",
        help="Start time in ISO8601 format, e.g. 2016-01-01T15:24:00Z")
    parser.add_option(
        "-e",
        "--endtime",
        dest="endtime",
        help="End time in ISO8601 format, e.g. 2016-01-01T16:24:00Z")
    parser.add_option(
        "-i",
        "--interval",
        dest="interval",
        type=float,
        default=10.0,
        help=
        "Sampling interval for ephemeris predictions, default is 10 seconds.")
    parser.add_option("-r",
                      "--radio",
                      dest="site",
                      default='config/site.ini',
                      help="Radio site configuration file.")

    (options, args) = parser.parse_args()

    return (options, args)
Example #52
def parse_args():
    parser = argparse.ArgumentParser(
        description=sys.modules['__main__'].__doc__)
    parser.add_argument('--project',
                        required=True,
                        choices=PROJECTS.keys(),
                        help='Collect stats about this project.')
    parser.add_argument(
        '--bot',
        type=str,
        dest='bots',
        action='append',
        default=[
            '*****@*****.**',
            '*****@*****.**',
            '*****@*****.**',
            '*****@*****.**',
            '*****@*****.**',
        ],
        help=('Add an author to be treated as a bot. '
              'Repeat to add several bots. Default: %(default)s.'))
    parser.add_argument('--seq',
                        action='store_true',
                        help='Run everything sequentially for debugging.')
    parser.add_argument(
        '--thread-pool',
        type=int,
        default=200,
        help='Fetch data using this many parallel threads. Default=%(default)s.'
    )
    parser.add_argument('--list-rejections',
                        action='store_true',
                        help='List rejected CLs and reasons for rejection.')
    parser.add_argument(
        '--list-false-rejections',
        action='store_true',
        help='List CLs that were committed in more than one attempt.')
    parser.add_argument(
        '--use-logs',
        action='store_true',
        default=True,
        help=(
            'On by default. '
            'Fetch the detailed logs and recompute the stats in this script, '
            'instead of fetching precomputed stats. '
            'Slower, but more accurate, and helps validate the cached stats.'))
    parser.add_argument(
        '--use-cache',
        dest='use_logs',
        action='store_false',
        help=('Fetch the cached stats from the app. Opposite to --use-logs.'))
    parser.add_argument(
        '--date',
        help='Start date of stats YYYY-MM-DD[ HH[:MM]]. Default: --range ago.')
    parser.add_argument('--range',
                        choices=INTERVALS.keys(),
                        default='week',
                        help='Time range to print stats for.')
    infra_libs.logs.add_argparse_options(parser, default_level=logging.ERROR)

    args = parser.parse_args()

    if args.date:
        args.date = date_from_string(args.date)
    else:
        args.date = (datetime.datetime.now() -
                     datetime.timedelta(minutes=INTERVALS[args.range]))

    return args
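
The --use-logs/--use-cache pair above is a useful idiom: two flags share one dest, so they behave as on/off switches for the same setting. Reduced to a sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--use-logs', dest='use_logs', action='store_true',
                    default=True, help='recompute stats from logs (default)')
parser.add_argument('--use-cache', dest='use_logs', action='store_false',
                    help='fetch precomputed stats instead')

print(parser.parse_args([]).use_logs)               # True
print(parser.parse_args(['--use-cache']).use_logs)  # False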
Example #53
def parse_arguments(state):
    parser = argparse.ArgumentParser()

    #select state of Brazil
    state1 = "SP"
    #initial date for data fitting
    date = "2020-03-15"
    #initial condition for susceptible
    s0 = 280.0e3
    #initial condition for exposed
    e0 = 1e-4
    #initial condition for infectious
    i0 = 1e-4
    #initial condition for recovered
    r0 = 1e-4
    #initial condition for deaths
    k0 = 1e-4
    #initial condition for asymptomatic
    a0 = 1e-4
    #start fitting when the number of cases >= start
    start = 350
    # recovered data is not available, so recovered is modeled as a function of deaths
    ratioRecoveredDeath = .15
    # weights used when fitting the data
    weigthCases = 0.4
    weigthRecov = 0.1
    #weightDeaths = 1 - weigthCases - weigthRecov

    #command line arguments
    parser.add_argument('--states', dest='states', type=str, default=state1)

    parser.add_argument('--download-data', dest='download_data', action='store_true', default=False)

    parser.add_argument('--start-date',
                        dest='start_date',
                        type=str,
                        default=date)

    parser.add_argument('--prediction-days',
                        dest='predict_range',
                        type=int,
                        default=150)

    parser.add_argument('--S_0', dest='s_0', type=float, default=s0)

    parser.add_argument('--E_0', dest='e_0', type=float, default=e0)

    parser.add_argument('--A_0', dest='a_0', type=float, default=a0)

    parser.add_argument('--I_0', dest='i_0', type=float, default=i0)

    parser.add_argument('--R_0', dest='r_0', type=float, default=r0)

    parser.add_argument('--D_0', dest='d_0', type=float, default=k0)

    parser.add_argument('--START', dest='startNCases', type=int, default=start)

    parser.add_argument('--RATIO',
                        dest='ratio',
                        type=float,
                        default=ratioRecoveredDeath)

    parser.add_argument('--WCASES',
                        dest='weigthCases',
                        type=float,
                        default=weigthCases)

    parser.add_argument('--WREC',
                        dest='weigthRecov',
                        type=float,
                        default=weigthRecov)

    args = parser.parse_args()

    state_list = []
    if args.states != "":
        try:
            states_raw = args.states
            state_list = states_raw.split(",")
        except Exception:
            sys.exit("QUIT: states parameter is not on CSV format")
    else:
        sys.exit("QUIT: You must pass a state list on CSV format.")

    return (state_list, args.download_data, args.start_date, args.predict_range, args.s_0, args.e_0, \
        args.a_0, args.i_0, args.r_0, args.d_0, args.startNCases, args.ratio, args.weigthCases, args.weigthRecov)
Example #54
def main():
    import argparse
    import os

    domain = os.getenv('JIRA_DOMAIN', '')
    email = os.getenv('JIRA_EMAIL', '')
    apikey = os.getenv('JIRA_APIKEY', '')

    parser = argparse.ArgumentParser(
        description='Extract issue changelog data from a Jira Project')
    parser.add_argument('project',
                        help='Jira project from which to extract issues')
    parser.add_argument(
        'since',
        help='Date from which to start extracting issues (yyyy-mm-dd)')
    parser.add_argument('--updates-only',
                        action='store_true',
                        help='''
        When passed, instead of extracting issues created since the since argument,
        only issues *updated* since the since argument will be extracted.''')
    parser.add_argument(
        '--append',
        action='store_true',
        help='Append to the output file instead of overwriting it.')
    parser.add_argument(
        '-d',
        '--domain',
        default=domain,
        help=
        'Jira project domain url (i.e., https://company.atlassian.net). Can also be provided via JIRA_DOMAIN environment variable.'
    )
    parser.add_argument(
        '-e',
        '--email',
        default=email,
        help=
        'Jira user email address for authentication. Can also be provided via JIRA_EMAIL environment variable.'
    )
    parser.add_argument(
        '-k',
        '--apikey',
        default=apikey,
        help=
        'Jira user api key for authentication. Can also be provided via JIRA_APIKEY environment variable.'
    )
    parser.add_argument('-o',
                        '--output',
                        default='out.csv',
                        help='File to store the csv output.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Be verbose and output progress to console.')

    parser.add_argument(
        '-f',
        '--field',
        metavar='FIELD_ID',
        action='append',
        help='Include one or more custom fields in the query by id.')

    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    client = Client(args.domain, email=args.email, apikey=args.apikey)

    mode = 'a' if args.append else 'w'

    custom_fields = [
        k if k.startswith('customfield') else f'customfield_{k}'
        for k in args.field
    ] if args.field else []

    with open(args.output, mode, newline='') as csv_file:
        logging.info('{} opened for writing (mode: {})...'.format(
            args.output, mode))
        generate_csv(client,
                     csv_file,
                     args.project,
                     since=args.since,
                     custom_fields=custom_fields,
                     updates_only=args.updates_only,
                     write_header=not args.append)
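
The custom-field normalization near the end is worth isolating: bare ids get the customfield_ prefix, already-qualified ids pass through unchanged. For example:

fields = ['10012', 'customfield_10034']
custom_fields = [
    k if k.startswith('customfield') else f'customfield_{k}'
    for k in fields
]
print(custom_fields)  # ['customfield_10012', 'customfield_10034']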
Example #55
def main():
    # Silence upload.py.
    rietveld.upload.verbosity = 0

    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-u',
                      '--user',
                      metavar='<email>',
                      default=os.environ.get('USER'),
                      help='Filter on user, default=%default')
    parser.add_option('-b',
                      '--begin',
                      metavar='<date>',
                      help='Filter issues created after the date (mm/dd/yy)')
    parser.add_option('-e',
                      '--end',
                      metavar='<date>',
                      help='Filter issues created before the date (mm/dd/yy)')
    quarter_begin, quarter_end = get_quarter_of(datetime.today() -
                                                relativedelta(months=2))
    parser.add_option(
        '-Q',
        '--last_quarter',
        action='store_true',
        help='Use last quarter\'s dates, i.e. %s to %s' %
        (quarter_begin.strftime('%Y-%m-%d'), quarter_end.strftime('%Y-%m-%d')))
    parser.add_option('-Y',
                      '--this_year',
                      action='store_true',
                      help='Use this year\'s dates')
    parser.add_option('-w',
                      '--week_of',
                      metavar='<date>',
                      help='Show issues for week of the date (mm/dd/yy)')
    parser.add_option(
        '-W',
        '--last_week',
        action='count',
        help='Show last week\'s issues. Use more times for more weeks.')
    parser.add_option(
        '-a',
        '--auth',
        action='store_true',
        help='Ask to authenticate for instances with no auth cookie')
    parser.add_option('-d',
                      '--deltas',
                      action='store_true',
                      help='Fetch deltas for changes (slow).')

    activity_types_group = optparse.OptionGroup(
        parser, 'Activity Types',
        'By default, all activity will be looked up and '
        'printed. If any of these are specified, only '
        'those specified will be searched.')
    activity_types_group.add_option('-c',
                                    '--changes',
                                    action='store_true',
                                    help='Show changes.')
    activity_types_group.add_option('-i',
                                    '--issues',
                                    action='store_true',
                                    help='Show issues.')
    activity_types_group.add_option('-r',
                                    '--reviews',
                                    action='store_true',
                                    help='Show reviews.')
    parser.add_option_group(activity_types_group)

    output_format_group = optparse.OptionGroup(
        parser, 'Output Format',
        'By default, all activity will be printed in the '
        'following format: {url} {title}. This can be '
        'changed for either all activity types or '
        'individually for each activity type. The format '
        'is defined as documented for '
        'string.format(...). The variables available for '
        'all activity types are url, title and author. '
        'Format options for specific activity types will '
        'override the generic format.')
    output_format_group.add_option(
        '-f',
        '--output-format',
        metavar='<format>',
        default=u'{url} {title}',
        help='Specifies the format to use when printing all your activity.')
    output_format_group.add_option(
        '--output-format-changes',
        metavar='<format>',
        default=None,
        help='Specifies the format to use when printing changes. Supports the '
        'additional variable {reviewers}')
    output_format_group.add_option(
        '--output-format-issues',
        metavar='<format>',
        default=None,
        help='Specifies the format to use when printing issues. Supports the '
        'additional variable {owner}.')
    output_format_group.add_option(
        '--output-format-reviews',
        metavar='<format>',
        default=None,
        help='Specifies the format to use when printing reviews.')
    output_format_group.add_option(
        '--output-format-heading',
        metavar='<format>',
        default=u'{heading}:',
        help='Specifies the format to use when printing headings.')
    output_format_group.add_option(
        '-m',
        '--markdown',
        action='store_true',
        help='Use markdown-friendly output (overrides --output-format '
        'and --output-format-heading)')
    output_format_group.add_option(
        '-j',
        '--json',
        action='store_true',
        help='Output json data (overrides other format options)')
    parser.add_option_group(output_format_group)
    auth.add_auth_options(parser)

    parser.add_option('-v',
                      '--verbose',
                      action='store_const',
                      dest='verbosity',
                      default=logging.WARN,
                      const=logging.INFO,
                      help='Output extra informational messages.')
    parser.add_option('-q',
                      '--quiet',
                      action='store_const',
                      dest='verbosity',
                      const=logging.ERROR,
                      help='Suppress non-error messages.')
    parser.add_option(
        '-o',
        '--output',
        metavar='<file>',
        help='Where to output the results. By default prints to stdout.')

    # Remove description formatting
    parser.format_description = (lambda _: parser.description)  # pylint: disable=no-member

    options, args = parser.parse_args()
    options.local_user = os.environ.get('USER')
    if args:
        parser.error('Args unsupported')
    if not options.user:
        parser.error('USER is not set, please use -u')
    options.user = username(options.user)

    logging.basicConfig(level=options.verbosity)

    # python-keyring provides easy access to the system keyring.
    try:
        import keyring  # pylint: disable=unused-import,unused-variable,F0401
    except ImportError:
        logging.warning('Consider installing python-keyring')

    if not options.begin:
        if options.last_quarter:
            begin, end = quarter_begin, quarter_end
        elif options.this_year:
            begin, end = get_year_of(datetime.today())
        elif options.week_of:
            begin, end = (get_week_of(
                datetime.strptime(options.week_of, '%m/%d/%y')))
        elif options.last_week:
            begin, end = (
                get_week_of(datetime.today() -
                            timedelta(days=1 + 7 * options.last_week)))
        else:
            begin, end = (get_week_of(datetime.today() - timedelta(days=1)))
    else:
        begin = dateutil.parser.parse(options.begin)
        if options.end:
            end = dateutil.parser.parse(options.end)
        else:
            end = datetime.today()
    options.begin, options.end = begin, end

    if options.markdown:
        options.output_format = ' * [{title}]({url})'
        options.output_format_heading = '### {heading} ###'
    logging.info('Searching for activity by %s', options.user)
    logging.info('Using range %s to %s', options.begin, options.end)

    my_activity = MyActivity(options)

    if not (options.changes or options.reviews or options.issues):
        options.changes = True
        options.issues = True
        options.reviews = True

    # First do any required authentication so none of the user interaction has to
    # wait for actual work.
    if options.changes:
        my_activity.auth_for_changes()
    if options.reviews:
        my_activity.auth_for_reviews()

    logging.info('Looking up activity.....')

    try:
        if options.changes:
            my_activity.get_changes()
        if options.reviews:
            my_activity.get_reviews()
        if options.issues:
            my_activity.get_issues()
    except auth.AuthenticationError as e:
        logging.error('auth.AuthenticationError: %s', e)

    output_file = None
    try:
        if options.output:
            output_file = open(options.output, 'w')
            logging.info('Printing output to "%s"', options.output)
            sys.stdout = output_file
    except (IOError, OSError) as e:
        logging.error('Unable to write output: %s', e)
    else:
        if options.json:
            my_activity.dump_json()
        else:
            my_activity.print_changes()
            my_activity.print_reviews()
            my_activity.print_issues()
    finally:
        if output_file:
            logging.info('Done printing to file.')
            sys.stdout = sys.__stdout__
            output_file.close()

    return 0
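
The -v/-q options above use store_const with a shared dest, mapping each flag directly to a logging level. The pattern in isolation:

import logging
import optparse

parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', action='store_const', dest='verbosity',
                  default=logging.WARN, const=logging.INFO)
parser.add_option('-q', '--quiet', action='store_const', dest='verbosity',
                  const=logging.ERROR)

options, _ = parser.parse_args(['-v'])
print(options.verbosity == logging.INFO)  # True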
                           dest="day_to_process",
                           help="using format YYYY-MM-DD",
                           default=None)
    job_group.add_argument("--max_samples_per_partition",
                           dest="max_samples_per_partition",
                           type=int,
                           default=max_samples_per_partition)
    job_group.add_argument("--base_hdfs_path",
                           dest="base_hdfs_path",
                           default=base_hdfs_path)
    # should we still allow the input of day to process and estimate ts start and end from it?

    # Parse

    try:
        c_options = parser.parse_args()
        print "Got options:", c_options
        max_ads_image_dig = c_options.max_ads_image_dig
        max_ads_image_hbase = c_options.max_ads_image_hbase
        max_ads_image = max(max_ads_image_dig, max_ads_image_hbase)
    except Exception as inst:
        print inst
        parser.print_help()

    es_ts_start, es_ts_end, ingestion_id = get_ingestion_start_end_id(
        c_options)

    # Setup SparkContext
    sc = SparkContext(appName="getimages-" + ingestion_id + dev_release_suffix)
    conf = SparkConf()
    log4j = sc._jvm.org.apache.log4j
Example #57
def handle_parameters():
    parser = argparse.ArgumentParser()
    parser.add_argument('filename', type=str)

    return parser.parse_args()
Example #58
def main():
    print('knasboll v0.5')
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--input-wildcard',
                        default='*.xml',
                        help='invoice XML files to process')
    parser.add_argument(
        '-o',
        '--output-file',
        default='dalas-invoice-import.csv',
        help='output Excel file for import into book-keeping software')
    options = parser.parse_args()

    # load table
    allocation_lookup = eval(open('allocation.cfg').read())

    table = []
    for fn in glob(options.input_wildcard):
        print('processing %s...' % fn)
        # load import file and place each row in table
        ns = dict([node for _, node in ET.iterparse(fn, events=['start-ns'])])
        ns['ns'] = ns['']
        e = ET.parse(fn).getroot()
        for account in e.findall('ns:account', ns):
            customer_id = account.find('ns:cust_id', ns).text
            for invoice in account.findall('ns:invoice', ns):
                invoice_date = invoice.find('ns:invoice_date', ns).text
                timestamp = dateutil.parser.parse(invoice_date).timestamp()
                invoice_date = datetime.fromtimestamp(
                    timestamp).isoformat().partition('T')[0]
                payment_date = invoice.find('ns:payment_due_date', ns).text
                timestamp = dateutil.parser.parse(payment_date).timestamp()
                payment_date = datetime.fromtimestamp(
                    timestamp).isoformat().partition('T')[0]
                tax_total = invoice.find('ns:total_tax_value', ns).text
                tax_total = float(tax_total)
                tax_total_s = ('%.2f' % float(tax_total)).replace('.', ',')
                invoice_total = invoice.find('ns:invoice_total', ns).text
                invoice_total = float(invoice_total) + tax_total
                invoice_total_s = ('%.2f' % invoice_total).replace('.', ',')
                invoice_number = invoice.find('ns:invoice_number', ns).text
                allocs = []
                for invoice_item in invoice.findall('ns:invoice_item', ns):
                    allocation = invoice_item.find('ns:allocation_code_name',
                                                   ns).text
                    if allocation not in allocation_lookup:
                        print(
                            'FATAL: no such allocation %s in allocation.cfg' %
                            allocation)
                        return
                    allocation_code = allocation_lookup[allocation]
                    allocs += [(allocation_code, allocation)]
                allocation_code, allocation = sorted(allocs)[0]
                table += [[
                    allocation, invoice_number, invoice_date, payment_date,
                    customer_id, tax_total, tax_total_s, invoice_total,
                    invoice_total_s, allocation_code
                ]]

    add_col(table, 'invoice_date', 'year', lambda s: s.split('-')[0])
    add_col(table, 'invoice_date', 'month', lambda s: s.split('-')[1])
    add_col(table, 'invoice_date', 'journal', lambda x: '70')
    add_col(table, 'invoice_date', 'payment_condition', lambda x: '01')
    add_col(table, 'invoice_date', 'vat_code', lambda x: '04')
    add_col(table, 'invoice_date', 'currency', lambda x: 'EUR')
    add_col(table, 'invoice_date', 'exchange_fact', lambda x: 1.0)
    cols = [
        'journal', 'year', 'month', 'invoice_no', 'allocation', 'invoice_date',
        'pay_date', 'currency', 'exchange_fact', 'payment_condition',
        'customer_number', 'allocation_code', 'allocation', 'vat_code',
        'invoice_total', 'tax_total'
    ]

    with open(options.output_file, 'w', newline='') as wf:
        wr = csv.writer(wf)
        # select the output columns from each table row, then write row-wise
        t = [[row[columns.index(c)] for row in table] for c in cols]
        for row in zip(*t):
            wr.writerow(row)

    # finish with a nice message
    gross_total = sum(row[columns.index('invoice_total')] for row in table)
    net_total = gross_total - sum(row[columns.index('tax_total')]
                                  for row in table)
    print(
        'Dala, your bester agent havs convertidid the %i invoice thingies for gross %.2f evro/net %.2f evrossar and writing it to %s!'
        % (len(table), gross_total, net_total, options.output_file))
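
Example #58 relies on a module-level columns list and an add_col helper that
the excerpt does not show. A minimal sketch of what they might look like, with
every name inferred from how the snippet indexes columns (treat this as an
assumption, not the project's actual code):

# hypothetical support code, inferred from the snippet above
columns = ['allocation', 'invoice_no', 'invoice_date', 'pay_date',
           'customer_number', 'tax_total', 'tax_total_s', 'invoice_total',
           'invoice_total_s', 'allocation_code']

def add_col(table, src, name, fn):
    # append a new column 'name', computed by applying fn to column 'src'
    src_i = columns.index(src)
    columns.append(name)
    for row in table:
        row.append(fn(row[src_i]))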
Example #59
0
    parser.add_argument('-year', type=int, default=2017, help='The tax year you want to fill out.')
    parser.add_argument('-startyear', type=int, default=0, help='The year to start looking for buy orders.  ' +
                                                                'Use this if you have the cost basis for previous ' +
                                                                'years (pass the filename with -costbasis)')
    parser.add_argument('-costbasis', default='', help='An optional file containing the cost basis of coins not ' +
                                                       'included in your GDAX, Coinbase, or Bittrex history.')
    parser.add_argument('--download', action='store_true', help='Use this flag to download the transaction history.  ' +
                                                                'Otherwise the data will be loaded from save.p')
    parser.add_argument('--turbotax', action='store_true', help='Use this flag to make a Turbo Tax txf import file.')
    parser.add_argument('--form8949', action='store_true', help='Use this flag to make the IRS form 8949 pdfs.')
    parser.add_argument('--saveorders', action='store_true', help='Use this flag to save the orders in a Python ' +
                                                                  'pickle file.')
    # Use a preset argument list if using pycharm console
    if 'pydevconsole' in sys.argv[0]:
        args = parser.parse_args([
                                  '-name', "Glenn Sugar",
                                  '-year', '2017',
                                  '-startyear', '2017'])
    else:
        args = parser.parse_args()

    if args.download:
        # Read in the GDAX buys and sells
        gdax_buys, gdax_sells = gdax_reader.get_buys_sells()
        # Read in the Coinbase buys and sells
        coinbase_buys, coinbase_sells = coinbase_reader.get_buys_sells()
        # Read in the Bittrex buys and sells
        bittrex_buys, bittrex_sells = bittrex_reader.get_buys_sells()
        # Go through the buys and sells and find coin-coin transactions.
        # 'Fixed' means each coin-coin trade is rewritten as a coin-USD sell
        # followed by a USD-coin buy.
        print('Fixing coin-coin transactions...')
        buys_fixed = []
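
The excerpt breaks off before the fixing loop itself. A minimal sketch of how
one coin-coin trade might be split into two USD-denominated legs (the order
fields and the usd_price lookup are assumptions for illustration, not taken
from the original):

def split_coin_coin(order, usd_price):
    # value the trade in USD at execution time, then emit a coin->USD sell
    # and a USD->coin buy of equal value
    usd_value = order['amount_sold'] * usd_price[order['coin_sold']]
    sell = {'coin': order['coin_sold'], 'amount': order['amount_sold'],
            'usd': usd_value, 'side': 'sell', 'time': order['time']}
    buy = {'coin': order['coin_bought'], 'amount': order['amount_bought'],
           'usd': usd_value, 'side': 'buy', 'time': order['time']}
    return sell, buy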
Example #60
0
import os
import time
import datetime
import json
import urllib
import dateutil.parser
import urlparse
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("-f",
                    "--file",
                    dest="file",
                    help="memories_history.json file",
                    metavar="FILE")
json_path = parser.parse_args().file

download_dir = 'memories'

with open(json_path) as f:
    data = json.load(f)

length = len(data['Saved Media'])
index = 1

if not os.path.exists(download_dir):
    os.makedirs(download_dir)

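# each entry in 'Saved Media' describes one saved memory to download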
for key in data['Saved Media']:
    print("Dowloading {0}/{1}".format(index, length))