Code example #1
def main():
    """CLI go brrrrr."""
    shtmlparser = SimpleHTMLDataParser()
    parser = argparse.ArgumentParser()
    parser.add_argument("injson",
                        nargs="?",
                        type=argparse.FileType("r"),
                        default=sys.stdin)
    parser.add_argument(
        "outdb",
        nargs="?",
        type=argparse.FileType("w"),
        default=":memory:",
    )
    config = parser.parse_args()

    raw_data = json.load(config.injson)
    conn = sqlite3.connect(config.outdb.name)

    conn.executescript(TABLE_SCHEMA)

    curr = conn.cursor()

    # parse and write out each record
    for record in raw_data:
        pr = CovidEventRecord(record, shtmlparser)
        insert_stmt = f'INSERT INTO exposures (notification_date, school, city, school_district, health_region, exposure_dates, extra_info) VALUES ("{pr.notification_date}", "{c(pr.school)}", "{pr.city}", "{pr.school_district}", "{pr.health_region}", "{c(pr.exposure_dates)}", "{c(pr.extra_info)}")'
        try:
            curr.execute(insert_stmt)
        except sqlite3.OperationalError as e:
            print(f"ERROR {e} executing: {insert_stmt}")
            sys.exit(1)
    conn.commit()
    curr.execute("vacuum;")
    conn.close()
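A note on example #1: the INSERT statement is built by interpolating the record fields straight into the SQL text, which breaks on embedded quotes and invites SQL injection. A minimal sketch of the same insert using sqlite3 parameter placeholders (assuming the same CovidEventRecord fields and the c() cleanup helper used above), as a drop-in for the body of the for loop, could look like this:

        # build the statement once with ? placeholders and let sqlite3 do the quoting
        insert_stmt = (
            "INSERT INTO exposures (notification_date, school, city, school_district, "
            "health_region, exposure_dates, extra_info) VALUES (?, ?, ?, ?, ?, ?, ?)"
        )
        curr.execute(insert_stmt, (
            pr.notification_date,
            c(pr.school),
            pr.city,
            pr.school_district,
            pr.health_region,
            c(pr.exposure_dates),
            c(pr.extra_info),
        ))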
Code example #2
def main():
    """
    Parse arguments, read input and count HTML tags
    """
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]))
    parser.add_argument('files', nargs='*', default='-')
    args = parser.parse_args()
    counter = HTMLTagCounter()
    exitval = 0
    maxtags = 0
    results = {}
    for arg in args.files or '-':
        try:
            if arg == '-':
                counter.feed(sys.stdin.read())
            else:
                with open(arg, 'r') as istream:
                    counter.feed(istream.read())
        except (FileNotFoundError, IsADirectoryError, PermissionError) as exc:
            print(': '.join((sys.argv[0], arg, exc.strerror)), file=sys.stderr)
            results[arg] = 0
            exitval = 1
        else:
            maxtags = max((counter.n_tags, maxtags))
            results[arg] = counter.n_tags
            counter.reset()
    if len(results) > 1:
        numsfmt = '{{:{}d}}'.format(len(str(maxtags)))
        for arg in args.files:
            print(numsfmt.format(results[arg]), arg)
    else:
        print(maxtags)
    return exitval
Code example #3
def parse_args():
    """Parse the command line arguments passed to the script."""
    parser = argparse.ArgumentParser(
        description=
        "Attempt to create an member account on an Umbraco site using the public `HandleRegisterMember` endpoint.",
        epilog=
        "Note that it is necessary to crawl for the anti-forgery tokens in Umbraco versions 7.13 and up."
    )
    parser.add_argument("URL", help="the base URL of the Umbraco site")
    parser.add_argument(
        "--crawl",
        "-C",
        action="store_true",
        help=
        "crawl the site to find an anti-forgery cookie, token, and ufprt to send with the request"
    )
    parser.add_argument("--name",
                        "-n",
                        default="Neo",
                        help="the new member's name")
    parser.add_argument("--email",
                        "-e",
                        default="*****@*****.**",
                        help="the new member's email address")
    parser.add_argument("--username",
                        "-u",
                        default="N30",
                        help="the new member's username")
    parser.add_argument("--password",
                        "-p",
                        default="Umbraco12345!",
                        help="the new member's password")
    parser.add_argument("--type", "-t", help="the member type alias")
    return parser.parse_args()
Code example #4
def getArguments():
    name = 'Trailer-Downloader'
    version = '1.12'
    parser = ArgumentParser(
        description=
        '{}: download a movie trailer from Apple or YouTube with help from TMDB'
        .format(name))
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='{} {}'.format(name, version),
                        help='show the version number and exit')
    parser.add_argument(
        '-d',
        '--directory',
        dest='directory',
        help='full path of directory to copy downloaded trailer',
        metavar='DIRECTORY')
    parser.add_argument('-t',
                        '--title',
                        dest='title',
                        help='title of movie',
                        metavar='TITLE')
    parser.add_argument('-y',
                        '--year',
                        dest='year',
                        help='release year of movie',
                        metavar='YEAR')
    args = parser.parse_args()
    return {
        'directory':
        str(args.directory) if args.directory != None else args.directory,
        'title': str(args.title) if args.title != None else args.title,
        'year': str(args.year) if args.year != None else args.year
    }
Code example #5
def main() -> 'NoReturn':
    parser = get_parser()
    parsed = parser.parse_args()
    if parsed.verbose:
        basicConfig(level=DEBUG)
    else:
        basicConfig(level=INFO)

    if parsed.version:
        print(__file__, __version__)
        sys.exit(0)

    if parsed.subcommand in ['download', 'd', 'dl']:
        if not subcommand_download(url=parsed.url):
            sys.exit(1)
    elif parsed.subcommand in ['test', 't']:
        if not subcommand_test(command=parsed.command):
            sys.exit(1)
    elif parsed.subcommand is not None:
        logger.error(
            'The subcommand "%s" is not supported in %s. Please use the full version: https://github.com/online-judge-tools/oj',
            parsed.subcommand, __file__)
        sys.exit(1)
    else:
        parser.print_help(file=sys.stderr)
        sys.exit(1)
    sys.exit(0)
Code example #6
def main():
    parser = argparse.ArgumentParser(description="Converts DFKI Quintuple N-Triple DAX ABOX data to RDF triples")
    command_builder = util.CommandBuilder(parser)
    command_builder.add_convert(convert)
    command_builder.add_batch_convert(convert, 'nt')
    args = parser.parse_args()
    command_builder.execute(args)
Code example #7
def parse_args():
    parser = argparse.ArgumentParser(
        description=
        "Import bookmarks from standard html file to rofi-bookmarks.")
    parser.add_argument("inputfile", metavar="htmlfile")
    parser.add_argument("outputfile", metavar="bookmarkfile")

    return parser.parse_args()
Code example #8
File: main.py  Project: Mais1212/Alexnime
def main():
    parser = createParser()
    namespace = parser.parse_args()

    start_page = namespace.start_page
    end_page = namespace.end_page

    for page in range(start_page, end_page):

        url_page = HOST + "/catalog?page=" + str(page)
        response_main_page = requests.get(url_page,
                                          headers=HEADERS,
                                          timeout=(0.1, 10),
                                          proxies=PROXIES)
        soup_min_page = BeautifulSoup(response_main_page.content,
                                      "html.parser")

        selector_content = ".anime-column .image-block"
        pages_content = soup_min_page.select(selector_content)
        anime_links = get_content(pages_content)
        count = 0
        for link in anime_links:

            response_anime_page = requests.get(HOST + link,
                                               headers=HEADERS,
                                               proxies=PROXIES)
            soup_anime_page = BeautifulSoup(response_anime_page.content,
                                            "html.parser")

            print(link)

            selector_name = ".content div div h1"
            selector_age = ".content-main-info li:nth-child(3)"
            selector_age_rating = ".content-main-info li:nth-child(5)"
            selector_ganres = ".content-main-info .categories-list"
            selector_rating = "span.main-rating-block > span.main-rating"
            selecror_voices = "ul.animeVoices > li"

            select_name = soup_anime_page.select_one(selector_name).text
            select_age = soup_anime_page.select_one(selector_age).text
            select_age_rating = soup_anime_page.select_one(
                selector_age_rating).text
            select_ganres = soup_anime_page.select_one(selector_ganres).text
            select_rating = soup_anime_page.select_one(selector_rating).text
            try:
                select_voices = soup_anime_page.select_one(
                    selecror_voices).text
            except AttributeError:
                select_voices = "Не хватает данных"

            full_select = [
                select_name, select_age, select_ganres, select_age_rating,
                select_rating, select_voices
            ]
            anime_data.append(full_select)
            count += 1

    create_excel("Результат.xlsx", anime_data)
Code example #9
def getArguments():
    name = 'Trailer-Downloader'
    version = '1.08'
    parser = ArgumentParser(
        description='{}: download a movie trailer from Apple or YouTube'.
        format(name))
    parser.add_argument("-v",
                        "--version",
                        action='version',
                        version='{} {}'.format(name, version),
                        help="show the version number and exit")
    parser.add_argument(
        "-d",
        "--directory",
        dest="directory",
        help="full path of directory to copy downloaded trailer",
        metavar="DIRECTORY")
    parser.add_argument("-f",
                        "--file",
                        dest="file",
                        help="full path of movie file",
                        metavar="FILE")
    parser.add_argument("-t",
                        "--title",
                        dest="title",
                        help="title of movie",
                        metavar="TITLE")
    parser.add_argument("-y",
                        "--year",
                        dest="year",
                        help="release year of movie",
                        metavar="YEAR")
    args = parser.parse_args()
    # Python 2.7
    try:
        return {
            'directory':
            str(args.directory).decode(format())
            if args.directory != None else args.directory,
            'file':
            str(args.file).decode(format())
            if args.file != None else args.file,
            'title':
            str(args.title).decode(format())
            if args.title != None else args.title,
            'year':
            str(args.year).decode(format()) if args.year != None else args.year
        }
    # Python 3.0 and later
    except:
        return {
            'directory':
            str(args.directory) if args.directory != None else args.directory,
            'file': str(args.file) if args.file != None else args.file,
            'title': str(args.title) if args.title != None else args.title,
            'year': str(args.year) if args.year != None else args.year
        }
Code example #10
File: snippet.py  Project: szabo92/gistable
def main():

    description = 'Make Launch Center Pro backup files easier to work with.'
    parser = argparse.ArgumentParser(description=description)
    group = parser.add_mutually_exclusive_group(required=True)

    group.add_argument('-read', action='store', help = ('Read in '
    'a Launch Center Pro .lcpbackup file and output a json file with '
    'the URL actions.'))

    group.add_argument('-write', action='store', help = ('Read in a '
    'previously created json file and write it to a Launch Center Pro '
    'backup file.'))

    parser.add_argument('-lcpfile', '-l', action='store', help=('The '
    '*XML* LCP backup file to use as a template (defaults to xml file with '
    'same timestamp as json file. Either use the default, or convert manually '
    'from binary to xml with plutil.'))

    args = parser.parse_args()

    if args.read:
        if not args.read.endswith('.lcpbackup'):
            print("You need to specify an .lcpbackup file to read.")
            exit(0)
        else:
            pl = read_lcp_backup(args.read)

            url_dict = find_urls(pl)

            with open(out_json, 'w') as json_opener:
                json.dump(url_dict, json_opener, indent=4)

            print("Output:\n{}\n{}".format(out_json, out_xml))

    if args.write:
        if not args.write.endswith('.json'):
            print("You need to specify a .json file to read from.")
            exit(0)
        else:

            old_ts = re.search(r'^\d+', args.write).group(0)
            template_file = '{}_xml.lcpbackup'.format(old_ts)

            if args.lcpfile:
                template_file = args.lcpfile

            with open(template_file) as xml, \
                        open(args.write) as json_file:

                pl = xml.read()
                my_json = json.load(json_file)

            new_pl = update_pl(pl, my_json)

            write_lcp_backup(new_pl)
Code example #11
def get_args():
    """
    This function uses the argparse library to parse command line arguments.

    Returns:
        args (argparse.Namespace): An argparse object. Elements of the object 
        can be accessed by their option name as attributes.
    """
    parser = argparse.ArgumentParser(description=
    "This program takes a list of words, or a file containing a list of words,\
 and queries the Google NGram API for the usage frequency of these words, by\
 year, within a designated time-frame. Unless '--noSave' is indicated, data is\
 saved to a file called 'google_Ngram_output.csv'. A more detailed description\
 of various arguments can be found at https://books.google.com/ngrams/info")

    parser.add_argument("Query", type = str, nargs="+", help="List of words,\
 or CSV or TXT file of words to be queried")

    parser.add_argument("-c", "--corpus", default="eng_2019", type = str, 
        help ="Shorthand name for corpus of words to be queried. Available\
 corpora can be read about at https://books.google.com/ngrams/info. Default is\
 English corpus, 2019 update")

    parser.add_argument("-s", "--startYear", default=1800, type=int,
        help = "A year: beginning of time range to be queried. Default is 1800")

    parser.add_argument("-e", "--endYear", default = 2000, type = int,
        help = "A year: last year of time range to be included in query.\
 Default is 2000")

    parser.add_argument("-sm", "--smoothing", default = 0, type = int,
        help = "The degree to which data points are averaged between years. A\
 smoothing of 0 indicates completely raw data. Default is 0.")

    parser.add_argument("-ci", "--caseInsensitive", action="store_true", 
        help = "Consider upper- and lower-case versions of the words")

    parser.add_argument("-a", "--allData", action="store_true")

    parser.add_argument("-n", "--noSave", action="store_true", 
        help = "Use to prevent data from being saved to external file")

    parser.add_argument("-q", "--quiet", action="store_true",
        help="Use to prevent program from printing to STD OUT")

    parser.add_argument("-o", "--outputDir", default = "./", type = str,
        help = "Directory to save output file to")

    parser.add_argument("-p", "--plot", action="store_true", 
        help = "Create plot of data")

    args = parser.parse_args()
    args.Query = "".join(args.Query).split(",")

    return args
Code example #12
def get_args():
#    get_days_show("d_12_12_2011", "d_13_12_2011", "file:///C:/Users/ehhexxn/Downloads/TV%20Calendar%20-%20December%202011%20TV%20listings%20guide.htm")
    parser = argparse.ArgumentParser(description='Process arguments of get_show_list')
    parser.add_argument('--start', dest='start',
                   help='The starting date, format yymmdd')
    parser.add_argument('--end', dest='end',
                   help='The ending date, format yymmdd')
    parser.add_argument('--source', dest="source", nargs="*",
                    help="The source file, default is http://www.pogdesign.co.uk/cat/")
    args = parser.parse_args()    
    return args
Code example #13
def main() -> None:
    """Main function."""
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('input', type=pathlib.Path)
    parser.add_argument('output', type=pathlib.Path)
    opts = parser.parse_args()
    module = stardoc_output_pb2.ModuleInfo.FromString(opts.input.read_bytes())
    # Force Unix-style line endings for consistent results.  See
    # https://github.com/bazelbuild/stardoc/issues/110.
    with opts.output.open(mode='xt', encoding='utf-8', newline='\n') as file:
        generator = _Generator(file)
        generator.run(module)
Code example #14
def getArguments():
    parser = ArgumentParser(description=NAME+': '+DESCRIPTION)
    parser.add_argument('-v', '--version', action='version', version=NAME+' '+VERSION, help='show the version number and exit')
    parser.add_argument('-d', '--directory', dest='directory', help='full path of directory to copy downloaded trailer', metavar='DIRECTORY')
    parser.add_argument('-t', '--title', dest='title', help='title of movie', metavar='TITLE')
    parser.add_argument('-y', '--year', dest='year', help='release year of movie', metavar='YEAR')
    args = parser.parse_args()
    return {
        'directory': str(args.directory) if args.directory != None else args.directory,
        'title': str(args.title) if args.title != None else args.title,
        'year': str(args.year) if args.year != None else args.year
    }
Code example #15
def main():
    parser = argparse.ArgumentParser(
        description=
        "Filter a Shakespearean play (namely Hamlet) and keep only the direct speech\n\nInput should be the html body of a play's text from shakespeare.mit.edu"
    )
    parser.add_argument("ifile", help="The raw input file")
    parser.add_argument("ofile", help="The output file")
    args = parser.parse_args()
    with open(args.ifile, "r") as ifile, open(args.ofile, "w") as ofile:
        lineParser = LineParser()
        lineParser.feed(ifile.read())
        ofile.write("\n".join(lineParser.lines))
    print("Done.")
Code example #16
def main():
    parser = argparse.ArgumentParser(description=(
        "Download the QuakeML files provided by the IRIS SPUD "
        "momenttensor service. Will be saved as a QuakeML file in the "
        "current folder."))
    parser.add_argument("url",
                        metavar="U",
                        type=str,
                        help="The URL to download.")
    args = parser.parse_args()

    url = args.url
    iris2quakeml(url)
Code example #17
File: nevernote.py  Project: snegov/nevernote
def main():
    parser = argparse.ArgumentParser(
        description='Nevernote - download pages locally.')
    parser.add_argument('urls', metavar='URL', type=str, nargs='+',
        help='URL of page to download')
    args = parser.parse_args()

    for arg in args.urls:
        if os.path.isfile(arg):
            print('Found file %s' % arg)
            for url in (line.strip() for line in open(arg)):
                process_url(url)
        else:
            process_url(arg)
Code example #18
File: yaspp.py  Project: loftwah/yaspp
def parseArgs():
    import argparse

    parser = argparse.ArgumentParser(
        description='Generate a static feed and podlove webplayer list.')

    parser.add_argument("yaml_file", type=str, help="The content file.")
    parser.add_argument("-o",
                        "--output-dir",
                        type=str,
                        default=".",
                        help="Output directory (default: .)")

    return parser.parse_args()
Code example #19
def process(self):
    parser = argparse.ArgumentParser()
    parser.add_argument('-test', type=str, help='test methodname')
    parser.add_argument('-a1', type=str, help='arg1')
    parser.add_argument('-a2', type=str, help='arg2')
    parser.add_argument('-a3', type=str, help='arg3')
    parser.add_argument('-url', help='url')
    parser.add_argument('-num', type=int, help='num types to call url')
    args = parser.parse_args()
    tbeg = time.time()
    if args.test is not None:
        self.process_test(args.test, args.a1, args.a2, args.a3)
    tend = time.time()
    tdif = (tend - tbeg)
    p('\ncompleted. time diff {} ms'.format(tdif))
Code example #20
File: github_tool.py  Project: nlitsme/githubtool
def main():
    import argparse
    parser = argparse.ArgumentParser(description='Tool for interrogating github')
    parser.add_argument('--auth', type=str, help='OAuth token, or "username:password"')
    parser.add_argument('--verbose', '-v', action='store_true', help='print more info, such as times')
    parser.add_argument('--debug', action='store_true', help='print full exception')
    parser.add_argument('--limits', action='store_true', help='print rate limit status')
    parser.add_argument('--list', '-l', type=str, help='List repositories for the specified user')
    parser.add_argument('--network', '-n', action='store_true', help='Show list of all forks and their state')
    parser.add_argument('--urls', '-u', action='store_true', help='output url listing')
    parser.add_argument('--all', '-a', action='store_true', help='Request all pages, up to 1000 items')
    parser.add_argument('--where', '-w', type=str, default='code', help='What type of object to search for: code, user, repo, commit, issue')
    parser.add_argument('--query', '-q', type=str, help='in:{path,file} language:{js,c,python,...} filename:substring extension:ext user: repo: size:')
    parser.add_argument('--create', '-c', type=str, help='Create a new repository, name:description')
    parser.add_argument('REPOS', nargs='*', type=str, help='repository list to summarize')
    args = parser.parse_args()

    try:
        with open(os.getenv("HOME")+"/.github_cmdline_rc") as fh:
            cfg = json.load(fh)
    except Exception as e:
        print("ERROR", e)
        cfg = dict()

    if not args.auth:
        args.auth = cfg.get('auth')

    loop = asyncio.get_event_loop()

    api = GithubApi(loop, args)

    tasks = [ ]
    if args.list:
        tasks.append(listrepos(api, args.list, args))
    elif args.limits:
        tasks.append(getlimits(api))
    elif args.query:
        tasks.append(querygithub(api, args))
    elif args.create:
        name, desc = args.create.split(':', 1)
        tasks.append(createrepo(api, args, name, desc))
    else:
        tasks.append(inforepos(api, args))

    loop.run_until_complete(asyncio.gather(*tasks))

    loop.run_until_complete(api.close())
Code example #21
def main():
    import argparse
    parser = argparse.ArgumentParser(description='print entire contents of a mediawiki site in XML format')
    parser.add_argument('--history', action='store_true', help='Include history in export')
    parser.add_argument('--savedir', type=str, help='Save all files to the specified directory')
    parser.add_argument('--limit', type=int, help='Maximum number of simultaneous connections to use.')
    parser.add_argument('--batchsize', type=int, help='Nr of pages to export per request.', default=300)
    parser.add_argument('--debug', action='store_true', help='errors print stacktrace, and abort')
    parser.add_argument('wikipage', type=str)
    args = parser.parse_args()

    global debug
    debug = args.debug

    loop = asyncio.get_event_loop()
    tasks = [ exportsite(loop, args.wikipage, args)  ]
    loop.run_until_complete(asyncio.gather(*tasks))
Code example #22
def get_args():
    #    get_days_show("d_12_12_2011", "d_13_12_2011", "file:///C:/Users/ehhexxn/Downloads/TV%20Calendar%20-%20December%202011%20TV%20listings%20guide.htm")
    parser = argparse.ArgumentParser(
        description='Process arguments of get_show_list')
    parser.add_argument('--start',
                        dest='start',
                        help='The starting date, format yymmdd')
    parser.add_argument('--end',
                        dest='end',
                        help='The ending date, format yymmdd')
    parser.add_argument(
        '--source',
        dest="source",
        nargs="*",
        help="The source file, default is http://www.pogdesign.co.uk/cat/")
    args = parser.parse_args()
    return args
Code example #23
File: _reporter.py  Project: taf3/taf
def parse_options():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--logfile", action="store", dest="log_file", default=None,
                        help="Path to log file created by buildbot.")
    parser.add_argument("--logdir", action="store", dest="log_dir", default=None,
                        help="Path to directory with log files created by buildbot.")
    parser.add_argument("--type", action="store", dest="attype", default='none',
                        help="Type of the attachment (html|text|none).")
    parser.add_argument("--html", action="store", dest="html", default=None,
                        help="Path to save html report.")
    parser.add_argument("--addtest", action="store", dest="addtest", default=None,
                        help="Add test if it not exists.")
    parser.add_argument("--htmlres", action="store", dest="html_res", default=None,
                        help="Path to html resources (css, js, images).")
    parser.add_argument("--xmlpath", action="store", dest="xml_path", default=None,
                        help="Path to xml files for html report.")
    parser.add_argument("--xsltstyle", action="store", dest="xslt_style", default="junit_full.xsl",
                        help="Path to xslt style sheet.")
    parser.add_argument("--xsltconcat", action="store", dest="xslt_concat", default="junit_concat.xsl",
                        help="Path to xslt concatenation style.")
    parser.add_argument("--maillist", action="store", dest="mail_list", default=None,
                        help="Path to file with email list.")
    parser.add_argument("--info", action="store", dest="info", default="",
                        help="Additional info for subject.")
    options = parser.parse_args()

    allowed_report_types = ['html', 'text', 'none']
    if options.attype not in allowed_report_types:
        raise Exception("Invalid --type option.")

    mail_list = []
    if options.mail_list:
        config = configparser.RawConfigParser()
        try:
            config.read(options.mail_list)
        except Exception as err:
            raise err
        else:
            mail_list = eval(config.get('subscribers', 'emails'))

    return {'logfile': options.log_file, 'logdir': options.log_dir, 'info': options.info,
            'attype': options.attype, 'maillist': mail_list,
            'html': options.html, 'htmlres': options.html_res, 'addtest': options.addtest, 'xmlpath': options.xml_path,
            'xslt_style': options.xslt_style, 'xslt_concat': options.xslt_concat}
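A note on example #23: eval() on the value read from the mail-list file will execute any code contained in that file. If the file stores the addresses as a plain Python list literal, ast.literal_eval is a safer way to read it. A sketch under that assumption, reusing the same configparser setup:

    import ast

    config = configparser.RawConfigParser()
    config.read(options.mail_list)
    # literal_eval parses the list literal without executing arbitrary code
    mail_list = ast.literal_eval(config.get('subscribers', 'emails'))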
Code example #24
File: xml2tab.py  Project: anibyl/kamputerm
def parseArguments():
    parser = argparse.ArgumentParser(description='Convert from a stardict textual file to a tab file')
    parser.add_argument('input', metavar='FILENAME', nargs='?', default='-', help='input file name. If missing then reads from stdin')
    parser.add_argument('-o', '--output', default='-', metavar='FILENAME', help='output file name. If missing then writes to stdout')
    parser.add_argument('-r', '--orthography', default='school', choices=['school', 'classic'], help="'classic' or 'school' orthography")
    args = parser.parse_args()
    if args.input == '-':
        args.input = sys.stdin
    else:
        args.input = open(args.input, 'r', encoding='utf8')
    if args.output == '-':
        args.output = sys.stdout
    else:
        args.output = open(args.output, 'w', encoding='utf8')
    if args.orthography == 'school':
        args.orthography = 'by'
    elif args.orthography == 'classic':
        args.orthography = 'be'
    return args
Code example #25
def parse_config():
    parser = argparse.ArgumentParser()
    parser.add_argument("src", nargs=1, type=str)
    parser.add_argument("dst", nargs="?", type=str)
    args = parser.parse_args()

    config = {}

    config["src"] = args.src[0]

    if args.dst is None:
        index = config["src"].rfind(".")
        if index == -1:
            config["dst"] = "{}.md".format(config["src"])
        else:
            config["dst"] = "{}.md".format(config["src"][:index])
    else:
        config["dst"] = args.dst[0]

    return config
Code example #26
File: vd.py  Project: cwbooth5/visidata
def terminal_main():
    'Parse arguments and initialize VisiData instance'
    import argparse

    global g_args, vd
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('inputs', nargs='*', help='initial sheets')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true', default=False, help='abort on exception')
    g_args = parser.parse_args()

    vd = VisiData()
    inputs = g_args.inputs or ['.']

    for fn in inputs:
        openSource(VSource(fn, fn))

    ret = wrapper(curses_main)
    if ret:
        print(ret)
Code example #27
def main():
    """CLI go brrrrr."""
    shtmlparser = SimpleHTMLDataParser()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "injson", nargs="?", type=argparse.FileType("r"), default=sys.stdin
    )
    parser.add_argument(
        "outcsv", nargs="?", type=argparse.FileType("w"), default=sys.stdout
    )
    config = parser.parse_args()

    raw_data = json.load(config.injson)

    writer = csv.writer(config.outcsv)
    writer.writerow(
        (
            "Notification Date",
            "School",
            "City",
            "School District",
            "Health Region",
            "Exposure Dates",
            "Extra Info",
        )
    )

    # parse and write out each record
    for record in raw_data:
        pr = CovidEventRecord(record, shtmlparser)
        writer.writerow(
            (
                pr.notification_date,
                pr.school,
                pr.city,
                pr.school_district,
                pr.health_region,
                pr.exposure_dates,
                pr.extra_info,
            )
        )
Code example #28
File: rubyvenv.py  Project: ethanhs/rubyvenv
def main(argv: Optional[Sequence[str]] = None) -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument('dest', nargs='?', metavar='DEST_DIR')
    parser.add_argument('--ruby', default='latest')
    parser.add_argument(
        '--list-versions', action='store_true',
        help='List versions available for your system',
    )
    args = parser.parse_args(argv)

    if args.list_versions:
        return list_versions()
    else:
        if not args.dest:
            parser.error('DEST_DIR is required')
        args.dest = os.path.abspath(args.dest)
        if args.ruby == 'system':
            return make_system_environment(args.dest)
        else:
            version = pick_version(args.ruby)
            return make_environment(args.dest, version)
Code example #29
def main():

    parser = CommandLineParser()
    (options, args) = parser.parse_args()

    if options.version == True:
        print("conkyClementine v.3.00")
    else:
        if options.verbose == True:
            print("*** INITIAL OPTIONS:")
            print("    datatype:", options.datatype)
            print("    template:", options.template)
            print("    ratingchar:", options.ratingchar)
            print("    nounknownoutput:", options.nounknownoutput)
            print("    secondsoutput:", options.secondsoutput)
            print("    maxlength:", options.maxlength)
            print("    verbose:", options.verbose)
            print("    errorlogfile:", options.errorlogfile)
            print("    infologfile:", options.infologfile)

        clementineinfo = ClementineInfo(options)
        clementineinfo.writeOutput()
Code example #30
File: PTT_Crawler.py  Project: chenjr0719/PTT-Crawler
def main():

    #Read and set parameters.
    parser = argparse.ArgumentParser()

    parser.add_argument("board", help="Set the board you want to crawling. Ex: Gossiping,cat")
    parser.add_argument("num", type = int, help="Set the number of index you want to crawling.")
    parser.add_argument("-p", "--push", help="Collect pushes or not. Default is yes.")

    args = parser.parse_args()

    main.board = str(args.board)
    index_num = int(args.num)

    if args.push == 'yes' or args.push == None:
        main.get_push = True
    elif args.push == 'no':
        main.get_push = None
    else:
        print('--push is not correct!\nPlease input yes or no.')
        sys.exit()

    #Create a directory to restore the result.
    result_dir = 'Result/'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    os.chdir(result_dir)

    print('Start to Crawling...\nPlease be patient.')
    print('Getting article list...')
    link_list = getArticleLinks(main.board, index_num)

    #Get message, comments and reactions from feed.
    print('Crawling article in multi-processing...')
    target_pool = Pool()
    target_pool.map(getArticle, link_list)
    target_pool.close()

    print('Crawling is done.')
Code example #31
File: xiami.py  Project: arition/xiami-downloader
def parse_arguments():

    note = 'The following SONG, ALBUM, and PLAYLIST are IDs which can be ' \
           'obtained from the URL of corresponding web page.'

    parser = argparse.ArgumentParser(description=note)

    parser.add_argument('-v', '--version', action='version', version=VERSION)
    parser.add_argument('-f', '--force', action='store_true',
                        help='overwrite existing files without prompt')
    parser.add_argument('-t', '--tool', choices=['wget', 'urllib2'],
                        help='change the download tool')
    parser.add_argument('-s', '--song', action='append',
                        help='adds songs for download',
                        type=int, nargs='+')
    parser.add_argument('-a', '--album', action='append',
                        help='adds all songs in the albums for download',
                        type=int, nargs='+')
    parser.add_argument('-p', '--playlist', action='append',
                        help='adds all songs in the playlists for download',
                        type=int, nargs='+')
    parser.add_argument('--no-tag', action='store_true',
                        help='skip adding ID3 tag')
    parser.add_argument('--directory', default='',
                        help='save downloads to the directory')
    parser.add_argument('--name-template', default='{id} - {title} - {artist}',
                        help='filename template')
    parser.add_argument('--lrc-timetag', action='store_true',
                        help='keep timetag in lyric')
    parser.add_argument('--no-wait', action='store_true',
                        help='make download faster, but xiami may ban your account')
    parser.add_argument('-un', '--username', default='',
                        help='Vip account email')
    parser.add_argument('-pw', '--password', default='',
                        help='Vip account password')
    parser.add_argument('-ma', '--memberAuth', default='',
                        help='Cookie member_auth')

    return parser.parse_args()
Code example #32
File: pfserver.py  Project: scjurgen/pyfeld
def run_main():
    global uc_media
    global raumfeld_handler
    global arglist

    LOG_FILENAME = Settings.home_directory() + '/pfserver.log'
    unlink(LOG_FILENAME)
    logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)

    logging.debug('This message should go to the log file')

    parser = argparse.ArgumentParser(description='pfserver,A.K.A. Raumfeldserver with pyfeld.')
    parser.add_argument('--telnetserverip', default="127.0.0.1", help='Address of telnet server in the cloud')
    parser.add_argument('--telnetserverport', default='24445', help='Port of telnet server in the cloud')
    parser.add_argument('--localport', default='8088', help='local port for eventual rest interface')
    parser.add_argument('--gui', dest='gui', default='none', help='add a monitor window')
    arglist = parser.parse_args()

    threading.Thread(target=call_forwarder, args=[arglist.telnetserverip, arglist.telnetserverport]).start()

    RfCmd.discover()
    raumfeld_handler = RaumfeldHandler()
    subscription_handler = SubscriptionHandler(raumfeld_handler)
    threads = []
    t = threading.Thread(target=subscription_handler.subscription_thread, args=(280,))
    threads.append(t)
    t.start()
    if arglist.gui == 'curses':
        gui = MainGui()
        uuid_store.set_update_cb(gui.show_notification_state)
        tgui = threading.Thread(target=gui.run_main_loop)
        threads.append(tgui)
        tgui.start()
    uc_media = UpnpCommand(RfCmd.rfConfig['mediaserver'][0]['location'])
    this_servers_ip = get_local_ip_address()
    run_server(this_servers_ip, arglist.localport)
Code example #33
def parse_arguments():
    """
    命令的帮助信息设置
    :return:
    """
    note = '需要下载的专辑(album)ID, 歌单(playlist)ID, 歌曲(song)ID' \
           '从虾米音乐的网页版获取.'
    parser = argparse.ArgumentParser(description=note)

    parser.add_argument('-v', '--version', action='version',
                        version='1.0')
    parser.add_argument('-s', '--song', action='append',
                        help='adds songs for download',
                        nargs='+')
    parser.add_argument('-a', '--album', action='append',
                        help='adds all songs in the albums for download',
                        nargs='+')
    parser.add_argument('-p', '--playlist', action='append',
                        help='adds all songs in the playlists for download',
                        nargs='+')
    parser.add_argument('-t', '--to', action='append',
                        help='adds name of directory to save songs',
                        nargs='+')
    return parser.parse_args()
Code example #34
File: mavproxy.py  Project: Python3pkg/MAVProxy
    parser.add_option("--profile",
                      action='store_true',
                      help="run the Yappi python profiler")
    parser.add_option("--state-basedir",
                      default=None,
                      help="base directory for logs and aircraft directories")
    parser.add_option("--version",
                      action='store_true',
                      help="version information")
    parser.add_option(
        "--default-modules",
        default=
        "log,signing,wp,rally,fence,param,relay,tuneopt,arm,mode,calibration,rc,auxopt,misc,cmdlong,battery,terrain,output,adsb",
        help='default module list')

    (opts, args) = parser.parse_args()
    if len(args) != 0:
        print(("ERROR: mavproxy takes no position arguments; got (%s)" %
               str(args)))
        sys.exit(1)

    # warn people about ModemManager which interferes badly with APM and Pixhawk
    if os.path.exists("/usr/sbin/ModemManager"):
        print(
            "WARNING: You should uninstall ModemManager as it conflicts with APM and Pixhawk"
        )

    #set the Mavlink version, if required
    set_mav_version(opts.mav10, opts.mav20, opts.auto_protocol,
                    opts.mavversion)
Code example #35
def parse_args():
    parser = argparse.ArgumentParser(description='Import bookmarks from standard html file to rofi-bookmarks.')
    parser.add_argument('inputfile', metavar='htmlfile' )
    parser.add_argument('outputfile', metavar='bookmarkfile')

    return parser.parse_args()
Code example #36

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    # without dash(-) means positional arguments
    parser.add_argument('srt',
                        metavar='source.srt',
                        help='srt file path that you want to translate.')
    parser.add_argument('-sl',
                        '--sl',
                        metavar='ko',
                        default='ko',
                        help='language code of input srt file')
    parser.add_argument('-l',
                        '--language',
                        nargs='+',
                        metavar='en zh-TW',
                        default=['en', 'zh-TW'],
                        help='language codes those you want to translate.')
    args = parser.parse_args()
    #args = parser.parse_args( ['db_kr.srt'] )

    if None == args.srt:
        print(
            'Please, input srt file path. ex) python smart_shampoo.py source.srt'
        )
    else:
        create_translated_srt(args.srt, args.sl, args.language)
        print('[SShampoo] Done!')
Code example #37
        cherrypy.tree.graft(WSGIHandler())
        print("Starting {0} at http://localhost:{1}".format(
            OmniDB.settings.OMNIDB_VERSION, str(server_port)))
        cherrypy.engine.start()

        print("Open OmniDB in your favorite browser")
        print("Press Ctrl+C to exit")
        cherrypy.engine.block()


if __name__ == "__main__":
    #default port

    parser = optparse.OptionParser(version=OmniDB.settings.OMNIDB_VERSION)
    parser.add_option("-p",
                      "--port",
                      dest="port",
                      default=OmniDB.settings.OMNIDB_DEFAULT_PORT,
                      type=int,
                      help="listening port")
    (options, args) = parser.parse_args()

    #Removing Expired Sessions
    SessionStore.clear_expired()

    #Websocket Core
    ws_core.start_wsserver_thread()
    server_port = options.port
    DjangoApplication().run()
Code example #38
    parser.add_option('-u',
                      '--unassigned',
                      action='store_true',
                      dest='unassigned',
                      help='show only unassigned bugs')
    parser.add_option('-c',
                      '--categories',
                      action='store_true',
                      dest='cats',
                      help='show bug category')
    parser.add_option('-l',
                      '--link',
                      action='store_true',
                      dest='links',
                      help='show link to bug details')
    options, filter_args = parser.parse_args()
    unassigned_only = options.unassigned
    show_cats = options.cats
    show_links = options.links

    # check if our local copy is recent, if not:
    # login and download the latest CSV from bugtracker
    if not cache_is_recent():
        print("local copy too old -> fetching a recent one..")

        # are USERNAME / PASSWORD provided? if not, prompt for any missing
        if USERNAME == '':
            USERNAME = raw_input('Username: ')
        if PASSWORD == '':
            PASSWORD = getpass.getpass('Password: ')
Code example #39
parser = OptionParser()

parser.add_option("-p", "--pathways", dest="pathways", default='',
                  help="pathways to import from MetaCyc")

parser.add_option("-r", "--reactions", dest="reactions", default='',
                  help="directly import non-pathway reactions")

parser.add_option("-i", "--iterate", action="store_true", dest="recursive", default=False,
                  help="iterate tree to find subpathways recursively (on pathomxways only)")

parser.add_option("-s", "--search", dest="search", default=None,
                  help="only load pathways matching this regex")

(options, args) = parser.parse_args()

reaction_directions = {
    'LEFT-TO-RIGHT': 'forward',
    'RIGHT-TO-LEFT': 'back',
    'REVERSIBLE': 'both',
    'IRREVERSIBLE-LEFT-TO-RIGHT': 'forward',
    'IRREVERSIBLE-RIGHT-TO-LEFT': 'back',
    'PHYSIOL-LEFT-TO-RIGHT': 'forward',
    'PHYSIOL-RIGHT-TO-LEFT': 'back'
    }

secondary_metabolites = [
                    # Nuceleosides
                    'AMP', 'ADP', 'ATP',
                    'CMP', 'CDP', 'CTP',
Code example #40
File: scraper.py  Project: lukakalinovcic/kodiranje
    for i, example in enumerate(examples, 1):
        input_path = os.path.join(problem_dir, 'in{}'.format(i))
        with open(input_path, 'w') as f:
            f.write(example[0])

        output_path = os.path.join(problem_dir, 'out{}'.format(i))
        with open(output_path, 'w') as f:
            f.write(example[1])

    print('Wrote {} examples for problem {}.'.format(len(examples), problem))


parser = argparse.ArgumentParser(description='Codeforces scraper.  https://github.com/lovrop/codeforces-scraper')
parser.add_argument('contest', help='URI or numerical ID of contest to scrape')
args = parser.parse_args()

# See if it was just a numeric ID
try:
    contest_id = int(args.contest)
    contest_uri = 'http://codeforces.com/contest/{}'.format(contest_id)
except ValueError:
    contest_uri = args.contest

print('Retrieving ', contest_uri, '... ', sep='', end='')
sys.stdout.flush()
contest_html = urllib.request.urlopen(contest_uri).read().decode('utf-8')
print('OK ({} bytes).'.format(len(contest_html)))

parser = ContestHTMLParser()
parser.feed(contest_html)
Code example #41
File: search_bug_tracker.py  Project: ruesp83/blender
    f = o.open(url_csv)
    if f.headers["Content-Type"] != "text/comma-separated-values":
        return None
    content = f.read().decode("utf8")
    f.close()
    return content


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="usage: %prog [options] search string", description="Search Blender Bug Tracker"
    )
    parser.add_option("-u", "--unassigned", action="store_true", dest="unassigned", help="show only unassigned bugs")
    parser.add_option("-c", "--categories", action="store_true", dest="cats", help="show bug category")
    parser.add_option("-l", "--link", action="store_true", dest="links", help="show link to bug details")
    options, filter_args = parser.parse_args()
    unassigned_only = options.unassigned
    show_cats = options.cats
    show_links = options.links

    # check if our local copy is recent, if not:
    # login and download the latest CSV from bugtracker
    if not cache_is_recent():
        print("local copy too old -> fetching a recent one..")

        # are USERNAME / PASSWORD provided? if not, prompt for any missing
        if USERNAME == "":
            USERNAME = raw_input("Username: "******"":
            PASSWORD = getpass.getpass("Password: ")
Code example #42
File: bandex.py  Project: EP-USP/mini-ep2
from restaurante import Restaurante

def day_of_week(day_number):

    day_list = ['segunda', 'terca', 'quarta',
                'quinta', 'sexta', 'sabado',
                'domingo']
    return day_list[day_number]

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    day_number = datetime.datetime.today().weekday()

    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')

    parser.add_argument('-b', '--bandex', help='Local')
    parser.add_argument('-d', '--dia', help='Dia da semana', default=day_of_week(day_number))
    parser.add_argument('-a', '--almoco', help='Almoço', action='store_true')
    parser.add_argument('-j', '--janta', help='Janta', action='store_true')

    args = parser.parse_args()
    if not args.bandex:
        args = parser.parse_args(['@.bandexrc'])

    restaurante = Restaurante(args.bandex)
    if not args.almoco and not args.janta:
        restaurante.print_menu(args.dia, True, True)
    else:
        restaurante.print_menu(args.dia, args.almoco, args.janta)

Code example #43
def main(argv):

    parser = make_argparser()
    args = parser.parse_args(argv[1:])

    logging.basicConfig(stream=args.log,
                        level=args.volume,
                        format='%(message)s')

    clipboard = args.clipboard
    if not distutils.spawn.find_executable('xclip'):
        logging.warning(
            'Warning: Could not find `xclip` command. Will not be able to copy final url to clipboard.'
        )
        clipboard = False

    #TODO: read from stdin
    url = args.url
    if url is None:
        parser.print_help()
        raise URLError(
            'Error: No url argument given and could not find a valid url in clipboard.'
        )

    if not urllib.parse.urlsplit(url).scheme:
        url = 'http://' + url
    if get_loglevel() <= logging.WARNING:
        print(url)

    # Do the actual redirect resolution.
    replies = list(
        follow_redirects(url,
                         max_response=args.max_response,
                         user_agent=args.user_agent))
    for reply_num, reply in enumerate(replies):
        if get_loglevel() <= logging.WARNING or reply_num == len(replies) - 1:
            print(reply.location)

    # Remove starting www. from domain, if present
    domain = urllib.parse.urlsplit(reply.location).netloc
    if domain.startswith('www.') and domain.count('.') > 1:
        domain = domain[4:]

    # Print summary info.
    for reply in replies:
        if reply.type == 'refresh':
            logging.info('meta refresh from  ' +
                         reply.url[:args.terminal_width - 19])
        elif reply.type == 'absolute':
            logging.info('absolute path from ' +
                         reply.url[:args.terminal_width - 19])
        elif reply.type == 'relative':
            logging.info('relative path from ' +
                         reply.url[:args.terminal_width - 19])
    logging.info(f'total redirects: {len(replies)}')

    # Copy final data to clipboard, and open reputation checker in browser, if requested.
    if clipboard:
        if args.browser:
            to_clipboard(reply.location)
        else:
            to_clipboard(domain)
    if args.browser:
        webbrowser.open(args.reputation_url + domain)