Example 1
def create(prefix, args):
    """Creates a new manager."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("--%shelp" % prefix, action="help",
        help="Provides help about arguments for this manager")
    # Assumed: the original snippet reads args.dbpath below but never defines
    # the option, so a matching prefixed argument is added here.
    parser.add_argument("--%sdbpath" % prefix, dest="dbpath",
        help="Path to this manager's database")
    args, remaining_args = parser.parse_known_args(args)
    return Manager(args.dbpath), remaining_args
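
A note on the pattern above: parse_known_args consumes only the options this parser defines and hands back everything else, so several prefixed parsers can each peel their own flags off one command line. A minimal sketch of that chaining (the prefixes and the --verbose flag are illustrative, not from the original):

    import argparse

    def make_parser(prefix):
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument("--%sverbose" % prefix, action="store_true")
        return parser

    argv = ["--averbose", "positional", "--bverbose"]
    args_a, rest = make_parser("a").parse_known_args(argv)  # consumes --averbose
    args_b, rest = make_parser("b").parse_known_args(rest)  # consumes --bverbose
    print(rest)  # ['positional']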
Example 2
def main():

    description = 'Make Launch Center Pro backup files easier to work with.'
    parser = argparse.ArgumentParser(description=description)
    group = parser.add_mutually_exclusive_group(required=True)

    group.add_argument('-read', action='store', help = ('Read in '
    'a Launch Center Pro .lcpbackup file and output a json file with '
    'the URL actions.'))

    group.add_argument('-write', action='store', help = ('Read in a '
    'previously created json file and write it to a Launch Center Pro '
    'backup file.'))

    parser.add_argument('-lcpfile', '-l', action='store', help=('The '
    '*XML* LCP backup file to use as a template (defaults to the xml file '
    'with the same timestamp as the json file). Either use the default, or '
    'convert manually from binary to xml with plutil.'))

    args = parser.parse_args()

    if args.read:
        if not args.read.endswith('.lcpbackup'):
            print("You need to specify an .lcpbackup file to read.")
            exit(1)  # exit non-zero on error
        else:
            pl = read_lcp_backup(args.read)

            url_dict = find_urls(pl)

            # out_json/out_xml are not defined anywhere in this snippet; they
            # are assumed here to follow the timestamp naming used in the
            # write branch below.
            out_json = args.read.replace('.lcpbackup', '.json')
            out_xml = args.read.replace('.lcpbackup', '_xml.lcpbackup')

            with open(out_json, 'w') as json_opener:
                json.dump(url_dict, json_opener, indent=4)

            print("Output:\n{}\n{}".format(out_json, out_xml))

    if args.write:
        if not args.write.endswith('.json'):
            print("You need to specify a .json file to read from.")
            exit(1)  # exit non-zero on error
        else:

            old_ts = re.search(r'^\d+', args.write).group(0)
            template_file = '{}_xml.lcpbackup'.format(old_ts)

            if args.lcpfile:
               template_file = args.lcpfile

            with open(template_file) as xml, \
                        open(args.write) as json_file:

                pl = xml.read()
                my_json = json.load(json_file)

            new_pl = update_pl(pl, my_json)

            write_lcp_backup(new_pl)
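
Example 2 leans on add_mutually_exclusive_group(required=True): argparse itself rejects a command line that supplies both -read and -write, or neither. A stripped-down sketch of that behaviour:

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-read')
    group.add_argument('-write')

    parser.parse_args(['-read', 'a.lcpbackup'])         # OK
    # parser.parse_args([])                             # error: one of -read/-write is required
    # parser.parse_args(['-read', 'a', '-write', 'b'])  # error: not allowed together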
Example 3
def main():
    parser = argparse.ArgumentParser(
        description='Nevernote - download pages locally.')
    parser.add_argument('urls', metavar='URL', type=str, nargs='+',
        help='URL of page to download')
    args = parser.parse_args()

    for arg in args.urls:
        if os.path.isfile(arg):
            print('Found file %s' % arg)
            # use a context manager so the file handle is closed promptly
            with open(arg) as url_file:
                for url in (line.strip() for line in url_file):
                    process_url(url)
        else:
            process_url(arg)
Example 4
def parse_config():
    parser = argparse.ArgumentParser()
    parser.add_argument("src", nargs=1, type=str)
    parser.add_argument("dst", nargs="?", type=str)
    args = parser.parse_args()

    config = {}

    config["src"] = args.src[0]

    if args.dst is None:
        index = config["src"].rfind(".")
        if index == -1:
            config["dst"] = "{}.md".format(config["src"])
        else:
            config["dst"] = "{}.md".format(config["src"][:index])
    else:
        # nargs="?" yields a plain string (unlike nargs=1, which yields a
        # list), so indexing args.dst[0] would take just its first character
        config["dst"] = args.dst

    return config
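
The src/dst handling above turns on an easy-to-miss argparse detail: nargs=1 always produces a one-element list, while nargs="?" produces the bare value (or None when the argument is absent). A quick illustration:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument("src", nargs=1)
    p.add_argument("dst", nargs="?")
    print(p.parse_args(["a.txt", "b.md"]))  # Namespace(src=['a.txt'], dst='b.md')
    print(p.parse_args(["a.txt"]))          # Namespace(src=['a.txt'], dst=None)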
Example 5
def get_args():
#    get_days_show("d_12_12_2011", "d_13_12_2011", "file:///C:/Users/ehhexxn/Downloads/TV%20Calendar%20-%20December%202011%20TV%20listings%20guide.htm")
    parser = argparse.ArgumentParser(description='Process arguments of get_show_list')
    parser.add_argument('--start', dest='start',
                   help='The starting date, format yymmdd')
    parser.add_argument('--end', dest='end',
                   help='The ending date, format yymmdd')
    parser.add_argument('--source', dest="source", nargs="*",
                    help="The source file, default is http://www.pogdesign.co.uk/cat/")
    args = parser.parse_args()    
    return args
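
Nothing in Example 5 validates the promised yymmdd format; argparse's type= hook can do that at parse time and report bad input as a usage error. A sketch under the stated yymmdd convention:

    import argparse
    from datetime import datetime

    def yymmdd(value):
        try:
            return datetime.strptime(value, '%y%m%d').date()
        except ValueError:
            raise argparse.ArgumentTypeError('expected a yymmdd date, got %r' % value)

    parser = argparse.ArgumentParser()
    parser.add_argument('--start', type=yymmdd, help='The starting date, format yymmdd')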
Example 6
def run_main():
    global uc_media
    global raumfeld_handler
    global arglist

    LOG_FILENAME = Settings.home_directory() + '/pfserver.log'
    try:
        unlink(LOG_FILENAME)
    except OSError:
        pass  # no previous log file to remove
    logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)

    logging.debug('This message should go to the log file')

    parser = argparse.ArgumentParser(description='pfserver, a.k.a. Raumfeld server with pyfeld.')
    parser.add_argument('--telnetserverip', default="127.0.0.1", help='Address of telnet server in the cloud')
    parser.add_argument('--telnetserverport', default='24445', help='Port of telnet server in the cloud')
    parser.add_argument('--localport', default='8088', help='local port for eventual rest interface')
    parser.add_argument('--gui', dest='gui', default='none', help='add a monitor window')
    arglist = parser.parse_args()

    threading.Thread(target=call_forwarder, args=[arglist.telnetserverip, arglist.telnetserverport]).start()

    RfCmd.discover()
    raumfeld_handler = RaumfeldHandler()
    subscription_handler = SubscriptionHandler(raumfeld_handler)
    threads = []
    t = threading.Thread(target=subscription_handler.subscription_thread, args=(280,))
    threads.append(t)
    t.start()
    if arglist.gui == 'curses':
        gui = MainGui()
        uuid_store.set_update_cb(gui.show_notification_state)
        tgui = threading.Thread(target=gui.run_main_loop)
        threads.append(tgui)
        tgui.start()
    uc_media = UpnpCommand(RfCmd.rfConfig['mediaserver'][0]['location'])
    this_servers_ip = get_local_ip_address()
    run_server(this_servers_ip, arglist.localport)
Example 7
def parseArguments():
    parser = argparse.ArgumentParser(description='Convert from a stardict textual file to a tab file')
    parser.add_argument('input', metavar='FILENAME', nargs='?', default='-', help='input file name. If missing then reads from stdin')
    parser.add_argument('-o', '--output', default='-', metavar='FILENAME', help='output file name. If missing then writes to stdout')
    parser.add_argument('-r', '--orthography', default='school', choices=['school', 'classic'], help="'classic' or 'school' orthography")
    args = parser.parse_args()
    if args.input == '-':
        args.input = sys.stdin
    else:
        args.input = open(args.input, 'r', encoding='utf8')
    if args.output == '-':
        args.output = sys.stdout
    else:
        args.output = open(args.output, 'w', encoding='utf8')
    if args.orthography == 'school':
        args.orthography = 'by'
    elif args.orthography == 'classic':
        args.orthography = 'be'
    return args
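
argparse can do the '-'-means-stdin/stdout dance above by itself via argparse.FileType; a sketch of the same parser using it (behaviour differs slightly, since FileType opens files at parse time):

    import argparse
    import sys

    parser = argparse.ArgumentParser(description='Convert from a stardict textual file to a tab file')
    parser.add_argument('input', metavar='FILENAME', nargs='?',
                        type=argparse.FileType('r', encoding='utf8'),
                        default=sys.stdin, help='input file name; reads stdin if missing')
    parser.add_argument('-o', '--output', metavar='FILENAME',
                        type=argparse.FileType('w', encoding='utf8'),
                        default=sys.stdout, help='output file name; writes to stdout if missing')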
Example 8
def main():

    #Read and set parameters.
    parser = argparse.ArgumentParser()

    parser.add_argument("board", help="Set the board you want to crawling. Ex: Gossiping,cat")
    parser.add_argument("num", type = int, help="Set the number of index you want to crawling.")
    parser.add_argument("-p", "--push", help="Collect pushes or not. Default is yes.")

    args = parser.parse_args()

    main.board = str(args.board)
    index_num = int(args.num)

    if args.push == 'yes' or args.push is None:
        main.get_push = True
    elif args.push == 'no':
        main.get_push = None
    else:
        print('--push is not correct!\nPlease input yes or no.')
        sys.exit()

    #Create a directory to store the results.
    result_dir = 'Result/'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    os.chdir(result_dir)

    print('Starting to crawl...\nPlease be patient.')
    print('Getting article list...')
    link_list = getArticleLinks(main.board, index_num)

    #Get message, comments and reactions from feed.
    print('Crawling articles with multiprocessing...')
    target_pool = Pool()
    target_pool.map(getArticle, link_list)
    target_pool.close()

    print('Crawling is done.')
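
The hand-rolled yes/no check in Example 8 is what argparse's choices parameter exists for: invalid values are rejected with a usage message before any crawling starts. A sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--push", choices=["yes", "no"], default="yes",
                        help="Collect pushes or not. Default is yes.")

    print(parser.parse_args([]).push)     # 'yes'
    # parser.parse_args(["-p", "maybe"])  # error: argument -p/--push: invalid choice: 'maybe'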
Example 9
class Count(object):
    def __init__(self):
        self.counter = 0
    def get(self):
        return self.counter
    def increment(self):
        self.counter += 1
        if self.counter > 95:
            self.counter = 0

# --------------------------------------------------

if __name__ == '__main__':

    # Server startup options
    # --nows: no websocket output, just update DB

    # TODO: Handle 'headless' commandline options
    parser = argparse.ArgumentParser(description="Wisewolf RSS server process")
    parser.add_argument("--nows", help="No websocket output, just update DB")
    args = parser.parse_args()

    # Log startup message, create DB if necessary
    print("Wisewolf RSS server %s (c)2017 Kyubi Systems: " % SERVER_VERSION, end=' ')
    initialise()
    print('OK')

    # Start main RSS server loop
    logging.info("Wisewolf RSS server version %s starting", SERVER_VERSION)
    start()
Example 10
    print(f'Unparsed: {_}')

    before_job()

    do_job()

    after_job()


if __name__ == '__main__':
    # Check the python version is 3
    if sys.version_info.major != 3:
        print('[Wallhaven] Need python3')
        sys.exit()
    
    # Argument parse
    import argparse
    parser = argparse.ArgumentParser()
 
    parser.add_argument('-c', '--config', type=str,
                        default='config.yaml',
                        help='The configuration file path')
    FLAGS, _ = parser.parse_known_args()

    # Preprocessing for some arguments
    FLAGS.config = os.path.abspath(os.path.expanduser(FLAGS.config))

    # Execute main
    main()

Example 11
  # Keep track of the data.
  def handle_data(self, data):
    if self.__in_tr:
      if self.__in_td:
        if self.__td_position < self.__numCols:
          self.__possible_data[self.__td_position] += data

  @property
  def countryData(self):
    return self.__countryData

# Define usage when running this from the command line.
if __name__ == '__main__':
  parser = argparse.ArgumentParser(
    description='Scrape countries from http://www.nationsonline.org.')
  parser.add_argument('url', type=str, help='The URL to scrape.')
  parser.add_argument('output', type=str, help='The output file.')
  parser.add_argument('-p', '--pickle', dest='p', action='store_true',
      help='Generate a pickle.')
  args = parser.parse_args()

  genPickle = args.p
  url = args.url
  outputFile = args.output

  # Default values.
  numPops = 0 # The number of irrelevant data values at the top to ignore.
  exceptions = [] # Legitimate country rows marked as erroneous.
  numCols = 3 # The number of columns.
  extractionMap = (0, 1, 2) # The subset of columns to use as output.
Example 12
                    if ex[2]:
                        examples_with_query += 1

                examples.extend(new_examples)
                progress_bar.set_postfix({
                    "examples": len(examples),
                    "examples with query": examples_with_query,
                })

    examples = pd.DataFrame(examples, columns=["name", "command", "context"])
    examples.to_csv(args.output, index=False)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Crawl examples from manpage-data.json")
    parser.add_argument("input", type=str, help="path to manpage-data.json")
    parser.add_argument("--chunk-size",
                        type=int,
                        default=100,
                        help="number of lines in memory")
    parser.add_argument("-o",
                        "--output",
                        type=str,
                        default="manpage-examples.csv",
                        help="path to output file")
    args = parser.parse_args()
    main(args)
Example 13
            if command_line_args.precompress == "1":
                args = ['gzip', root + "/" + outFileStub]
                print("args: " + str(args))
                subprocess.check_call(args)

    print("Successfully built " + outFile + " contains " + str(len(output)) +
          " characters")

    if command_line_args.precompress == "1":
        return outFileEnd
    else:
        return outFileStub


parser = argparse.ArgumentParser()
parser.add_argument("dev_html_name")
parser.add_argument("--precompress", default="1")
command_line_args = parser.parse_args()

root = os.path.dirname(os.path.realpath(command_line_args.dev_html_name))

print("Root is " + root)

subprocess.call(["rm", "-rf", root + "/dist"])
subprocess.call(["mkdir", root + "/dist"])

scriptFiles = []
webWorkerScriptFiles = []
cssFiles = []

Example 14
# dough
import logging
import textwrap
import argparse

parser = argparse.ArgumentParser(
    description="Query URL for content",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=textwrap.dedent("""
        examples:
          > %(prog)s "https://docs.python.org" -q ".document h1"
          > %(prog)s "https://docs.python.org" -r "Python ([0-9.]+)"
          > %(prog)s "https://time.is" -w -u -i 5
          > %(prog)s "https://time.is" -q time -i 5
        """),
)
parser.add_argument("url", help="an url to request")
parser.add_argument(
    "--whole",
    "-w",
    default=False,
    action="store_true",
    help="match whole response body (ignore -q and -r arguments)",
)
parser.add_argument(
    "--query",
    "-q",
    metavar="SELECTOR",
    type=str,
    help="a CSS-like selector to query (supports `tag`, `.class` and `#id`)",
)
parser.add_argument(
Example 15
def main():
    import argparse
    parser = argparse.ArgumentParser(description='Tool for interrogating GitHub')
    parser.add_argument('--auth', type=str, help='OAuth token, or "username:password"')
    parser.add_argument('--verbose', '-v', action='store_true', help='print more info, such as times')
    parser.add_argument('--debug', action='store_true', help='print full exception')
    parser.add_argument('--limits', action='store_true', help='print rate limit status')
    parser.add_argument('--list', '-l', type=str, help='List repositories for the specified user')
    parser.add_argument('--network', '-n', action='store_true', help='Show list of all forks and their state')
    parser.add_argument('--urls', '-u', action='store_true', help='output url listing')
    parser.add_argument('--all', '-a', action='store_true', help='Request all pages, up to 1000 items')
    parser.add_argument('--where', '-w', type=str, default='code', help='What type of object to search for: code, user, repo, commit, issue')
    parser.add_argument('--query', '-q', type=str, help='in:{path,file} language:{js,c,python,...} filename:substring extension:ext user: repo: size:')
    parser.add_argument('--create', '-c', type=str, help='Create a new repository, name:description')
    parser.add_argument('REPOS', nargs='*', type=str, help='repository list to summarize')
    args = parser.parse_args()

    try:
        with open(os.getenv("HOME")+"/.github_cmdline_rc") as fh:
            cfg = json.load(fh)
    except Exception as e:
        print("ERROR", e)
        cfg = dict()

    if not args.auth:
        args.auth = cfg.get('auth')

    loop = asyncio.get_event_loop()

    api = GithubApi(loop, args)

    tasks = [ ]
    if args.list:
        tasks.append(listrepos(api, args.list, args))
    elif args.limits:
        tasks.append(getlimits(api))
    elif args.query:
        tasks.append(querygithub(api, args))
    elif args.create:
        name, desc = args.create.split(':', 1)
        tasks.append(createrepo(api, args, name, desc))
    else:
        tasks.append(inforepos(api, args))

    loop.run_until_complete(asyncio.gather(*tasks))

    loop.run_until_complete(api.close())
Example 16
    print("""Unicode error - run the program again.""")
    os.system("chcp 65001")
    exit()

parser = argparse.ArgumentParser(
    description="Automatically download an artist's discography.")

group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--artist", type=str, nargs=1, help="The artist's name")
group.add_argument("--from-list",
                   type=str,
                   nargs=1,
                   help="Filename of text file listing multiple artists")

parser.add_argument("--force-artist-refresh",
                    action="store_true",
                    default=False,
                    help="Always redownload an artist's album list")
parser.add_argument('--skip-user-albums',
                    action='store_true',
                    default=False,
                    help="Skip user's existing albums")

args = parser.parse_args()

gazelle = None
database_init()

always_grab = args.force_artist_refresh

user_albums = []
Example 17
def makeparser():
    """
    Create the commandline parser.
    """
    import argparse
    parser = argparse.ArgumentParser(description='plesk file utility')
    parser.add_argument('--config',
                        '-c',
                        type=str,
                        help='configuration to use')
    parser.add_argument('--baseurl', help='plesk base url')
    parser.add_argument('--ignoresslerrors',
                        '-k',
                        action='store_true',
                        help='Ignore ssl certificate errors')
    parser.add_argument('--username', '-u', help='username for login')
    parser.add_argument('--password', '-p', help='password for login')
    parser.add_argument('--verbose',
                        '-v',
                        action='store_true',
                        help='print results from web requests')

    sub = parser.add_subparsers(dest='command')

    ls = sub.add_parser('ls', help='list files')
    ls.add_argument('--recurse',
                    '-r',
                    action='store_true',
                    help='recursively list directories')
    ls.add_argument('--ignoreerror',
                    '-c',
                    action='store_true',
                    help='continue after error')
    ls.add_argument('dirname', type=str, help='which directory to list')

    cat = sub.add_parser('cat', help='print remote file contents to stdout')
    cat.add_argument('filename', help='which file')

    tee = sub.add_parser('tee', help='save stdin to a remote file')
    tee.add_argument('filename', help='which file')

    cat = sub.add_parser('get', help='copy remote file')
    cat.add_argument('filename', help='which remote file')
    cat.add_argument('destination', nargs='?', default='.',
                     help='where to store locally')

    put = sub.add_parser('put', help='upload file')
    put.add_argument('filename', help='which local file')
    put.add_argument('destination', help='where to store remotely')

    edit = sub.add_parser('edit', help='edit file contents')
    edit.add_argument('filename', help='which file')
    edit.add_argument('contents', help='the new contents')

    azip = sub.add_parser('zip', help='archive files')
    azip.add_argument('--dirname',
                      '-C',
                      help='the directory containing the requested files')
    azip.add_argument('zipname', help='name of the zip archive')
    azip.add_argument('files', nargs='*', help='which files to zip')

    unzip = sub.add_parser('unzip', help='unarchive files')
    unzip.add_argument('zipname', help='name of the zip archive')

    mkdir = sub.add_parser('mkdir', help='create directory')
    mkdir.add_argument('dirname')

    rmdir = sub.add_parser('rmdir', help='delete directory')
    rmdir.add_argument('dirname')

    delfiles = sub.add_parser('rm', help='delete files')
    delfiles.add_argument('files', nargs='*')

    emptyfile = sub.add_parser('empty', help='create empty file')
    emptyfile.add_argument('filename')

    movefiles = sub.add_parser(
        'mv',
        help='move files, note: the destination must be an absolute path')
    movefiles.add_argument('--dirname',
                           '-C',
                           help='the directory containing the requested files')
    movefiles.add_argument('files', nargs='+')

    copyfiles = sub.add_parser(
        'cp',
        help='copy files, note: the destination must be an absolute path')
    copyfiles.add_argument('--dirname',
                           '-C',
                           help='the directory containing the requested files')
    copyfiles.add_argument('files', nargs='+')

    calcsize = sub.add_parser('du', help='calc size of filelist')
    calcsize.add_argument('--dirname',
                          '-C',
                          help='the directory containing the requested files')
    calcsize.add_argument('files', nargs='*')

    help = sub.add_parser('help', help='verbose usage')
    help.add_argument('subcommand', nargs='?')

    # keep the available choices for later use in 'help'
    parser.subparsers = sub.choices

    return parser
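
The parser.subparsers attribute stashed at the end of makeparser is presumably what the 'help' subcommand uses to print per-command usage; that dispatch code is not part of this snippet, so the following is only a sketch under that assumption:

    parser = makeparser()
    args = parser.parse_args()
    if args.command == 'help':
        # parser.subparsers maps each subcommand name to its ArgumentParser
        subparser = parser.subparsers.get(args.subcommand)
        (subparser or parser).print_help()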
Example 18
                continue
            entries.append(d)
            print_entry(d)

    rss = generate_rss(entries, options)
    if args.debug:
        print(rss.decode())
    if args.upload_s3:
        upload_s3(rss, options.get('s3'))
        print('Success %s' % options.get('id'))


if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description='Scrape webpages to make an RSS feed using just regex')
    parser.add_argument('feed', help='Feed key in settings.py')
    parser.add_argument('--s3',
                        dest='upload_s3',
                        action='store_true',
                        help='Upload to S3')
    parser.add_argument('--debug', dest='debug', action='store_true')
    args = parser.parse_args()

    if args.feed == 'all':
        for options in settings.FEEDS.values():
            process_feed(options)
    else:
        options = settings.FEEDS[args.feed]
        process_feed(options)
Example 19
        src_path = path.replace(texmf_ubuntu, texmf_src)

        if not os.path.exists(src_path):
            print(path, file=skip_log)
            continue

        src_dir = dirname.replace(texmf_ubuntu, texmf_src)
        dst_dir = dirname.replace(texmf_ubuntu, texmf_dst)
        preload.add((src_dir, dst_dir))

    return preload


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--texmf', required=True)
    parser.add_argument('--package', required=True)
    parser.add_argument('--url', required=True)
    parser.add_argument('--skip-log')
    args = parser.parse_args()

    filelist_url = os.path.join(args.url, 'all', args.package, 'filelist')
    page = urllib.request.urlopen(filelist_url).read().decode('utf-8')

    html_parser = UbuntuDebFileList()
    html_parser.feed(page)
    preload = generate_preload(args.texmf, html_parser.file_list,
                               args.skip_log)

    print(' '.join(f'--preload {src}@{dst}' for src, dst in preload))
Example 20
            _, ext = os.path.splitext(filename)
            if ext.lower() in ('.htm', '.html'):
                pages.append((os.path.join(root, filename),
                              urllib.parse.urljoin(base_url, os.path.join(virroot, filename))))

    return pages


def main(site_dir, base_url):
    pages = collect_pages(site_dir, base_url)
    urls = [page[1] for page in pages]
    urls.append(base_url)

    for filename, _ in pages:
        parser = FindLinkParser(base_url)
        with open(filename) as f:
            parser.feed(f.read())

        for link in parser.links:
            if link not in urls:
                print(filename, '\t', link)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Find broken site/wikilinks.")
    parser.add_argument("site", help="the directory containing the generated site")
    parser.add_argument("--base_url", help="the base URL of the site", required=True)

    args = parser.parse_args()

    main(args.site, args.base_url)
Example 21
# To make this program executable from any process working directory, run:
# ln -s ~/Software/getcode/getcode.py ~/.local/bin/getcode

import argparse
import html.parser
import requests
import sys

parser = argparse.ArgumentParser(
    description='Download some code matching given keywords.')

parser.add_argument(
    '--num-tags',
    type=int,
    default=150,
    help='of at least how many tag nodes the content should be output '
         '(default: 150 tag nodes)')
parser.add_argument('keywords',
                    metavar='KEYWORD',
                    type=str,
                    nargs='+',
                    help='keywords to match the downloaded code against')

arguments = parser.parse_args()


class SearchPageParser(html.parser.HTMLParser):

    isResult = False
Example 22
def main():
    import argparse
    parser = argparse.ArgumentParser(description='print entire contents of a mediawiki site in XML format')
    parser.add_argument('--history', action='store_true', help='Include history in export')
    parser.add_argument('--savedir', type=str, help='Save all files to the specified directory')
    parser.add_argument('--limit', type=int, help='Maximum number of simultaneous connections to use.')
    parser.add_argument('--batchsize', type=int, help='Nr of pages to export per request.', default=300)
    parser.add_argument('--debug', action='store_true', help='errors print stacktrace, and abort')
    parser.add_argument('wikipage', type=str)
    args = parser.parse_args()

    global debug
    debug = args.debug

    loop = asyncio.get_event_loop()
    tasks = [ exportsite(loop, args.wikipage, args)  ]
    loop.run_until_complete(asyncio.gather(*tasks))
Example 23
                #args.max
                if counter == int(args.max):
                    break

    with open(args.output, 'w') as fout:
        fout.write(json.dumps(allOutput))


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Process each .')
    parser.add_argument('ID',
                        metavar='N',
                        type=int,
                        nargs=1,
                        help='your student ID')
    parser.add_argument("-o",
                        "--output",
                        help="Directs the output to a filename of your choice",
                        required=True)
    parser.add_argument(
        "--max",
        type=int,
        help="The maximum number of comments to read from each file",
        default=10000)
    args = parser.parse_args()

    if (int(args.max) > 200272):
        print(
            "Error: If you want to read more than 200,272 comments per file, you have to read them all."
Example 24
        os.mkdir(problem_dir)

    for i, example in enumerate(examples, 1):
        input_path = os.path.join(problem_dir, 'in{}'.format(i))
        with open(input_path, 'w') as f:
            f.write(example[0])

        output_path = os.path.join(problem_dir, 'out{}'.format(i))
        with open(output_path, 'w') as f:
            f.write(example[1])

    print('Wrote {} examples for problem {}.'.format(len(examples), problem))


parser = argparse.ArgumentParser(description='Codeforces scraper.  https://github.com/lovrop/codeforces-scraper')
parser.add_argument('contest', help='URI or numerical ID of contest to scrape')
args = parser.parse_args()

# See if it was just a numeric ID
try:
    contest_id = int(args.contest)
    contest_uri = 'http://codeforces.com/contest/{}'.format(contest_id)
except ValueError:
    contest_uri = args.contest

print('Retrieving ', contest_uri, '... ', sep='', end='')
sys.stdout.flush()
contest_html = urllib.request.urlopen(contest_uri).read().decode('utf-8')
print('OK ({} bytes).'.format(len(contest_html)))

parser = ContestHTMLParser()
Example 25
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--loglevel', type=str, default='INFO')

    parser.add_argument('--shard_size',
                        type=argparse_utils.positive_int,
                        default=1000000)

    parser.add_argument('sgm',
                        type=argparse_utils.existing_file_path,
                        nargs='+')

    parser.add_argument('--top_k_topics',
                        type=argparse_utils.positive_int,
                        default=20)

    parser.add_argument('--trectext_out_prefix', type=str, required=True)

    parser.add_argument('--document_classification_out',
                        type=argparse_utils.nonexisting_file_path,
                        required=True)

    args = parser.parse_args()

    try:
        logging_utils.configure_logging(args)
    except IOError:
        return -1

    parser = ReutersParser()

    for sgm_path in args.sgm:
        logging.info('Parsing %s.', sgm_path)

        with open(sgm_path, 'r', encoding='ISO-8859-1') as f_sgm:
            parser.feed(f_sgm.read())

    logging.info('Parsed %d documents.', len(parser.documents))

    topic_histogram = collections.Counter(
        topic for document in parser.documents
        for topic in document['tags']['topics'])

    top_topics = set(
        sorted(topic_histogram.keys(),
               key=lambda topic: topic_histogram[topic])[-args.top_k_topics:])

    logging.info('Top topics: %s', top_topics)

    writer = trec_utils.ShardedTRECTextWriter(args.trectext_out_prefix,
                                              shard_size=args.shard_size,
                                              encoding='latin1')

    with open(args.document_classification_out, 'w') as \
            f_document_classification_out:
        for document in parser.documents:
            doc_id = document['doc_id']

            doc_text = '\n'.join([
                document['texts'].get('title', ''),
                document['texts'].get('dateline', ''),
                document['texts'].get('body', ''),
            ])

            writer.write_document(doc_id, doc_text)

            doc_topics = {
                topic
                for topic in document['tags']['topics'] if topic in top_topics
            }

            if doc_topics:
                most_specific_doc_topic = min(
                    doc_topics, key=lambda topic: topic_histogram[topic])

                f_document_classification_out.write(doc_id)
                f_document_classification_out.write(' ')
                f_document_classification_out.write(most_specific_doc_topic)
                f_document_classification_out.write('\n')

    writer.close()
Example 26
def parse_args():
    '''
    Parses the command line arguments and generates help text.
    '''
    parser = ArgumentParser()

    # Optional
    parser.add_argument('--guests',
                        type=int,
                        default=1,
                        help='number of guests')
    parser.add_argument('--children',
                        type=int,
                        default=0,
                        help='number of children')
    parser.add_argument('--rooms', type=int, default=1, help='number of rooms')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--checkin',
                       type=parse_date,
                       metavar='YYYY-MM-DD',
                       default=EVENT_START_DAY.strftime('%Y-%m-%d'),
                       help='check in')
    group.add_argument('--wednesday',
                       dest='checkin',
                       action='store_const',
                       const=(EVENT_START_DAY -
                              timedelta(1)).strftime('%Y-%m-%d'),
                       help='check in on Wednesday')
    parser.add_argument('--checkout',
                        type=parse_date,
                        metavar='YYYY-MM-DD',
                        default=(EVENT_START_DAY +
                                 timedelta(3)).strftime('%Y-%m-%d'),
                        help='check out')
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '--max-distance',
        type=parse_distance,
        metavar='BLOCKS',
        help=
        "max hotel distance that triggers an alert (or 'connected' to require skywalk hotels)"
    )
    group.add_argument('--connected',
                       dest='max_distance',
                       action='store_const',
                       const='connected',
                       help='shorthand for --max-distance connected')
    parser.add_argument(
        '--budget',
        type=float,
        metavar='PRICE',
        default='99999',
        help='max total rate (not counting taxes/fees) that triggers an alert')
    parser.add_argument('--hotel-regex',
                        type=parse_regex,
                        metavar='PATTERN',
                        default=re.compile('.*'),
                        help='regular expression to match hotel name against')
    parser.add_argument('--room-regex',
                        type=parse_regex,
                        metavar='PATTERN',
                        default=re.compile('.*'),
                        help='regular expression to match room against')
    parser.add_argument(
        '--show-all',
        action='store_true',
        help=
        'show all rooms, even if miles away (these rooms never trigger alerts)'
    )
    parser.add_argument('--ssl-insecure',
                        action='store_false',
                        dest='ssl_cert_verify',
                        help=SUPPRESS)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--delay',
                       type=int,
                       default=1,
                       metavar='MINS',
                       help='search every MINS minute(s)')
    group.add_argument('--once',
                       action='store_true',
                       help='search once and exit')
    parser.add_argument('--test',
                        action='store_true',
                        dest='test',
                        help='trigger every specified alert and exit')

    # Required
    group = parser.add_argument_group('required arguments')
    group.add_argument('--key',
                       required=True,
                       help='key (see the README for more information)')

    # Alerts
    group = parser.add_argument_group('alerts')
    group.add_argument(
        '--cmd',
        dest='alerts',
        action='append',
        type=lambda arg: ('cmd', arg),
        metavar='CMD',
        help='run the specified command, passing each hotel name as an argument'
    )

    return parser.parse_args()
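
A note on the --wednesday/--checkin pair above: both write to dest='checkin', and putting them in one mutually exclusive group stops a command line from supplying contradictory check-in dates. A stripped-down sketch of the pattern (the dates are illustrative):

    import argparse

    p = argparse.ArgumentParser()
    g = p.add_mutually_exclusive_group()
    g.add_argument('--checkin', default='2024-08-01')
    g.add_argument('--wednesday', dest='checkin', action='store_const', const='2024-07-31')

    print(p.parse_args(['--wednesday']).checkin)       # 2024-07-31
    # p.parse_args(['--wednesday', '--checkin', 'x'])  # error: not allowed together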
Example 27
f = Figlet(font="5lineoblique")
print(f.renderText("ExploitDB Crawler"))

guideline_table = PrettyTable()
guideline_table.field_names = ["Command", "Description"]
guideline_table.add_row(
    ["--page_num <PAGE_NUM>", "Display exploits with given page <num>"])
guideline_table.add_row(
    ["--exploit_id <ID>", "Display exploit with given <id>"])
guideline_table.add_row(["--exit", "Exit"])

field_names = ['Id', 'Description', 'Date', 'Author', 'Platform']

parser = argparse.ArgumentParser()
parser.add_argument("--page_num",
                    "-PAGE NUM",
                    type=int,
                    help="Display exploits in page <num> and choose exploit")
parser.add_argument("--exploit_id",
                    "-EXPLOIT_ID",
                    help="Display exploit with given id")
args = parser.parse_args()

user_input = ""
print(
    "========================================================================================================================"
)
while True:
    user_input = user_input.split(" ")
    if args.page_num or user_input[0] == "--page_num":
        if args.page_num:
            page_num = args.page_num
Example 28
      return

    tag, attrs = self.__stack[-1]
    if tag == 'option' and 'selected' in attrs and 'value' not in attrs:
      self.data[self._Last('select')['name']] = data
    elif tag == 'textarea':
      self.data[attrs['name']] = data

  def handle_endtag(self, tag):
    while tag != self.__stack.pop()[0]:
      pass


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument("--conf", default="~/.update-sermon-page.conf")
  args = parser.parse_args()
  conf_file = os.path.expanduser(args.conf)
  with open(conf_file) as f:
    conf = json.load(f)

  s = requests.Session()
  r = s.post('http://www.cpc.org.au/user',
             data={
                 'form_id': 'user_login',
                 'op': 'Log in',
                 'name': conf['user'],
                 'pass': conf['passwd']})
  edit_html = s.get('http://www.cpc.org.au/node/229/edit').text
  edit_html = re.sub(' xmlns="[^"]+"', '', edit_html, count=1)
Example 29
    if path:
        utils.save(url_boundary_queue, url_graph, path)
        utils.info_sync_print(f'Saved to {path}')

    # TODO:
    #   - Visualization


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description=
        'Crawls the web, like a spider. Uses threads, also like a spider.')
    parser.add_argument(
        '-s',
        '--start-url',
        default=DEFAULT_START_URL,
        help=f'the starting point of the crawl, defaults to {DEFAULT_START_URL}'
    )
    parser.add_argument(
        '-t',
        '--thread-limit',
        default=DEFAULT_THREAD_LIMIT,
        help=
        f'the number of threads to use when crawling, defaults to {DEFAULT_THREAD_LIMIT}'
    )
    parser.add_argument(
        '-p',
        '--path',
        default=DEFAULT_PATH,
        help=
        'if provided, crawling results will be loaded and saved to this file, defaults to '
Example 30
    # Author details
    author='{author}',
    author_email='{author_email}',

    # License
    license='{license}',

    packages=['data'],
    py_modules=["{app_name}"],
    package_data={{'data':[{files}]}}
)
"""

# Template for the application script
app = """import os
import shutil
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--install',
    help='Install {app_name} in an empty directory',
    action="store_true")
args = parser.parse_args()

files = ({files})

if args.install:
    print('Installing {app_name} in an empty directory')

    src_path = os.path.join(os.path.dirname(__file__), 'data')
Example 31
FIRST_NAME_PLACEHOLDER = 'XXFIRSTNAMEXX'
LAST_NAME_PLACEHOLDER = 'XXNAMEXX'
COMPANY_PLACEHOLDER = 'XXCOMPANYXX'
TWITTER_PLACEHOLDER = 'XXTWITTERXX'

FIRST_NAME_INDEX = 0
LAST_NAME_INDEX = 1
COMPANY_INDEX = 2
TWITTER_INDEX = 3

DEFAULT_DPI = 300

# Argument parser
parser = ArgumentParser(description='Create Attendee badges for Devfest')

# nargs=1 is dropped here: it would wrap supplied values in a one-element list
# while the defaults stayed plain strings, giving downstream code two types.
parser.add_argument('-a', '--attendees', dest='attendees', type=str, default='sample/attendees.csv',
                    help='attendee list to import data from. MUST be a CSV.')
parser.add_argument('-t', '--template', dest='template', type=str, default='sample/template.svg',
                    help='badge template file to be used. MUST be an SVG.')
parser.add_argument('-d', '--dpi', dest='dpi', type=int, default=DEFAULT_DPI,
                    help='output DPI')

args = parser.parse_args()

# Input files
attendees = args.attendees
template = args.template
dpi = args.dpi

print('Creating output dir.')
os.makedirs(os.path.dirname('output/'), exist_ok=True)
Example 32
        return json.loads(text)

    # download pairwise IBD information
    def get_ibd(self, human_id_1, human_id_2):
        text = self.get_url('https://you.23andme.com/tools/ibd/?human_id_1=' +
                            human_id_1 + '&human_id_2=' + human_id_2)
        return json.loads(text)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Retrieve DNA matches from 23andMe (16 Aug 2018)',
        add_help=False,
        usage='getmy23andme.py -u <username> -p <password> [options]')
    parser.add_argument('-u',
                        metavar='<STR>',
                        type=str,
                        help='23andMe username [prompt]')
    parser.add_argument('-p',
                        metavar='<STR>',
                        type=str,
                        help='23andMe password [prompt]')
    parser.add_argument('-v',
                        action='store_false',
                        default=True,
                        help='whether to use verbose mode [True]')
    parser.add_argument('-t',
                        metavar='<INT>',
                        type=int,
                        default=60,
                        help='timeout in seconds [60]')
    parser.add_argument('-o',
Example 33
def get_args():
    """
    This function uses the argparse library to parse command line arguments.

    Returns:
        args (argparse.Namespace): An argparse object. Elements of the object 
        can be accessed by their option name as attributes.
    """
    parser = argparse.ArgumentParser(description=
    "This program takes a list of words, or a file containing a list of words,\
 and queries the Google NGram API for the usage frequency of these words, by\
 year, within a designated time-frame. Unless '--noSave' is indicated, data is\
 saved to a file called 'google_Ngram_output.csv'. A more detailed description\
 of various arguments can be found at https://books.google.com/ngrams/info")

    parser.add_argument("Query", type = str, nargs="+", help="List of words,\
 or CSV or TXT file of words to be queried")

    parser.add_argument("-c", "--corpus", default="eng_2019", type = str, 
        help ="Shorthand name for corpus of words to be queried. Available\
 corpora can be read about at https://books.google.com/ngrams/info. Default is\
 English corpus, 2019 update")

    parser.add_argument("-s", "--startYear", default=1800, type=int,
        help = "A year: beginning of time range to be queried. Default is 1800")

    parser.add_argument("-e", "--endYear", default = 2000, type = int,
        help = "A year: last year of time range to be included in query.\
 Default is 2000")

    parser.add_argument("-sm", "--smoothing", default = 0, type = int,
        help = "The degree to which data points are averaged between years. A\
 smoothing of 0 indicates completely raw data. Default is 0.")

    parser.add_argument("-ci", "--caseInsensitive", action="store_true", 
        help = "Consider upper- and lower-case versions of the words")

    parser.add_argument("-a", "--allData", action="store_true")

    parser.add_argument("-n", "--noSave", action="store_true", 
        help = "Use to prevent data from being saved to external file")

    parser.add_argument("-q", "--quiet", action="store_true",
        help="Use to prevent program from printing to STD OUT")

    parser.add_argument("-o", "--outputDir", default = "./", type = str,
        help = "Directory to save output file to")

    parser.add_argument("-p", "--plot", action="store_true", 
        help = "Create plot of data")

    args = parser.parse_args()
    args.Query = "".join(args.Query).split(",")

    return args
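
One caveat in Example 33: args.Query joins the word list with an empty string before splitting on commas, so comma-separated input round-trips but space-separated input silently fuses. A quick demonstration:

    print("".join(["cat,", "dog"]).split(","))  # ['cat', 'dog']
    print("".join(["cat", "dog"]).split(","))   # ['catdog'] -- no commas, words are merged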
Example 34
try:
	print(u"\u2603 Scraper running...")
except UnicodeEncodeError:
	print("""Unicode error - run the program again.""")
	os.system("chcp 65001")
	exit()


parser = argparse.ArgumentParser(description="Automatically download an artist's discography.")

group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--artist", type=str, nargs=1, help="The artist's name")
group.add_argument("--from-list", type=str, nargs=1, help="Filename of text file listing multiple artists")

parser.add_argument("--force-artist-refresh", action="store_true", default=False, help="Always redownload an artist's album list")
parser.add_argument('--skip-user-albums', action='store_true', default=False, help="Skip user's existing albums")

args = parser.parse_args()

gazelle = None
database_init()

always_grab = args.force_artist_refresh

user_albums = []

if args.skip_user_albums is True:
	results = db2.query(User_Album).all()

	for ua in results:
Example 35
def parse_args():
    parser = argparse.ArgumentParser(description="Crawler that prints all the URLs in a site that can be reached from a starting point")
    parser.add_argument("site", type=str, help="The URL from which to start the crawl")
    parser.add_argument("-o", "--out", type=str, help="")
    return parser.parse_args()
Example 36
    return pages


def main(site_dir, base_url):
    pages = collect_pages(site_dir, base_url)
    urls = [page[1] for page in pages]
    urls.append(base_url)

    for filename, _ in pages:
        parser = FindLinkParser(base_url)
        with open(filename) as f:
            parser.feed(f.read())

        for link in parser.links:
            if link not in urls:
                print(filename, '\t', link)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Find broken site/wikilinks.")
    parser.add_argument("site",
                        help="the directory containing the generated site")
    parser.add_argument("--base_url",
                        help="the base URL of the site",
                        required=True)

    args = parser.parse_args()

    main(args.site, args.base_url)
Example 37
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Translate a CoNLL file")

    iogroup = parser.add_argument_group(
        "io", "Arguments for IO. Both must be present.")

    iogroup.add_argument(
        "--input",
        "-i",
        help="Input file name (fifth word of each row is translated)")
    iogroup.add_argument("--output",
                         "-o",
                         help="Output file. Format: origword  transword")
    parser.add_argument("--method",
                        help="Either google, or lexicon",
                        choices=["lexicon", "google"],
                        default="lexicon")
    parser.add_argument("--source",
                        "-s",
                        help="Source language code (3 letter)",
                        default="eng")
    parser.add_argument("--target",
                        "-t",
                        help="Target language code (3 letter)",
                        required=True)
    parser.add_argument("--format",
                        "-f",
                        help="Format of input file",
                        choices=["conll", "plaintext"],
                        default="conll")
    parser.add_argument("--lexname", "-l", help="Name of special lexicon")
Example 38
#!/usr/bin/python3
# (the shebang must be the first line of the file to take effect)

# TODO: update CMakeLists generator

import argparse
import html.parser
import itertools
import os
import sys
import subprocess
import urllib.request

parser = argparse.ArgumentParser(description='Generate solution template for a new contest.')
parser.add_argument('contest_name', metavar='contest-name',
                    help='The name of the contest')
parser.add_argument('--num-problems', type=int, default=0,
                    help='The number of problems in the contest (0 = autodetect)')
parser.add_argument('--contest-family', default='codeforces',
                    help='The name of the competition system')
download_tests_parser = parser.add_mutually_exclusive_group(required=False)
download_tests_parser.add_argument('--download-tests', dest='download_tests', action='store_true')
download_tests_parser.add_argument('--no-download-tests', dest='download_tests', action='store_false')
parser.set_defaults(download_tests=True)
add_to_git_parser = parser.add_mutually_exclusive_group(required=False)
add_to_git_parser.add_argument('--add-to-git', dest='add_to_git', action='store_true')
add_to_git_parser.add_argument('--no-add-to-git', dest='add_to_git', action='store_false')
parser.set_defaults(add_to_git=True)

args = parser.parse_args()

assert 0 <= args.num_problems and args.num_problems <= 26, (
Example 39
def scrape_solution(contest_id, submission_id):
    # os.mkdir(contest_id)
    contest_uri = 'http://codeforces.com/contest/{}/submission/{}'.format(
        contest_id, submission_id)
    print('Retrieving ', contest_uri, '... ', sep='', end='')
    sys.stdout.flush()
    contest_html = urllib.request.urlopen(contest_uri).read().decode('utf-8')
    print('OK ({} bytes).'.format(len(contest_html)))

    parser = SubmissionHTMLParser()
    parser.feed(contest_html)
    problems = parser.getSubmission()
    #
    # print('Found', len(problems), 'problems.')
    #
    # with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    #     for problem in problems:
    #         executor.submit(download_problem, contest_id, contest_uri, problem)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Codeforces submission scraper.')
    parser.add_argument('contest_id', help='Numerical ID of contest to scrape')
    parser.add_argument('submission_id',
                        help='Numerical ID of submission to scrape')
    args = parser.parse_args()
    contest_id = args.contest_id
    submission_id = args.submission_id
    scrape_solution(contest_id, submission_id)
Example 40
    # Author details
    author='{author}',
    author_email='{author_email}',

    # License
    license='{license}',

    packages=['data'],
    py_modules=["{app_name}"],
    package_data={{'data':[{files}]}}
)
"""

# Template for the application script
app = """import os
import shutil
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--install',
    help='Install {app_name} in an empty directory',
    action="store_true")
args = parser.parse_args()

files = ({files})

if args.install:
    print('Installing {app_name} in an empty directory')

    src_path = os.path.join(os.path.dirname(__file__), 'data')
Example 41
def parse_args():
    parser = argparse.ArgumentParser(description='Import bookmarks from standard html file to rofi-bookmarks.')
    parser.add_argument('inputfile', metavar='htmlfile')
    parser.add_argument('outputfile', metavar='bookmarkfile')

    return parser.parse_args()
Example 42
import argparse
import datetime

from restaurante import Restaurante

def day_of_week(day_number):

    day_list = ['segunda', 'terca', 'quarta',
                'quinta', 'sexta', 'sabado',
                'domingo']
    return day_list[day_number]

if __name__ == '__main__':
    day_number = datetime.datetime.today().weekday()

    # a single parser is enough; the snippet originally created one and
    # immediately discarded it
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')

    parser.add_argument('-b', '--bandex', help='Local')
    parser.add_argument('-d', '--dia', help='Dia da semana', default=day_of_week(day_number))
    parser.add_argument('-a', '--almoco', help='Almoço', action='store_true')
    parser.add_argument('-j', '--janta', help='Janta', action='store_true')

    args = parser.parse_args()
    if not args.bandex:
        args = parser.parse_args(['@.bandexrc'])

    restaurante = Restaurante(args.bandex)
    if not args.almoco and not args.janta:
        restaurante.print_menu(args.dia, True, True)
    else:
        restaurante.print_menu(args.dia, args.almoco, args.janta)
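
fromfile_prefix_chars='@' above tells argparse to treat any @file argument as a file of extra arguments, one argument per line; that is how the .bandexrc fallback works. A hypothetical .bandexrc under that convention:

    -b
    central

With such a file in place, parser.parse_args(['@.bandexrc']) behaves as if '-b central' had been typed on the command line.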

Example 43
        except requests.exceptions.RequestException as e:
            logging.info(
                "Exception while fetching URL %s: %s.%s: %s",
                url,
                e.__class__.__module__,
                e.__class__.__qualname__,
                str(e),
            )

    return found


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Index URLs')
    parser.add_argument(
        'URLs',
        nargs='+',
        help='List of URLs that serves both as starting point as well as root filter',
    )
    parser.add_argument(
        '--index',
        type=argparse.FileType('wb'),
        required=True,
        help='Location of the index file to write to',
    )
    args = parser.parse_args()

    links = crawl(args.URLs)
    pickle.dump([x + (unquote(x[2]),) for x in links.flatten()], args.index)
    args.index.close()
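
argparse.FileType('wb') above hands the script an already-open binary file, which is why pickle.dump writes to args.index directly and why args.index.close() appears instead of any open() call. A sketch of the equivalent manual handling for comparison:

    import argparse
    import pickle

    parser = argparse.ArgumentParser()
    parser.add_argument('--index', type=str, required=True)
    args = parser.parse_args()
    data = {'example': True}  # illustrative payload
    with open(args.index, 'wb') as f:  # FileType does this open, plus friendly error reporting
        pickle.dump(data, f)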
Example 44
        shutil.copyfile('../template.cpp', src_file)

    for i, example in enumerate(examples, 1):
        input_path = os.path.join(problem_dir, 'in{}'.format(i))
        with open(input_path, 'w') as f:
            f.write(example[0])

        output_path = os.path.join(problem_dir, 'out{}'.format(i))
        with open(output_path, 'w') as f:
            f.write(example[1])

    print('Wrote {} examples for problem {}.'.format(len(examples), problem))


parser = argparse.ArgumentParser(description='Codeforces scraper.  https://github.com/lovrop/codeforces-scraper')
parser.add_argument('contest', help='URI or numerical ID of contest to scrape')
args = parser.parse_args()

# See if it was just a numeric ID
try:
    contest_id = int(args.contest)
    contest_uri = 'http://codeforces.com/contest/{}'.format(contest_id)
except ValueError:
    contest_uri = args.contest

print('Retrieving ', contest_uri, '... ', sep='', end='')
sys.stdout.flush()
contest_html = urllib.request.urlopen(contest_uri).read().decode('utf-8')
print('OK ({} bytes).'.format(len(contest_html)))

parser = ContestHTMLParser()
Example 45
            return 'Female'

        text = self.get_url('https://www.23andme.com/user/?profile=' + uid)
    # html.parser has no module-level unescape(); the function lives in html
    text = html.unescape(re.sub(' *\n *', '', text))

        regexp = re.compile('<p><strong>Sex:</strong>(Female|Male)</p>')
        res = regexp.search(text)
        if res:
            line = text[res.span()[0]:res.span()[1]]
            return line[24:-4]
        else:
            return 'Unknown'

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'Retrieve DNA matches from 23andMe (26 Jun 2016)', add_help = False, usage = 'getmy23andme.py -u <username> -p <password> [options]')
    parser.add_argument('-u', metavar = '<STR>', type = str, help = '23andMe username [prompt]')
    parser.add_argument('-p', metavar = '<STR>', type = str, help = '23andMe password [prompt]')
    parser.add_argument('-v', action = 'store_true', default = False, help = 'whether to use verbose mode [False]')
    parser.add_argument('-t', metavar = '<INT>', type = int, default = 60, help = 'timeout in seconds [60]')
    parser.add_argument('-o', metavar = '<STR>', type = str, help = 'output prefix [account_id]')
    parser.add_argument('-x', action = 'store_true', default = False, help = 'whether to download inheritance and ibdview tables [False]')
    parser.add_argument('-h', metavar = '<FILE>', type = str, help = 'previously downloaded inheritance table file')
    parser.add_argument('-i', metavar = '<FILE>', type = str, help = 'previously downloaded ibdview table file')
    try:        
        parser.add_argument('-l', metavar = '<FILE>', type = argparse.FileType('w', encoding = 'UTF-8'), default = sys.stderr, help = 'output log file [stderr]')
    except TypeError:
        sys.stderr.write('Python >= 3.4 is required to run this script\n')
        sys.stderr.write('(see https://docs.python.org/3/whatsnew/3.4.html#argparse)\n')
        exit(2)

    # extract arguments from the command line
Example 46
def parse_arguments():

    note = 'The following SONG, ALBUM, and PLAYLIST are IDs which can be ' \
           'obtained from the URL of the corresponding web page.'

    parser = argparse.ArgumentParser(description=note)

    parser.add_argument('-v', '--version', action='version', version=VERSION)
    parser.add_argument('-f', '--force', action='store_true',
                        help='overwrite existing files without prompt')
    parser.add_argument('-t', '--tool', choices=['wget', 'urllib2'],
                        help='change the download tool')
    parser.add_argument('-s', '--song', action='append',
                        help='adds songs for download',
                        type=int, nargs='+')
    parser.add_argument('-a', '--album', action='append',
                        help='adds all songs in the albums for download',
                        type=int, nargs='+')
    parser.add_argument('-p', '--playlist', action='append',
                        help='adds all songs in the playlists for download',
                        type=int, nargs='+')
    parser.add_argument('--no-tag', action='store_true',
                        help='skip adding ID3 tag')
    parser.add_argument('--directory', default='',
                        help='save downloads to the directory')
    parser.add_argument('--name-template', default='{id} - {title} - {artist}',
                        help='filename template')
    parser.add_argument('--lrc-timetag', action='store_true',
                        help='keep timetag in lyric')
    parser.add_argument('--no-wait', action='store_true',
                        help='make download faster, but xiami may ban your account')
    parser.add_argument('-un', '--username', default='',
                        help='Vip account email')
    parser.add_argument('-pw', '--password', default='',
                        help='Vip account password')
    parser.add_argument('-ma', '--memberAuth', default='',
                        help='Cookie member_auth')

    return parser.parse_args()
Example 47
    def get(self):
        return self.counter

    def increment(self):
        self.counter += 1
        if self.counter > 95:
            self.counter = 0


# --------------------------------------------------

if __name__ == '__main__':

    # Server startup options
    # --nows: no websocket output, just update DB

    # TODO: Handle 'headless' commandline options
    parser = argparse.ArgumentParser(description="Wisewolf RSS server process")
    parser.add_argument("--nows", help="No websocket output, just update DB")
    args = parser.parse_args()

    # Log startup message, create DB if necessary
    print("Wisewolf RSS server %s (c)2017 Kyubi Systems: " % SERVER_VERSION,
          end=' ')
    initialise()
    print('OK')

    # Start main RSS server loop
    logging.info("Wisewolf RSS server version %s starting", SERVER_VERSION)
    start()