コード例 #1
0
def main():
    """Roll dice via a Gooey GUI: ask how many rolls, then print each result."""
    # Fixed typo in the user-facing description: "Role" -> "Roll".
    parser = gooey.GooeyParser(description='Roll some dice')
    parser.add_argument('number_of_rolls',
                        type=int,
                        metavar='How many rolls?',
                        choices=range(1, 10))  # accepts 1..9 (range end is exclusive)
    args = parser.parse_args()
    for i in range(args.number_of_rolls):
        # randint(1, 6) simulates one throw of a standard six-sided die
        print(f'Roll {i + 1}: {random.randint(1, 6)}')
コード例 #2
0
def main():
    """Create a Gooey parser, hand it to AppConfig, then parse the arguments."""
    gui_parser = gooey.GooeyParser()
    # AppConfig is expected to register its own arguments on the parser
    # (GUI mode, so cli=False).  Earlier subparser experiments were removed.
    cfg = AppConfig(cli=False, parser=gui_parser)
    args = gui_parser.parse_args()
コード例 #3
0
ファイル: gui.py プロジェクト: lucas-flowers/snutree
def parse_args(argv=None):
    '''
    Parse and return the program arguments. If `argv` is provided, arguments
    are read from that list instead of using the system arguments. Gooey may
    set these arguments if used in a GUI.
    '''
    gui_parser = gooey.GooeyParser(prog='snutree', description=None)
    # `options` maps each option to its (positional args, keyword args) pair.
    for arg_tuple, arg_kwargs in options.values():
        gui_parser.add_argument(*arg_tuple, **arg_kwargs)
    return gui_parser.parse_args(argv)
コード例 #4
0
def main():
    """Demonstrate how Gooey renders a long help text on a choice argument."""
    parser = gooey.GooeyParser()
    long_help = ('This is a very, very, very long help text '
                 'to explain a very, very, very important input value. '
                 'Unfortunately, the end of this long message is cropped. ')
    parser.add_argument(
        '--test',
        type=int,
        default=0,
        choices=[0, 1, 2, 3, 4],
        help=long_help)
    parsed = parser.parse_args()
    print(parsed)
コード例 #5
0
def main():
    """Parse (empty) GUI arguments, ping localhost 5 times, exit 1 on failure."""
    import sys  # local import: no module-level import block visible in this snippet

    parser = gooey.GooeyParser()
    args = parser.parse_args()

    # "-n 5" is the Windows ping count flag; the list form (shell=False)
    # avoids any shell-injection concerns.
    process = subprocess.Popen([
        "ping",
        "127.0.0.1",
        "-n",
        "5",
    ])
    returncode = process.wait()  # wait() returns the exit code directly
    if returncode != 0:
        print("An error occurred :", returncode)
        # sys.exit() is the reliable form; the bare exit() builtin is an
        # interactive helper installed by the `site` module and may be absent.
        sys.exit(1)
コード例 #6
0
def main():
    """Show a required, mutually-exclusive pair of boolean flags in Gooey."""
    parser = gooey.GooeyParser()

    # Exactly one of the two flags must be chosen by the user.
    choice_group = parser.add_mutually_exclusive_group(required=True)
    for flag in ('--choose-1', '--choose-2'):
        choice_group.add_argument(flag, action='store_true', default=False)

    parsed = parser.parse_args()
    print(parsed)
コード例 #7
0
def main():
    """Print the stdout object and progress markers (subprocess demo disabled)."""
    parser = gooey.GooeyParser()
    args = parser.parse_args()
    import sys
    print(sys.stdout)
    print('begin')
    # The subprocess experiment below was left disabled while debugging
    # output capture under Gooey:
    # process = subprocess.Popen(["ping", "127.0.0.1", "-n", "5", ],
    #                            shell=True, stdout=subprocess.PIPE,
    #                            stderr=subprocess.STDOUT)
    # process.communicate()
    print('done waiting')
コード例 #8
0
def get_parser() -> gooey.GooeyParser:
    """Returns a parser for Janken!

    Returns:
        Argument parser for Janken.

    """
    janken_parser = gooey.GooeyParser(description='Interpret Janken images.')
    # One or more image paths, picked via Gooey's multi-file dialog.
    janken_parser.add_argument(
        'images',
        help='image files to interpret',
        nargs='+',
        widget='MultiFileChooser',
    )
    return janken_parser
コード例 #9
0
def main(argv=None, return_report=False):
    """Entry point for Easytextract: parse arguments (Gooey GUI or plain
    argparse), extract text from the input documents and write one .txt file
    per document into the output folder.

    Parameters:
        argv: None (use sys.argv[1:]), a shell-style string (split with
            shlex), or an already-split list of arguments.
        return_report: accepted for API symmetry but not referenced in this
            body; the function always returns 0 on success.
            # NOTE(review): confirm against sibling implementations.

    Raises:
        NameError: when an input path or the output directory does not exist.
    """
    if argv is None:  # if argv is empty, fetch from the commandline
        argv = sys.argv[1:]
    elif isinstance(
            argv, _str
    ):  # else if argv is supplied but it's a simple string, we need to parse it to a list of arguments before handing to argparse or any other argument parser
        argv = shlex.split(argv)  # Parse string just like argv using shlex

    #==== COMMANDLINE PARSER ====

    #== Commandline description
    desc = '''Easytextract v%s
Description: Easy to use text extractor, from PDF, DOC, DOCX and other document types, including if necessary using OCR (via Tesseract).

Note: use --cmd to avoid launching the graphical interface and use as a commandline tool.
    ''' % __version__
    ep = ''' '''

    #== Commandline arguments
    #-- Constructing the parser
    # Use GooeyParser if we want the GUI because it will provide better widgets
    if (not '--cmd' in argv and not '--ignore-gooey' in argv
            and not '--help' in argv and not '-h' in argv):  # pragma: no cover
        # Initialize the Gooey parser
        main_parser = gooey.GooeyParser(
            add_help=True,
            description=desc,
            epilog=ep,
            formatter_class=argparse.RawTextHelpFormatter)
        # Define Gooey widget types explicitly (because type auto-detection doesn't work quite well)
        widget_dir = {"widget": "DirChooser"}
        widget_filesave = {"widget": "FileSaver"}
        widget_file = {"widget": "FileChooser"}
        widget_text = {"widget": "TextField"}
        widget_multifile = {"widget": "MultiFileChooser"}
    else:  # Else in command-line usage, use the standard argparse
        # Delete the special argument to avoid unrecognized argument error in argparse
        if len(argv) > 0 and '--ignore-gooey' in argv:
            argv.remove(
                '--ignore-gooey'
            )  # this argument is automatically fed by Gooey when the user clicks on Start
        if len(argv) > 0 and '--cmd' in argv: argv.remove('--cmd')
        # Initialize the normal argparse parser
        main_parser = argparse.ArgumentParser(
            add_help=True,
            description=desc,
            epilog=ep,
            formatter_class=argparse.RawTextHelpFormatter)
        # Define dummy dict to keep compatibile with command-line usage
        # (empty dicts so the same **widget_* expansions below are no-ops)
        widget_dir = {}
        widget_filesave = {}
        widget_file = {}
        widget_text = {}
        widget_multifile = {}

    # Required arguments
    main_parser.add_argument(
        '-i',
        '--input',
        metavar='/some/path or /some/file.pdf',
        type=str,
        nargs='+',
        required=True,  # type=argparse.FileType('r') to open directly
        help=
        'Input files to analyze (pdf, docx, doc or any other supported by Textract).',
        **widget_multifile)
    main_parser.add_argument(
        '-o',
        '--output',
        metavar='/some/path',
        type=str,
        required=True,
        help='Output folder where to store the extracted text files.',
        **widget_dir)

    # Optional output/copy mode
    main_parser.add_argument(
        '--filetypes',
        metavar='pdf;docx',
        type=str,
        required=False,
        default='pdf;docx;doc',
        help=
        'Filter by filetype (limited by Textract support). Eg, pdf;docx;doc',
        **widget_text)
    main_parser.add_argument(
        '-a',
        '--accent_remove',
        action='store_true',
        required=False,
        default=False,
        help=
        'Replace accentuated characters by their non-accentuated counterpart (great for post-processing).'
    )
    main_parser.add_argument(
        '--ocr_disable',
        action='store_true',
        required=False,
        default=False,
        help=
        'Disable OCR, which is used if document is unreadable otherwise. OCR takes additional time (using Tesseract v3).'
    )
    main_parser.add_argument(
        '--ocr_force',
        action='store_true',
        required=False,
        default=False,
        help=
        'Force OCR usage for any document type. Needs Tesseract v3 to work.')
    main_parser.add_argument(
        '--tolerant_disable',
        action='store_true',
        required=False,
        default=False,
        help=
        'Tolerance: print and skip errors, else raise an exception (for debugging).'
    )
    main_parser.add_argument(
        '--lang_filter',
        metavar='fr;en',
        type=str,
        required=False,
        default='en;fr;nl',
        help=
        'Filter by language, leave this empty to disable. This is another check for gibberish text after PDF decoding. Eg, fr;en',
        **widget_text)
    main_parser.add_argument(
        '-l',
        '--log',
        metavar='/some/folder/filename.log',
        type=str,
        required=False,
        help=
        'Path to the log file. (Output will be piped to both the stdout and the log file)',
        **widget_filesave)
    main_parser.add_argument('-v',
                             '--verbose',
                             action='store_true',
                             required=False,
                             default=False,
                             help='Verbose mode (show more output).')
    main_parser.add_argument(
        '--silent',
        action='store_true',
        required=False,
        default=False,
        help=
        'No console output (but if --log specified, the log will still be saved in the specified file).'
    )

    #== Parsing the arguments
    args = main_parser.parse_args(argv)  # Storing all arguments to args

    #-- Set variables from arguments
    allinputpaths = args.input
    inputpaths = [get_fullpath(path) for path in allinputpaths]
    outputpath = get_fullpath(args.output)
    accent_remove = args.accent_remove
    filetypes = args.filetypes
    ocr = not args.ocr_disable
    ocr_force = args.ocr_force
    tolerant = not args.tolerant_disable
    lang_filter = args.lang_filter
    verbose = args.verbose
    silent = args.silent

    # -- Sanity checks
    # Strip trailing slashes to ensure we correctly format paths afterward
    inputpaths = [inpath.rstrip('/\\') for inpath in inputpaths]
    outputpath = outputpath.rstrip('/\\')

    # Check existence
    for inpath in inputpaths:
        if not os.path.exists(inpath):
            raise NameError('Specified input path does not exist: %s.' %
                            inpath)
    if not os.path.exists(outputpath) or not os.path.isdir(outputpath):
        raise NameError(
            'Specified output path does not exist or is not a directory. Please check the specified path.'
        )

    # Preprocess lang filter or disable (same for filetypes)
    if not lang_filter:  # empty lang filter, we disable
        lang_filter = None
    else:
        lang_filter = lang_filter.split(';')  # else make a list
    if not filetypes:  # empty filetypes, we disable
        filetypes = None
    else:
        filetypes = filetypes.split(';')  # else make a list

    # -- Configure the log file if enabled (ptee.write() will write to both stdout/console and to the log file)
    # NOTE(review): Tee appears to be a project helper duplicating output to
    # stdout and the log file — confirm its contract before changing this.
    if args.log:
        ptee = Tee(args.log, 'a', nostdout=silent)
        #sys.stdout = Tee(args.log, 'a')
        sys.stderr = Tee(args.log, 'a', nostdout=silent)
    else:
        ptee = Tee(nostdout=silent)

    # -- Main routine
    print('== Easytextract ==')
    print('Extracting text contents, please wait...')
    all_texts, errors = extract_text_recursive(inputpaths,
                                               filetype=filetypes,
                                               ocr=ocr,
                                               tolerant=tolerant,
                                               lang_filter=lang_filter,
                                               accent_remove=accent_remove,
                                               ocr_force=ocr_force,
                                               verbose=verbose)
    print('Total documents successfully extracted: %i' % len(all_texts))

    # Display unreadable (error) reports
    if errors:
        print(
            'Total number of unreadable documents: %i. Here is the detailed list:'
            % len(errors))
        for err in errors:
            print('* %s' % err)

    # Write the extracted text content(s) to text file(s)
    import codecs
    for filename, text in all_texts.items():
        outfilepath = os.path.join(outputpath, filename + '.txt')
        if accent_remove:
            # accent_remove implies pure-ASCII text, so the default open() is safe
            with open(
                    outfilepath, 'w'
            ) as f:  # does not support writing accentuated characters, but it writes as ascii, which is nice
                f.write(text)
        else:
            with codecs.open(
                    outfilepath, mode='w',
                    encoding='utf-8') as f:  # supports accentuated characters
                f.write(text)
    print('Saved extracted text contents to %s' % outputpath)

    return 0
コード例 #10
0
ファイル: argreplay.py プロジェクト: jschultz/argrecord
 def parse_arguments(argstring):
     """Build the Gooey parser, attach the shared arguments, and parse."""
     gui_parser = gooey.GooeyParser()
     add_arguments(gui_parser)
     return gui_parser.parse_args(argstring)
コード例 #11
0
def main(argv=None, return_report=False, regroup=False):
    if argv is None: # if argv is empty, fetch from the commandline
        argv = sys.argv[1:]
    elif isinstance(argv, _str): # else if argv is supplied but it's a simple string, we need to parse it to a list of arguments before handing to argparse or any other argument parser
        argv = shlex.split(argv) # Parse string just like argv using shlex

    # If --gui was specified, then there's a problem
    if len(argv) == 0 or '--gui' in argv:  # pragma: no cover
        raise Exception('--gui specified but an error happened with lib/gooey, cannot load the GUI (however you can still use this script in commandline). Check that lib/gooey exists and that you have wxpython installed. Here is the error: ')

    #==== COMMANDLINE PARSER ====

    #== Commandline description
    desc = '''Regex Path Matcher v%s
Description: Match paths using regular expression, and then generate a report. Can also substitute using regex to generate output paths. A copy mode is also provided to allow the copy of files from input to output paths.
This app is essentially a path matcher using regexp, and it then rewrites the path using regexp, so that you can reuse elements from input path to build the output path.
This is very useful to reorganize folders for experiments, where scripts/softwares expect a specific directories layout in order to work.

Advices
-------
- Filepath comparison: Paths are compared against filepaths, not just folders (but of course you can match folders with regex, but remember when designing your regexp that it will compared against files paths, not directories).
- Relative filepath: Paths are relative to the rootpath (except if --show-fullpath) and that they are always unix style, even on Windows (for consistency on all platforms and to easily reuse regexp).
- Partial matching: partial matching regex is accepted, so you don't need to model the full filepath, only the part you need (eg, 'myfile' will match '/myfolder/sub/myfile-034.mat').
- Unix filepaths: on all platforms, including Windows, paths will be in unix format (except if you set --show_fullpath). It makes things simpler for you to make crossplatform regex patterns.
- Use [^/]+ to match any file/folder in the filepath: because paths are always unix-like, you can use [^/]+ to match any part of the filepath. Eg, "([^/]+)/([^/]+)/data/mprage/.+\.(img|hdr|txt)" will match "UWS/John_Doe/data/mprage/12345_t1_mprage_98782.hdr".
- Split your big task in several smaller, simpler subtasks: instead of trying to do a regex that match T1, T2, DTI, everything at the same time, try to focus on only one modality at a time and execute them using multiple regex queries: eg, move first structural images, then functional images, then dti, etc. instead of all at once.
- Python module: this library can be used as a Python module to include in your scripts (just call `main(return_report=True)`).

Note: use --gui (without any other argument) to launch the experimental gui (needs Gooey library).

In addition to the switches provided below, using this program as a Python module also provides 2 additional options:
 - return_report = True to return as a variable the files matched and the report instead of saving in a file.
 - regroup = True will return the matched files (if return_report=True) in a tree structure of nested list/dicts depending on if the groups are named or not. Groups can also avoid being matched by using non-matching groups in regex.
    ''' % __version__
    ep = ''' '''

    #== Commandline arguments
    #-- Constructing the parser
    # Use GooeyParser if we want the GUI because it will provide better widgets
    if (len(argv) == 0 or '--gui' in argv) and not '--ignore-gooey' in argv:  # pragma: no cover
        # Initialize the Gooey parser
        main_parser = gooey.GooeyParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter)
        # Define Gooey widget types explicitly (because type auto-detection doesn't work quite well)
        widget_dir = {"widget": "DirChooser"}
        widget_filesave = {"widget": "FileSaver"}
        widget_file = {"widget": "FileChooser"}
        widget_text = {"widget": "TextField"}
    else: # Else in command-line usage, use the standard argparse
        # Delete the special argument to avoid unrecognized argument error in argparse
        if len(argv) > 0 and '--ignore-gooey' in argv[0]: argv.remove('--ignore-gooey') # this argument is automatically fed by Gooey when the user clicks on Start
        # Initialize the normal argparse parser
        main_parser = argparse.ArgumentParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter)
        # Define dummy dict to keep compatibile with command-line usage
        widget_dir = {}
        widget_filesave = {}
        widget_file = {}
        widget_text = {}

    # Required arguments
    main_parser.add_argument('-i', '--input', metavar='/some/path', type=str, required=True,
                        help='Path to the input folder', **widget_dir)
    main_parser.add_argument('-ri', '--regex_input', metavar=r'"sub[^/]+/(\d+)"', type=str, required=True,
                        help=r'Regex to match input paths. Must be defined relatively from --input folder. Do not forget to enclose it in double quotes (and not single)! To match any directory, use [^/\]*? or the alias \dir, or \dirnodot if you want to match folders in combination with --dir switch.', **widget_text)

    # Optional output/copy mode
    main_parser.add_argument('-o', '--output', metavar='/new/path', type=str, required=False, default=None,
                        help='Path to the output folder (where file will get copied over if --copy)', **widget_dir)
    main_parser.add_argument('-ro', '--regex_output', metavar=r'"newsub/\1"', type=str, required=False, default=None,
                        help='Regex to substitute input paths to convert to output paths. Must be defined relatively from --output folder. If not provided but --output is specified, will keep the same directory layout as input (useful to extract specific files without changing layout). Do not forget to enclose it in double quotes!', **widget_text)
    main_parser.add_argument('-c', '--copy', action='store_true', required=False, default=False,
                        help='Copy the matched input paths to the regex-substituted output paths.')
    main_parser.add_argument('-s', '--symlink', action='store_true', required=False, default=False,
                        help='Copy with a symbolic/soft link the matched input paths to the regex-substituted output paths (works only on Linux).')
    main_parser.add_argument('-m', '--move', action='store_true', required=False, default=False,
                        help='Move the matched input paths to the regex-substituted output paths.')
    main_parser.add_argument('--move_fast', action='store_true', required=False, default=False,
                        help='Move the matched input paths to the regex-substituted output paths, without checking first that the copy was done correctly.')
    main_parser.add_argument('-d', '--delete', action='store_true', required=False, default=False,
                        help='Delete the matched files.')

    # Optional general arguments
    main_parser.add_argument('-t', '--test', action='store_true', required=False, default=False,
                        help='Regex test mode: Stop after the first matched file and show the result of substitution. Useful to quickly check if the regex patterns are ok.')
    main_parser.add_argument('--dir', action='store_true', required=False, default=False,
                        help='Match directories too? (else only files are matched)')
    main_parser.add_argument('-y', '--yes', action='store_true', required=False, default=False,
                        help='Automatically accept the simulation and apply changes (good for batch processing and command chaining).')
    main_parser.add_argument('-f', '--force', action='store_true', required=False, default=False,
                        help='Force overwriting the target path already exists. Note that by default, if a file already exist, without this option, it won\'t get overwritten and no message will be displayed.')
    main_parser.add_argument('--show_fullpath', action='store_true', required=False, default=False,
                        help='Show full paths instead of relative paths in the simulation.')
    main_parser.add_argument('-ra', '--range', type=str, metavar='1:10-255', required=False, default=False,
                        help='Range mode: match only the files with filenames containing numbers in the specified range. The format is: (regex-match-group-id):(range-start)-(range-end). regex-match-group-id is the id of the regular expression that will contain the numbers that must be compared to the range. range-end is inclusive.')
    main_parser.add_argument('-re', '--regex_exists', metavar=r'"newsub/\1"', type=str, required=False, default=None,
                        help='Regex of output path to check if the matched regex here is matched prior writing output files.', **widget_text)
    main_parser.add_argument('--report', type=str, required=False, default='pathmatcher_report.txt', metavar='pathmatcher_report.txt',
                        help='Where to store the simulation report (default: pwd = current working dir).', **widget_filesave)
    main_parser.add_argument('--noreport', action='store_true', required=False, default=False,
                        help='Do not create a report file, print the report in the console.')
    main_parser.add_argument('--tree', action='store_true', required=False, default=False,
                        help='Regroup in a tree structure the matched files according to named and unnamed regex groups, and save the result as a json file (pathmatcher_tree.json).')
    main_parser.add_argument('-l', '--log', metavar='/some/folder/filename.log', type=str, required=False,
                        help='Path to the log file. (Output will be piped to both the stdout and the log file)', **widget_filesave)
    main_parser.add_argument('-v', '--verbose', action='store_true', required=False, default=False,
                        help='Verbose mode (show more output).')
    main_parser.add_argument('--silent', action='store_true', required=False, default=False,
                        help='No console output (but if --log specified, the log will still be saved in the specified file).')


    #== Parsing the arguments
    args = main_parser.parse_args(argv) # Storing all arguments to args
    
    #-- Set variables from arguments
    inputpath = args.input
    outputpath = args.output if args.output else None
    regex_input = args.regex_input
    regex_output = args.regex_output
    regex_exists = args.regex_exists
    copy_mode = args.copy
    symlink_mode = args.symlink
    move_mode = args.move
    movefast_mode = args.move_fast
    delete_mode = args.delete
    test_flag = args.test
    dir_flag = args.dir
    yes_flag = args.yes
    force = args.force
    only_missing = not force
    show_fullpath = args.show_fullpath
    path_range = args.range
    reportpath = args.report
    noreport = args.noreport
    tree_flag = args.tree
    verbose = args.verbose
    silent = args.silent

    # -- Sanity checks

    # First check if there is any input path, it's always needed
    if inputpath is None:
        raise NameError('No input path specified! Please specify one!')

	# Try to decode in unicode, else we will get issues down the way when outputting files
    try:
        inputpath = str(inputpath)
    except UnicodeDecodeError as exc:
        inputpath = str(inputpath, encoding=chardet.detect(inputpath)['encoding'])
    if outputpath:
        try:
            outputpath = str(outputpath)
        except UnicodeDecodeError as exc:
            outputpath = str(outputpath, encoding=chardet.detect(outputpath)['encoding'])

    # Remove trailing spaces
    inputpath = inputpath.strip()
    if outputpath:
        outputpath = outputpath.strip()

    # Input or output path is a URL (eg: file:///media/... on Ubuntu/Debian), then strip that out
    RE_urlprotocol = re.compile(r'^\w{2,}:[/\\]{2,}', re.I)
    if RE_urlprotocol.match(inputpath):
        inputpath = urllib.parse.unquote(inputpath).decode("utf8")  # first decode url encoded characters such as spaces %20
        inputpath = r'/' + RE_urlprotocol.sub(r'', inputpath)  # need to prepend the first '/' since it is probably an absolute path and here we will strip the whole protocol
    if outputpath and RE_urlprotocol.match(outputpath):
        outputpath = urllib.parse.unquote(outputpath).decode("utf8")
        outputpath = r'/' + RE_urlprotocol.sub(r'', outputpath)

    # Check if input/output paths exist, else might be a relative path, then convert to an absolute path
    rootfolderpath = inputpath if os.path.exists(inputpath) else fullpath(inputpath)
    rootoutpath = outputpath if outputpath is None or os.path.exists(outputpath) else fullpath(outputpath)

    # Single file specified instead of a folder: we define the input folder as the top parent of this file
    if os.path.isfile(inputpath): # if inputpath is a single file (instead of a folder), then define the rootfolderpath as the parent directory (for correct relative path generation, else it will also truncate the filename!)
        rootfolderpath = os.path.dirname(inputpath)
    if outputpath and os.path.isfile(outputpath): # if inputpath is a single file (instead of a folder), then define the rootfolderpath as the parent directory (for correct relative path generation, else it will also truncate the filename!)
        rootoutpath = os.path.dirname(outputpath)

    # Strip trailing slashes to ensure we correctly format paths afterward
    if rootfolderpath:
        rootfolderpath = rootfolderpath.rstrip('/\\')
    if rootoutpath:
        rootoutpath = rootoutpath.rstrip('/\\')

    # Final check of whether thepath exist
    if not os.path.isdir(rootfolderpath):
        raise NameError('Specified input path: %s (detected as %s) does not exist. Please check the specified path.' % (inputpath, rootfolderpath))

    # Check the modes are not conflicting
    if sum([1 if elt == True else 0 for elt in [copy_mode, symlink_mode, move_mode, movefast_mode, delete_mode]]) > 1:
        raise ValueError('Cannot set multiple modes simultaneously, please choose only one!')

    # Check if an output is needed and is not set
    if (copy_mode or symlink_mode or move_mode or movefast_mode) and not outputpath:
        raise ValueError('--copy or --symlink or --move or --move_fast specified but no --output !')

    # If tree mode enabled, enable also the regroup option
    if tree_flag:
        regroup = True

    # -- Configure the log file if enabled (ptee.write() will write to both stdout/console and to the log file)
    if args.log:
        ptee = Tee(args.log, 'a', nostdout=silent)
        #sys.stdout = Tee(args.log, 'a')
        sys.stderr = Tee(args.log, 'a', nostdout=silent)
    else:
        ptee = Tee(nostdout=silent)
    
    # -- Preprocess regular expression to add aliases
    # Directory alias
    regex_input = regex_input.replace('\dirnodot', r'[^\\/.]*?').replace('\dir', r'[^\\/]*?')
    regex_output = regex_output.replace('\dirnodot', r'[^\\/.]*?').replace('\dir', r'[^\\/]*?') if regex_output else regex_output
    regex_exists = regex_exists.replace('\dirnodot', r'[^\\/.]*?').replace('\dir', r'[^\\/]*?') if regex_exists else regex_exists

    #### Main program
    # Test if regular expressions are correct syntactically
    try:
        regin = re.compile(str_to_raw(regex_input))
        regout = re.compile(str_to_raw(regex_output)) if regex_output else None
        regexist = re.compile(str_to_raw(regex_exists)) if regex_exists else None
        if path_range:  # parse the range format
            temp = re.search(r'(\d+):(\d+)-(\d+)', path_range)
            prange = {"group": int(temp.group(1)), "start": int(temp.group(2)), "end": int(temp.group(3))}
            del temp
    except re.error as exc:
        ptee.write("Regular expression is not correct, please fix it! Here is the error stack:\n")
        ptee.write(traceback.format_exc())
        return 1

    ptee.write("== Regex Path Matcher started ==\n")
    ptee.write("Parameters:")
    ptee.write("- Input root: %s" % inputpath)
    ptee.write("- Input regex: %s" % regex_input)
    ptee.write("- Output root: %s" % outputpath)
    ptee.write("- Output regex: %s" % regex_output)
    ptee.write("- Full arguments: %s" % ' '.join(sys.argv))
    ptee.write("\n")

    # == FILES WALKING AND MATCHING/SUBSTITUTION STEP
    files_list = []  # "to copy" files list, stores the list of input files and their corresponding output path (computed using regex)
    files_list_regroup = {}  # files list regrouped, if regroup = True
    ptee.write("Computing paths matching and simulation report, please wait (total time depends on files count - filesize has no influence). Press CTRL+C to abort\n")
    for dirpath, filename in tqdm(recwalk(inputpath, topdown=False, folders=dir_flag), unit='files', leave=True, smoothing=0):
        # Get full absolute filepath and relative filepath from base dir
        filepath = os.path.join(dirpath, filename)
        relfilepath = path2unix(os.path.relpath(filepath, rootfolderpath)) # File relative path from the root (we truncate the rootfolderpath so that we can easily check the files later even if the absolute path is different)
        regin_match = regin.search(relfilepath)
        # Check if relative filepath matches the input regex
        if regin_match:  # Matched! We store it in the "to copy" files list
            # If range mode enabled, check if the numbers in the filepath are in the specified range, else we skip this file
            if path_range:
                curval = int(regin_match.group(prange['group']))
                if not (prange['start'] <= curval <= prange['end']):
                    continue
            # Compute the output filepath using output regex
            if outputpath:
                newfilepath = regin.sub(regex_output, relfilepath) if regex_output else relfilepath
                #fulloutpath = os.path.join(rootoutpath, newfilepath)
            else:
                newfilepath = None
                #fulloutpath = None
            # Check if output path exists (if argument is enabled)
            if regex_exists and newfilepath:
                if not os.path.exists(os.path.join(rootoutpath, regin.sub(regex_exists, relfilepath))):
                    # If not found, skip to the next file
                    if verbose or test_flag:
                        ptee.write("\rFile skipped because output does not exist: %s" % newfilepath)
                    continue
            # Store both paths into the "to copy" list
            files_list.append([relfilepath, newfilepath])
            if verbose or test_flag:  # Regex test mode or verbose: print the match
                ptee.write("\rMatch: %s %s %s\n" % (relfilepath, "-->" if newfilepath else "", newfilepath if newfilepath else ""))
                if test_flag:  # Regex test mode: break file walking after the first match
                    break
            # Store paths in a tree structure based on groups if regroup is enabled
            if regroup and regin_match.groups():
                curlevel = files_list_regroup  # current level in the tree
                parentlevel = curlevel  # parent level in the tree (necessary to modify the leaf, else there is no way to reference by pointer)
                lastg = 0  # last group key (to access the leaf)
                gdict = regin_match.groupdict()  # store the named groups, so we can pop as we consume it
                for g in regin_match.groups():
                    # For each group
                    if g is None:
                        # If group value is empty, just skip (ie, this is an optional group, this allow to specify multiple optional groups and build the tree accordingly)
                        continue
                    # Find if the current group value is in a named group, in this case we will also use the key name of the group followed by the value, and remove from dict (so that if there are multiple matching named groups with same value we don't lose them)
                    k, v, gdict = pop_first_namedgroup(gdict, g)
                    # If a named group is found, use the key followed by value as nodes
                    if k:
                        if not k in curlevel:
                            # Create node for group key/name
                            curlevel[k] = {}
                        if not g in curlevel[k]:
                            # Create subnode for group value
                            curlevel[k][g] = {}
                        # Memorize the parent level
                        parentlevel = curlevel[k]
                        lastg = g
                        # Memorize current level (step down one level for next iteration)
                        curlevel = curlevel[k][g]
                    # Else it is an unnamed group, use the value as the node name
                    else:
                        if not g in curlevel:
                            # Create node for group value
                            curlevel[g] = {}
                        # Memorize the parent level
                        parentlevel = curlevel
                        lastg = g
                        # Memorize current level (step down one level for next iteration)
                        curlevel = curlevel[g]
                # End of tree structure construction
                # Create the leaf if not done already, as a list
                if not parentlevel[lastg]:
                    parentlevel[lastg] = []
                # Append the value (so if there are multiple files matching the same structure, they will be appended in this list)
                parentlevel[lastg].append([relfilepath, newfilepath])
    ptee.write("End of simulation. %i files matched." % len(files_list))
    # Regex test mode: just quit after the first match
    if test_flag:
        if return_report:
            return files_list, None
        else:
            return 0

    # == SIMULATION REPORT STEP
    ptee.write("Preparing simulation report, please wait a few seconds...")

    # Initialize conflicts global flags
    conflict1_flag = False
    conflict2_flag = False

    # Show result in console using a Python implementation of MORE (because file list can be quite long)
    #more_display=More(num_lines=30)
    #"\n".join(map(str,files_list)) | more_display

    # Precompute conflict type 2 lookup table (= dict where each key is a output filepath, and the value the number of occurrences)
    outdict = {}
    for file_op in files_list:
        outdict[file_op[1]] = outdict.get(file_op[1], 0) + 1

    # Build and show simulation report in user's default text editor
    if noreport:
        reportfile = StringIO()
    else:
        reportfile = open(reportpath, 'w')
    try:
        reportfile.write("== REGEX PATH MATCHER SIMULATION REPORT ==\n")
        reportfile.write("Total number of files matched: %i\n" % len(files_list))
        reportfile.write("Parameters:\n")
        reportfile.write("- Input root: %s\n" % inputpath.encode('utf-8'))
        reportfile.write("- Input regex: %s\n" % regex_input)
        reportfile.write("- Output root: %s\n" % (outputpath.encode('utf-8') if outputpath else ''))
        reportfile.write("- Output regex: %s\n" % regex_output)
        reportfile.write("- Full arguments: %s" % ' '.join(sys.argv))
        reportfile.write("\r\n")
        reportfile.write("List of matched files:\n")
        for file_op in files_list:
            conflict1 = False
            conflict2 = False
            if outputpath:
                # Check if there was a conflict:
                # Type 1 - already existing output file (force overwrite?)
                fulloutpath = os.path.join(rootoutpath, file_op[1])
                if os.path.exists(fulloutpath):
                    conflict1 = True
                    conflict1_flag = True

                # Type 2 - two files will output with same name (bad regex)
                if outdict[file_op[1]] > 1:
                    conflict2 = True
                    conflict2_flag = True

            # Show relative or absolute paths?
            if show_fullpath:
                showinpath = os.path.join(rootfolderpath, file_op[0])
                showoutpath = os.path.join(rootoutpath, file_op[1]) if outputpath else None
            else:
                showinpath = file_op[0]
                showoutpath = file_op[1] if outputpath else None

            # Write into report file
            reportfile.write("* %s %s %s %s %s" % (showinpath, "-->" if (outputpath or delete_mode) else "", showoutpath if outputpath else "", "[ALREADY_EXIST]" if conflict1 else '', "[CONFLICT]" if conflict2 else ''))
            reportfile.write("\n")
        if noreport:
            reportfile.seek(0)
            print(reportfile.read())
    finally:
        try:
            reportfile.close()
        except ValueError as exc:
            pass
    # Open the simulation report with the system's default text editor
    if not (yes_flag or return_report or noreport):  # if --yes is supplied, just skip question and apply!
        ptee.write("Opening simulation report with your default editor, a new window should open.")
        open_with_default_app(reportpath)

    # == COPY/MOVE STEP
    if files_list and ( delete_mode or ((copy_mode or symlink_mode or move_mode or movefast_mode) and outputpath) ):
        # -- USER NOTIFICATION AND VALIDATION
        # Notify user of conflicts
        ptee.write("\n")
        if conflict1_flag:
            ptee.write("Warning: conflict type 1 (files already exist) has been detected. Please use --force if you want to overwrite them, else they will be skipped.\n")
        if conflict2_flag:
            ptee.write("Warning: conflict type 2 (collision) has been detected. If you continue, several files will have the same name due to the specified output regex (thus, some will be lost). You should cancel and check your regular expression for output.\n")
        if not conflict1_flag and not conflict2_flag:
            ptee.write("No conflict detected. You are good to go!")

        # Ask user if we should apply
        if not (yes_flag or return_report):  # if --yes is supplied, just skip question and apply!
            applycopy = input("Do you want to apply the result of the path reorganization simulation on %i files? [Y/N]: " % len(files_list))
            if applycopy.lower() != 'y':
                return 0

        # -- APPLY STEP
        ptee.write("Applying new path structure, please wait (total time depends on file sizes and matches count). Press CTRL+C to abort")
        for infilepath, outfilepath in tqdm(files_list, total=len(files_list), unit='files', leave=True):
            if verbose:
                ptee.write("%s --> %s" % (infilepath, outfilepath))
            # Copy the file! (User previously accepted to apply the simulation)
            fullinpath = os.path.join(rootfolderpath, infilepath)
            if outputpath:
                fulloutpath = os.path.join(rootoutpath, outfilepath)
                if movefast_mode:  # movefast: just move the file/directory tree
                    move_any(fullinpath, fulloutpath)
                else:  # else we first copy in any case, then delete old file if move_mode
                    copy_any(fullinpath, fulloutpath, only_missing=only_missing, symlink=True if symlink_mode else False)  # copy file
                    if move_mode:  # if move mode, then delete the old file. Copy/delete is safer than move because we can ensure that the file is fully copied (metadata/stats included) before deleting the old
                        remove_if_exist(fullinpath)
            if delete_mode:  # if delete mode, ensure that the original file is deleted!
                remove_if_exist(fullinpath)

    # == RETURN AND END OF MAIN
    ptee.write("Task done, quitting.")
    # Save the tree structure in a json file if --tree is enabled
    if tree_flag:
        with open('pathmatcher_tree.json', 'wb') as jsonout:
            jsonout.write(json.dumps(files_list_regroup, sort_keys=True, indent=4, separators=(',', ': ')))
        print('Tree structure saved in file pathmatcher_tree.json')
    # Script mode: return the matched files and their substitutions if available
    if return_report:
        if regroup:
            return files_list_regroup, [conflict1_flag, conflict2_flag]
        else:
            return files_list, [conflict1_flag, conflict2_flag]
    # Standalone mode: just return non error code
    else:
        return 0
コード例 #12
0
def main():

    # External libraries: mutagen is optional and only used to read ID3 tags;
    # when it is missing, tag-based features degrade gracefully.
    try:
        import mutagen
    except ImportError:
        mutagen = None

    # Extensions recognised as audio tracks / playlist files throughout main().
    audio_ext = (".mp3", ".m4a", ".m4b", ".m4p", ".aa", ".wav")
    list_ext = (".pls", ".m3u")

    def make_dir_if_absent(path):
        """Create *path* (including parents) if it does not already exist.

        Mirrors ``mkdir -p``: an already-existing directory is not an error;
        any other failure (permissions, a file in the way, ...) propagates
        as OSError.
        """
        # exist_ok (Python 3.2+) replaces the old errno.EEXIST dance.
        os.makedirs(path, exist_ok=True)

    def raises_unicode_error(text):
        """Return True when *text* cannot be encoded as latin-1.

        Such names are unsafe for the iPod's filesystem and get replaced by
        a hash (see hash_error_unicode).  The original parameter was named
        ``str``, shadowing the builtin — renamed.
        """
        try:
            text.encode('latin-1')
            return False
        except (UnicodeEncodeError, UnicodeDecodeError):
            return True

    def hash_error_unicode(item):
        """Build a deterministic ASCII-safe replacement name for *item*.

        The first 8 hex digits of the item's MD5 are reversed and each
        character is re-encoded as its two-digit uppercase code point.
        """
        digest_prefix = hashlib.md5(item.encode('utf-8')).hexdigest()[:8]
        parts = []
        for ch in digest_prefix[::-1]:
            parts.append("%02X" % ord(ch))
        return "".join(parts)

    def validate_unicode(path):
        """Replace any '/'-separated path component that is not latin-1-safe.

        Offending components are swapped for their MD5-derived hash; when the
        final component (the filename) had to be hashed, its original audio
        extension is re-appended so the file stays recognisable.
        """
        components = path.split('/')
        final_was_hashed = False
        for idx, comp in enumerate(components):
            final_was_hashed = raises_unicode_error(comp)
            if final_was_hashed:
                components[idx] = hash_error_unicode(comp)
        extension = os.path.splitext(path)[1].lower()
        suffix = extension if (final_was_hashed and extension in audio_ext) else ''
        return "/".join(components) + suffix

    def exec_exists_in_path(command):
        """Return True when *command* can be launched from $PATH.

        The probe actually runs the command once with all three standard
        streams pointed at os.devnull; an OSError (typically ENOENT) means
        the executable is not available.
        """
        with open(os.devnull, 'w') as sink:
            try:
                with open(os.devnull, 'r') as source:
                    subprocess.call([command], stdout=sink,
                                    stderr=subprocess.STDOUT, stdin=source)
                return True
            except OSError:
                return False

    def splitpath(path):
        """Split *path* on the OS separator into its components."""
        separator = os.sep
        return path.split(separator)

    def get_relpath(path, basepath):
        """Return *path* relative to its longest shared prefix with *basepath*."""
        shared = os.path.commonprefix([splitpath(path), splitpath(basepath)])
        return os.path.relpath(path, os.sep.join(shared))

    def is_path_prefix(prefix, path):
        """Return True when every component of *prefix* leads *path*."""
        common = os.path.commonprefix([splitpath(prefix), splitpath(path)])
        return os.sep.join(common) == prefix

    def group_tracks_by_id3_template(tracks, template):
        """Group *tracks* by the expansion of an ID3 *template* string.

        *template* contains placeholders like ``{album}`` or ``{artist}``;
        each one is replaced by the matching easy-ID3 tag of the track.
        Tracks for which no placeholder at all could be resolved are dropped.

        Returns a list of ``(expanded_key, [tracks...])`` pairs sorted by key.
        """
        grouped_tracks_dict = {}
        template_vars = set(re.findall(r'{.*?}', template))
        for track in tracks:
            # mutagen.File returns None for unrecognised files and can raise
            # on broken tags; either way, fall back to "no tags".
            try:
                id3_dict = mutagen.File(track, easy=True) or {}
            except Exception:
                id3_dict = {}

            key = template
            single_var_present = False
            for var in template_vars:
                # var is e.g. "{album}"; strip the braces for the tag lookup.
                val = id3_dict.get(var[1:-1], [''])[0]
                if len(val) > 0:
                    single_var_present = True
                key = key.replace(var, val)

            if single_var_present:
                grouped_tracks_dict.setdefault(key, []).append(track)

        return sorted(grouped_tracks_dict.items())

    class Text2Speech(object):
        """Wrapper around the external TTS programs pico2wave/espeak/RHVoice.

        ``valid_tts`` tracks which engines are usable; call check_support()
        once at startup to probe $PATH and update it.
        """

        valid_tts = {'pico2wave': True, 'RHVoice': True, 'espeak': True}

        @staticmethod
        def check_support():
            """Probe the TTS engines, updating ``valid_tts`` in place.

            Return True when at least one voiceover program was found.
            Otherwise this will result in silent voiceover for tracks and
            "Playlist N" for playlists.
            """
            # Engine -> warning printed when it is missing (fixes the old
            # "voicever" typo in the user-facing messages).
            warnings = {
                'pico2wave': "Warning: pico2wave not found, voiceover won't be generated using it.",
                'espeak': "Warning: espeak not found, voiceover won't be generated using it.",
                'RHVoice': "Warning: RHVoice not found, Russian voiceover won't be generated.",
            }
            voiceoverAvailable = False
            for engine, warning in warnings.items():
                if exec_exists_in_path(engine):
                    voiceoverAvailable = True
                else:
                    Text2Speech.valid_tts[engine] = False
                    print(warning)
            return voiceoverAvailable

        @staticmethod
        def text2speech(out_wav_path, text):
            """Generate a voiceover wav for *text* at *out_wav_path*.

            Russian text goes through RHVoice; everything else tries
            pico2wave first, then espeak.  Returns True on success.
            """
            # Skip voiceover generation if a track with the same name is used.
            # This might happen with "Track001" or "01. Intro" names for example.
            if os.path.isfile(out_wav_path):
                verboseprint("Using existing", out_wav_path)
                return True

            # ensure we deal with unicode later
            if not isinstance(text, str):
                text = str(text, 'utf-8')
            if Text2Speech.guess_lang(text) == "ru-RU":
                return Text2Speech.rhvoice(out_wav_path, text)
            if Text2Speech.pico2wave(out_wav_path, text):
                return True
            return Text2Speech.espeak(out_wav_path, text)

        # guess-language seems like an overkill for now
        @staticmethod
        def guess_lang(unicodetext):
            """Crude language sniff: any Cyrillic letter means Russian."""
            if re.search("[А-Яа-я]", unicodetext) is not None:
                return 'ru-RU'
            return 'en-GB'

        @staticmethod
        def pico2wave(out_wav_path, unicodetext):
            """Render *unicodetext* with pico2wave; False if unavailable."""
            if not Text2Speech.valid_tts['pico2wave']:
                return False
            subprocess.call(
                ["pico2wave", "-l", "en-GB", "-w", out_wav_path, unicodetext])
            return True

        @staticmethod
        def espeak(out_wav_path, unicodetext):
            """Render *unicodetext* with espeak; False if unavailable."""
            if not Text2Speech.valid_tts['espeak']:
                return False
            subprocess.call([
                "espeak", "-v", "english_rp", "-s", "150", "-w", out_wav_path,
                unicodetext
            ])
            return True

        @staticmethod
        def rhvoice(out_wav_path, unicodetext):
            """Render Russian *unicodetext* with RHVoice, normalized via sox."""
            if not Text2Speech.valid_tts['RHVoice']:
                return False

            # RHVoice writes to a temp wav which sox then normalizes into place.
            tmp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
            tmp_file.close()
            try:
                proc = subprocess.Popen([
                    "RHVoice", "--voice=Elena", "--variant=Russian",
                    "--volume=100", "-o", tmp_file.name
                ],
                                        stdin=subprocess.PIPE)
                proc.communicate(input=unicodetext.encode('utf-8'))
                # make a little bit louder to be comparable with pico2wave
                subprocess.call(["sox", tmp_file.name, out_wav_path, "norm"])
            finally:
                # Never leak the temp file, even when sox/RHVoice errors out.
                os.remove(tmp_file.name)
            return True

    class Record(object):
        """Base class for every binary record in the iTunesSD database.

        Subclasses describe their on-disk layout in ``_struct``, an ordered
        mapping of field name -> (struct format, default value).  Values
        assigned via ``rec[name] = value`` live in ``_fields`` and override
        the defaults when the record is serialized by construct().
        """

        def __init__(self, parent):
            self.parent = parent
            self._struct = collections.OrderedDict([])
            self._fields = {}
            # Propagate the user-facing options down the record tree.
            self.track_voiceover = parent.track_voiceover
            self.playlist_voiceover = parent.playlist_voiceover
            self.rename = parent.rename
            self.trackgain = parent.trackgain

        def __getitem__(self, item):
            # Membership test directly on the dict; include the offending
            # key in the KeyError (the old code raised a bare KeyError).
            if item not in self._struct:
                raise KeyError(item)
            return self._fields.get(item, self._struct[item][1])

        def __setitem__(self, item, value):
            self._fields[item] = value

        def construct(self):
            """Serialize this record field by field, little-endian."""
            # b"".join avoids the quadratic bytes-concatenation of the
            # original loop.
            chunks = []
            for name, (fmt, default) in self._struct.items():
                chunks.append(
                    struct.pack("<" + fmt, self._fields.get(name, default)))
            return b"".join(chunks)

        def text_to_speech(self, text, dbid, playlist=False):
            """Generate a voiceover file for *text*, named after *dbid*.

            Only acts when the matching voiceover option is enabled; returns
            the Text2Speech result, or False when voiceover is disabled.
            """
            if self.track_voiceover and not playlist or self.playlist_voiceover and playlist:
                # The wav filename is the dbid rendered as reversed hex.
                fn = ''.join(format(x, '02x') for x in reversed(dbid))
                path = os.path.join(self.base, "iPod_Control", "Speakable",
                                    "Tracks" if not playlist else "Playlists",
                                    fn + ".wav")
                return Text2Speech.text2speech(path, text)
            return False

        def path_to_ipod(self, filename):
            """Convert a local path to an iPod-internal '/'-separated path."""
            if os.path.commonprefix([os.path.abspath(filename), self.base
                                     ]) != self.base:
                raise IOError(
                    "Cannot get Ipod filename, since file is outside the IPOD path"
                )
            baselen = len(self.base)
            if self.base.endswith(os.path.sep):
                baselen -= 1
            ipodname = "/".join(
                os.path.abspath(filename)[baselen:].split(os.path.sep))
            return ipodname

        def ipod_to_path(self, ipodname):
            """Convert an iPod-internal path back to a local filesystem path."""
            return os.path.abspath(
                os.path.join(self.base, os.path.sep.join(ipodname.split("/"))))

        @property
        def shuffledb(self):
            # Walk up the parent chain to the root Shuffler object.
            parent = self.parent
            while parent.__class__ != Shuffler:
                parent = parent.parent
            return parent

        @property
        def base(self):
            # Root path of the iPod filesystem.
            return self.shuffledb.path

        @property
        def tracks(self):
            return self.shuffledb.tracks

        @property
        def albums(self):
            return self.shuffledb.albums

        @property
        def artists(self):
            return self.shuffledb.artists

        @property
        def lists(self):
            return self.shuffledb.lists

    class TunesSD(Record):
        """The top-level "bdhs" record: the whole iTunesSD database.

        construct() emits the fixed 64-byte database header immediately
        followed by the track header chunk and the playlist header chunk,
        patching the offsets and counters in the header as it goes.
        """
        def __init__(self, parent):
            Record.__init__(self, parent)
            self.track_header = TrackHeader(self)
            self.play_header = PlaylistHeader(self)
            # (field, (struct format, default)) -- order is the on-disk order.
            self._struct = collections.OrderedDict([
                ("header_id", ("4s", b"bdhs")),  # shdb
                ("unknown1", ("I", 0x02000003)),
                ("total_length", ("I", 64)),
                ("total_number_of_tracks", ("I", 0)),
                ("total_number_of_playlists", ("I", 0)),
                ("unknown2", ("Q", 0)),
                ("max_volume", ("B", 0)),
                ("voiceover_enabled", ("B", int(self.track_voiceover))),
                ("unknown3", ("H", 0)),
                ("total_tracks_without_podcasts", ("I", 0)),
                ("track_header_offset", ("I", 64)),
                ("playlist_header_offset", ("I", 0)),
                ("unknown4", ("20s", b"\x00" * 20)),
            ])

        def construct(self):
            """Serialize the database: 64-byte header + tracks + playlists."""
            # The header is a fixed length, so no need to calculate it
            self.track_header.base_offset = 64
            track_header = self.track_header.construct()

            # The playlist offset will depend on the number of tracks
            self.play_header.base_offset = self.track_header.base_offset + len(
                track_header)
            play_header = self.play_header.construct(self.track_header.tracks)
            self["playlist_header_offset"] = self.play_header.base_offset

            # Mirror the sub-header counters into the database header.
            self["total_number_of_tracks"] = self.track_header[
                "number_of_tracks"]
            self["total_tracks_without_podcasts"] = self.track_header[
                "number_of_tracks"]
            self["total_number_of_playlists"] = self.play_header[
                "number_of_playlists"]

            output = Record.construct(self)
            return output + track_header + play_header

    class TrackHeader(Record):
        """The "hths" record: a table of absolute offsets to Track records."""
        def __init__(self, parent):
            # Absolute file offset where this header is written; set by TunesSD.
            self.base_offset = 0
            Record.__init__(self, parent)
            self._struct = collections.OrderedDict([
                ("header_id", ("4s", b"hths")),  # shth
                ("total_length", ("I", 0)),
                ("number_of_tracks", ("I", 0)),
                ("unknown1", ("Q", 0)),
            ])

        def construct(self):
            """Serialize the header, the offset table and every Track record."""
            self["number_of_tracks"] = len(self.tracks)
            # 20-byte fixed header + one 4-byte offset per track.
            self["total_length"] = 20 + (len(self.tracks) * 4)
            output = Record.construct(self)

            # Construct the underlying tracks
            track_chunk = bytes()
            for i in self.tracks:
                track = Track(self)
                verboseprint("[*] Adding track", i)
                track.populate(i)
                # Absolute offset where this track record will land.
                output += struct.pack(
                    "I",
                    self.base_offset + self["total_length"] + len(track_chunk))
                track_chunk += track.construct()
            return output + track_chunk

    class Track(Record):
        """One "rths" record: a single audio file on the iPod."""
        def __init__(self, parent):
            Record.__init__(self, parent)
            self._struct = collections.OrderedDict([
                ("header_id", ("4s", b"rths")),  # shtr
                ("header_length", ("I", 0x174)),
                ("start_at_pos_ms", ("I", 0)),
                ("stop_at_pos_ms", ("I", 0)),
                ("volume_gain", ("I", int(self.trackgain))),
                ("filetype", ("I", 1)),
                ("filename", ("256s", b"\x00" * 256)),
                ("bookmark", ("I", 0)),
                ("dontskip", ("B", 1)),
                ("remember", ("B", 0)),
                ("unintalbum", ("B", 0)),
                ("unknown", ("B", 0)),
                ("pregap", ("I", 0x200)),
                ("postgap", ("I", 0x200)),
                ("numsamples", ("I", 0)),
                ("unknown2", ("I", 0)),
                ("gapless", ("I", 0)),
                ("unknown3", ("I", 0)),
                ("albumid", ("I", 0)),
                ("track", ("H", 1)),
                ("disc", ("H", 0)),
                ("unknown4", ("Q", 0)),
                # Default must be bytes: the old default of 0 would make
                # struct.pack("8s", ...) fail if construct() ever ran before
                # populate() set the real dbid.
                ("dbid", ("8s", b"\x00" * 8)),
                ("artistid", ("I", 0)),
                ("unknown5", ("32s", b"\x00" * 32)),
            ])

        def populate(self, filename):
            """Fill the record fields (and voiceover data) for *filename*."""
            self["filename"] = self.path_to_ipod(filename).encode('utf-8')

            # AAC-family containers get filetype 2; mp3/wav keep the default 1.
            if os.path.splitext(filename)[1].lower() in (".m4a", ".m4b",
                                                         ".m4p", ".aa"):
                self["filetype"] = 2

            text = os.path.splitext(os.path.basename(filename))[0]

            # Try to get album and artist information with mutagen
            if mutagen:
                audio = None
                # Was a bare except; narrowed so it no longer swallows
                # KeyboardInterrupt/SystemExit.
                try:
                    audio = mutagen.File(filename, easy=True)
                except Exception:
                    print(
                        "Error calling mutagen. Possible invalid filename/ID3Tags (hyphen in filename?)"
                    )
                if audio:
                    # Note: Rythmbox IPod plugin sets this value always 0.
                    self["stop_at_pos_ms"] = int(audio.info.length * 1000)

                    # Intern artist/album names into the shared lists so the
                    # record can reference them by index.
                    artist = audio.get("artist", ["Unknown"])[0]
                    if artist in self.artists:
                        self["artistid"] = self.artists.index(artist)
                    else:
                        self["artistid"] = len(self.artists)
                        self.artists.append(artist)

                    album = audio.get("album", ["Unknown"])[0]
                    if album in self.albums:
                        self["albumid"] = self.albums.index(album)
                    else:
                        self["albumid"] = len(self.albums)
                        self.albums.append(album)

                    if audio.get("title", "") and audio.get("artist", ""):
                        # Each get() returns a list; concatenating them gives
                        # [title, artist], joining to "Title - Artist".
                        text = " - ".join(
                            audio.get("title", "") + audio.get("artist", ""))

            # Handle the VoiceOverData
            if isinstance(text, str):
                text = text.encode('utf-8', 'ignore')
            self["dbid"] = hashlib.md5(text).digest()[:8]
            self.text_to_speech(text, self["dbid"])

    class PlaylistHeader(Record):
        """The "hphs" record: offsets to the master list and all playlists."""
        def __init__(self, parent):
            # Absolute file offset of this header; set by TunesSD.construct().
            self.base_offset = 0
            Record.__init__(self, parent)
            self._struct = collections.OrderedDict([
                ("header_id", ("4s", b"hphs")),  #shph
                ("total_length", ("I", 0)),
                ("number_of_playlists", ("I", 0)),
                ("number_of_non_podcast_lists", ("2s", b"\xFF\xFF")),
                ("number_of_master_lists", ("2s", b"\x01\x00")),
                ("number_of_non_audiobook_lists", ("2s", b"\xFF\xFF")),
                ("unknown2", ("2s", b"\x00" * 2)),
            ])

        def construct(self, tracks):
            """Serialize the header, offset table and every playlist chunk."""
            # Build the master list
            masterlist = Playlist(self)
            verboseprint("[+] Adding master playlist")
            masterlist.set_master(tracks)
            chunks = [masterlist.construct(tracks)]

            # Build all the remaining playlists
            playlistcount = 1
            for i in self.lists:
                playlist = Playlist(self)
                verboseprint("[+] Adding playlist", (i[0] if type(i) == type(
                    ()) else i))
                playlist.populate(i)
                construction = playlist.construct(tracks)
                # Empty playlists are dropped rather than written out.
                if playlist["number_of_songs"] > 0:
                    playlistcount += 1
                    chunks += [construction]
                else:
                    print(
                        "Error: Playlist does not contain a single track. Skipping playlist."
                    )

            self["number_of_playlists"] = playlistcount
            # 0x14-byte fixed header + one 4-byte offset per playlist.
            self["total_length"] = 0x14 + (self["number_of_playlists"] * 4)
            # Start the header

            output = Record.construct(self)
            offset = self.base_offset + self["total_length"]

            # One absolute offset per playlist chunk, in order.
            for i in range(len(chunks)):
                output += struct.pack("I", offset)
                offset += len(chunks[i])

            return output + b"".join(chunks)

    class Playlist(Record):
        def __init__(self, parent):
            """A single "lphs" playlist record; listtracks holds member paths."""
            self.listtracks = []
            Record.__init__(self, parent)
            self._struct = collections.OrderedDict([
                ("header_id", ("4s", b"lphs")),  # shpl
                ("total_length", ("I", 0)),
                ("number_of_songs", ("I", 0)),
                ("number_of_nonaudio", ("I", 0)),
                ("dbid", ("8s", b"\x00" * 8)),
                ("listtype", ("I", 2)),  # 1 = master list (see set_master), 2 = normal
                ("unknown1", ("16s", b"\x00" * 16))
            ])

        def set_master(self, tracks):
            """Turn this playlist into the master "All songs" list.

            By default the builtin "All Songs" voiceover is used (dbid all
            zero); when playlist voiceover is enabled and an English TTS
            engine exists, an alternative "All songs" voiceover is generated
            to match the speaker voice of the other playlists.
            """
            self["listtype"] = 1
            self.listtracks = tracks
            english_tts = (Text2Speech.valid_tts['pico2wave']
                           or Text2Speech.valid_tts['espeak'])
            if self.playlist_voiceover and english_tts:
                self["dbid"] = hashlib.md5(b"masterlist").digest()[:8]
                self.text_to_speech("All songs", self["dbid"], True)

        def populate_m3u(self, data):
            """Parse .m3u playlist lines into a list of track paths.

            *data* comes from a file opened in binary mode ('rb'), so each
            line is decoded before parsing -- bytes.startswith("#") would
            raise TypeError on Python 3.  Comment lines ('#') are skipped.
            """
            listtracks = []
            for line in data:
                if isinstance(line, bytes):
                    line = line.decode('utf-8')
                if not line.startswith("#"):
                    path = line.strip()
                    if self.rename:
                        path = validate_unicode(path)
                    listtracks.append(path)
            return listtracks

        def populate_pls(self, data):
            """Parse .pls playlist lines into an ordered list of track paths.

            Honours the FileN= numbering for ordering, strips an optional
            file:// prefix and percent-encoding.  *data* comes from a file
            opened in binary mode ('rb'), so each line is decoded first --
            bytes.split("=") would raise TypeError on Python 3.
            """
            sorttracks = []
            for line in data:
                if isinstance(line, bytes):
                    line = line.decode('utf-8')
                dataarr = line.strip().split("=", 1)
                if dataarr[0].lower().startswith("file"):
                    num = int(dataarr[0][4:])
                    filename = urllib.parse.unquote(dataarr[1]).strip()
                    if filename.lower().startswith('file://'):
                        filename = filename[7:]
                    if self.rename:
                        filename = validate_unicode(filename)
                    sorttracks.append((num, filename))
            listtracks = [x for (_, x) in sorted(sorttracks)]
            return listtracks

        def populate_directory(self, playlistpath, recursive=True):
            """Build a track list from all audio files under *playlistpath*.

            Walks subfolders recursively (in a stable, sorted order) unless
            *recursive* is False.  Folders containing no music and only a
            single Album would generate duplicated playlists. That is
            intended and "wont fix".  Empty folders (inside the music path)
            will generate an error -> "wont fix".
            """
            listtracks = []
            for (dirpath, dirnames, filenames) in os.walk(playlistpath):
                # In-place sort so os.walk descends in deterministic order.
                dirnames.sort()

                # Ignore any hidden directories ('/' test: POSIX-style paths).
                if "/." not in dirpath:
                    for filename in sorted(filenames, key=lambda x: x.lower()):
                        # Only add valid music files; reuse the shared
                        # audio_ext tuple instead of a duplicated literal.
                        if os.path.splitext(filename)[1].lower() in audio_ext:
                            fullPath = os.path.abspath(
                                os.path.join(dirpath, filename))
                            listtracks.append(fullPath)
                if not recursive:
                    break
            return listtracks

        def remove_relatives(self, relative, filename):
            """Resolve *relative* against the directory of *filename*.

            Paths that already exist are returned untouched; anything else is
            assumed to be relative to the playlist file's own directory.
            """
            if os.path.exists(relative):
                return relative
            playlist_dir = os.path.dirname(os.path.abspath(filename))
            return os.path.join(playlist_dir, relative)

        def populate(self, obj):
            """Fill listtracks and voiceover data from *obj*.

            *obj* is either a ``(name, tracks)`` tuple, a directory path
            (playlist of the folder and all subfolders), or the path of a
            .pls/.m3u playlist file.

            Raises ValueError for an unsupported playlist file extension.
            """
            if isinstance(obj, tuple):
                # Pre-built playlist: (display name, track list).
                self.listtracks = obj[1]
                text = obj[0]
            else:
                filename = obj
                if os.path.isdir(filename):
                    self.listtracks = self.populate_directory(filename)
                    text = os.path.splitext(os.path.basename(filename))[0]
                else:
                    # Read the playlist file
                    with open(filename, 'rb') as f:
                        data = f.readlines()

                    extension = os.path.splitext(filename)[1].lower()
                    if extension == '.pls':
                        self.listtracks = self.populate_pls(data)
                    elif extension == '.m3u':
                        self.listtracks = self.populate_m3u(data)
                    else:
                        # The old bare `raise` had no active exception and
                        # would itself crash with a RuntimeError.
                        raise ValueError(
                            "Unsupported playlist format: %s" % extension)

                    # Ensure all paths are not relative to the playlist file
                    for i in range(len(self.listtracks)):
                        self.listtracks[i] = self.remove_relatives(
                            self.listtracks[i], filename)
                    text = os.path.splitext(os.path.basename(filename))[0]

            # Handle the VoiceOverData
            self["dbid"] = hashlib.md5(text.encode('utf-8')).digest()[:8]
            self.text_to_speech(text, self["dbid"], True)

        def construct(self, tracks):
            """Serialize this playlist record plus the track-index chunks.

            *tracks* is the master list of track paths; each playlist entry is
            stored as its 32-bit index into that list. Entries that cannot be
            found are reported and skipped.

            Fix: the original used a bare ``except:`` around ``tracks.index``,
            which also swallowed KeyboardInterrupt/SystemExit; only ValueError
            (raised by ``list.index`` on a miss) is caught now.
            """
            self["total_length"] = 44 + (4 * len(self.listtracks))
            self["number_of_songs"] = 0

            chunks = bytes()
            for entry in self.listtracks:
                path = self.ipod_to_path(entry)
                try:
                    position = tracks.index(path)
                except ValueError:
                    # Print an error if no track was found.
                    # Empty playlists are handled in the PlaylistHeader class.
                    print("Error: Could not find track \"" + path + "\".")
                    print(
                        "Maybe its an invalid FAT filesystem name. Please fix your playlist. Skipping track."
                    )
                    continue
                chunks += struct.pack("I", position)
                self["number_of_songs"] += 1
            self["number_of_nonaudio"] = self["number_of_songs"]

            output = Record.construct(self)
            return output + chunks

    class Shuffler(object):
        """Scans an iPod Shuffle's directory tree and builds its database.

        Collects tracks and playlists from the filesystem, optionally builds
        automatic per-directory or id3-tag-based playlists, and serializes
        everything to iPod_Control/iTunes/iTunesSD.
        """

        def __init__(self,
                     path,
                     track_voiceover=False,
                     playlist_voiceover=False,
                     rename=False,
                     trackgain=0,
                     auto_dir_playlists=None,
                     auto_id3_playlists=None):
            # Root of the iPod filesystem; all scanning is relative to it.
            self.path = os.path.abspath(path)
            self.tracks = []
            self.albums = []
            self.artists = []
            self.lists = []
            self.tunessd = None
            self.track_voiceover = track_voiceover
            self.playlist_voiceover = playlist_voiceover
            self.rename = rename
            self.trackgain = trackgain
            # Depth limit for per-directory playlists (None=off, -1=unlimited).
            self.auto_dir_playlists = auto_dir_playlists
            # id3 template string for tag-based playlists (None=off).
            self.auto_id3_playlists = auto_id3_playlists

        def initialize(self):
            """Reset voiceover data and ensure the required directory layout."""
            # remove existing voiceover files (they are either useless or will be overwritten anyway)
            for dirname in ('iPod_Control/Speakable/Playlists',
                            'iPod_Control/Speakable/Tracks'):
                shutil.rmtree(os.path.join(self.path, dirname),
                              ignore_errors=True)
            for dirname in ('iPod_Control/iTunes', 'iPod_Control/Music',
                            'iPod_Control/Speakable/Playlists',
                            'iPod_Control/Speakable/Tracks'):
                make_dir_if_absent(os.path.join(self.path, dirname))

        def dump_state(self):
            """Print the collected tracks/albums/artists/playlists (debugging)."""
            print("Shuffle DB state")
            print("Tracks", self.tracks)
            print("Albums", self.albums)
            print("Artists", self.artists)
            print("Playlists", self.lists)

        def populate(self):
            """Walk the tree, collecting tracks/playlists and auto-playlists."""
            self.tunessd = TunesSD(self)
            for (dirpath, dirnames, filenames) in os.walk(self.path):
                dirnames.sort()
                relpath = get_relpath(dirpath, self.path)
                # Ignore the speakable directory and any hidden directories
                if not is_path_prefix("iPod_Control/Speakable",
                                      relpath) and "/." not in dirpath:
                    for filename in sorted(filenames, key=lambda x: x.lower()):
                        # Ignore hidden files
                        if not filename.startswith("."):
                            fullPath = os.path.abspath(
                                os.path.join(dirpath, filename))
                            if os.path.splitext(filename)[1].lower() in (
                                    ".mp3", ".m4a", ".m4b", ".m4p", ".aa",
                                    ".wav"):
                                self.tracks.append(fullPath)
                            if os.path.splitext(filename)[1].lower() in (
                                    ".pls", ".m3u"):
                                self.lists.append(fullPath)

                # Create automatic playlists in music directory.
                # Ignore the (music) root and any hidden directories.
                if self.auto_dir_playlists and "iPod_Control/Music/" in dirpath and "/." not in dirpath:
                    # Only go to a specific depth. -1 is unlimited, 0 is ignored as there is already a master playlist.
                    depth = dirpath[len(self.path) + len(os.path.sep):].count(
                        os.path.sep) - 1
                    if self.auto_dir_playlists < 0 or depth <= self.auto_dir_playlists:
                        self.lists.append(os.path.abspath(dirpath))

            # PEP8 fix: compare against None with `is not`, not `!=`.
            if self.auto_id3_playlists is not None:
                if mutagen:
                    for grouped_list in group_tracks_by_id3_template(
                            self.tracks, self.auto_id3_playlists):
                        self.lists.append(grouped_list)
                else:
                    print(
                        "Error: No mutagen found. Cannot generate auto-id3-playlists."
                    )
                    sys.exit(1)

        def write_database(self):
            """Serialize the database to iPod_Control/iTunes/iTunesSD."""
            print("Writing database. This may take a while...")
            print(
                "writing to file: " +
                os.path.join(self.path, "iPod_Control", "iTunes", "iTunesSD"))
            with open(
                    os.path.join(self.path, "iPod_Control", "iTunes",
                                 "iTunesSD"), "wb") as f:
                try:
                    f.write(self.tunessd.construct())
                except IOError as e:
                    print("I/O error({0}): {1}".format(e.errno, e.strerror))
                    print("Error: Writing iPod database failed.")
                    sys.exit(1)

            print("Database written successfully:")
            print("Tracks", len(self.tracks))
            print("Albums", len(self.albums))
            print("Artists", len(self.artists))
            print("Playlists", len(self.lists))

    #
    # Read all files from the directory
    # Construct the appropriate iTunesDB file
    # Construct the appropriate iTunesSD file
    #   http://shuffle3db.wikispaces.com/iTunesSD3gen
    # Use SVOX pico2wave and RHVoice to produce voiceover data
    #

    def check_unicode(path):
        # Recursively scan *path* and rename any file/directory whose name
        # triggers a unicode encoding error to an ASCII-safe hashed name.
        # Returns True if this level (or any level below) contains at least
        # one recognizable audio or playlist file.
        # NOTE(review): relies on module-level helpers `audio_ext`, `list_ext`,
        # `raises_unicode_error` and `hash_error_unicode` defined elsewhere
        # in the file -- their exact semantics are assumed, not shown here.
        ret_flag = False  # True if there is a recognizable file within this level
        for item in os.listdir(path):
            if os.path.isfile(os.path.join(path, item)):
                if os.path.splitext(item)[1].lower() in audio_ext + list_ext:
                    ret_flag = True
                    if raises_unicode_error(item):
                        src = os.path.join(path, item)
                        # Hash the problem name, then re-append the lowercased
                        # original extension so the file type stays visible.
                        dest = os.path.join(path, hash_error_unicode(
                            item)) + os.path.splitext(item)[1].lower()
                        print('Renaming %s -> %s' % (src, dest))
                        os.rename(src, dest)
            else:
                # Directories: recurse first so children are renamed before
                # the directory itself changes name.
                ret_flag = (check_unicode(os.path.join(path, item))
                            or ret_flag)
                # NOTE(review): once ret_flag is True it stays True, so later
                # sibling directories get renamed even if they themselves
                # contain no relevant files -- confirm this is intended.
                if ret_flag and raises_unicode_error(item):
                    src = os.path.join(path, item)
                    new_name = hash_error_unicode(item)
                    dest = os.path.join(path, new_name)
                    print('Renaming %s -> %s' % (src, dest))
                    os.rename(src, dest)
        return ret_flag

    def nonnegative_int(string):
        """Argparse type callable: parse *string* as an integer in 0-99.

        Used for the --track-gain option; raises ArgumentTypeError (which
        argparse turns into a usage error) for non-integers or values
        outside the supported gain range.
        """
        try:
            value = int(string)
        except ValueError:
            raise argparse.ArgumentTypeError("'%s' must be an integer" %
                                             string)
        if not 0 <= value <= 99:
            raise argparse.ArgumentTypeError(
                "Track gain value should be in range 0-99")
        return value

    def checkPathValidity(path):
        """Exit with status 1 unless *path* is an existing, writable directory.

        Bug fix: the original ignored its *path* parameter and validated the
        global ``result.path`` instead (and printed both), silently coupling
        the function to module state. It now validates only its argument.
        """
        if not os.path.isdir(path):
            print(
                "Error finding IPod directory. Maybe it is not connected or mounted?"
            )
            print(path)
            sys.exit(1)

        if not os.access(path, os.W_OK):
            print('Unable to get write permissions in the IPod directory')
            sys.exit(1)

    def handle_interrupt(signal, frame):
        """Signal handler for SIGINT: announce the interrupt, exit with code 1."""
        message = "Interrupt detected, exiting..."
        print(message)
        sys.exit(1)

    if __name__ == '__main__':
        # Entry point: parse options, validate the iPod path, then build the
        # iTunesSD database. Install a Ctrl+C handler so an interrupted run
        # exits cleanly instead of tracebacking.
        signal.signal(signal.SIGINT, handle_interrupt)

        parser = gooey.GooeyParser(
            description=
            'Python script for building the Track and Playlist database '
            'for the newer gen IPod Shuffle. Version 1.5')

        parser.add_argument('-t',
                            '--track-voiceover',
                            action='store_true',
                            help='Enable track voiceover feature')

        parser.add_argument('-p',
                            '--playlist-voiceover',
                            action='store_true',
                            help='Enable playlist voiceover feature')

        parser.add_argument(
            '-u',
            '--rename-unicode',
            action='store_true',
            help=
            'Rename files causing unicode errors, will do minimal required renaming'
        )

        parser.add_argument(
            '-g',
            '--track-gain',
            type=nonnegative_int,
            default='0',
            help='Specify volume gain (0-99) for all tracks; '
            '0 (default) means no gain and is usually fine; '
            'e.g. 60 is very loud even on minimal player volume')

        # nargs='?' + const: `-d` alone means unlimited depth (-1); absent
        # means the feature is off (None); `-d N` limits the depth to N.
        parser.add_argument(
            '-d',
            '--auto-dir-playlists',
            type=int,
            default=None,
            const=-1,
            nargs='?',
            help=
            'Generate automatic playlists for each folder recursively inside '
            '"IPod_Control/Music/". You can optionally limit the depth: '
            '0=root, 1=artist, 2=album, n=subfoldername, default=-1 (No Limit).'
        )

        # nargs='?' + const: `-i` alone uses the '{artist}' template; absent
        # disables tag-based playlists entirely.
        parser.add_argument(
            '-i',
            '--auto-id3-playlists',
            type=str,
            default=None,
            metavar='ID3_TEMPLATE',
            const='{artist}',
            nargs='?',
            help=
            'Generate automatic playlists based on the id3 tags of any music '
            'added to the iPod. You can optionally specify a template string '
            'based on which id3 tags are used to generate playlists. For eg. '
            '\'{artist} - {album}\' will use the pair of artist and album to group '
            'tracks under one playlist. Similarly \'{genre}\' will group tracks based '
            'on their genre tag. Default template used is \'{artist}\'')

        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='Show verbose output of database generation.')

        parser.add_argument('path',
                            help='Path to the IPod\'s root directory',
                            widget='DirChooser')

        result = parser.parse_args()

        # Enable verbose printing if desired
        verboseprint = print if result.verbose else lambda *a, **k: None

        print(result)
        # Normalize the path to end with a separator before validating it.
        if result.path[-1] != os.path.sep:
            result.path = result.path + os.path.sep
        checkPathValidity(result.path)

        if result.rename_unicode:
            check_unicode(result.path)

        if not mutagen:
            print(
                "Warning: No mutagen found. Database will not contain any album nor artist information."
            )

        verboseprint("Playlist voiceover requested:",
                     result.playlist_voiceover)
        verboseprint("Track voiceover requested:", result.track_voiceover)
        # Voiceover requires an external text-to-speech backend; degrade to
        # no-voiceover instead of failing later if none is installed.
        if (result.track_voiceover or result.playlist_voiceover):
            if not Text2Speech.check_support():
                print(
                    "Error: Did not find any voiceover program. Voiceover disabled."
                )
                result.track_voiceover = False
                result.playlist_voiceover = False
            else:
                verboseprint("Voiceover available.")

        # Build the in-memory model from the iPod's tree, then serialize it
        # to iPod_Control/iTunes/iTunesSD.
        shuffle = Shuffler(result.path,
                           track_voiceover=result.track_voiceover,
                           playlist_voiceover=result.playlist_voiceover,
                           rename=result.rename_unicode,
                           trackgain=result.track_gain,
                           auto_dir_playlists=result.auto_dir_playlists,
                           auto_id3_playlists=result.auto_id3_playlists)
        shuffle.initialize()
        shuffle.populate()
        shuffle.write_database()
コード例 #13
0
def _add_shared_arguments(target, stored_args, optional):
    """Add the analysis arguments shared by the universal and paradigm parsers.

    Args:
        target: the (sub)parser to extend.
        stored_args: dict of values saved from the previous run, used as defaults.
        optional: when True the arguments are added as ``--flags`` (per-paradigm
            overrides); when False they are positional (universal settings).

    The original duplicated this exact add_argument sequence 12 times (once
    positionally for the universal parser, once as flags for each of the 11
    paradigm subparsers); this helper removes that copy-paste.
    """
    # (dest, add_argument kwargs) in the exact order the original listed them.
    shared = [
        ("start", {"metavar": "Start", "widget": "DateChooser",
                   "help": "Start times and dates of the phases"}),
        ("end", {"metavar": "End", "widget": "DateChooser",
                 "help": "End times and dates of the phases"}),
        ("plots", {"metavar": "Plots", "widget": "Dropdown",
                   "choices": ["Bar", "Line", "Box"],
                   "help": "Types of plots to make"}),
        ("tables", {"metavar": "Tables", "widget": "Dropdown",
                    "choices": ["Normality", "Variance", "Post-hoc"],
                    "help": "Types of tables to make"}),
        ("comparisons", {"metavar": "Comparisons", "widget": "Dropdown",
                         "choices": ["Between Group", "Within Group"],
                         "help": "What comparisons to make"}),
        ("error", {"metavar": "Error", "widget": "Dropdown",
                   "choices": ["SEM", "SD"],
                   "help": "Way error should be measured"}),
        ("normality", {"metavar": "Normality", "widget": "Dropdown",
                       "choices": ["Shapiro-Wilk", "Kolmogorov-Smirnov"],
                       "help": "Which normality tests to use"}),
        ("variance", {"metavar": "Variance", "widget": "Dropdown",
                      "choices": ["Levene", "Brown-Forsythe"],
                      "help": "Which variance tests to use"}),
        ("tests", {"metavar": "Tests", "widget": "Dropdown",
                   "choices": ["Kruskal-Wallis", "Mann-Whitney"],
                   "help": "Which statistical tests to use"}),
        ("post_hoc", {"metavar": "Post-hoc", "widget": "Dropdown",
                      "choices": ["Dunn", "Tukey"],
                      "help": "Which post hoc tests to use"}),
    ]
    for dest, kwargs in shared:
        name = "--" + dest if optional else dest
        target.add_argument(name, default=stored_args.get(dest), **kwargs)
    # Exclusion lists are optional flags on every parser.
    target.add_argument(
        "--excluded_groups",
        metavar="Excluded Groups",
        default=stored_args.get("excluded_groups"),
        help="Groups to exclude",
    )
    target.add_argument(
        "--excluded_animals",
        metavar="Excluded Animals",
        default=stored_args.get("excluded_animals"),
        help="Animals to exclude",
    )


def parse_args():
    """Use GooeyParser to build up arguments in the script and save the arguments in a
    default json file so that they can be retrieved each time the script is ran.

    Returns:
        (args, all_paradigms): the parsed Namespace and the mapping from
        paradigm keys to their human-readable display names.
    """
    stored_args = {}
    args_file = ".pymice-analyzer-args.json"
    # Read in the prior arguments as a dictionary.
    if os.path.isfile(args_file):
        with open(args_file) as data_file:
            stored_args = jn.load(data_file)
    parser = gy.GooeyParser(description="GPLv3 - Emir Turkes; Phenovance LLC")
    subs = parser.add_subparsers()

    universal_parser = subs.add_parser(
        "universal_settings",
        prog="Universal Settings",
        help="Blanket settings for all paradigms",
    )
    universal_parser.add_argument(
        "proj_name",
        metavar="Project Name",
        default=stored_args.get("proj_name"),
        help="Name of your project",
    )
    universal_parser.add_argument(
        "tzinfo",
        metavar="Time Zone",
        widget="Dropdown",
        choices=list(filter(re.compile(".*GMT").match, pz.all_timezones)),
        default=stored_args.get("tzinfo"),
        help="Time zone where Intellicage data was collected",
    )
    universal_parser.add_argument(
        "data_dir",
        metavar="Data Directory",
        widget="DirChooser",
        default=stored_args.get("data_dir"),
        help="Input directory containing Intellicage files",
    )
    universal_parser.add_argument(
        "proj_dir",
        metavar="Output Directory",
        widget="DirChooser",
        default=stored_args.get("proj_dir"),
        help="Output directory to save analysis",
    )
    # Universal settings take the shared analysis options as positionals.
    _add_shared_arguments(universal_parser, stored_args, optional=False)

    all_paradigms = {
        "num_visits": "Number of Visits",
        "num_pokes": "Number of Nosepokes",
        "visit_dur": "Visit Duration",
        "poke_dur": "Nosepoke Duration",
        "time_corners": "Time to All Corners",
        "time_pokes": "Time to All Pokes",
        "corner_pref": "Corner Preferences",
        "door_pref": "Door Preference",
        "zig_zag": "Zig Zag Visits",
        "perimeter": "Perimeter Visits",
        "overtake": "Overtake Occurrences",
    }
    # Each paradigm gets the same analysis options as optional overrides.
    for paradigm in all_paradigms:
        paradigm_parser = subs.add_parser(paradigm, prog=all_paradigms[paradigm])
        _add_shared_arguments(paradigm_parser, stored_args, optional=True)

    args = parser.parse_args()
    # Store the values of the arguments so that it is available on next run.
    with open(args_file, "w") as data_file:
        # Using vars(args) returns the data as a dictionary.
        jn.dump(vars(args), data_file)
    return args, all_paradigms
コード例 #14
0
ファイル: niceshare.py プロジェクト: silky/niceshare
def main(use_gooey=False):
    """Parse options and run a gstreamer SRT screen-share pipeline.

    With *use_gooey* the arguments are declared via Gooey widgets; otherwise a
    plain argparse parser (with a compatibility --ignore-gooey no-op flag) is
    used. Builds either a sender (screenshare) or receiver (view) gst-launch
    pipeline and executes it inside a pinned nix-shell environment, unless
    --print-command is given.

    Fixes: the mode dispatch used ``elif args.view is not None`` -- but
    ``--view`` is a store_true flag, so it is always a bool and the test was
    always true; it now checks the flag's value. Also fixed the "Ingored"
    typo in user-facing help text.
    """
    desc = 'GUI for gstreamer-based screen sharing.'

    if use_gooey:
        import gooey
        parser = gooey.GooeyParser(description=desc)
    else:
        parser = argparse.ArgumentParser(description=desc)
        parser.add_argument(
            '--ignore-gooey',
            action='store_true',
            help=
            'Ignored; just for compatibility with Gooey when plain argparse is used.',
        )

    connection_group = parser.add_mutually_exclusive_group(required=True)
    connection_group.add_argument(
        '--listen-port',
        type=str,  # we accept str to support gstreamer SRT URL params
        help=
        'Port number. You likely need to open this in your firewall/NAT. Example: 5000',
    )
    connection_group.add_argument(
        '--call',
        type=str,  # we accept str to support gstreamer SRT URL params
        help='Connect to host:port. Example: localhost:5000',
    )

    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument(
        '--view',
        action='store_true',
        help='Receive video from the other side.',
    )
    # One --screenshare-screen-N flag per detected screen; all share the
    # 'screenshare' dest via store_const.
    for i, screen_size in enumerate(list_screen_sizes()):
        mode_group.add_argument(
            f'--screenshare-screen-{i}',
            metavar=f'Screenshare screen {i}',
            dest='screenshare',
            action='store_const',
            const=screen_size,
            help=screen_size,
        )
    all_screens_size = get_all_screens_size()
    mode_group.add_argument(
        '--screenshare-all',
        metavar='Screenshare all screens',
        dest='screenshare',
        action='store_const',
        const=all_screens_size,
        help=all_screens_size,
    )

    mode_group.add_argument(
        '--screenshare-rectangle',
        metavar="Screenshare custom rectangle",
        dest='screenshare_rectangle',
        help='Format: WxH+OFFSET_X,OFFSET_Y. Example: 1920x1080+0,0',
        **({} if not use_gooey else {
            'gooey_options': {
                'validator': {
                    'test':
                    f"__import__('re').match(r'{screenshare_argument_regex}', user_input)",  # gets eval()d
                    'message':
                    'Must be of format WIDTHxHEIGHT+OFFSET_X,OFFSET_Y',
                },
            },
        }),
    )

    parser.add_argument(
        '--bitrate',
        type=int,
        default=2048,
        help='Bitrate in KBit/s',
    )

    parser.add_argument(
        # TODO: Enable by default when this crash is fixed: https://github.com/Haivision/srt/issues/1594
        '--fec',
        action='store_true',
        default=False,
        help=
        'Forward Error Correction costs more bandwidth but helps with packet loss. Both sides must use the same value.',
    )

    parser.add_argument(
        '--latency',
        type=int,
        default=1000,
        help=
        'Acceptable latency in milliseconds. The video transmission will have that much delay. Too small values will result in corruption artifacts. Should be 4x the ping time to the destination.',
    )

    parser.add_argument(
        '--fps',
        type=int,
        default=30,
        help='Frames per second.',
    )

    parser.add_argument('--passphrase',
                        type=str,
                        help='Encrypt traffic with this passphrase',
                        **({} if not use_gooey else {
                            'widget': 'PasswordField',
                        }))

    parser.add_argument(
        '--print-command',
        action='store_true',
        default=False,
        help='Only print the command, do not run it.',
    )

    args = parser.parse_args()

    # Build the SRT URI: either listen on a local port or resolve and call a
    # remote peer. One of the two is guaranteed by the required group above.
    if args.listen_port:
        uri = 'srt://:' + args.listen_port
    elif args.call:
        hostname, port = args.call.split(':')
        ip = socket.gethostbyname(hostname)
        uri = f'srt://{ip}:{port}'

    # Cannot use `dest='screenshare` for that flag because then Gooey renders
    # the validation error into the wrong place; thus translate it manually.
    if args.screenshare_rectangle:
        args.screenshare = args.screenshare_rectangle

    if args.screenshare is not None:
        # Sender: capture the requested screen region, encode with x264 at
        # the requested bitrate/fps, and stream over SRT.
        width, height, startx, starty = parse_screenshare_argument(
            args.screenshare)
        endx = startx + width - 1
        endy = starty + height - 1
        gst_launch_args = [
            'gst-launch-1.0',
            f'ximagesrc startx={startx} endx={endx} starty={starty} endy={endy} show-pointer=true use-damage=0',
            '! queue',
            '! videoconvert',
            '! clockoverlay',
            f'! x264enc tune=zerolatency speed-preset=fast bitrate={args.bitrate} threads=1 byte-stream=true key-int-max=60 intra-refresh=true',
            f'! video/x-h264, profile=baseline, framerate={args.fps}/1',
            '! mpegtsmux',
            '! queue',
            f'! srtsink uri={uri} latency={args.latency} ' + ' '.join(
                concat_lists([
                    [
                        'packetfilter=fec,cols:3,rows:-3,layout:staircase,arq:always'
                    ] if args.fec else [],
                    [f'passphrase={args.passphrase}']
                    if args.passphrase else [],
                ])),
        ]
    elif args.view:
        # Receiver: pull the SRT stream, demux/decode h264 and display it.
        gst_launch_args = [
            'gst-launch-1.0',
            f'srtsrc uri={uri} ' + ' '.join(
                concat_lists([
                    ['packetfilter=fec'] if args.fec else [],
                    [f'passphrase={args.passphrase}']
                    if args.passphrase else [],
                ])),
            '! queue',
            '! tsdemux',
            '! h264parse',
            '! video/x-h264',
            '! avdec_h264',
            '! autovideosink sync=false',
        ]

    # Run the pipeline inside a pinned nixpkgs shell so the gstreamer plugin
    # set is reproducible on any machine with nix installed.
    quoted_in_nix_shell_command = shlex.quote(' '.join(gst_launch_args))
    command = ' '.join([
        'NIX_PATH=nixpkgs=https://github.com/nh2/nixpkgs/archive/6dc03726f61868c0b8020e9ca98ac71972528d8f.tar.gz',
        'nix-shell',
        '-p gst_all_1.gstreamer',
        '-p gst_all_1.gst-plugins-good',
        '-p gst_all_1.gst-plugins-base',
        '-p gst_all_1.gst-plugins-bad',
        '-p gst_all_1.gst-plugins-ugly',
        '-p gst_all_1.gst-libav',
        f'--run {quoted_in_nix_shell_command}',
    ])

    cli_flags = ' '.join(a for a in sys.argv
                         if a not in ['--ignore-gooey', '--print-command'])
    print(f'\nYour CLI flags:\n\n{cli_flags}\n')
    print(f'\nYour gstreamer invocation:\n\n{command}\n')

    if not args.print_command:
        subprocess.run(command, shell=True)
コード例 #15
0
def main(argv=None):
    """Rename all files/directories under the input path from unicode to ASCII.

    Parses command-line arguments (or *argv* if supplied, either as a list or
    as a single shell-style string), then walks the tree bottom-up and renames
    any name containing non-ASCII characters to its transliterated ASCII
    equivalent, also stripping quote characters. Returns 0 on success.

    NOTE(review): this code uses the Python-2-era names ``unicode`` and
    ``_str`` plus the external helpers ``fullpath``, ``recwalk`` and
    ``unidecode``; they are presumably defined/aliased elsewhere in the file
    -- confirm before running under Python 3.
    """
    if argv is None:  # if argv is empty, fetch from the commandline
        argv = sys.argv[1:]
    elif isinstance(
            argv, _str
    ):  # else if argv is supplied but it's a simple string, we need to parse it to a list of arguments before handing to argparse or any other argument parser
        argv = shlex.split(argv)  # Parse string just like argv using shlex

    #==== COMMANDLINE PARSER ====

    #== Commandline description
    desc = '''Ascii Path Renamer v%s
Description: Rename all directories/files names from unicode (ie, accentuated characters) to ascii.

Note: use --gui (without any other argument) to launch the experimental gui (needs Gooey library).
    ''' % __version__
    ep = ''' '''

    #== Commandline arguments
    #-- Constructing the parser
    # Use GooeyParser if we want the GUI because it will provide better widgets
    if len(argv) > 0 and (argv[0] == '--gui' and
                          not '--ignore-gooey' in argv):  # pragma: no cover
        # Initialize the Gooey parser
        main_parser = gooey.GooeyParser(
            add_help=True,
            description=desc,
            epilog=ep,
            formatter_class=argparse.RawTextHelpFormatter)
        # Define Gooey widget types explicitly (because type auto-detection doesn't work quite well)
        widget_dir = {"widget": "DirChooser"}
        widget_filesave = {"widget": "FileSaver"}
        widget_file = {"widget": "FileChooser"}
        widget_text = {"widget": "TextField"}
    else:  # Else in command-line usage, use the standard argparse
        # Delete the special argument to avoid unrecognized argument error in argparse
        if len(argv) > 0 and '--ignore-gooey' in argv[0]:
            argv.remove(
                '--ignore-gooey'
            )  # this argument is automatically fed by Gooey when the user clicks on Start
        # Initialize the normal argparse parser
        main_parser = argparse.ArgumentParser(
            add_help=True,
            description=desc,
            epilog=ep,
            formatter_class=argparse.RawTextHelpFormatter)
        # Define dummy dict to keep compatibile with command-line usage
        widget_dir = {}
        widget_filesave = {}
        widget_file = {}
        widget_text = {}
    # Required arguments
    main_parser.add_argument(
        '-i',
        '--input',
        metavar='/some/path',
        type=str,
        required=True,
        help='Path to the input folder. The renaming will be done recursively.',
        **widget_dir)

    # Optional general arguments
    main_parser.add_argument('-v',
                             '--verbose',
                             action='store_true',
                             required=False,
                             default=False,
                             help='Verbose mode (show more output).')

    #== Parsing the arguments
    args = main_parser.parse_args(argv)  # Storing all arguments to args

    #-- Set variables from arguments
    inputpath = fullpath(args.input)
    rootfolderpath = inputpath
    verbose = args.verbose

    # -- Sanity checks
    if os.path.isfile(
            inputpath
    ):  # if inputpath is a single file (instead of a folder), then define the rootfolderpath as the parent directory (for correct relative path generation, else it will also truncate the filename!)
        rootfolderpath = os.path.dirname(inputpath)

    if not os.path.isdir(rootfolderpath):
        raise NameError(
            'Specified input path does not exist. Please check the specified path'
        )

    #### Main program
    print("== Ascii Path Renamer started ==")
    print("Renaming from root path %s" % rootfolderpath)
    count_files = 0
    count_renamed_files = 0
    for dirpath, filename in recwalk(
            unicode(rootfolderpath), folders=True, topdown=False
    ):  # IMPORTANT: need to supply a unicode path to os.walk in order to get back unicode filenames! Also need to walk the tree bottom-up (from leaf to root), else if we change the directories names before the dirs/files they contain, we won't find them anymore!
        count_files += 1
        if verbose:
            print("\n- Processing file %s\n" % os.path.join(dirpath, filename))
        # convert unicode string to ascii (ie, convert accentuated characters to their non-accentuated counterparts)
        ascii_filename = unidecode(filename)
        # strip quotes and double quotes
        remove_chars = '\'"`'
        for c in remove_chars:
            ascii_filename = ascii_filename.replace(c, '')
        # check that the filename/directory was not already ascii only, if not, we rename it
        if ascii_filename != filename and ascii_filename:
            if verbose:
                print("- Renaming non-ascii file/dir %s to %s\n" %
                      (filename, ascii_filename))
            shutil.move(os.path.join(dirpath, filename),
                        os.path.join(dirpath, ascii_filename))
            count_renamed_files += 1
        sys.stdout.write("\r%i files/folders done." % count_files)
    print("\nAscii renaming is done, %i files/dirs renamed. Quitting now." %
          count_renamed_files)

    return 0
コード例 #16
0
def interface():
    """
    GUI/CLI entry point: parse scheduler options with Gooey, then run main().

    The triple-quoted text below is sample input kept by the author for
    reference — `ra_data` maps each RA's initials to the weekday
    abbreviations on which they are available:

    ra_data = {"AA": ["mon", "sun", "tue"], "AV": ["sun", "tue"], "DS": ["Sun", "Mon", ], "KJ": ["wed", "thr", "Tue",
    ], "KH": ["Mon", "wed", "Thr", ], "BS": ["Wed", "Thr", "mon", ], "MF": ["wed", "sun", "tue"]}
    total_weeks = 16
    trials = 100
    """  # make data object global for reference

    # GooeyParser renders these arguments as a GUI form when run under Gooey.
    parser = gooey.GooeyParser()
    parser.add_argument(
        "--ra_data",
        dest='ra_data',
        type=str,
        help=
        "ra data needs to be formatted as a dictionary with each entry being the name or "
        "initials followed by the days they are available using three letter abbreviations "
        """ "AA": ["mon", "sun", "tue"], "AV": ["sun", "tue"], ... """
        "sun, mon, tue, wed, thr, fri, sat")
    parser.add_argument(
        '-w',
        dest="total_time",
        type=str,
        required=False,
        help="The number of weeks that the scheduler needs to print out",
        default='4')
    parser.add_argument(
        '-t',
        dest="trials",
        type=str,
        required=False,
        help="Increase this if you are having trouble getting good results",
        default='50')
    parser.add_argument(
        '-s',
        dest="load_save",
        required=False,
        help="If you would like to run the same as the last run set to true",
        type=str,
        default='False')
    # NOTE(review): type=str but default is the bool False, so the default and a
    # user-supplied value have different types; any non-empty string (even
    # "false") is truthy in the check below — confirm intended semantics.
    parser.add_argument('--off',
                        dest="day_off",
                        required=False,
                        help="if each person needs a day off set to true",
                        type=str,
                        default=False)

    # convert arguments from strings into their type
    args = parser.parse_args()
    # NOTE(review): the parsed --ra_data value is unconditionally overwritten
    # with this hard-coded test string, so the CLI/GUI field has no effect —
    # looks like leftover debug code; confirm before shipping.
    args.ra_data = """ {"AA": ["sat"], "AV": ["sun", "tue"], "DS": ["Sun", "Mon", ]} """
    # SECURITY: eval() executes arbitrary code if ra_data ever comes from user
    # input again — ast.literal_eval would parse the same dict literal safely.
    ra_data = eval(args.ra_data)
    assert type(ra_data) is dict
    weeks = int(args.total_time)
    trials = int(args.trials)
    if args.day_off:
        # day_off_flag is a module-level global read elsewhere in the file.
        global day_off_flag
        day_off_flag = True
    if args.load_save.lower() == "true":
        # Replay mode: load the previous run's data instead of scheduling.
        with open('save.json', 'r') as f:
            load_data = json.load(f)  # needs to add additional load logic
            print(load_data)
            return
    # pack information into data object
    main(ra_data, weeks, trials)
    # NOTE(review): `data` is not defined in this function — presumably a
    # module-level global populated by main(); verify, else this raises NameError.
    with open('save.json', 'w') as f:
        json.dump(data, f)  # needs to add additional load logic