Code example #1
File: __main__.py Project: pombredanne/DiffUtils-1
def do_patch(patch_file: Path,
             original: Path,
             output: Path,
             context_size=5,
             force=False):
    original_lines = []
    patch_lines = []
    with open(patch_file, 'rt') as f:
        for line in f:
            patch_lines.append(line.rstrip("\r\n"))
    patch = parse_unified_diff(patch_lines)
    patch_lines = None  # Free
    with open(original, 'rt') as f:
        for line in f:
            original_lines.append(line.rstrip("\r\n"))
    try:
        result_lines = patch.apply_to(original_lines)
    except PatchFailedException as e:
        raise CommandError(str(e)) from None
    try:
        with open(output, 'wt' if force else 'xt') as f:
            for line in result_lines:
                f.write(line)
                f.write('\n')
    except FileExistsError:
        raise CommandError("Output file already exists: {}".format(output))
Code example #2
File: __main__.py Project: FountainMC/TacoFountain
def setup(force=False):
    """Setup the development environment, re-applying all the Paper and TacoSpigot patches."""
    unshaded_tacospigot()
    WORK_DIR.mkdir(exist_ok=True)
    repository = Path(ROOT_DIR, "TacoSpigot")
    if not repository.exists():
        raise CommandError("TacoSpigot repository not found!")
    tacospigot_jar = Path(repository, "build", "TacoSpigot-illegal.jar")
    cacheInfo = CacheInfo() if force else CacheInfo.load()
    current_commit = current_tacospigot_commit()
    if tacospigot_jar.exists() and cacheInfo.lastBuiltTacoSpigot == current_commit:
        print("Reusing cached TacoSpigot jar")
    else:
        print("---- Cleaning TacoSpigot")
        run(["bash", "clean.sh"], cwd=repository, check=True)
        print("---- Compiling TacoSpigot")
        run(["bash", "build-illegal.sh"], cwd=repository, check=True)
        cacheInfo.lastBuiltTacoSpigot = current_commit
        cacheInfo.save()
    if not FORGE_FERNFLOWER_JAR.exists():
        print("---- Compiling forge fernflower")
        compile_forgeflower()
    version = minecraft_version()
    mojang_jar = Path(PAPER_WORK_DIR, version, f"{version}-mapped.jar")
    if not mojang_jar.exists():
        raise CommandError(f"Missing mojang jar for {version}: {mojang_jar}")
    decompile_sources(version, mojang_jar)
    remap_source()
Code example #3
File: __init__.py Project: FountainMC/TacoFountain
def resolve_maven_dependenices(dependencies, repos=MAVEN_REPOSITORIES):
    classpath = []
    remotes = ",".join(f"{name}::default::{url}"
                       for name, url in repos.items())
    for dependency in dependencies:
        parts = dependency.split(":")
        assert len(parts) == 3, f"Invalid dependency: {dependency}"
        groupId, artifactId, version = parts
        expected_location = Path(LOCAL_REPOSITORY, groupId.replace('.', '/'),
                                 artifactId, version,
                                 f"{artifactId}-{version}.jar")
        if not expected_location.exists():
            print(f"Downloading {dependency}")
            try:
                command = [
                    "mvn", "dependency:get", "-q", f"-DgroupId={groupId}",
                    f"-DartifactId={artifactId}", f"-Dversion={version}",
                    f"-DremoteRepositories={remotes}"
                ]
                run(command, check=True)
                if not expected_location.exists():
                    raise CommandError(
                        f"Unable to download {dependency} to {expected_location}"
                    )
            except CalledProcessError:
                raise CommandError(f"Maven failed to download {dependency}")
        assert expected_location.exists()
        classpath.append(expected_location)
    return classpath
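A minimal sketch of the inputs the resolver above expects: coordinates in groupId:artifactId:version form and a name-to-URL repository mapping (the values here are illustrative, not taken from the project):

MAVEN_REPOSITORIES = {"central": "https://repo.maven.apache.org/maven2"}

# Downloads the jar into the local repository on first use, then reuses it
classpath = resolve_maven_dependenices(["com.google.guava:guava:21.0"])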
Code example #4
File: __init__.py Project: FountainMC/TacoFountain
def regenerate_unfixed_sources():
    unfixed_sources = Path(WORK_DIR, "unfixed")
    decompiled_sources = Path(WORK_DIR, minecraft_version(), "decompiled")
    if unfixed_sources.exists():
        print("---- Removing existing unfixed sources")
        shutil.rmtree(unfixed_sources)
    server_repo = Path(Path.cwd(), "TacoSpigot", "TacoSpigot-Server")
    if not server_repo.exists():
        raise CommandError("Couldn't find TacoSpigot-Server")
    tacospigot_sources = Path(server_repo, "src", "main", "java")
    if not tacospigot_sources.exists():
        raise CommandError("Couldn't find TacoSpigot sources!")
    mojang_sources = Path(PAPER_WORK_DIR, minecraft_version())
    if not mojang_sources.exists():
        raise CommandError("Couldn't find mojang sources!")
    print("---- Copying original sources from TacoSpigot")
    shutil.copytree(tacospigot_sources, unfixed_sources)
    # Copy the decompiled sources that aren't already in TacoSpigot
    # This makes it so we don't have to depend on the mojang server fat jar,
    # giving us complete control over our dependencies.
    # Make sure to use the ones decompiled with forge fernflower, or they won't work
    print(
        "---- Copying remaining sources from forge fernflower decompiled mojang jar"
    )
    decompiled_nms_sources = Path(decompiled_sources, "net/minecraft/server")
    unfixed_nms_sources = Path(unfixed_sources, "net/minecraft/server")
    assert decompiled_nms_sources.exists()
    for f in decompiled_nms_sources.iterdir():
        assert not f.is_dir(), f"Unexpected directory: {f}"
        unfixed_file = Path(unfixed_nms_sources, f.name)
        if not unfixed_file.exists():
            shutil.copy2(f, unfixed_file)
Code example #5
def determine_bukkit_classpath(force=False):
    """Parse the craftbukkit pom to determine their classpath, reusing cached info if possible"""
    current_commit = current_tacospigot_commit()
    cache = CacheInfo.load()
    if not force and cache.bukkitClasspathCommit == current_commit:
        result = cache.bukkitClasspath
        assert result, f"Unexpected cached result: {result}"
        return result
    print("---- Recomputing bukkit classpath", file=stderr)
    try:
        proc = run(["mvn", "dependency:tree", "-B"],
                   check=True,
                   cwd="TacoSpigot",
                   stdout=PIPE,
                   stderr=PIPE,
                   encoding='utf-8')
    except CalledProcessError as e:
        error_lines = e.stderr.splitlines()
        if not error_lines:
            error_lines = e.stdout.splitlines()
        print("Error running mvn dependency tree:", file=stderr)
        for line in error_lines:
            print(line, file=stderr)
        raise CommandError("Error running mvn dependency tree")
    start_pattern = re.compile(r"maven-dependency-plugin:.*:tree")
    artifact_pattern = re.compile(r"(.*):(.*):(\w+):([^:]+)(?::(.*))?")
    end_marker = '-' * 10
    output = iter(proc.stdout.splitlines())
    result = []
    for line in output:
        if start_pattern.search(line) is None:
            continue
        while True:
            try:
                line = next(output)
            except StopIteration:
                raise CommandError(
                    "Unexpected end of output parsing maven dependency tree")
            if end_marker in line:
                break
            assert line.startswith('[INFO]'), f"Unexpected line: {line}"
            line = line.lstrip("[INFO]").lstrip(r'\|-+ ').strip()
            if not line:
                continue  # Ignore blank lines
            match = artifact_pattern.match(line)
            assert match, f"Unexpected line: {line}"
            groupId, artifactId, classifier, version, scope = match.groups()
            if classifier == "pom":
                continue
            assert classifier == "jar", f"Unexpected classifier: {classifier}"
            assert scope in (None, "runtime", "compile", "test",
                             "provided"), f"Unknown scope {scope} in {line}"
            if scope in ("compile", None) and 'tacospigot' not in groupId:
                result.append(f"{groupId}:{artifactId}:{version}")
    assert result, f"Unexpected result: {result}"
    cache.bukkitClasspath = result
    cache.bukkitClasspathCommit = current_commit
    cache.save()
    return tuple(result)
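To illustrate the parsing step, the artifact pattern above splits a typical mvn dependency:tree coordinate into the five groups the loop unpacks (the coordinate below is made up):

import re

artifact_pattern = re.compile(r"(.*):(.*):(\w+):([^:]+)(?::(.*))?")
match = artifact_pattern.match("org.yaml:snakeyaml:jar:1.19:compile")
assert match.groups() == ("org.yaml", "snakeyaml", "jar", "1.19", "compile")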
Code example #6
def overlay(targetVersion=None,
        logLevel='warning',
        logFormat='%(levelname)s:%(name)s (at %(asctime)s): %(message)s',
        **kwargs):
    '''
    Overlay a schema on top of the currently installed one.

    Overlaying is a non-destructive, backwards-compatible first-step for
    schema migration. After overlaying, a superimposition of the current
    and target schemas will be installed. This is useful when both versions
    need to be simultaneously supported. When the previous version is no
    longer needed, perform a "trim" operation on the database.

    This will call the application's target version's overlay function.
    '''
    logging.basicConfig(level=getattr(logging, logLevel.upper(),
        logging.NOTSET), format=logFormat)

    # Create default connection
    loop.run_until_complete(init())

    currentVersion = getSchemaVersion()

    if currentVersion is None:
        raise CommandError('It seems there is no schema ' +
            'currently installed. You can install a schema with ' +
            'the "install" command.')
    elif currentVersion == targetVersion:
        raise CommandError('Schema version ' + currentVersion
                + ' is already installed')

    _logger.info('Current schema version = %s', currentVersion)

    # Default target version is the latest one available
    if targetVersion is None:
        targetVersion = schemaVersions[-1]

    # Get module for target version
    targetVersMod = importlib.import_module(
            f'.v{targetVersion}', 'CureCompanion.mariadb')

    # Make sure it has an overlay function
    if not hasattr(targetVersMod, 'overlay'):
        raise CommandError('Version ' + targetVersion +
                ' does not support the overlay operation.')

    # Delegate to appropriate overlay function
    targetVersMod.overlay(fromVersion=currentVersion, **kwargs)
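The final delegation implies a contract for each version module. A minimal sketch, under the assumption that modules live at CureCompanion.mariadb.vN, of what such a module exposes for overlay, trim, and upgrade to find:

# Hypothetical module CureCompanion/mariadb/v2.py
previousVersion = '1'  # the version that trim removes by default

def overlay(fromVersion, **kwargs):
    """Install the version-2 tables alongside the version-1 schema."""

def trim(trimVersion):
    """Drop the tables that belong to the obsolete trimVersion."""

def upgrade(fromVersion):
    """Migrate all data from fromVersion into the version-2 schema."""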
Code example #7
def trim(referenceVersion=None, trimVersion=None,
        logLevel='warning',
        logFormat='%(levelname)s:%(name)s (at %(asctime)s): %(message)s'):
    '''
    Trim the database to remove any data from a previous schema version
    that is now irrelevant to the reference schema version and thus
    obsolete.  This operation should be performed once the previous schema
    version no longer needs to be supported.

    This will call the application's reference version's trim function. If
    no reference version is specified, this will trim with reference to the
    currently installed schema version.
    '''
    logging.basicConfig(level=getattr(logging, logLevel.upper(),
        logging.NOTSET), format=logFormat)

    # Create default connection
    loop.run_until_complete(init())

    if referenceVersion is None:
        referenceVersion = getSchemaVersion()

    if referenceVersion is None:
        raise CommandError('It seems there is no schema ' +
            'currently installed.')

    # Get module for reference version
    refVersMod = importlib.import_module(
            f'.v{referenceVersion}', 'CureCompanion.mariadb')

    # Make sure it has a trim function
    if not hasattr(refVersMod, 'trim'):
        raise CommandError('Version ' + referenceVersion + ' does not ' +
            'support the trim operation.')

    # Set default trim version
    if trimVersion is None:
        trimVersion = refVersMod.previousVersion

    # Confirm with user
    response = askYesOrNoQuestion('Trim is a destructive and ' +
        'irreversible operation. Are you sure you want to proceed?!')

    if response == 'y':
        # Delegate to appropriate trim function
        refVersMod.trim(trimVersion)
    else:
        _logger.info('Trim not performed (whew!)')
Code example #8
def getSchemaVersion(metadataTableName='metadata'):
    '''
    Get currently installed database schema version.
    '''
    currentVersion = None
    with closing(connection.cursor()) as cursor:
        try:
            cursor.execute('SELECT * FROM ' + metadataTableName +
                    ' WHERE attribute = %s', ('version',))
        except pymysql.ProgrammingError as e:
            # 1146 == table does not exist
            if e.args[0] == 1146:  # pylint: disable=no-else-return
                # Version 1 tables don't exist either, so it is most
                # likely that no schema is installed
                return None
            else:
                raise
        else:
            row = cursor.fetchone()
            if not row:
                raise CommandError('Could not read current ' +
                    'database version')
            currentVersion = row['value']

    return currentVersion
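The query above assumes an attribute/value metadata table read through a dictionary cursor (hence row['value']). A sketch of the table an installer would have to create, under those assumptions:

CREATE_METADATA_SQL = '''
CREATE TABLE metadata (
    attribute VARCHAR(64) NOT NULL PRIMARY KEY,
    value     VARCHAR(255) NOT NULL
)
'''
# populated with e.g. INSERT INTO metadata (attribute, value) VALUES ('version', '2')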
Code example #9
File: __init__.py Project: FountainMC/TacoFountain
def run_fernflower(classes: Path,
                   output: Path,
                   libraries=(),
                   verbose=True,
                   options=FERNFLOWER_OPTIONS):
    assert classes.is_dir(), f"Classes don't exist: {classes}"
    assert not output.exists(), f"Output already exists: {output}"
    assert FORGE_FERNFLOWER_JAR.exists(), f"Fernflower jar doesn't exist: {FORGE_FERNFLOWER_JAR}"
    output.mkdir(parents=True)
    command = ["java", "-jar", str(FORGE_FERNFLOWER_JAR)]
    for key, value in options.items():
        if isinstance(value, bool):
            value = "1" if value else "0"
        elif not isinstance(value, str):
            raise TypeError(f"Unexpected option type: {type(value)}")
        command.append(f"-{key}={value}")
    for library in libraries:
        if isinstance(library, Path):
            library = str(library)
        elif not isinstance(library, str):
            raise TypeError(f"Unexpected library type: {type(library)}")
        command.append(f"-e={library}")
    command.extend((str(classes), str(output)))
    # NOTE: Use popen so we can output info while running
    with Popen(command, encoding='utf-8', stdout=PIPE, stderr=PIPE) as proc:
        while proc.poll() is None:
            line = proc.stdout.readline().rstrip("\r\n")
            print(line)
        if proc.wait() != 0:
            error_message = proc.stderr.read().splitlines()
            shutil.rmtree(output)  # Cleanup partial output
            raise CommandError(["Error running fernflower:", *error_message])
Code example #10
def main(input_filename):
    if not exists(input_filename):
        raise CommandError(f'file {input_filename} does not exist')

    from aktash.utils import file_info, FILE_INFO_DESC
    for k, v in file_info(input_filename).items():
        print(f'{FILE_INFO_DESC[k]}: {v}')
Code example #11
def get_selected_services(args):
    if args.all:
        return ALL_SERVICES
    elif args.only:
        return args.only
    else:
        raise CommandError("Supply either --all or --only options")
Code example #12
def upgrade(targetVersion=None, logLevel='warning',
        logFormat='%(levelname)s:%(name)s (at %(asctime)s): %(message)s'):
    '''
    Perform a schema upgrade. An upgrade means a complete migration of
    data to the target version. It may be safer or more useful to
    wield the overlay-trim duo (as long as database size doesn't become
    an issue).

    This will call the application's target version's upgrade function.
    '''
    logging.basicConfig(level=getattr(logging, logLevel.upper(),
        logging.NOTSET), format=logFormat)

    # Create default connection
    loop.run_until_complete(init())

    currentVersion = getSchemaVersion()

    if currentVersion is None:
        raise CommandError('It seems there is no schema ' +
            'currently installed. You can install a schema with ' +
            'the "install" command.')
    elif currentVersion == targetVersion:
        raise CommandError('Schema version ' + currentVersion
                + ' is already installed')

    _logger.info('Current schema version = %s', currentVersion)

    # Default target version is the latest one available
    if targetVersion is None:
        targetVersion = schemaVersions[-1]

    # Get module for target version
    targetVersMod = importlib.import_module(
            f'.v{targetVersion}', 'CureCompanion.mariadb')

    # Make sure it has an upgrade function
    if not hasattr(targetVersMod, 'upgrade'):
        raise CommandError('Version ' + targetVersion +
                ' does not support ' +
                'the upgrade operation (hint: overlay and trim ' +
                'may be supported).')

    # Delegate to appropriate upgrade function
    targetVersMod.upgrade(fromVersion=currentVersion)
Code example #13
def parse_version_metadata(version):
    versions = load_version_manifest()['versions']
    try:
        return versions[version]
    except KeyError:
        versions = load_version_manifest(refresh=True)['versions']
        try:
            return versions[version]
        except KeyError:
            raise CommandError(f"Missing version: {version}")
Code example #14
File: __init__.py Project: culebron/aktash
def _connect_postgres(path_string):
    from sqlalchemy import create_engine

    if '#' not in path_string:
        raise CommandError(
            'Use this format to read from sql: postgresql://[user[:password]@]hostname[:port]/<db_name>#<table_name or query>.'
        )

    sharp_idx = path_string.index('#')
    engine = create_engine(path_string[:sharp_idx])
    return engine, path_string[sharp_idx + 1:]
Code example #15
File: utils.py Project: operasoftware/sentrycli
def check_required_keys_present(required, dictionary):
    """
    Check if at least one of required values are present in dictionary.
    :type required: list
    :param dictionary: dictionary which should be checked.
    :type: dict
    :raises: argh.CommandError
    """
    if all([dictionary.get(param) is None for param in required]):
        raise CommandError('one of %s has to be specified' %
                           '|'.join(required))
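For example, with two mutually-required options (the argument names are illustrative):

args = {'group_by': None, 'top': None}
check_required_keys_present(['group_by', 'top'], args)
# raises CommandError: one of group_by|top has to be specified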
Code example #16
File: __main__.py Project: FountainMC/TacoFountain
def patch(quiet=False):
    """Applies the patch files to the working directory, overriding any existing work."""
    setup = setup_patching()
    patches, unpatched_sources, patched_sources = setup.patches, setup.unpatched_sources, setup.patched_sources
    print("---- Applying Fountain patches via DiffUtils")
    for patch_root, dirs, files in os.walk(str(patches)):
        for patch_file_name in files:
            patch_file = Path(patch_root, patch_file_name)
            if patch_file.suffix != '.patch':
                raise CommandError(
                    f"Patch file doesn't end with '.patch': {patch_file_name}")
            relative_path = Path(patch_file.parent.relative_to(patches),
                                 patch_file.stem)
            original_file = Path(unpatched_sources, relative_path)
            output_file = Path(patched_sources, relative_path)
            if not original_file.exists():
                raise CommandError(
                    f"Couldn't find original {original_file} for patch {patch_file}!"
                )
            output_file.parent.mkdir(parents=True, exist_ok=True)
            patch_lines = []
            with open(patch_file, 'rt') as f:
                for line in f:
                    patch_lines.append(line.rstrip('\r\n'))
            patch = parse_unified_diff(patch_lines)
            patch_lines = None  # Free
            original_lines = []
            with open(original_file, 'rt') as f:
                for line in f:
                    original_lines.append(line.rstrip('\r\n'))
            try:
                result_lines = patch.apply_to(original_lines)
            except PatchFailedException as e:
                raise CommandError(
                    f"Unable to apply {relative_path}.patch: {e}") from None
            # TODO: Should we be forcibly overriding files here?
            with open(output_file, 'wt') as f:
                for line in result_lines:
                    f.write(line)
                    f.write('\n')
Code example #17
File: __main__.py Project: pombredanne/DiffUtils-1
def patch(patches: Path, original: Path, output: Path, force=False):
    """Applies the specified patches to the original files, producing the revised text"""
    if not patches.exists():
        raise CommandError("Patch file doesn't exist: {}".format(patches))
    if not original.exists():
        raise CommandError("Original file doesn't exist: {}".format(original))
    if patches.is_dir():
        if not original.is_dir():
            raise CommandError(
                "Patches {} is a directory, but original {} is a file!".format(
                    patches, original))
        for patch_root, dirs, files in os.walk(str(patches)):
            for patch_file_name in files:
                patch_file = Path(patch_root, patch_file_name)
                if patch_file.suffix != '.patch':
                    raise CommandError(
                        "Patch file doesn't end with '.patch': {}".format(
                            patch_file_name))
                relative_path = Path(patch_file.parent.relative_to(patches),
                                     patch_file.stem)
                original_file = Path(original, relative_path)
                output_file = Path(output, relative_path)
                if not original_file.exists():
                    raise CommandError(
                        "Couldn't find original {} for patch {}!".format(
                            original_file, patch_file))
                output_file.parent.mkdir(parents=True, exist_ok=True)
                do_patch(patch_file, original_file, output_file, force=force)
    else:
        if not original.is_file():
            raise CommandError(
                "Patches {} is a file, but original {} is a directory!".format(
                    patches, original))
        do_patch(patches, original, output, force=force)
Code example #18
File: __init__.py Project: FountainMC/TacoFountain
def configuration():
    global _configuration
    result = _configuration
    if result is not None:
        # Defensive copy
        return result.copy()
    try:
        with open(Path(ROOT_DIR, "buildData", "config.json")) as f:
            result = json.load(f)
    except FileNotFoundError:
        raise CommandError("Missing config file")
    _configuration = result
    return result.copy()
Code example #19
def unshaded_tacospigot(force=False):
    global _valid_tacospigot_unshaded
    tacospigot_unshaded_jar = Path(WORK_DIR, "jars", "TacoSpigot-unshaded.jar")
    if force or not _valid_tacospigot_unshaded or not tacospigot_unshaded_jar.exists():
        # NOTE: Now we just remap the TacoSpigot jar to undo the shading
        cache = CacheInfo.load()
        current_commit = current_tacospigot_commit()
        if not tacospigot_unshaded_jar.exists() or cache.tacospigotUnshadedCommit != current_commit:
            print("---- Detecting NMS package versioning")
            name_pattern = re.compile(
                r"net/minecraft/server/(\w+)/MinecraftServer.class")
            version_signature = None
            with ZipFile('TacoSpigot/build/TacoSpigot-illegal.jar') as z:
                for name in z.namelist():
                    match = name_pattern.match(name)
                    if match is not None:
                        version_signature = match.group(1)
                        break
            if version_signature is None:
                raise CommandError("Unable to detect NMS package versioning")
            if not SPECIALSOURCE_JAR.exists():
                print("---- Downloading SpecialSource")
                download_file(SPECIALSOURCE_JAR, SPECIALSOURCE_URL)
            print(
                f"---- Reversing TacoSpigot version shading for {version_signature}"
            )
            with NamedTemporaryFile('wt', encoding='utf-8',
                                    prefix='package') as f:
                f.write(
                    f"PK: net/minecraft/server/{version_signature} net/minecraft/server\n"
                )
                f.write(
                    f"PK: org/bukkit/craftbukkit/{version_signature} org/bukkit/craftbukkit\n"
                )
                f.flush()
                run([
                    "java", "-jar",
                    str(SPECIALSOURCE_JAR), "-i",
                    "TacoSpigot/build/TacoSpigot-illegal.jar", "-o",
                    str(tacospigot_unshaded_jar), "-m", f.name
                ],
                    check=True)
            cache.tacospigotUnshadedCommit = current_commit
            cache.save()
        _valid_tacospigot_unshaded = True
    assert tacospigot_unshaded_jar.exists()
    return tacospigot_unshaded_jar
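For a hypothetical NMS version signature such as v1_12_R1, the temporary mapping file handed to SpecialSource above would contain two package-move directives:

PK: net/minecraft/server/v1_12_R1 net/minecraft/server
PK: org/bukkit/craftbukkit/v1_12_R1 org/bukkit/craftbukkit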
Code example #20
def breadcrumbs(pathname, attributes=None, top=None, options=False):
    """
    Analyze and filter event's attributes
    """
    events = load_from_file(pathname)

    if options:
        print_options(events)
        return

    if attributes is None:
        raise CommandError('--attributes argument is mandatory')

    if attributes:
        group_by_attributes(events, attributes, top)
Code example #21
File: __main__.py Project: Techcable/DiffUtils
def do_diff(
    engine: DiffEngine,
    original: Path,
    revised: Path,
    output: Path,
    context_size=5,
    force=False,
):
    original_lines = []
    revised_lines = []
    with open(original, "rt") as f:
        for line in f:
            original_lines.append(line.rstrip("\r\n"))
    with open(revised, "rt") as f:
        for line in f:
            revised_lines.append(line.rstrip("\r\n"))
    result = engine.diff(original_lines, revised_lines)
    if not original.is_absolute():
        original_name = str(original)
    else:
        original_name = str(original.relative_to(Path.cwd()))
    if not revised.is_absolute():
        revised_name = str(revised)
    else:
        revised_name = str(revised.relative_to(Path.cwd()))
    try:
        result_lines = []
        empty = True
        for line in generate_unified_diff(
                original_name,
                revised_name,
                original_lines,
                result,
                context_size=context_size,
        ):
            if empty and line.strip():
                empty = False
            result_lines.append(line)
        if empty:
            return False
        with open(output, "wt" if force else "xt") as f:
            for line in result_lines:
                f.write(line)
                f.write("\n")
        return True
    except FileExistsError:
        raise CommandError("Output file already exists: {}".format(output))
Code example #22
File: __init__.py Project: FountainMC/TacoFountain
def decompile_blacklist():
    """Classes that are broken even with the improved fernflower decompiler"""
    global _cached_decompile_blacklist
    result = _cached_decompile_blacklist
    if result is not None:
        return result
    with open('buildData/decompile_blacklist.json') as f:
        result = frozenset(json.load(f))
    tacospigot_sources = Path("TacoSpigot/TacoSpigot-Server/src/main/java",
                              "net/minecraft/server")
    for value in result:
        tacospigot_file = Path(tacospigot_sources, value + ".java")
        if tacospigot_file.exists():
            raise CommandError(
                f"Blacklisted file exists in TacoSpigot: {value}")
    _cached_decompile_blacklist = result
    return result
Code example #23
def is_included_library(name):
    global include_patterns
    if include_patterns is None:
        from fnmatch import translate
        include_patterns = [
            (re.compile(translate(name)), flag)
            for name, flag in included_server_libraries.items() if '*' in name
        ]
    groupId, artifactId, version = name.split(':')
    identifier = f"{groupId}:{artifactId}"
    try:
        return included_server_libraries[identifier]
    except KeyError:
        for pattern, flag in include_patterns:
            if pattern.match(identifier) is not None:
                return flag
        raise CommandError(
            f"No matching include rule for library: {identifier}")
Code example #24
File: __main__.py Project: pombredanne/DiffUtils-1
def fix_patch(patch_file: Path, original_file: Path, strict=False, context=5):
    """Fixes errors detected in the patch, by leniently parsing it and then re-emitting it"""
    if not patch_file.is_file():
        if patch_file.exists():
            raise CommandError(
                "Patch file is a directory: {}".format(patch_file))
        else:
            raise CommandError(
                "Patch file doesn't exist: {}".format(patch_file))
    if not original_file.is_file():
        if original_file.exists():
            raise CommandError(
                "Original file is a directory: {}".format(original_file))
        else:
            raise CommandError(
                "Original file doesn't exist: {}".format(original_file))
    patch_lines = []
    # TODO: Make a public API for parsing original_name and revised_name
    original_name, revised_name = None, None
    with open(patch_file, 'rt') as f:
        for line in f:
            if original_name is None and line.startswith('---'):
                original_name = line[3:].split()[0]
            elif revised_name is None and line.startswith('+++'):
                revised_name = line[3:].split()[0]
            patch_lines.append(line.rstrip("\r\n"))
    original_lines = []
    with open(original_file, 'rt') as f:
        for line in f:
            original_lines.append(line.rstrip("\r\n"))
    if original_name is None:
        raise CommandError(
            "Unable to detect original file name in {}".format(patch_file))
    elif revised_name is None:
        raise CommandError(
            "Unable to detect revised file name in {}".format(patch_file))
    patch = parse_unified_diff(patch_lines, lenient=not strict)
    with open(patch_file, 'wt') as f:
        for line in generate_unified_diff(original_name,
                                          revised_name,
                                          original_lines,
                                          patch,
                                          context_size=context):
            f.write(line)
            f.write('\n')
Code example #25
def launch(cluster_name,
           slaves,
           key_file=default_key_file,
           env=default_env,
           tag=(),
           key_id=default_key_id,
           region=default_region,
           zone=default_zone,
           instance_type=default_instance_type,
           ondemand=False,
           spot_price=default_spot_price,
           user_data=default_user_data,
           security_group=None,
           vpc=None,
           vpc_subnet=None,
           master_instance_type=default_master_instance_type,
           wait_time='180',
           hadoop_major_version='2',
           worker_instances=default_worker_instances,
           retries_on_same_cluster=5,
           max_clusters_to_create=5,
           minimum_percentage_healthy_slaves=0.9,
           remote_user=default_remote_user,
           script_timeout_total_minutes=55,
           script_timeout_inactivity_minutes=10,
           resume=False,
           just_ignore_existing=False,
           worker_timeout=240,
           spark_repo=default_spark_repo,
           spark_version=default_spark_version,
           spark_ec2_git_repo=default_spark_ec2_git_repo,
           spark_ec2_git_branch=default_spark_ec2_git_branch,
           ami=default_ami,
           master_ami=default_master_ami):

    all_args = locals()

    if cluster_exists(cluster_name, region=region) and not resume:
        if just_ignore_existing:
            log.info('Cluster exists but that is ok')
            return ''
        else:
            raise CommandError(
                'Cluster already exists, pick another name or resume the setup using --resume'
            )

    for j in range(max_clusters_to_create):
        log.info('Creating new cluster {0}, try {1}'.format(
            cluster_name, j + 1))
        success = False
        resume_param = ['--resume'] if resume else []

        auth_params = []
        if security_group:
            auth_params.extend([
                '--authorized-address', '127.0.0.1/32',
                '--additional-security-group', security_group
            ])

        # '--vpc-id', default_vpc,
        # '--subnet-id', default_vpc_subnet,
        if vpc and vpc_subnet:
            auth_params.extend([
                '--vpc-id',
                vpc,
                '--subnet-id',
                vpc_subnet,
            ])

        spot_params = ['--spot-price', spot_price] if not ondemand else []
        ami_params = ['--ami', ami] if ami else []
        master_ami_params = ['--master-ami', master_ami] if master_ami else []

        for i in range(retries_on_same_cluster):
            log.info('Running script, try %d of %d', i + 1,
                     retries_on_same_cluster)
            try:
                call_ec2_script(
                    [
                        '--identity-file', key_file, '--key-pair', key_id,
                        '--slaves', slaves, '--region', region, '--zone', zone,
                        '--instance-type', instance_type,
                        '--master-instance-type', master_instance_type,
                        '--wait', wait_time, '--hadoop-major-version',
                        hadoop_major_version, '--spark-ec2-git-repo',
                        spark_ec2_git_repo, '--spark-ec2-git-branch',
                        spark_ec2_git_branch, '--worker-instances',
                        worker_instances, '--master-opts',
                        '-Dspark.worker.timeout={0}'.format(worker_timeout),
                        '--spark-git-repo', spark_repo, '-v', spark_version,
                        '--user-data', user_data, 'launch', cluster_name
                    ] + spot_params + resume_param + auth_params + ami_params +
                    master_ami_params,
                    timeout_total_minutes=script_timeout_total_minutes,
                    timeout_inactivity_minutes=script_timeout_inactivity_minutes
                )
                success = True
            except subprocess.CalledProcessError as e:
                resume_param = ['--resume']
                log.warn('Failed with: %s', e)
            except Exception as e:
                # Probably a timeout
                log.exception('Fatal error calling EC2 script')
                break
            finally:
                tag_cluster_instances(cluster_name=cluster_name,
                                      tag=tag,
                                      env=env,
                                      region=region)

            if success:
                break

        try:
            if success:
                master = get_master(cluster_name, region=region)
                save_cluster_args(master, key_file, remote_user, all_args)
                health_check(cluster_name=cluster_name,
                             key_file=key_file,
                             master=master,
                             remote_user=remote_user,
                             region=region)
                ssh_call(user=remote_user,
                         host=master,
                         key_file=key_file,
                         args=master_post_create_commands)
                return master
        except Exception as e:
            log.exception(
                'Got exception on last steps of cluster configuration')
        log.warn('Destroying unsuccessful cluster')
        destroy(cluster_name=cluster_name, region=region)
    raise CommandError(
        'Failed to create cluster {} after repeated failures'.format(cluster_name))
Code example #26
File: __main__.py Project: pombredanne/DiffUtils-1
def diff(original: Path,
         revised: Path,
         output: Path,
         ignore_missing=False,
         implementation=None,
         context=5,
         unrestricted=False,
         force=False):
    """Compute the difference between the original and revised text"""
    if not original.exists():
        raise CommandError("Original file doesn't exist: {}".format(original))
    if not revised.exists():
        raise CommandError("Revised file doesn't exist: {}".format(revised))
    try:
        engine = DiffEngine.create(name=implementation)
    except ImportError as e:
        raise CommandError("Unable to import {} implementation!".format(
            implementation)) from e
    if original.is_dir():
        if not revised.is_dir():
            raise CommandError(
                "Original {} is a directory, but revised {} is a file!".format(
                    original, revised))
        for revised_root, dirs, files in os.walk(str(revised)):
            for revised_file_name in files:
                if not unrestricted and revised_file_name.startswith('.'):
                    continue
                revised_file = Path(revised_root, revised_file_name)
                relative_path = revised_file.relative_to(revised)
                original_file = Path(original, relative_path)
                if not original_file.exists():
                    if ignore_missing:
                        continue
                    else:
                        raise CommandError(
                            "Revised file {} doesn't have matching original {}!"
                            .format(revised_file, original_file))
                output_file = Path(output, relative_path.parent,
                                   relative_path.name + ".patch")
                output_file.parent.mkdir(parents=True, exist_ok=True)
                if do_diff(engine,
                           original_file,
                           revised_file,
                           output_file,
                           context_size=context,
                           force=force):
                    print("Computed diff: {}".format(relative_path))
            if not unrestricted:
                hidden_dirs = [d for d in dirs if d.startswith('.')]
                for d in hidden_dirs:
                    dirs.remove(d)
    else:
        if not revised.is_file():
            raise CommandError(
                "Original {} is a file, but revised {} is a directory!".format(
                    original, revised))
        do_diff(engine,
                original,
                revised,
                output,
                context_size=context,
                force=force)
Code example #27
def allaccessimport(playlist=None,
                    username=None,
                    password=None,
                    dry_run=False):
    """
    Exports a Spotify playlist to stdout or csv.
    """

    if not username or not password:
        raise CommandError(
            "Username and password must be provided as either command-line " +
            "argument or in the application configuration file.")

    playlist_name = playlist
    playlist_description = ""
    if playlist:
        playlist_name = os.path.basename(playlist_name)
        playlist_name = os.path.splitext(playlist_name)[0]
    logging.debug("Playlist name will be: {}".format(playlist_name))

    api = Mobileclient(False, False)
    logged_in = api.login(username, password, Mobileclient.FROM_MAC_ADDRESS)
    if not logged_in:
        raise CommandError(
            'Error. Unable to login to Google Music All Access.')

    playlist_ref = None
    currenttracks = None

    failed_tracks = list()
    songs_added = 0
    total = 0

    stream = open(playlist, "r") if playlist else sys.stdin

    for input_line in stream:
        input_line = input_line.strip()

        # Lazily search the beginning of the file for a Playlist name
        if input_line.startswith("#"):
            data = input_line[1:]
            parts = [x.strip() for x in data.split(":", 1)]

            if len(parts) == 2:
                if parts[0] == "Playlist":
                    playlist_name = parts[1]
                elif parts[0] == "Description":
                    playlist_description = parts[1]

            continue

        if not playlist_ref:
            if not playlist_name:
                raise CommandError(
                    "A playlist name was not given and it was not found " +
                    "in the file either. Can't continue.")
            else:
                playlist_ref, currenttracks = get_playlist(api, playlist_name)
                if not playlist_ref and not dry_run:
                    sys.stderr.write('Playlist not found. Creating new.\n')
                    playlist_ref = api.create_playlist(
                        playlist_name, description=playlist_description)
                yield 'Going to update playlist {0} ({1})\n'.format(
                    playlist_name, playlist_ref)

        trackinfo = list(csv.reader([input_line], quoting=csv.QUOTE_ALL))[0]

        if trackinfo[0] == 'Track' and trackinfo[1] == 'Artist':
            yield 'Skipping header.'
            continue

        search_term = "{0} {1}".format(trackinfo[0], trackinfo[1])
        total = total + 1
        newtrackid, error_reason = search_track(api, search_term,
                                                currenttracks)
        if newtrackid:
            if not dry_run:
                #print("Add to {} song {}".format(playlist_ref, newtrackid))
                api.add_songs_to_playlist(playlist_ref, [newtrackid])
            songs_added = songs_added + 1
        else:
            failed_tracks.append(trackinfo)
        sys.stderr.write("Searching {}...{}\n".format(search_term,
                                                      error_reason))

    yield "{0} songs added out of {1}. {2} Failed.".format(
        songs_added, total, total - songs_added)

    yield "Failed tracks:"
    for line in failed_tracks:
        print "  ", line
Code example #28
def get_master(cluster_name, region=default_region):
    masters = get_masters(cluster_name, region=region)
    if not masters:
        raise CommandError("No master on {}".format(cluster_name))
    return masters[0].public_dns_name
Code example #29
def job_run(cluster_name,
            job_name,
            job_mem,
            key_file=default_key_file,
            disable_tmux=False,
            detached=False,
            notify_on_errors=False,
            yarn=False,
            job_user=getpass.getuser(),
            job_timeout_minutes=0,
            remote_user=default_remote_user,
            utc_job_date=None,
            job_tag=None,
            disable_wait_completion=False,
            collect_results_dir=default_collect_results_dir,
            remote_control_dir=default_remote_control_dir,
            remote_path=None,
            master=None,
            disable_assembly_build=False,
            run_tests=False,
            kill_on_failure=False,
            destroy_cluster=False,
            region=default_region):

    utc_job_date_example = '2014-05-04T13:13:10Z'
    if utc_job_date and len(utc_job_date) != len(utc_job_date_example):
        raise CommandError(
            'UTC Job Date should be given as in the following example: {}'.
            format(utc_job_date_example))
    disable_tmux = disable_tmux and not detached
    wait_completion = not disable_wait_completion or destroy_cluster
    master = master or get_master(cluster_name, region=region)

    project_path = get_project_path()
    project_name = os.path.basename(project_path)
    module_name = os.path.basename(get_module_path())
    # Use job user on remote path to avoid too many conflicts for different local users
    remote_path = remote_path or '/home/%s/%s.%s' % (default_remote_user,
                                                     job_user, project_name)
    remote_hook_local = '{module_path}/remote_hook.sh'.format(
        module_path=get_module_path())
    remote_hook = '{remote_path}/remote_hook.sh'.format(
        remote_path=remote_path)
    notify_param = 'yes' if notify_on_errors else 'no'
    yarn_param = 'yes' if yarn else 'no'
    job_date = utc_job_date or datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    job_tag = job_tag or job_date.replace(':', '_').replace('-', '_').replace(
        'Z', 'UTC')
    tmux_wait_command = ';(echo Press enter to keep the session open && /bin/bash -c "read -t 5" && sleep 7d)' if not detached else ''
    tmux_arg = ". /etc/profile; . ~/.profile;tmux new-session {detached} -s spark.{job_name}.{job_tag} '{aws_vars} {remote_hook} {job_name} {job_date} {job_tag} {job_user} {remote_control_dir} {spark_mem} {yarn_param} {notify_param} {tmux_wait_command}' >& /tmp/commandoutput".format(
        aws_vars=get_aws_keys_str(),
        job_name=job_name,
        job_date=job_date,
        job_tag=job_tag,
        job_user=job_user,
        remote_control_dir=remote_control_dir,
        remote_hook=remote_hook,
        spark_mem=job_mem,
        detached='-d' if detached else '',
        yarn_param=yarn_param,
        notify_param=notify_param,
        tmux_wait_command=tmux_wait_command)
    non_tmux_arg = ". /etc/profile; . ~/.profile;{aws_vars} {remote_hook} {job_name} {job_date} {job_tag} {job_user} {remote_control_dir} {spark_mem} {yarn_param} {notify_param} >& /tmp/commandoutput".format(
        aws_vars=get_aws_keys_str(),
        job_name=job_name,
        job_date=job_date,
        job_tag=job_tag,
        job_user=job_user,
        remote_control_dir=remote_control_dir,
        remote_hook=remote_hook,
        spark_mem=job_mem,
        yarn_param=yarn_param,
        notify_param=notify_param)

    if not disable_assembly_build:
        build_assembly()

    assembly_path = get_assembly_path()
    if assembly_path is None:
        raise Exception('Something is wrong: no assembly found')

    ssh_call(user=remote_user,
             host=master,
             key_file=key_file,
             args=['mkdir', '-p', remote_path])

    rsync_call(user=remote_user,
               host=master,
               key_file=key_file,
               src_local=assembly_path,
               remote_path=with_leading_slash(remote_path))

    rsync_call(user=remote_user,
               host=master,
               key_file=key_file,
               src_local=remote_hook_local,
               remote_path=with_leading_slash(remote_path))

    log.info('Will run job in remote host')
    if disable_tmux:
        ssh_call(user=remote_user,
                 host=master,
                 key_file=key_file,
                 args=[non_tmux_arg],
                 allocate_terminal=False)
    else:
        ssh_call(user=remote_user,
                 host=master,
                 key_file=key_file,
                 args=[tmux_arg],
                 allocate_terminal=True)

    if wait_completion:
        failed = False
        failed_exception = None
        try:
            wait_for_job(cluster_name=cluster_name,
                         job_name=job_name,
                         job_tag=job_tag,
                         key_file=key_file,
                         master=master,
                         region=region,
                         job_timeout_minutes=job_timeout_minutes,
                         remote_user=remote_user,
                         remote_control_dir=remote_control_dir,
                         collect_results_dir=collect_results_dir)
        except JobFailure as e:
            failed = True
            failed_exception = e
            log.warn('Job failed with: {}'.format(e))
        except NotHealthyCluster as e:
            failed = True
            failed_exception = e
            log.warn('Job is running but cluster is unhealthy: {}'.format(e))
        except Exception as e:
            failed = True
            failed_exception = e
            log.exception('Unexpected exception while waiting for job')
        if failed and kill_on_failure:
            log.info('Trying to kill failed job...')
            try:
                kill_job(cluster_name=cluster_name,
                         job_name=job_name,
                         job_tag=job_tag,
                         key_file=key_file,
                         master=master,
                         remote_user=remote_user,
                         region=region,
                         remote_control_dir=remote_control_dir)
                log.info('Killed!')
            except Exception as e:
                log.exception(
                    "Failed to kill failed job (probably it's already dead)")
        if destroy_cluster:
            log.info('Destroying cluster as requested')
            destroy(cluster_name, region=region)
        if failed:
            raise failed_exception or Exception('Failed!?')
    return (job_name, job_tag)
Code example #30
File: cli.py Project: afcarl/Henson
def _import_application(application_path):
    """Return the imported application and the path to it.

    Args:
        application_path (str): The path to use to import the
            application. It should be in the form of ``PATH[:APP]``.

    Returns:
        Tuple[str, henson.base.Application]: A two-tuple containing the
            import path and the imported application.

    """
    # Add the present working directory to the import path so that
    # services can be found without installing them to site-packages
    # or modifying PYTHONPATH
    sys.path.insert(0, '.')

    # First, find the module that should be imported
    application_path_parts = application_path.split(':', 1)
    import_path = application_path_parts.pop(0)

    # Then, try to find an import loader for the import_path
    # NOTE: this is to handle the case where a module is found but not
    # importable because of dependency import errors (Python 3 only)
    if not find_loader(import_path):
        raise CommandError(
            'Unable to find an import loader for {}.'.format(import_path), )

    # Once found, import the module and handle any dependency errors
    # TODO: Wrap the ImportError raised here to provide more meaningful
    # error messages to the end user
    module = import_module(import_path)

    # If an application name is specified, use that to select the
    # application instance
    try:
        app_name = application_path_parts.pop()
        # TODO: Wrap the AttributeError raised here to provide more
        # meaningful error messages to the end user
        app = getattr(module, app_name)
        # If the attribute specified by app_name is a callable, assume
        # it is an application factory and call it to get an instance of
        # a Henson application.
        if callable(app):
            app = app()
        # Fail if the attribute specified is not a Henson application
        if not isinstance(app, Application):
            raise CommandError(
                'app must be an instance of a Henson application. '
                'Got {}'.format(type(app)), )

    # If no application name is specified, try to automatically select
    # the correct module attribute based on type
    except IndexError:
        app_candidates = []
        for name in dir(module):
            attr = getattr(module, name)
            if isinstance(attr, Application):
                app_candidates.append((name, attr))

        # If there are zero app_candidates, there's nothing to run.
        if not app_candidates:
            raise CommandError(
                'No Henson application found. Please specify the '
                'application by name or run a different module.', )

        # If there are more than one, the choice of which app to run is
        # ambiguous.
        if len(app_candidates) > 1:
            raise CommandError(
                'More than one Henson application found in {}. Please '
                'specify an application by name (probably one of [{}]).'.format(
                    import_path, ', '.join(ac[0] for ac in app_candidates)), )

        app_name, app = app_candidates[0]

    return import_path, app
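Hypothetical invocations, assuming a module my_service that defines a Henson Application named app (or a factory function create_app):

_import_application('my_service')             # auto-detects the single Application
_import_application('my_service:app')         # selects the attribute explicitly
_import_application('my_service:create_app')  # callables are treated as factories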