Example #1
def get_snap_channel(config, task):
    if "channel" in task["payload"]:
        channel = task["payload"]["channel"]
        scope = SNAP_SCOPES_PREFIX + channel.split("/")[0]
        if config["push_to_store"] and scope not in task["scopes"]:
            raise TaskVerificationError(
                f"Channel {channel} not allowed, missing scope {scope}")
    else:
        scope = get_single_item_from_sequence(
            task["scopes"],
            lambda scope: scope.startswith(SNAP_SCOPES_PREFIX),
            ErrorClass=TaskVerificationError,
            no_item_error_message="No scope starts with {}".format(
                SNAP_SCOPES_PREFIX),
            too_many_item_error_message="Too many scopes start with {}".format(
                SNAP_SCOPES_PREFIX),
        )
        channel = scope[len(SNAP_SCOPES_PREFIX):]
        channel = "esr/stable" if channel == "esr" else channel

    if channel not in ALLOWED_CHANNELS:
        raise TaskVerificationError(
            'Channel "{}" is not allowed. Allowed ones are: {}'.format(
                channel, ALLOWED_CHANNELS))

    return channel
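A minimal usage sketch. The constant values below are assumptions for illustration; the real SNAP_SCOPES_PREFIX and ALLOWED_CHANNELS live in the module that defines this function:

SNAP_SCOPES_PREFIX = "project:releng:snapcraft:firefox:"  # assumed value
ALLOWED_CHANNELS = ("beta", "candidate", "esr/stable")    # assumed value

config = {"push_to_store": True}
task = {
    "payload": {"channel": "beta"},
    "scopes": ["project:releng:snapcraft:firefox:beta"],
}
assert get_snap_channel(config, task) == "beta"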
Example #3
def _get_play_config(context, android_product):
    try:
        accounts = context.config['google_play_accounts']
    except KeyError:
        raise TaskVerificationError('"google_play_accounts" is not part of the configuration')

    try:
        return accounts[android_product]
    except KeyError:
        raise TaskVerificationError(
            'Android "{}" does not exist in the configuration of this instance. '
            'Are you sure you are allowed to push such an APK?'.format(android_product)
        )
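A sketch of the two code paths, using a hypothetical stand-in for the scriptworker context:

class FakeContext:  # hypothetical stand-in for scriptworker's Context
    config = {"google_play_accounts": {"aurora": {"service_account": "dummy"}}}

_get_play_config(FakeContext(), "aurora")   # -> {"service_account": "dummy"}
_get_play_config(FakeContext(), "unknown")  # raises TaskVerificationError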
Example #4
def _check_locale_consistency(locale_in_payload, uniques_locales_in_upstream_artifacts):
    if len(uniques_locales_in_upstream_artifacts) > 1:
        raise TaskVerificationError(
            '`task.payload.locale` is defined ("{}") but too many locales set in '
            '`task.payload.upstreamArtifacts` ({})'.format(
                locale_in_payload, uniques_locales_in_upstream_artifacts)
        )
    elif len(uniques_locales_in_upstream_artifacts) == 1:
        locale_in_upstream_artifacts = uniques_locales_in_upstream_artifacts[0]
        if locale_in_payload != locale_in_upstream_artifacts:
            raise TaskVerificationError(
                '`task.payload.locale` ("{}") does not match the one set in '
                '`task.payload.upstreamArtifacts` ("{}")'.format(
                    locale_in_payload, locale_in_upstream_artifacts)
            )
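The three possible outcomes, sketched:

_check_locale_consistency("en-US", ["en-US"])        # passes silently
_check_locale_consistency("en-US", ["de"])           # raises: locales do not match
_check_locale_consistency("en-US", ["en-US", "de"])  # raises: too many locales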
Example #5
def check_product_names_match_aliases(context):
    """Make sure we don't do any cross-product/channel alias update"""
    aliases = context.task["payload"]["aliases_entries"]

    validations = []
    for alias, product_name in aliases.items():
        if alias not in ALIASES_REGEXES:
            raise TaskVerificationError("Unrecognized alias: {}".format(alias))

        validations.append(matches(product_name, ALIASES_REGEXES[alias]))

    if not all(validations):
        raise TaskVerificationError("The product/alias pairs are corrupt: {}".format(aliases))
Example #6
def _check_archive_itself(zip_path, zip_max_size_in_mb):
    zip_size = os.path.getsize(zip_path)
    zip_size_in_mb = zip_size // (1024 * 1024)

    if zip_size_in_mb > zip_max_size_in_mb:
        raise TaskVerificationError(
            'Archive "{}" is too big. Max accepted size (in MB): {}. File size (in MB): {}'
            .format(zip_path, zip_max_size_in_mb, zip_size_in_mb))

    if not zipfile.is_zipfile(zip_path):
        raise TaskVerificationError('Archive "{}" is not a valid zip file.'.format(zip_path))

    log.info('Structure of archive "{}" is sane'.format(zip_path))
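Note that the floor division means an archive is only rejected once it exceeds the limit by a whole megabyte:

# With a 10 MB limit, a 10.9 MB archive floors to 10 MB and still passes.
assert (10 * 1024 * 1024 + 900_000) // (1024 * 1024) == 10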
Example #7
def _check_google_play_string_is_the_only_failed_task(failed_artifacts_per_task_id):
    if len(failed_artifacts_per_task_id) > 1:
        raise TaskVerificationError(
            'Only 1 task is allowed to fail. Found: {}'.format(failed_artifacts_per_task_id.keys())
        )

    task_id = list(failed_artifacts_per_task_id.keys())[0]
    failed_artifacts = failed_artifacts_per_task_id[task_id]
    if _EXPECTED_L10N_STRINGS_FILE_NAME not in failed_artifacts:
        raise TaskVerificationError(
            'Could not find "{}" in the only failed taskId "{}". Please note this is the only '
            'artifact allowed to be absent. Found: {}'
            .format(_EXPECTED_L10N_STRINGS_FILE_NAME, task_id, failed_artifacts)
        )
Example #8
def build_filelist_dict(context):
    """Build a dictionary of cot-downloaded paths and formats.

    Scriptworker will pre-download and pre-verify the `upstreamArtifacts`
    in our `work_dir`.  Let's build a dictionary of relative `path` to
    a dictionary of `full_path` and signing `formats`.

    Args:
        context (Context): the signing context

    Raises:
        TaskVerificationError: if the files don't exist on disk or
                               if authenticode_comment is used without authenticode or on a non-.msi file

    Returns:
        dict of dicts: the dictionary of relative `path` to a dictionary with
            `full_path` and a list of signing `formats`.

    """
    filelist_dict = {}
    messages = []
    for artifact_dict in context.task["payload"]["upstreamArtifacts"]:
        authenticode_comment = artifact_dict.get("authenticode_comment")
        if authenticode_comment and not any(
                "authenticode" in fmt for fmt in artifact_dict["formats"]):
            raise TaskVerificationError(
                "Cannot use authenticode_comment without an authenticode format"
            )

        if authenticode_comment and not any(
                path.endswith(".msi") for path in artifact_dict["paths"]):
            # Don't have to think about .zip and such unpacking for the comment
            raise TaskVerificationError(
                "There is no support for authenticode_comment outside of msi's at this time"
            )
        for path in artifact_dict["paths"]:
            full_path = os.path.join(context.config["work_dir"], "cot",
                                     artifact_dict["taskId"], path)
            if not os.path.exists(full_path):
                messages.append("{} doesn't exist!".format(full_path))
            filelist_dict[path] = {
                "full_path": full_path,
                "formats": _sort_formats(artifact_dict["formats"])
            }
            if authenticode_comment:
                filelist_dict[path]["comment"] = authenticode_comment

    if messages:
        raise TaskVerificationError(messages)
    return filelist_dict
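For a single MSI artifact, the returned dictionary might look like this (taskId, path, and format names are illustrative):

{
    "public/build/setup.msi": {
        "full_path": "/work_dir/cot/someTaskId/public/build/setup.msi",
        "formats": ["autograph_authenticode"],
        "comment": "Example comment",
    },
}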
Example #9
def _ensure_all_expected_files_are_deflated_on_disk(zip_path,
                                                    expected_full_paths):
    for full_path in expected_full_paths:
        if not os.path.exists(full_path):
            raise TaskVerificationError(
                'After extracting "{}", expected file "{}" does not exist'.
                format(zip_path, full_path))
        if not os.path.isfile(full_path):
            raise TaskVerificationError(
                'After extracting "{}", "{}" is not a file'.format(
                    zip_path, full_path))

    log.info(
        'All files declared in archive "{}" exist and are regular files: {}'.
        format(zip_path, expected_full_paths))
Example #10
def validate_task_schema(context, schema_key='schema_file'):
    """Validate the task definition.

    Args:
        context (scriptworker.context.Context): the scriptworker context. It must contain a task and
            the config pointing to the schema file
        schema_key: the key in `context.config` where the path to the schema file is. Key can contain
            dots (e.g.: 'schema_files.file_a'), in which case the lookup recurses into the nested
            dicts (`context.config['schema_files']['file_a']`)

    Raises:
        TaskVerificationError: if the task doesn't match the schema

    """
    schema_path = context.config
    schema_keys = schema_key.split('.')
    for key in schema_keys:
        schema_path = schema_path[key]

    task_schema = load_json_or_yaml(schema_path, is_path=True)
    log.debug('Task is validated against this schema: {}'.format(task_schema))

    try:
        validate_json_schema(context.task, task_schema)
    except ScriptWorkerTaskException as e:
        raise TaskVerificationError(
            'Cannot validate task against schema. Task: {}.'.format(
                context.task)) from e
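A sketch of the dotted-key lookup, assuming the config is shaped like this:

# context.config == {"schema_files": {"file_a": "/path/to/schema_a.json"}}
validate_task_schema(context, schema_key="schema_files.file_a")
# walks context.config["schema_files"]["file_a"] before loading the schema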
Example #11
def build_filelist(context):
    """Build a list of cot-downloaded paths.

    Scriptworker will pre-download and pre-verify the `upstreamArtifacts`
    in our `work_dir`.  Let's build a list of full paths.

    Args:
        context (SigningContext): the signing context

    Raises:
        TaskVerificationError: if the files don't exist on disk

    Returns:
        list: `full_path` of all files.

    """
    filelist = []
    messages = []
    for artifact_dict in context.task["payload"]["upstreamArtifacts"]:
        for path in artifact_dict["paths"]:
            full_path = os.path.join(context.config["work_dir"], "cot",
                                     artifact_dict["taskId"], path)
            if not os.path.exists(full_path):
                messages.append("{} doesn't exist!".format(full_path))
            filelist.append(full_path)
    if messages:
        raise TaskVerificationError(messages)
    return filelist
Example #12
def get_amo_instance_config_from_scope(context):
    """Get instance configuration from task scope.

    Args:
        context (Context): the scriptworker context

    Raises:
        TaskVerificationError: if the task doesn't have the necessary scopes or if the instance
            isn't configured to process it

    Returns:
        dict: configuration, formatted like: {
            'amo_server': 'http://some-amo-it.url',
            'jwt_user': '******',
            'jwt_secret': 'some-secret'
        }

    """
    scope = _get_scope(context.task)
    configured_instances = context.config["amo_instances"]

    try:
        return configured_instances[scope]
    except KeyError:
        raise TaskVerificationError(
            'This worker is not configured to handle scope "{}"'.format(scope))
Example #13
def _generate_beetmover_template_args_maven(task, release_props):
    tmpl_args = {
        "artifact_id": task["payload"]["artifact_id"],
        "template_key": "maven_{}".format(release_props["appName"])
    }

    # Geckoview follows the FirefoxVersion pattern
    if release_props.get("appName") == "geckoview":
        payload_version = FirefoxVersion.parse(task["payload"]["version"])
        # Change version number to major.minor.buildId because that's what the build task produces
        version = [
            payload_version.major_number, payload_version.minor_number,
            release_props["buildid"]
        ]
    else:
        payload_version = MavenVersion.parse(task["payload"]["version"])
        version = [
            payload_version.major_number, payload_version.minor_number,
            payload_version.patch_number
        ]

    if any(number is None for number in version):
        raise TaskVerificationError(
            "At least one digit is undefined. Got: {}".format(version))
    tmpl_args["version"] = ".".join(str(n) for n in version)

    # XXX: some appservices maven.zip files have a different structure,
    # encompassing only `pom` and `jar` files. We toggle that behavior in the
    # mapping by using this flag
    tmpl_args["is_jar"] = task["payload"].get("is_jar")

    return tmpl_args
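A sketch of the geckoview branch, assuming FirefoxVersion comes from the mozilla-version package (payload values are illustrative):

task = {"payload": {"artifact_id": "geckoview-x86", "version": "69.0"}}
release_props = {"appName": "geckoview", "buildid": "20190522010323"}
args = _generate_beetmover_template_args_maven(task, release_props)
# args["version"] == "69.0.20190522010323"
# args["template_key"] == "maven_geckoview"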
Example #14
def build_filelist_dict(context):
    """Build a dictionary of cot-downloaded paths and formats.

    Scriptworker will pre-download and pre-verify the `upstreamArtifacts`
    in our `work_dir`.  Let's build a dictionary of relative `path` to
    a dictionary of `full_path` and signing `formats`.

    Args:
        context (Context): the signing context

    Raises:
        TaskVerificationError: if the files don't exist on disk

    Returns:
        dict of dicts: the dictionary of relative `path` to a dictionary with
            `full_path` and a list of signing `formats`.

    """
    filelist_dict = {}
    messages = []
    for artifact_dict in context.task['payload']['upstreamArtifacts']:
        for path in artifact_dict['paths']:
            full_path = os.path.join(
                context.config['work_dir'], 'cot', artifact_dict['taskId'],
                path
            )
            if not os.path.exists(full_path):
                messages.append("{} doesn't exist!".format(full_path))
            filelist_dict[path] = {
                "full_path": full_path,
                "formats": _sort_formats(artifact_dict['formats']),
            }
    if messages:
        raise TaskVerificationError(messages)
    return filelist_dict
Example #15
def task_cert_type(context):
    """Extract task certificate type.

    Args:
        context (Context): the signing context.

    Raises:
        TaskVerificationError: if the number of cert scopes is not 1.

    Returns:
        str: the cert type.

    """
    if not context.task or not context.task["scopes"]:
        raise TaskVerificationError("No scopes found")

    prefixes = _get_cert_prefixes(context)
    scopes = _extract_scopes_from_unique_prefix(scopes=context.task["scopes"], prefixes=prefixes)
    return get_single_item_from_sequence(
        scopes,
        condition=lambda _: True,  # the sequence must contain exactly one item
        ErrorClass=TaskVerificationError,
        no_item_error_message="No scope starting with any of these prefixes {} found".format(prefixes),
        too_many_item_error_message="More than one scope found",
    )
Example #16
def _generate_beetmover_template_args_maven(task, release_props):
    tmpl_args = {
        'artifact_id': task['payload']['artifact_id'],
        'template_key': 'maven_{}'.format(release_props['appName']),
    }

    # FIXME: this is a temporary solution while we sanitize the payload
    # under https://github.com/mozilla-releng/beetmoverscript/issues/196
    if 'SNAPSHOT' in task['payload']['version']:
        payload_version = MavenVersion.parse(task['payload']['version'])
    else:
        payload_version = FirefoxVersion.parse(task['payload']['version'])
    # Change version number to major.minor.buildId because that's what the build task produces
    version = [
        payload_version.major_number, payload_version.minor_number,
        release_props.get('buildid', payload_version.patch_number)
    ]
    if any(number is None for number in version):
        raise TaskVerificationError(
            'At least one digit is undefined. Got: {}'.format(version))
    tmpl_args['version'] = '.'.join(str(n) for n in version)

    if isinstance(payload_version, MavenVersion) and payload_version.is_snapshot:
        tmpl_args['snapshot_version'] = payload_version
        tmpl_args['date_timestamp'] = "{{date_timestamp}}"
        tmpl_args['clock_timestamp'] = "{{clock_timestamp}}"
        tmpl_args['build_number'] = "{{build_number}}"

    return tmpl_args
Example #17
def check_product_names_match_nightly_locations(context):
    """Double check that nightly products are as expected"""
    products = context.task["payload"]["bouncer_products"]
    valid_sets = []
    for product_set in BOUNCER_PATH_REGEXES_PER_PRODUCT:
        valid_sets.append(sorted(product_set.keys()))
    if sorted(products) not in valid_sets:
        raise TaskVerificationError("Products {} don't correspond to nightly ones".format(products))
Example #18
def _check_tarball_size(tarball_path):
    tar_size = os.path.getsize(tarball_path)
    tar_size_in_mb = tar_size // (1024 * 1024)

    if tar_size_in_mb > TAR_MAX_SIZE_IN_MB:
        raise TaskVerificationError(
            f"Tar {tarball_path} is too big. Max accepted size is {TAR_MAX_SIZE_IN_MB}"
        )
Example #19
def _check_scopes_exist_and_all_have_the_same_prefix(scopes, prefixes):
    for prefix in prefixes:
        # `all()` is vacuously true for an empty sequence, so check `scopes` explicitly.
        if scopes and all(scope.startswith(prefix) for scope in scopes):
            break
    else:
        raise TaskVerificationError(
            'Scopes must exist and all have the same prefix. '
            'Given scopes: {}. Allowed prefixes: {}'.format(scopes, prefixes))
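With the empty-sequence guard above, the behavior sketches as follows (the prefix value is hypothetical):

prefixes = ("project:releng:bouncer:server:",)  # hypothetical prefix
_check_scopes_exist_and_all_have_the_same_prefix(
    ["project:releng:bouncer:server:production"], prefixes)  # passes
_check_scopes_exist_and_all_have_the_same_prefix([], prefixes)  # raises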
Example #20
def _get_product_config(context, android_product):
    try:
        products = context.config['products']
    except KeyError:
        raise ConfigValidationError('"products" is not part of the configuration')

    matching_products = [product for product in products if android_product in product['product_names']]

    if len(matching_products) == 0:
        raise TaskVerificationError('Android "{}" does not exist in the configuration of this '
                                    'instance. Are you sure you are allowed to push such an '
                                    'APK?'.format(android_product))

    if len(matching_products) > 1:
        raise TaskVerificationError('The configuration is invalid: multiple product configs match '
                                    'the product "{}"'.format(android_product))

    return matching_products[0]
Example #21
def get_ship_it_instance_config_from_scope(context):
    scope = _get_scope(context, "server")
    configured_instances = context.config['ship_it_instances']

    try:
        return configured_instances[scope]
    except KeyError:
        raise TaskVerificationError(
            'This worker is not configured to handle scope "{}"'.format(scope))
Example #22
def get_ship_it_instance_config_from_scope(context):
    scope = _get_scope(context, "server")
    configured_instance = context.config["shipit_instance"]

    if configured_instance.get("scope") == scope:
        return configured_instance

    raise TaskVerificationError(
        'This worker is not configured to handle scope "{}"'.format(scope))
Example #23
def extract_file_config_from_artifact_map(artifact_map, path, task_id, locale):
    """Return matching artifact map config."""
    for entry in artifact_map:
        if entry["taskId"] != task_id or entry["locale"] != locale:
            continue
        if not entry["paths"].get(path):
            continue
        return entry["paths"][path]
    raise TaskVerificationError("No artifact map entry for {}/{} {}".format(task_id, locale, path))
Example #24
def _extract_and_check_timestamps(archive_filename, regex):
    match = re.search(regex, archive_filename)
    try:
        identifier = match.group()
    except AttributeError:
        raise TaskVerificationError(
            'File "{}" present in archive has invalid identifier. '
            'Expected YYYYMMDD.HHMMSS-BUILDNUMBER within it'.format(
                archive_filename))
    timestamp, build_number = identifier.split('-')
    try:
        datetime.datetime.strptime(timestamp, '%Y%m%d.%H%M%S')
    except ValueError:
        raise TaskVerificationError(
            'File "{}" present in archive has invalid timestamp. '
            'Expected YYYYMMDD.HHMMSS within it'.format(archive_filename))

    date_timestamp, clock_timestamp = timestamp.split('.')
    return date_timestamp, clock_timestamp, build_number
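A sketch with a hypothetical regex for the YYYYMMDD.HHMMSS-BUILDNUMBER identifier (the caller supplies the real one):

regex = r'\d{8}\.\d{6}-\d+'  # hypothetical regex
_extract_and_check_timestamps('geckoview-70.0.20190712.093817-5.pom', regex)
# -> ('20190712', '093817', '5')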
Example #25
def get_flatpak_channel(config, task):
    payload = task["payload"]
    if "channel" not in payload:
        raise TaskVerificationError(
            f"channel must be defined in the task payload. Given payload: {payload}"
        )

    channel = payload["channel"]
    scope = FLATPAK_SCOPES_PREFIX + channel
    if config["push_to_flathub"] and scope not in task["scopes"]:
        raise TaskVerificationError(
            f"Channel {channel} not allowed, missing scope {scope}")

    if channel not in ALLOWED_CHANNELS:
        raise TaskVerificationError(
            'Channel "{}" is not allowed. Allowed ones are: {}'.format(
                channel, ALLOWED_CHANNELS))

    return channel
Example #26
def _ensure_all_expected_files_are_present_in_archive(zip_path,
                                                      files_in_archive,
                                                      expected_files):
    files_in_archive = set(files_in_archive)

    unique_expected_files = set(expected_files)
    if len(expected_files) != len(unique_expected_files):
        duplicated_files = [
            file for file in unique_expected_files
            if expected_files.count(file) > 1
        ]
        raise TaskVerificationError(
            'Found duplicated expected files in archive "{}": {}'.format(
                zip_path, duplicated_files))

    for file_ in files_in_archive:
        if os.path.isabs(file_):
            raise TaskVerificationError(
                'File "{}" in archive "{}" cannot be an absolute path.'.format(
                    file_, zip_path))
        if os.path.normpath(file_) != file_:
            raise TaskVerificationError(
                'File "{}" in archive "{}" cannot contain up-level references or redundant separators'
                .format(file_, zip_path))
        if file_ not in unique_expected_files:
            raise TaskVerificationError(
                'File "{}" present in archive "{}" is not expected. Expected: {}'
                .format(file_, zip_path, unique_expected_files))

    if len(files_in_archive) != len(unique_expected_files):
        missing_expected_files = [
            file for file in unique_expected_files
            if file not in files_in_archive
        ]
        raise TaskVerificationError(
            'Expected files are missing in archive "{}": {}'.format(
                zip_path, missing_expected_files))

    log.info('Archive "{}" contains all expected files: {}'.format(
        zip_path, unique_expected_files))
Example #27
def _ensure_no_file_got_overwritten(task_id, extracted_files):
    unique_paths = set(extracted_files)

    if len(unique_paths) != len(extracted_files):
        duplicated_paths = [
            path for path in unique_paths if extracted_files.count(path) > 1
        ]
        raise TaskVerificationError(
            'Archives from task "{}" overwrote files: {}'.format(
                task_id, duplicated_paths))

    log.info('All archives from task "{}" produced different files.'.format(
        task_id))
Example #28
def _ensure_files_in_archive_have_decent_sizes(zip_path, zip_metadata,
                                               zip_max_size_in_mb):
    for file_name, file_metadata in zip_metadata.items():
        compressed_size = file_metadata['compress_size']
        real_size = file_metadata['file_size']
        compressed_size_in_mb = compressed_size // (1024 * 1024)

        if compressed_size_in_mb > zip_max_size_in_mb:
            raise TaskVerificationError(
                'In archive "{}", compressed file "{}" is too big. Max accepted size (in MB): {}. File size (in MB): {}'
                .format(zip_path, file_name, zip_max_size_in_mb,
                        compressed_size_in_mb))

        compression_ratio = real_size / compressed_size
        if compression_ratio > ZIP_MAX_COMPRESSION_RATIO:
            raise TaskVerificationError(
                'In archive "{}", file "{}" has a suspicious compression ratio. Max accepted: {}. Found: {}'
                .format(zip_path, file_name, ZIP_MAX_COMPRESSION_RATIO,
                        compression_ratio))

    log.info(
        'Archive "{}" contains files with legitimate sizes.'.format(zip_path))
Example #29
async def async_main(context):
    # perform schema validation for the corresponding type of task
    validate_task_schema(context)

    # determine the task server and action
    context.server = get_task_server(context.task, context.config)
    context.action = get_task_action(context.task, context.config)

    # perform the appropriate behavior
    if action_map.get(context.action):
        await action_map[context.action](context)
    else:
        raise TaskVerificationError("Unknown action: {}!".format(context.action))
Example #30
def _check_current_snap_is_not_released(current_revision, current_version,
                                        latest_released_revision,
                                        latest_released_version):
    if latest_released_version == current_version:
        if latest_released_revision == current_revision:
            raise AlreadyLatestError(latest_released_version,
                                     latest_released_revision)
        else:
            raise TaskVerificationError(
                'Versions "{0}" are the same but revisions differ. This may mean someone shipped a rogue "{0}" before automation! '
                "Latest on store: {1}. Revision of current Snap: {2}".format(
                    latest_released_version, latest_released_revision,
                    current_revision))
    elif latest_released_version > current_version:
        # We don't check if the revision is higher because the store assigns
        # revisions at upload time, so they don't necessarily track version order.
        raise TaskVerificationError(
            'Current version "{}" is lower than the latest one released on the store "{}". Downgrades are not allowed.'
            .format(current_version, latest_released_version))

    log.debug(
        'Current version "{}" is higher than the latest released one "{}". Okay to release the current one'
        .format(current_version, latest_released_version))