Code example #1
File: tests.py Project: jimmyngo/voluptuous
def test_schema_extend_overrides():
    """Verify that Schema.extend can override required/extra parameters."""

    base = Schema({'a': int}, required=True)
    extended = base.extend({'b': str}, required=False, extra=voluptuous.ALLOW_EXTRA)

    assert base.required == True
    assert base.extra == voluptuous.PREVENT_EXTRA
    assert extended.required == False
    assert extended.extra == voluptuous.ALLOW_EXTRA
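    # Hedged follow-up (not in the original test): with required=False the key
    # 'a' is no longer mandatory, and ALLOW_EXTRA admits unknown keys.
    extended({'b': 'hello', 'unknown': 1})  # validates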
Code example #2
File: tests.py Project: tuukkamustonen/voluptuous
def test_subschema_extension():
    """Verify that Schema.extend adds and replaces keys in a subschema"""

    base = Schema({'a': {'b': int, 'c': float}})
    extension = {'d': str, 'a': {'b': str, 'e': int}}
    extended = base.extend(extension)

    assert_equal(base.schema, {'a': {'b': int, 'c': float}})
    assert_equal(extension, {'d': str, 'a': {'b': str, 'e': int}})
    assert_equal(extended.schema, {'a': {'b': str, 'c': float, 'e': int}, 'd': str})
Code example #3
File: tests.py Project: tuukkamustonen/voluptuous
def test_schema_extend_key_swap():
    """Verify that Schema.extend can replace keys, even when different markers are used"""

    base = Schema({Optional('a'): int})
    extension = {Required('a'): int}
    extended = base.extend(extension)

    assert_equal(len(base.schema), 1)
    assert_true(isinstance(list(base.schema)[0], Optional))
    assert_equal(len(extended.schema), 1)
    assert_true(isinstance(list(extended.schema)[0], Required))
Code example #4
File: tests.py Project: jimmyngo/voluptuous
def test_schema_extend():
    """Verify that Schema.extend copies schema keys from both."""

    base = Schema({'a': int}, required=True)
    extension = {'b': str}
    extended = base.extend(extension)

    assert base.schema == {'a': int}
    assert extension == {'b': str}
    assert extended.schema == {'a': int, 'b': str}
    assert extended.required == base.required
    assert extended.extra == base.extra
Code example #5
File: tests.py Project: tuukkamustonen/voluptuous
def test_schema_infer_list():
    schema = Schema.infer({
        'list': ['foo', True, 42, 3.14]
    })

    assert_equal(schema, Schema({
        Required('list'): [str, bool, int, float]
    }))
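    # Note (hedged, not in the original test): a list schema such as
    # [str, bool, int, float] lets each element match any of the listed types,
    # in any order; it is not a positional match.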
Code example #6
File: tests.py Project: tuukkamustonen/voluptuous
def test_schema_infer_scalar():
    assert_equal(Schema.infer('foo'), Schema(str))
    assert_equal(Schema.infer(True), Schema(bool))
    assert_equal(Schema.infer(42), Schema(int))
    assert_equal(Schema.infer(3.14), Schema(float))
    assert_equal(Schema.infer({}), Schema(dict))
    assert_equal(Schema.infer([]), Schema(list))
Code example #7
File: tests.py Project: tuukkamustonen/voluptuous
def test_schema_infer():
    schema = Schema.infer({
        'str': 'foo',
        'bool': True,
        'int': 42,
        'float': 3.14
    })
    assert_equal(schema, Schema({
        Required('str'): str,
        Required('bool'): bool,
        Required('int'): int,
        Required('float'): float
    }))
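    # Hedged follow-up (not in the original test): the inferred schema then
    # validates same-shaped data and rejects type mismatches.
    schema({'str': 'bar', 'bool': False, 'int': 7, 'float': 1.0})  # passes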
Code example #8
File: validation.py Project: mozaiques/zombase
    def __call__(self, data):
        _data = data.copy()
        popped = []

        for k, v in six.iteritems(data):
            if v is None and k not in self._not_none:
                _data.pop(k)
                popped.append((k, v))

        schema_out = Schema.__call__(self, _data)
        for k, v in popped:
            schema_out[k] = v

        return schema_out
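
A minimal sketch of how the __call__ override above might be wired into a Schema subclass. Only __call__ appears in the original; the class name, constructor, and not_none parameter below are assumptions:

import six
from voluptuous import Schema

class NoneFriendlySchema(Schema):
    """Pop None values before validation unless the key is listed in not_none."""

    def __init__(self, schema, not_none=(), **kwargs):
        super(NoneFriendlySchema, self).__init__(schema, **kwargs)
        self._not_none = set(not_none)

    # __call__ as shown above: None values for keys outside _not_none are
    # popped before validation and re-attached to the result unchanged.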
Code example #9
File: tests.py Project: tuukkamustonen/voluptuous
def test_schema_infer_dict():
    schema = Schema.infer({
        'a': {
            'b': {
                'c': 'foo'
            }
        }
    })

    assert_equal(schema, Schema({
        Required('a'): {
            Required('b'): {
                Required('c'): str
            }
        }
    }))
Code example #10
File: tests.py Project: tuukkamustonen/voluptuous
def test_schema_infer_accepts_kwargs():
    schema = Schema.infer({
        'str': 'foo',
        'bool': True
    }, required=False, extra=True)

    # Subset of schema should be acceptable thanks to required=False.
    schema({'bool': False})

    # Keys that are in schema should still match required types.
    try:
        schema({'str': 42})
    except Invalid:
        pass
    else:
        assert False, 'Did not raise Invalid for Number'

    # Extra fields should be acceptable thanks to extra=True.
    schema({'str': 'bar', 'int': 42})
Code example #11
    def setUp(self):
        self.task_schema = Schema(
            {
                'task': {
                    'scopes': list,
                    'provisionerId': 'buildbot-bridge',
                    'workerType': 'buildbot-bridge',
                    'payload': {
                        'properties': {
                            'product': 'firefox',
                            'version': '42.0b2',
                            'build_number': 3,
                            'repo_path': 'releases/foo',
                            'script_repo_revision': 'abcd',
                            'revision': 'abcdef123456',
                            'tuxedo_server_url': 'https://bouncer.real.allizom.org/api',
                        }
                    }
                }
            },
            extra=True,
            required=True)

        test_kwargs = create_firefox_test_args({
            'push_to_candidates_enabled': True,
            'push_to_releases_enabled': True,
            'uptake_monitoring_enabled': True,
            'signing_pvt_key': PVT_KEY_FILE,
            'uptake_monitoring_platforms': ["macosx64", "win32", "win64", "linux", "linux64"],
            'release_channels': ['foo'],
            'final_verify_channels': ['foo'],
            'en_US_config': {
                "platforms": {
                    "macosx64": {
                        "task_id": "abc"
                    },
                    "win32": {
                        "task_id": "def"
                    },
                    "win64": {
                        "task_id": "jgh"
                    },
                    "linux": {
                        "task_id": "ijk"
                    },
                    "linux64": {
                        "task_id": "lmn"
                    },
                }
            },
        })
        self.graph = make_task_graph(**test_kwargs)
        self.task = get_task_by_name(self.graph,
                                     "release-foo-firefox_uptake_monitoring")
        self.payload = self.task["task"]["payload"]
Code example #12
    def dict_schema() -> Schema:
        schema = Schema({Required("name"): str, Required("continent"): str})

        return schema
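
A hedged usage sketch for the factory above (the call context is assumed):

schema = dict_schema()
schema({"name": "France", "continent": "Europe"})  # passes
# schema({"name": "France"}) would raise MultipleInvalid: 'continent' is required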
Code example #13
cron_yml_schema = Schema({
    'jobs': [{
        # Name of the crontask (must be unique)
        Required('name'):
        basestring,

        # what to run

        # Description of the job to run, keyed by 'type'
        Required('job'):
        Any({
            Required('type'):
            'decision-task',

            # Treeherder symbol for the cron task
            Required('treeherder-symbol'):
            basestring,

            # --triggered-by './mach taskgraph decision' argument
            'triggered-by':
            basestring,

            # --target-tasks-method './mach taskgraph decision' argument
            'target-tasks-method':
            basestring,
        }),

        # when to run it

        # Optional set of projects on which this job should run; if omitted, this will
        # run on all projects for which cron tasks are set up
        'projects': [basestring],

        # Array of times at which this task should run.  These *must* be a multiple of
        # 15 minutes, the minimum scheduling interval.
        'when': [{
            'hour': int,
            'minute': All(int, even_15_minutes)
        }],
    }],
})
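
The even_15_minutes validator referenced above is not part of this snippet. A minimal sketch of what such a validator could look like, as an assumption rather than the project's actual implementation:

from voluptuous import Invalid

def even_15_minutes(minutes):
    # Accept only minute values on the 15-minute scheduling grid.
    if minutes % 15 != 0:
        raise Invalid("minute must be a multiple of 15")
    return minutes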
Code example #14
    def __init__(self, schema, msg=None):
        self.schema = schema
        self._schema = Schema(schema)
        self.msg = msg
Code example #15
#!/usr/bin/env python
__all__ = [
    "Environment", "EnvironmentalDataPoint", "FirmwareModule",
    "FirmwareModuleType", "Recipe", "SoftwareModule", "SoftwareModuleType"
]

from openag_lib.firmware.categories import SENSORS, ACTUATORS, CALIBRATION, all_categories
from voluptuous import Schema, Required, Any, Extra, Optional, REMOVE_EXTRA

Environment = Schema({
    "name": Any(str, unicode),
}, extra=REMOVE_EXTRA)
Environment.__doc__ = """
An :class:`Environment` abstractly represents a single homogenous
climate-controlled volume within a system. A food computer usually consists of
a single :class:`Environment`, but larger systems will often contain more than
one :class:`Environment`.

.. py:attribute:: name

    (str) A human-readable name for the environment
"""

EnvironmentalDataPoint = Schema({
    Required("environment"): Any(str, unicode),
    Required("variable"): Any(str, unicode),
    Required("is_manual", default=False): bool,
    Required("is_desired"): bool,
    "value": object,
    Required("timestamp"): Any(float, int),
}, extra=REMOVE_EXTRA)
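
A hedged usage sketch of EnvironmentalDataPoint: extra=REMOVE_EXTRA silently drops unknown keys, and Required(..., default=...) fills in absent ones.

point = EnvironmentalDataPoint({
    "environment": "env-1",
    "variable": "air_temperature",
    "is_desired": False,
    "value": 21.5,
    "timestamp": 1500000000.0,
    "junk": "silently dropped",
})
assert point["is_manual"] is False and "junk" not in point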
Code example #16
    def validate(self):
        """
        Validate the given configuration,
        converting properties to native Python types.

        The configuration to check must have been given to the
        constructor and stored in :py:attr:`self.config`.

        :raises: :py:class:`voluptuous.Invalid` if one property is invalid
        :raises: :py:class:`voluptuous.MultipleInvalid` if multiple
                 properties are not compliant
        """
        self._pre_validate()

        # schema to validate all cluster properties
        schema = {
            "cluster": {
                "cloud": All(str, Length(min=1)),
                "setup_provider": All(str, Length(min=1)),
                "login": All(str, Length(min=1)),
            },
            "setup": {
                "provider":
                All(str, Length(min=1)),
                Optional("playbook_path"):
                can_read_file(),
                Optional("ansible_command"):
                All(can_read_file(), can_execute_file()),
                Optional("ansible_extra_args"):
                All(str, Length(min=1)),
                Optional("ssh_pipelining"):
                Boolean(str),
            },
            "login": {
                "image_user": All(str, Length(min=1)),
                "image_user_sudo": All(str, Length(min=1)),
                "image_sudo": Boolean(str),
                "user_key_name": All(str, Length(min=1)),
                "user_key_private": can_read_file(),
                "user_key_public": can_read_file(),
            },
        }

        cloud_schema_ec2 = {
            "provider": 'ec2_boto',
            "ec2_url": Url(str),
            Optional("ec2_access_key"): All(str, Length(min=1)),
            Optional("ec2_secret_key"): All(str, Length(min=1)),
            "ec2_region": All(str, Length(min=1)),
            Optional("request_floating_ip"): Boolean(str),
            Optional("vpc"): All(str, Length(min=1)),
        }
        cloud_schema_gce = {
            "provider": 'google',
            "gce_client_id": All(str, Length(min=1)),
            "gce_client_secret": All(str, Length(min=1)),
            "gce_project_id": All(str, Length(min=1)),
            Optional("noauth_local_webserver"): Boolean(str),
            Optional("zone"): All(str, Length(min=1)),
            Optional("network"): All(str, Length(min=1)),
        }

        cloud_schema_openstack = {
            "provider": 'openstack',
            "auth_url": All(str, Length(min=1)),
            "username": All(str, Length(min=1)),
            "password": All(str, Length(min=1)),
            "project_name": All(str, Length(min=1)),
            Optional("request_floating_ip"): Boolean(str),
            Optional("region_name"): All(str, Length(min=1)),
            Optional("nova_api_version"): nova_api_version(),
        }

        node_schema = {
            "flavor": All(str, Length(min=1)),
            "image_id": All(str, Length(min=1)),
            "security_group": All(str, Length(min=1)),
            Optional("network_ids"): All(str, Length(min=1)),
        }

        # validation
        validator = Schema(schema, required=True, extra=True)
        node_validator = Schema(node_schema, required=True, extra=True)
        ec2_validator = Schema(cloud_schema_ec2, required=True, extra=False)
        gce_validator = Schema(cloud_schema_gce, required=True, extra=False)
        openstack_validator = Schema(cloud_schema_openstack,
                                     required=True,
                                     extra=False)

        if not self.config:
            raise Invalid("No clusters found in configuration.")

        for cluster, properties in self.config.iteritems():
            self.config[cluster] = validator(properties)

            if 'provider' not in properties['cloud']:
                raise Invalid("Missing `provider` option in cluster `%s`" %
                              cluster)
            try:
                cloud_props = properties['cloud']
                if properties['cloud']['provider'] == "ec2_boto":
                    self.config[cluster]['cloud'] = ec2_validator(cloud_props)
                elif properties['cloud']['provider'] == "google":
                    self.config[cluster]['cloud'] = gce_validator(cloud_props)
                elif properties['cloud']['provider'] == "openstack":
                    self.config[cluster]['cloud'] = openstack_validator(
                        cloud_props)
            except MultipleInvalid as ex:
                raise Invalid(
                    "Invalid configuration for cloud section `cloud/%s`: %s" %
                    (properties['cluster']['cloud'],
                     str.join(", ", [str(i) for i in ex.errors])))

            if 'nodes' not in properties or len(properties['nodes']) == 0:
                raise Invalid("No nodes configured for cluster `%s`" % cluster)

            for node, props in properties['nodes'].iteritems():
                # check name pattern to conform hostnames
                match = re.search(r'^[a-zA-Z0-9-]*$', node)
                if not match:
                    raise Invalid(
                        "Invalid name `%s` for node group. A valid node group"
                        " can only consist of letters, digits or the hyphen"
                        " character (`-`)" % (node, ))

                node_validator(props)

                if (properties['cloud']['provider'] == 'ec2_boto'
                        and 'vpc' in self.config[cluster]['cloud']
                        and 'network_ids' not in props):
                    raise Invalid("Node group `%s/%s` is being used in"
                                  " a VPC, so it must specify network_ids." %
                                  (cluster, node))

                if (properties['cloud']['provider'] == 'ec2_boto'
                        and 'network_ids' in props
                        and 'vpc' not in self.config[cluster]['cloud']):
                    raise Invalid("Cluster `%s` must specify a VPC to place"
                                  " `%s` instances in %s" %
                                  (cluster, node, props['network_ids']))

        self._post_validate()
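
can_read_file(), can_execute_file(), Boolean and nova_api_version() used above are project-specific validator factories that are not part of this snippet. A minimal sketch of the first, assuming it only needs to check readability:

import os
from voluptuous import Invalid

def can_read_file():
    def validator(path):
        # Resolve ~ and verify the file is readable by the current user.
        if not os.access(os.path.expanduser(path), os.R_OK):
            raise Invalid("cannot read file `%s`" % path)
        return path
    return validator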
Code example #17
class SkipPackageSievePrescription(UnitPrescription):
    """Skip package sieve prescription unit implementation."""

    CONFIGURATION_SCHEMA: Schema = Schema({
        Required("package_name"):
        SchemaAny(str, None),
        Required("run"):
        SchemaAny(PRESCRIPTION_SKIP_PACKAGE_SIEVE_RUN_ENTRY_SCHEMA, None),
        Required("match"):
        PRESCRIPTION_SKIP_PACKAGE_SIEVE_MATCH_ENTRY_SCHEMA,
        Required("prescription"):
        Schema({"run": bool}),
    })

    @staticmethod
    def is_sieve_unit_type() -> bool:
        """Check if this unit is of type sieve."""
        return True

    @classmethod
    def should_include(
        cls, builder_context: "PipelineBuilderContext"
    ) -> Generator[Dict[str, Any], None, None]:
        """Check if the given pipeline unit should be included in the given pipeline configuration."""
        if cls._should_include_base(builder_context):
            prescription: Dict[str, Any] = cls._PRESCRIPTION  # type: ignore
            prescription_conf = {"run": False}
            if isinstance(prescription["match"], list):
                for item in prescription["match"]:
                    yield {
                        "package_name": item["package_name"],
                        "run": prescription.get("run"),
                        "match": item,
                        "prescription": prescription_conf,
                    }
            else:
                yield {
                    "package_name": prescription["match"]["package_name"],
                    "run": prescription.get("run"),
                    "match": prescription["match"],
                    "prescription": prescription_conf,
                }
            return None

        yield from ()
        return None

    def pre_run(self) -> None:
        """Initialize this unit before each run."""
        super().pre_run()

    def run(
        self, _: Generator[PackageVersion, None, None]
    ) -> Generator[PackageVersion, None, None]:
        """Run main entry-point for sieves to filter and score packages."""
        prescription_conf = self._configuration["prescription"]
        if not prescription_conf["run"]:
            self._run_log()
            self._run_stack_info()
            prescription_conf["run"] = True

        raise SkipPackage
Code example #18
File: source_test.py Project: sammacbeth/browser-f
source_test_description_schema = Schema({
    # most fields are passed directly through as job fields, and are not
    # repeated here
    Extra:
    object,

    # The platform on which this task runs.  This will be used to set up attributes
    # (for try selection) and treeherder metadata (for display).  If given as a list,
    # the job will be "split" into multiple tasks, one with each platform.
    Required('platform'):
    Any(text_type, [text_type]),

    # Build labels required for the task. If this key is provided it must
    # contain a build label for the task platform.
    # The task will then depend on a build task, and the installer url will be
    # saved to the GECKO_INSTALLER_URL environment variable.
    Optional('require-build'): {
        text_type: text_type,
    },

    # These fields can be keyed by "platform", and are otherwise identical to
    # job descriptions.
    Required('worker-type'):
    optionally_keyed_by('platform', job_description_schema['worker-type']),
    Required('worker'):
    optionally_keyed_by('platform', job_description_schema['worker']),
    Optional('python-version'): [int],
    # If true, the DECISION_TASK_ID env will be populated.
    Optional('require-decision-task-id'):
    bool,

    # A list of artifacts to install from 'fetch' tasks.
    Optional('fetches'): {
        text_type:
        optionally_keyed_by('platform',
                            job_description_schema['fetches'][text_type]),
    },
})
Code example #19
File: config.py Project: tameem-emumba/dvc
            },
            "webdavs": {
                **WEBDAV_COMMON,
                **REMOTE_COMMON
            },
            "remote": {
                str: object
            },  # Any of the above options are valid
        })
    },
    "state": {
        "row_limit": All(Coerce(int), Range(1)),
        "row_cleanup_quota": All(Coerce(int), Range(0, 100)),
    },
}
COMPILED_SCHEMA = Schema(SCHEMA)


class Config(dict):
    """Class that manages configuration files for a DVC repo.

    Args:
        dvc_dir (str): optional path to `.dvc` directory, that is used to
            access repo-specific configs like .dvc/config and
            .dvc/config.local.
        validate (bool): optional flag to tell dvc if it should validate the
            config or just load it as is. 'True' by default.

    Raises:
        ConfigError: thrown if config has an invalid format.
    """
Code example #20
from voluptuous import Schema, Url

LABEL_SCHEMA = Schema({
    "id": int,
    "url": Url(),
    "name": str,
    "color": str,
    "default": bool
})
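
A hedged usage sketch: every key is optional here because no Required markers are used and required defaults to False (that the payload models a GitHub label is an assumption).

LABEL_SCHEMA({
    "id": 42,
    "url": "https://example.com/labels/bug",
    "name": "bug",
    "color": "d73a4a",
    "default": True,
})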
Code example #21
def check_mode(x):
    if x in ["schedule", "none", "count_down"]:
        return x

    raise Invalid(f"invalid mode {x}")


def lb_dev_state(x):
    if x in ["normal"]:
        return x

    raise Invalid(f"Invalid dev_state {x}")


TZ_SCHEMA = Schema(
    {"zone_str": str, "dst_offset": int, "index": All(int, Range(min=0)), "tz_str": str}
)
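
# Hedged usage sketch (not in the original snippet):
TZ_SCHEMA({"zone_str": "UTC", "dst_offset": 0, "index": 0, "tz_str": "UTC0"})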

CURRENT_CONSUMPTION_SCHEMA = Schema(
    Any(
        {
            "voltage": Any(All(float, Range(min=0, max=300)), None),
            "power": Any(Coerce(float, Range(min=0)), None),
            "total": Any(Coerce(float, Range(min=0)), None),
            "current": Any(All(float, Range(min=0)), None),
            "voltage_mv": Any(
                All(float, Range(min=0, max=300000)), int, None
            ),  # TODO can this be int?
            "power_mw": Any(Coerce(float, Range(min=0)), None),
            "total_wh": Any(Coerce(float, Range(min=0)), None),
            "current_ma": Any(
Code example #22
File: problem.py Project: wanghp18/picoCTF
from datetime import datetime
from os.path import isfile, join
from random import randint

import api
import pymongo
from api.annotations import log_action
from api.common import (check, InternalException, safe_fail,
                        SevereInternalException, validate, WebException)
from bson import json_util
from voluptuous import Length, Range, Required, Schema

submission_schema = Schema({
    Required("tid"):
    check(("This does not look like a valid tid.", [str, Length(max=100)])),
    Required("pid"):
    check(("This does not look like a valid pid.", [str, Length(max=100)])),
    Required("key"):
    check(("This does not look like a valid key.", [str, Length(max=100)]))
})

problem_schema = Schema({
    Required("name"):
    check(("The problem's display name must be a string.", [str])),
    Required("sanitized_name"):
    check(("The problems's sanitized name must be a string.", [str])),
    Required("score"):
    check(("Score must be a positive integer.", [int, Range(min=0)])),
    Required("author"):
    check(("Author must be a string.", [str])),
    Required("category"):
    check(("Category must be a string.", [str])),
Code example #23
def validate_metadata_file(path, is_ansible, check_deprecation_dates=False):
    """Validate explicit runtime metadata file"""
    try:
        with open(path, 'r') as f_path:
            routing = yaml.safe_load(f_path)
    except yaml.error.MarkedYAMLError as ex:
        print('%s:%d:%d: YAML load failed: %s' %
              (path, ex.context_mark.line + 1, ex.context_mark.column + 1,
               re.sub(r'\s+', ' ', str(ex))))
        return
    except Exception as ex:  # pylint: disable=broad-except
        print('%s:%d:%d: YAML load failed: %s' %
              (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
        return

    if is_ansible:
        current_version = get_ansible_version()
    else:
        current_version = get_collection_version()

    # Updates to schema MUST also be reflected in the documentation
    # https://docs.ansible.com/ansible-core/devel/dev_guide/developing_collections.html

    # plugin_routing schema

    avoid_additional_data = Schema(Any(
        {
            Required('removal_version'): any_value,
            'warning_text': any_value,
        }, {
            Required('removal_date'): any_value,
            'warning_text': any_value,
        }),
                                   extra=PREVENT_EXTRA)

    deprecation_schema = All(
        # The first schema validates the input, and the second makes sure no extra keys are specified
        Schema({
            'removal_version':
            partial(removal_version,
                    is_ansible=is_ansible,
                    current_version=current_version),
            'removal_date':
            partial(isodate, check_deprecation_date=check_deprecation_dates),
            'warning_text':
            Any(*string_types),
        }),
        avoid_additional_data)

    tombstoning_schema = All(
        # The first schema validates the input, and the second makes sure no extra keys are specified
        Schema({
            'removal_version':
            partial(removal_version,
                    is_ansible=is_ansible,
                    current_version=current_version,
                    is_tombstone=True),
            'removal_date':
            partial(isodate, is_tombstone=True),
            'warning_text':
            Any(*string_types),
        }),
        avoid_additional_data)

    plugin_routing_schema = Any(
        Schema(
            {
                ('deprecation'): Any(deprecation_schema),
                ('tombstone'): Any(tombstoning_schema),
                ('redirect'): Any(*string_types),
            },
            extra=PREVENT_EXTRA), )

    list_dict_plugin_routing_schema = [{
        str_type: plugin_routing_schema
    } for str_type in string_types]

    plugin_schema = Schema(
        {
            ('action'): Any(None, *list_dict_plugin_routing_schema),
            ('become'): Any(None, *list_dict_plugin_routing_schema),
            ('cache'): Any(None, *list_dict_plugin_routing_schema),
            ('callback'): Any(None, *list_dict_plugin_routing_schema),
            ('cliconf'): Any(None, *list_dict_plugin_routing_schema),
            ('connection'): Any(None, *list_dict_plugin_routing_schema),
            ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
            ('filter'): Any(None, *list_dict_plugin_routing_schema),
            ('httpapi'): Any(None, *list_dict_plugin_routing_schema),
            ('inventory'): Any(None, *list_dict_plugin_routing_schema),
            ('lookup'): Any(None, *list_dict_plugin_routing_schema),
            ('module_utils'): Any(None, *list_dict_plugin_routing_schema),
            ('modules'): Any(None, *list_dict_plugin_routing_schema),
            ('netconf'): Any(None, *list_dict_plugin_routing_schema),
            ('shell'): Any(None, *list_dict_plugin_routing_schema),
            ('strategy'): Any(None, *list_dict_plugin_routing_schema),
            ('terminal'): Any(None, *list_dict_plugin_routing_schema),
            ('test'): Any(None, *list_dict_plugin_routing_schema),
            ('vars'): Any(None, *list_dict_plugin_routing_schema),
        },
        extra=PREVENT_EXTRA)

    # import_redirection schema

    import_redirection_schema = Any(
        Schema(
            {
                ('redirect'): Any(*string_types),
                # import_redirect doesn't currently support deprecation
            },
            extra=PREVENT_EXTRA))

    list_dict_import_redirection_schema = [{
        str_type: import_redirection_schema
    } for str_type in string_types]

    # top level schema

    schema = Schema(
        {
            # All of these are optional
            ('plugin_routing'):
            Any(plugin_schema),
            ('import_redirection'):
            Any(None, *list_dict_import_redirection_schema),
            # requires_ansible: In the future we should validate this with SpecifierSet
            ('requires_ansible'):
            Any(*string_types),
            ('action_groups'):
            dict,
        },
        extra=PREVENT_EXTRA)

    # Ensure schema is valid

    try:
        schema(routing)
    except MultipleInvalid as ex:
        for error in ex.errors:
            # No way to get line/column numbers
            print('%s:%d:%d: %s' %
                  (path, 0, 0, humanize_error(routing, error)))
Code example #24
validate_transaction = Schema(
    {
        'account': {
            'user_id': _unicode_or_printable_ascii,
            'username_md5': _md5,
        },
        'billing':
        _address,
        'payment': {
            'processor': _payment_processor,
            'was_authorized': bool,
            'decline_code': _unicode_or_printable_ascii,
        },
        'credit_card': {
            'avs_result': _single_char,
            'bank_name': _unicode_or_printable_ascii,
            'bank_phone_country_code': _telephone_country_code,
            'bank_phone_number': _unicode_or_printable_ascii,
            'cvv_result': _single_char,
            'issuer_id_number': _iin,
            'last_4_digits': _credit_card_last_4,
            'token': _credit_card_token,
        },
        'custom_inputs': {
            _custom_input_key: _custom_input_value
        },
        Required('device'): {
            'accept_language': _unicode_or_printable_ascii,
            Required('ip_address'): _ip_address,
            'session_age': All(_any_number, Range(min=0)),
            'session_id': _unicode_or_printable_ascii,
            'user_agent': _unicode_or_printable_ascii,
        },
        'email': {
            'address': _email_or_md5,
            'domain': _hostname,
        },
        'event': {
            'shop_id': _unicode_or_printable_ascii,
            'time': _rfc3339_datetime,
            'type': _event_type,
            'transaction_id': _unicode_or_printable_ascii,
        },
        'order': {
            'affiliate_id': _unicode_or_printable_ascii,
            'amount': _price,
            'currency': _currency_code,
            'discount_code': _unicode_or_printable_ascii,
            'has_gift_message': bool,
            'is_gift': bool,
            'referrer_uri': _uri,
            'subaffiliate_id': _unicode_or_printable_ascii,
        },
        'shipping':
        _shipping_address,
        'shopping_cart': [
            {
                'category': _unicode_or_printable_ascii,
                'item_id': _unicode_or_printable_ascii,
                'price': _price,
                'quantity': All(int, Range(min=1)),
            },
        ],
    }, )
Code example #25
File: organisation.py Project: openpermissions/perch
    @coroutine
    def can_approve(self, user, **data):
        """
        Only sys admins, or a reseller sending pre_verified=true, can approve an organisation
        :param user: a User
        :param data: data that the user wants to update
        """
        is_admin = user.is_admin()
        is_reseller_preverifying = user.is_reseller() and data.get('pre_verified', False)
        raise Return(is_admin or is_reseller_preverifying)


all_permission_schema = Schema({
    'type': 'all',
    'permission': In(PERMISSIONS),
    'value': None
}, required=True)
organisation_permission_schema = all_permission_schema.extend({
    'type': 'organisation_id',
    'permission': In(PERMISSIONS),
    'value': unicode
})
service_type_permission_schema = all_permission_schema.extend({
    'type': 'service_type',
    'permission': In(PERMISSIONS),
    'value': In(SERVICE_TYPES)
})
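
# Hedged note (not in the original): all_permission_schema was built with
# required=True, and extend() preserves that setting unless overridden (see
# code example #4), so 'type', 'permission' and 'value' are mandatory in both
# extended schemas as well.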


def group_permissions(permissions):
Code example #26
File: __init__.py Project: ricardokirkner/betty
class Nginx(ConfigurableExtension, AppAwareFactory, Generator, ServerProvider):
    configuration_schema: Schema = Schema({
        'www_directory_path': str,
        'https': bool,
    })

    def __init__(self, app: App, www_directory_path: Optional[str] = None, https: Optional[bool] = None):
        self._https = https
        self._www_directory_path = www_directory_path
        self._app = app

    @classmethod
    def validate_configuration(cls, configuration: Optional[Dict]) -> Dict:
        try:
            return cls.configuration_schema(configuration)
        except Invalid as e:
            raise ConfigurationValueError(e)

    @classmethod
    def new_for_app(cls, app: App, *args, **kwargs):
        kwargs.setdefault('www_directory_path', app.configuration.www_directory_path)
        kwargs.setdefault('https', None)
        return cls(app, *args, **kwargs)

    @property
    def servers(self) -> Iterable[Server]:
        from betty.extension.nginx.serve import DockerizedNginxServer

        if DockerizedNginxServer.is_available():
            return [DockerizedNginxServer(self._app)]
        return []

    async def generate(self) -> None:
        await self.generate_configuration_file()
        await self._generate_dockerfile_file()

    @property
    def assets_directory_path(self) -> Optional[str]:
        return '%s/assets' % path.dirname(__file__)

    @property
    def https(self) -> bool:
        if self._https is None:
            return self._app.configuration.base_url.startswith('https')
        return self._https

    @property
    def www_directory_path(self) -> str:
        if self._www_directory_path is None:
            return self._app.configuration.www_directory_path
        return self._www_directory_path

    async def generate_configuration_file(self, destination_file_path: Optional[str] = None, **kwargs) -> None:
        kwargs = dict({
            'content_negotiation': self._app.configuration.content_negotiation,
            'https': self._app.extensions[Nginx].https,
            'locale': self._app.locale,
            'locales': self._app.configuration.locales,
            'multilingual': self._app.configuration.multilingual,
            'server_name': urlparse(self._app.configuration.base_url).netloc,
            'www_directory_path': self._app.extensions[Nginx].www_directory_path,
        }, **kwargs)
        if destination_file_path is None:
            destination_file_path = path.join(self._app.configuration.output_directory_path, 'nginx', 'nginx.conf')
        await generate_configuration_file(destination_file_path, self._app.jinja2_environment, **kwargs)

    async def _generate_dockerfile_file(self) -> None:
        await generate_dockerfile_file(path.join(self._app.configuration.output_directory_path, 'nginx', 'docker', 'Dockerfile'))
Code example #27
File: jira.py Project: bradleybluebean/padre
class UnplannedHandler(handler.TriggeredHandler):
    """Creates a unplanned issue + associates it to an active sprint."""

    # Because the client library fetches things over and over
    # and things we know to be the same aren't changing a lot/ever...
    #
    # Size of these was picked somewhat arbitrarily but should be fine...
    cache = munch.Munch({
        'projects': LRUCache(maxsize=100),
        'boards': LRUCache(maxsize=100),
    })
    required_clients = ('jira', )
    config_section = 'jira'
    handles_what = {
        'message_matcher':
        matchers.match_or(matchers.match_slack("message"),
                          matchers.match_telnet("message")),
        'channel_matcher':
        matchers.match_channel(c.TARGETED),
        'triggers': [
            trigger.Trigger('jira unplanned', takes_args=True),
        ],
        'args': {
            'order': [
                'summary',
                'time_taken',
                'was_resolved',
                'project',
                'board',
            ],
            'converters': {
                'time_taken': _convert_time_taken,
                'was_resolved': hu.strict_bool_from_string,
            },
            'schema':
            Schema({
                Required("summary"): All(scu.string_types(), Length(min=1)),
                Required("project"): All(scu.string_types(), Length(min=1)),
                Required("board"): All(scu.string_types(), Length(min=1)),
                Required("time_taken"): int,
                Required("was_resolved"): bool,
            }),
            'help': {
                'summary':
                "short summary of the unplanned work",
                'board':
                'board to locate sprint to'
                ' drop newly created issue in (must exist)',
                'time_taken': ('time taken on unplanned'
                               ' work (ie 30 seconds, 5 minutes,'
                               ' 1 hour, 1 day...)'),
                'project':
                'project to create task in (must exist)',
                'was_resolved':
                'mark the newly created issue as resolved',
            },
            'defaults': {
                'project': 'CAA',
                'board': 'CAA board',
                'time_taken': "1 hour",
                "was_resolved": True,
            },
        },
        'authorizer':
        auth.user_in_ldap_groups('admins_cloud'),
    }

    @staticmethod
    def _find_and_cache(fetcher_func, match_func, cache_target, cache_key):
        if cache_key and cache_key in cache_target:
            return cache_target[cache_key]
        offset = 0
        result = None
        found = False
        while not found:
            items = fetcher_func(start_at=offset)
            if not items:
                break
            else:
                for item in items:
                    if match_func(item):
                        result = item
                        found = True
                        break
                if not found:
                    offset = offset + len(items) + 1
        if found and cache_key:
            cache_target[cache_key] = result
        return result

    @classmethod
    def _find_project(cls, jac, project):
        def match_func(p):
            return (p.name.lower() == project.lower()
                    or p.key.lower() == project.lower() or p.id == project)

        def fetcher_func(all_projects, start_at):
            return all_projects[start_at:]

        return cls._find_and_cache(
            functools.partial(fetcher_func, jac.projects()), match_func,
            cls.cache.projects, project)

    @classmethod
    def _find_board(cls, jac, board, type='scrum'):
        def match_func(b):
            return (b.name.lower() == board.lower() or b.id == board)

        def fetcher_func(start_at):
            return jac.boards(type=type, startAt=start_at)

        return cls._find_and_cache(fetcher_func, match_func, cls.cache.boards,
                                   ":".join([board, type]))

    @classmethod
    def _find_sprint(cls, jac, board, board_name, ok_states):
        def match_func(s):
            return s.state.lower() in ok_states

        def fetcher_func(start_at):
            return jac.sprints(board.id, startAt=start_at)

        # We don't want to cache anything, since we expect sprints to
        # actually become active/inactive quite a bit...
        return cls._find_and_cache(fetcher_func, match_func, {}, None)

    @staticmethod
    def _create_issue(jac,
                      project,
                      secs_taken,
                      summary,
                      user_name,
                      channel_name='',
                      quick_link=None):
        mins_taken = secs_taken / 60.0
        hours_taken = mins_taken / 60.0
        days_taken = hours_taken / 24.0
        time_taken_pieces = [
            "%0.2f days" % (days_taken),
            "%0.2f hours" % (hours_taken),
            "%0.2f minutes" % (mins_taken),
            "%s seconds" % (secs_taken),
        ]
        time_taken_text = " or ".join(time_taken_pieces)
        new_issue_description_lines = [
            ("User @%s spent %s doing"
             " unplanned work.") % (user_name, time_taken_text),
        ]
        if channel_name:
            new_issue_description_lines.extend([
                "",
                "In channel: #%s" % channel_name,
            ])
        if quick_link:
            new_issue_description_lines.extend([
                "",
                "Reference: %s" % quick_link,
            ])
        new_issue_fields = {
            'summary': summary,
            'issuetype': {
                'name': 'Task',
            },
            'components': [{
                'name': "Unplanned"
            }],
            'assignee': {
                'name': user_name,
            },
            'project': project.id,
            'description': "\n".join(new_issue_description_lines),
        }
        new_issue = jac.create_issue(fields=new_issue_fields)
        new_issue_link = "<%s|%s>" % (new_issue.permalink(), new_issue.key)
        return (new_issue, new_issue_link)

    def _run(self, summary, time_taken, was_resolved, project, board):
        # Load and validate stuff (before doing work...)
        jac = self.bot.clients.jira_client
        replier = functools.partial(self.message.reply_text,
                                    threaded=True,
                                    prefixed=False)
        # This one is used here because it appears that the RTM one isn't
        # processing/sending links correctly (did it ever, but this one
        # does handle links right, so ya...)
        reply_attachments = functools.partial(
            self.message.reply_attachments,
            log=LOG,
            link_names=True,
            as_user=True,
            thread_ts=self.message.body.ts,
            channel=self.message.body.channel,
            unfurl_links=False)
        j_project = self._find_project(jac, project)
        if not j_project:
            raise excp.NotFound("Unable to find project '%s'" % (project))
        j_board = self._find_board(jac, board)
        if not j_board:
            raise excp.NotFound("Unable to find board '%s'" % (board))
        # Create it in that project...
        replier("Creating unplanned issue"
                " in project `%s`, please wait..." % (project))
        new_issue, new_issue_link = self._create_issue(
            jac,
            j_project,
            time_taken,
            summary,
            self.message.body.user_name,
            channel_name=self.message.body.get('channel_name'),
            quick_link=self.message.body.get('quick_link'))
        reply_attachments(attachments=[{
            'pretext': ("Created unplanned"
                        " issue %s.") % (new_issue_link),
            'mrkdwn_in': ['pretext'],
        }])
        # Find and bind it to currently active sprint (if any)...
        j_sprint = self._find_sprint(jac, j_board, board, ['active'])
        if j_sprint:
            reply_attachments(attachments=[{
                'pretext': ("Binding %s to active sprint `%s`"
                            " of board `%s`." %
                            (new_issue_link, j_sprint.name, board)),
                'mrkdwn_in': ['pretext'],
            }])
            jac.add_issues_to_sprint(j_sprint.id, [new_issue.key])
            reply_attachments(attachments=[{
                'pretext': ("Bound %s to active sprint `%s`"
                            " of board `%s`." %
                            (new_issue_link, j_sprint.name, board)),
                'mrkdwn_in': ['pretext'],
            }])
        else:
            replier("No active sprint found"
                    " in board `%s`, sprint binding skipped." % (board))
        # Mark it as done...
        if was_resolved:
            transition = None
            possible_transitions = set()
            for t in jac.transitions(new_issue.id):
                t_name = t.get('name', '')
                t_name = t_name.lower()
                if t_name in _RESOLVED_TRANSITIONS:
                    transition = t
                if t_name:
                    possible_transitions.add(t_name)
            if not transition:
                possible_transitions = sorted(possible_transitions)
                possible_transitions = " or ".join(
                    ["`%s`" % t.upper() for t in possible_transitions])
                ok_transitions = sorted(_RESOLVED_TRANSITIONS)
                ok_transitions = " or ".join(
                    ["`%s`" % t.upper() for t in ok_transitions])
                reply_attachments(attachments=[{
                    'pretext': ("Unable to resolve %s, could not find"
                                " issues %s"
                                " state transition!") %
                    (new_issue_link, ok_transitions),
                    'mrkdwn_in': ['pretext', 'text'],
                    "text": ("Allowable state"
                             " transitions: %s" % possible_transitions),
                }])
            else:
                reply_attachments(attachments=[{
                    'pretext': ("Transitioning %s issue to resolved, "
                                "please wait...") % (new_issue_link),
                    'mrkdwn_in': ['pretext'],
                }])
                jac.transition_issue(new_issue.id,
                                     transition['id'],
                                     comment="All done! kthxbye")
                replier("Transitioned.")
        replier = self.message.reply_text
        replier("Thanks for tracking your unplanned work!",
                prefixed=True,
                threaded=True)
Code example #28
            tuple([Any(list, tuple)]),
        ),
    )


ansible_module_kwargs_schema = Schema({
    'argument_spec':
    dict,
    'bypass_checks':
    bool,
    'no_log':
    bool,
    'check_invalid_arguments':
    Any(None, bool),
    'mutually_exclusive':
    sequence_of_sequences(min=2),
    'required_together':
    sequence_of_sequences(min=2),
    'required_one_of':
    sequence_of_sequences(min=2),
    'add_file_common_args':
    bool,
    'supports_check_mode':
    bool,
    'required_if':
    sequence_of_sequences(min=3),
})

suboption_schema = Schema(
    {
        Required('description'):
        Any(list_string_types, *string_types),
Code example #29
    def __init__(self, paths):
        self.configfiles = self._list_config_files(paths)

        configparser = RawConfigParser()
        config_tmp = configparser.read(self.configfiles)
        self.conf = dict()
        for section in configparser.sections():
            self.conf[section] = dict(configparser.items(section))

        #self.conf = ConfigObj(self.configfile, interpolation=False)

        self.schemas = {
            "storage":
            Schema({
                Optional("storage_path"): All(str),
                Optional("storage_type"): Any('yaml', 'json', 'pickle'),
            }),
            "cloud":
            Schema(
                {
                    "provider": Any('ec2_boto', 'google', 'openstack'),
                    "ec2_url": Url(str),
                    Optional("ec2_access_key"): All(str, Length(min=1)),
                    Optional("ec2_secret_key"): All(str, Length(min=1)),
                    "ec2_region": All(str, Length(min=1)),
                    "auth_url": All(str, Length(min=1)),
                    "username": All(str, Length(min=1)),
                    "password": All(str, Length(min=1)),
                    "tenant_name": All(str, Length(min=1)),
                    Optional("region_name"): All(str, Length(min=1)),
                    "gce_project_id": All(str, Length(min=1)),
                    "gce_client_id": All(str, Length(min=1)),
                    "gce_client_secret": All(str, Length(min=1)),
                    "nova_client_api": nova_api_version()
                },
                extra=True),
            "cluster":
            Schema(
                {
                    "cloud": All(str, Length(min=1)),
                    "setup_provider": All(str, Length(min=1)),
                    "login": All(str, Length(min=1)),
                },
                required=True,
                extra=True),
            "setup":
            Schema({
                "provider": All(str, Length(min=1)),
            },
                   required=True,
                   extra=True),
            "login":
            Schema(
                {
                    "image_user": All(str, Length(min=1)),
                    "image_user_sudo": All(str, Length(min=1)),
                    "image_sudo": Boolean(str),
                    "user_key_name": All(str, Length(min=1)),
                    "user_key_private": can_read_file(),
                    "user_key_public": can_read_file()
                },
                required=True)
        }
Code example #30
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
Define some basic data shape schema on Python dictionaries parsed
from Yaml.

"""

from voluptuous import Coerce
from voluptuous import Optional
from voluptuous import Schema

SBP_EXTENSION = "/*.yaml"

filename = Schema(str)
identifier = Schema(str)
description = Schema(str)
include = Schema([filename])
bit = Schema(int)
type_identifier = Schema(str)
sbp_identifier = Schema(int)
units = Coerce(str)
bitmask = Coerce(str)
size = Schema(int)

bitfield = Schema([{
    Optional(bitmask): {
        Optional('units'): units,
        Optional('values'): [{
            bit: description
Code example #31
def test_in():
    """Verify that In works."""
    schema = Schema({"color": In(frozenset(["blue", "red", "yellow"]))})
    schema({"color": "blue"})
Code example #32
task_description_schema = Schema({
    # the label for this task
    Required('label'):
    basestring,

    # description of the task (for metadata)
    Required('description'):
    basestring,

    # attributes for this task
    Optional('attributes'): {
        basestring: object
    },

    # dependencies of this task, keyed by name; these are passed through
    # verbatim and subject to the interpretation of the Task's get_dependencies
    # method.
    Optional('dependencies'): {
        basestring: object
    },

    # expiration and deadline times, relative to task creation, with units
    # (e.g., "14 days").  Defaults are set based on the project.
    Optional('expires-after'):
    basestring,
    Optional('deadline-after'):
    basestring,

    # custom routes for this task; the default treeherder routes will be added
    # automatically
    Optional('routes'): [basestring],

    # custom scopes for this task; any scopes required for the worker will be
    # added automatically
    Optional('scopes'): [basestring],

    # custom "task.extra" content
    Optional('extra'): {
        basestring: object
    },

    # treeherder-related information; see
    # https://schemas.taskcluster.net/taskcluster-treeherder/v1/task-treeherder-config.json
    # If not specified, no treeherder extra information or routes will be
    # added to the task
    Optional('treeherder'): {
        # either a bare symbol, or "grp(sym)".
        'symbol':
        basestring,

        # the job kind
        'kind':
        Any('build', 'test', 'other'),

        # tier for this task
        'tier':
        int,

        # task platform, in the form platform/collection, used to set
        # treeherder.machine.platform and treeherder.collection or
        # treeherder.labels
        'platform':
        basestring,

        # treeherder environments (defaults to both staging and production)
        Required('environments', default=['production', 'staging']):
        ['production', 'staging'],
    },

    # information for indexing this build so its artifacts can be discovered;
    # if omitted, the build will not be indexed.
    Optional('index'): {
        # the name of the product this build produces
        'product':
        Any('firefox', 'mobile'),

        # the names to use for this job in the TaskCluster index
        'job-name':
        Any(
            # Assuming the job is named "normally", this is the v2 job name,
            # and the v1 and buildbot routes will be determined appropriately.
            basestring,

            # otherwise, give separate names for each of the legacy index
            # routes; if a name is omitted, no corresponding route will be
            # created.
            {
                # the name as it appears in buildbot routes
                Optional('buildbot'): basestring,
                Required('gecko-v2'): basestring,
            }),

        # The rank that the task will receive in the TaskCluster
    # index.  A newly completed task supersedes the currently
        # indexed task iff it has a higher rank.  If unspecified,
        # 'by-tier' behavior will be used.
        'rank':
        Any(
            # Rank is equal the timestamp of the build_date for tier-1
            # tasks, and zero for non-tier-1.  This sorts tier-{2,3}
            # builds below tier-1 in the index.
            'by-tier',

            # Rank is given as an integer constant (e.g. zero to make
            # sure a task is last in the index).
            int,

            # Rank is equal to the timestamp of the build_date.  This
            # option can be used to override the 'by-tier' behavior
            # for non-tier-1 tasks.
            'build_date',
        ),
    },

    # The `run_on_projects` attribute, defaulting to "all".  This dictates the
    # projects on which this task should be included in the target task set.
    # See the attributes documentation for details.
    Optional('run-on-projects'): [basestring],

    # If the task can be coalesced, this is the name used in the coalesce key;
    # the project, etc. will be added automatically.  Note that try (level 1)
    # tasks are never coalesced
    Optional('coalesce-name'):
    basestring,

    # the provisioner-id/worker-type for the task.  The following parameters will
    # be substituted in this string:
    #  {level} -- the scm level of this push
    'worker-type':
    basestring,

    # information specific to the worker implementation that will run this task
    'worker':
    Any(
        {
            Required('implementation'):
            Any('docker-worker', 'docker-engine'),

            # For tasks that will run in docker-worker or docker-engine, this is the
            # name of the docker image or in-tree docker image to run the task in.  If
            # in-tree, then a dependency will be created automatically.  This is
            # generally `desktop-test`, or an image that acts an awful lot like it.
            Required('docker-image'):
            Any(
                # a raw Docker image path (repo/image:tag)
                basestring,
                # an in-tree generated docker image (from `testing/docker/<name>`)
                {'in-tree': basestring}),

            # worker features that should be enabled
            Required('relengapi-proxy', default=False):
            bool,
            Required('chain-of-trust', default=False):
            bool,
            Required('taskcluster-proxy', default=False):
            bool,
            Required('allow-ptrace', default=False):
            bool,
            Required('loopback-video', default=False):
            bool,
            Required('loopback-audio', default=False):
            bool,

            # caches to set up for the task
            Optional('caches'): [{
                # only one type is supported by any of the workers right now
                'type': 'persistent',

                # name of the cache, allowing re-use by subsequent tasks naming the
                # same cache
                'name': basestring,

                # location in the task image where the cache will be mounted
                'mount-point': basestring,
            }],

            # artifacts to extract from the task image after completion
            Optional('artifacts'): [{
                # type of artifact -- simple file, or recursive directory
                'type': Any('file', 'directory'),

                # task image path from which to read artifact
                'path': basestring,

                # name of the produced artifact (root of the names for
                # type=directory)
                'name': basestring,
            }],

            # environment variables
            Required('env', default={}): {
                basestring: taskref_or_string
            },

            # the command to run
            'command': [taskref_or_string],

            # the maximum time to run, in seconds
            'max-run-time':
            int,

            # the exit status code that indicates the task should be retried
            Optional('retry-exit-status'):
            int,
        },
        {
            Required('implementation'):
            'generic-worker',

            # command is a list of commands to run, sequentially
            'command': [taskref_or_string],

            # artifacts to extract from the task image after completion; note that artifacts
            # for the generic worker cannot have names
            Optional('artifacts'): [{
                # type of artifact -- simple file, or recursive directory
                'type': Any('file', 'directory'),

                # task image path from which to read artifact
                'path': basestring,
            }],

            # directories and/or files to be mounted
            Optional('mounts'): [{
                # a unique name for the cache volume
                'cache-name': basestring,

                # task image path for the cache
                'path': basestring,
            }],

            # environment variables
            Required('env', default={}): {
                basestring: taskref_or_string
            },

            # the maximum time to run, in seconds
            'max-run-time':
            int,

            # os user groups for test task workers
            Optional('os-groups', default=[]): [basestring],
        },
        {
            Required('implementation'): 'buildbot-bridge',

            # see
            # https://github.com/mozilla/buildbot-bridge/blob/master/bbb/schemas/payload.yml
            'buildername': basestring,
            'sourcestamp': {
                'branch': basestring,
                Optional('revision'): basestring,
                Optional('repository'): basestring,
                Optional('project'): basestring,
            },
            'properties': {
                'product': basestring,
                Extra: basestring,  # additional properties are allowed
            },
        },
        {
            Required('implementation'):
            'macosx-engine',

            # A link for an executable to download
            Optional('link'):
            basestring,

            # the command to run
            Required('command'): [taskref_or_string],

            # environment variables
            Optional('env'): {
                basestring: taskref_or_string
            },

            # artifacts to extract from the task image after completion
            Optional('artifacts'): [{
                # type of artifact -- simple file, or recursive directory
                Required('type'):
                Any('file', 'directory'),

                # task image path from which to read artifact
                Required('path'):
                basestring,

                # name of the produced artifact (root of the names for
                # type=directory)
                Required('name'):
                basestring,
            }],
        }),

    # The "when" section contains descriptions of the circumstances
    # under which this task can be "optimized", that is, left out of the
    # task graph because it is unnecessary.
    Optional('when'):
    Any({
        # This task only needs to be run if a file matching one of the given
        # patterns has changed in the push.  The patterns use the mozpack
        # match function (python/mozbuild/mozpack/path.py).
        Optional('files-changed'): [basestring],
    }),
})
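
As a quick illustration of how a schema of this shape is used, here is a minimal, self-contained sketch that validates a docker-worker style payload. The shape of `taskref_or_string` is an assumption based on its usage above (either a plain string or a `{'task-reference': ...}` dict), `str` stands in for Python 2's `basestring`, and the task dict itself is purely illustrative.

from voluptuous import Any, Required, Schema

# Assumed shape of taskref_or_string (its definition is not shown in this excerpt).
taskref_or_string = Any(str, {'task-reference': str})

worker_schema = Schema({
    Required('implementation'): 'docker-worker',
    Required('docker-image'): Any(str, {'in-tree': str}),
    Required('env', default={}): {str: taskref_or_string},
    'command': [taskref_or_string],
    'max-run-time': int,
})

task = {
    'implementation': 'docker-worker',
    'docker-image': {'in-tree': 'desktop-test'},
    'command': [{'task-reference': './mach test'}],
    'max-run-time': 3600,
}
print(worker_schema(task)['env'])  # {} -- the Required default is filled in
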
Code example #33
    def test_generator_signing_balrog_tasks(self):
        for p in ("win32", "macosx64"):
            for v, appV in (("38.0build1", "38.0"), ("37.0build2", "37.0")):
                generator = get_task_by_name(self.graph, "{}_en-US_{}_funsize_update_generator".format(p, v))
                signing = get_task_by_name(self.graph, "{}_en-US_{}_funsize_signing_task".format(p, v))
                balrog = get_task_by_name(self.graph, "{}_en-US_{}_funsize_balrog_task".format(p, v))

                generator_schema = Schema({
                    'requires': [self.generator_image['taskId']],
                    'task': {
                        'metadata': {
                            'name': "[funsize] Update generating task %s %s for %s" % (p, "en-US", v.split('build')[0],)
                        }
                    }
                }, extra=True, required=True)

                signing_schema = Schema({
                    'requires': [generator['taskId']],
                    'task': {
                        'metadata': {
                            'name': "[funsize] MAR signing task %s %s for %s" % (p, "en-US", v.split('build')[0],),
                        },
                        'payload': {
                            'signingManifest': "https://queue.taskcluster.net/v1/task/%s/artifacts/public/env/manifest.json" % generator["taskId"],
                        },
                        'scopes': [
                            "project:releng:signing:cert:release-signing",
                            "project:releng:signing:format:mar",
                            "project:releng:signing:format:gpg",
                        ],
                    },
                }, extra=True, required=True)

                balrog_schema = Schema({
                    'requires': [signing['taskId'], self.funsize_balrog_image['taskId']],
                    'task': {
                        'scopes': ["docker-worker:feature:balrogVPNProxy"],
                        'metadata': {
                            'name': "[funsize] Publish to Balrog %s %s for %s" % (p, "en-US", v.split('build')[0],),
                        }
                    }
                }, extra=True, required=True)

                if p == "win32":
                    generator_schema = generator_schema.extend({
                        'task': {
                            'extra': {
                                'funsize': {
                                    'partials': [
                                        {
                                            'from_mar': "http://download.mozilla.org/?product=firefox-%s-complete&os=win&lang=en-US" % appV,
                                            'to_mar': "https://queue.taskcluster.net/v1/task/xyy/artifacts/public/build/firefox-42.0.en-US.win32.complete.mar",
                                        }
                                    ]
                                }
                            }
                        }
                    })

                elif p == "macosx64":
                    generator_schema = generator_schema.extend({
                        'task': {
                            'extra': {
                                'funsize': {
                                    'partials': [
                                        {
                                            'from_mar': "http://download.mozilla.org/?product=firefox-%s-complete&os=osx&lang=en-US" % appV,
                                            'to_mar': "https://queue.taskcluster.net/v1/task/xyz/artifacts/public/build/firefox-42.0.en-US.mac.complete.mar",
                                        }
                                    ]
                                }
                            }
                        }
                    })

                verify(generator, generator_schema, TestEnUSPartials.generator_not_allowed)
                verify(balrog, balrog_schema)
                verify(signing, signing_schema)
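
The `verify` helper used here is not shown in this excerpt; a plausible sketch of what it does, inferred from the call sites above (the real helper lives in the release-automation test utilities and may differ in detail):

def verify(task, schema, *not_allowed_checks):
    """Validate a task graph entry against a voluptuous Schema, then run
    optional callables that assert forbidden payload keys are absent."""
    schema(task)  # raises voluptuous.MultipleInvalid on mismatch
    for check in not_allowed_checks:
        check(task)
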
Code example #34
File: validations.py Project: macndesign/mont_scrap
            date = parse_datetime(value) or parse_date(value)
            if date is not None:
                return date
            else:
                raise ValueError
        except ValueError:
            raise Invalid('<{0}> is not a valid datetime.'.format(value))
    return fn


base_query_param_schema = Schema(
    {
        'q': str,
        'name': str,
        'offset': IntegerLike(),
        'limit': IntegerLike(),
        'install_ts': DatetimeWithTZ(),
        'update_ts': DatetimeWithTZ()
    },
    extra=ALLOW_EXTRA
)


company_query_schema = base_query_param_schema.extend(
    {
        "id": IntegerLike(),
        "name": str,
        "description": str,
        "auction_id": CSVofIntegers(),  # /?team_id=1,2,3
    }
)
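
For context, here are plausible stand-ins for the custom validators used above, plus a sample validation. Their real definitions live elsewhere in this module, so the bodies below are assumptions about the intended behaviour.

from voluptuous import ALLOW_EXTRA, Invalid, Schema

def IntegerLike():
    # Assumed behaviour: accept ints and numeric strings, normalising to int.
    def fn(value):
        if isinstance(value, int) or (isinstance(value, str) and value.isdigit()):
            return int(value)
        raise Invalid('<{0}> is not integer-like.'.format(value))
    return fn

def CSVofIntegers():
    # Assumed behaviour: parse "1,2,3" into [1, 2, 3].
    def fn(value):
        try:
            return [int(v) for v in str(value).split(',')]
        except ValueError:
            raise Invalid('<{0}> is not a CSV of integers.'.format(value))
    return fn

schema = Schema({'id': IntegerLike(), 'auction_id': CSVofIntegers()},
                extra=ALLOW_EXTRA)
print(schema({'id': '42', 'auction_id': '1,2,3', 'q': 'acme'}))
# -> {'id': 42, 'auction_id': [1, 2, 3], 'q': 'acme'}
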
Code example #35
File: validation.py Project: mozaiques/zombase
 def __init__(self, schema, required=False, extra=False, not_none=False):
     if not isinstance(schema, dict):
         raise ValueError('This special Schema is intended to be used with '
                          'dicts only.')
     Schema.__init__(self, schema, required, extra)
     self._not_none = not_none if not_none is not False else ()
Code example #36
File: user.py Project: 5l1v3r1/NetSOS-CTF-Platform
_check_email_format = lambda email: re.match(r".+@.+\..{2,}", email) is not None

user_schema = Schema({
    Required('email'): check(
        ("Email must be between 5 and 50 characters.", [str, Length(min=5, max=50)]),
        ("Your email does not look like an email address.", [_check_email_format])
    ),
    Required('firstname'): check(
        ("First Name must be between 1 and 50 characters.", [str, Length(min=1, max=50)])
    ),
    Required('lastname'): check(
        ("Last Name must be between 1 and 50 characters.", [str, Length(min=1, max=50)])
    ),
    Required('country'): check(
        ("Please select a country", [str, Length(min=2, max=2)])
    ),
    Required('username'): check(
        ("Usernames must be between 3 and 20 characters.", [str, Length(min=3, max=20)]),
        ("This username already exists.", [
            lambda name: safe_fail(get_user, name=name) is None])
    ),
    Required('password'): check(
        ("Passwords must be between 3 and 20 characters.", [str, Length(min=3, max=20)])
    ),
    Required('background'): check(
        ("You must provide your background!", [str, Length(min=3, max=20)])
    )
}, extra=True)
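
The `check` and `safe_fail` helpers are defined elsewhere in this platform. A minimal sketch of how `check` could work, inferred from the call sites above (each `(message, validators)` pair raises its message when a validator fails); this is a hypothetical reconstruction, not the platform's actual code:

from voluptuous import Invalid

def check(*msg_and_validators):
    def fn(value):
        for message, validators in msg_and_validators:
            for validator in validators:
                try:
                    # Types mean isinstance checks; other validators are applied.
                    ok = (isinstance(value, validator)
                          if isinstance(validator, type) else validator(value))
                except Exception:
                    raise Invalid(message)
                if ok is False:
                    raise Invalid(message)
        return value
    return fn
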

new_eligible_team_schema = Schema({
    Required('team-name-new'): check(
Code example #37
    return "darknet"


LABELS_SCHEMA = Schema([
    All(
        deprecated("triggers_recording", replacement="trigger_recorder"),
        {
            Required("label"):
            str,
            Optional("confidence", default=0.8):
            All(Any(0, 1, All(float, Range(min=0.0, max=1.0))), Coerce(float)),
            Optional("height_min", default=0.0):
            All(Any(0, 1, All(float, Range(min=0.0, max=1.0))), Coerce(float)),
            Optional("height_max", default=1.0):
            All(Any(0, 1, All(float, Range(min=0.0, max=1.0))), Coerce(float)),
            Optional("width_min", default=0.0):
            All(Any(0, 1, All(float, Range(min=0.0, max=1.0))), Coerce(float)),
            Optional("width_max", default=1.0):
            All(Any(0, 1, All(float, Range(min=0.0, max=1.0))), Coerce(float)),
            Optional("trigger_recorder", default=True):
            bool,
            Optional("require_motion", default=False):
            bool,
            Optional("post_processor", default=None):
            Any(str, None),
        },
        ensure_min_max,
    )
])
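
The `deprecated` and `ensure_min_max` helpers come from this project's config machinery and are not shown here. As a sketch of what the final `ensure_min_max` step plausibly enforces (an assumption based on the min/max keys above):

from voluptuous import Invalid

def ensure_min_max(label):
    # Reject configs where a lower bound exceeds its upper bound.
    for bound in ('height', 'width'):
        if label['{}_min'.format(bound)] > label['{}_max'.format(bound)]:
            raise Invalid('{0}_min may not be larger than {0}_max'.format(bound))
    return label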


class LabelConfig:
Code example #38
from voluptuous import Schema, Email, Coerce, Inclusive, In

from common.enums import EmailResult

contact_schema = Schema(
    {"name": str, "email": Email(), "first_name": str, "last_name": str}, required=True
)

segment_schema = Schema({"name": str}, required=True)

template_schema = Schema({"name": str, "template": str}, required=True)

segment_contact_join_schema = Schema({"segment_id": Coerce(int)}, required=True)

settings_schema = Schema(
    {
        Inclusive("name", "from"): str,
        Inclusive("email", "from"): Email(),
        Inclusive("user", "auth"): str,
        Inclusive("password", "auth"): str,
    }
)
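
Inclusive groups the "from" pair (name/email) and the "auth" pair (user/password): each group must be supplied together or not at all. A quick illustration using the schema above:

from voluptuous import MultipleInvalid

settings_schema({})                              # valid: neither group given
settings_schema({'user': 'u', 'password': 'p'})  # valid: a complete group
try:
    settings_schema({'name': 'Mailer'})          # its 'email' partner is missing
except MultipleInvalid as exc:
    print(exc)  # complains about the incomplete 'from' inclusion group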


campaign_schema = Schema(
    {"segment_id": Coerce(int), "template_id": Coerce(int), "subject": str},
    required=True,
)

update_status_schema = Schema(
    {
Code example #39
import six.moves.urllib.parse as urlparse
from voluptuous import All
from voluptuous import Any
from voluptuous import Length
from voluptuous import Marker
from voluptuous import Required
from voluptuous import Schema

from monasca_api.v2.common.schemas import exceptions

LOG = log.getLogger(__name__)

schemes = ['http', 'https']

notification_schema = {
    Required('name'): Schema(All(Any(str, unicode), Length(max=250))),
    Required('type'): Schema(Any(str, unicode)),
    Required('address'): Schema(All(Any(str, unicode), Length(max=512))),
    Marker('period'): All(Any(int, str))}

request_body_schema = Schema(Any(notification_schema))
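
For reference, a request body that passes this schema might look like the following sketch (the values are illustrative; `unicode` implies this module targets Python 2):

payload = {
    'name': 'ops-email',
    'type': 'EMAIL',
    'address': 'ops@example.com',
    'period': 60,  # optional: Marker('period') does not make it required
}
request_body_schema(payload)  # no exception means the payload is valid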


def parse_and_validate(msg, valid_periods, require_all=False):
    try:
        request_body_schema(msg)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.ValidationException(str(ex))

    if 'period' not in msg:
Code example #40
class UnitPrescription(Unit, metaclass=abc.ABCMeta):
    """A base class for implementing pipeline units based on prescription supplied."""

    # Each prescription unit defines these specifically.
    SHOULD_INCLUDE_CACHE: Dict[str, bool] = {}
    CONFIGURATION_SCHEMA: Schema = Schema({
        Required("package_name"):
        str,
        Required("match"):
        object,
        Required("run"):
        object,
        Required("prescription"):
        Schema({"run": bool}),
    })

    _PRESCRIPTION: Optional[Dict[str, Any]] = None

    _stack_info_run = attr.ib(type=bool, kw_only=True, default=False)
    _configuration = attr.ib(type=Dict[str, Any], kw_only=True)
    prescription = attr.ib(type=Dict[str, Any], kw_only=True)

    @prescription.default
    def _prescription_default(self) -> Dict[str, Any]:
        """Initialize prescription property."""
        if self._PRESCRIPTION is None:
            raise ValueError(
                "No assigned prescription on the class level to be set")

        return self._PRESCRIPTION

    @property
    def run_prescription(self) -> Dict[str, Any]:
        """Get run part of the prescription assigned."""
        return self._configuration.get("run", {})

    @property
    def match_prescription(self) -> Dict[str, Any]:
        """Get match part of the prescription assigned."""
        return self._configuration.get("match", {})

    @_configuration.default
    def _initialize_default_configuration(self) -> Dict[str, Any]:
        """Initialize default unit configuration based on declared class' default configuration."""
        if self._PRESCRIPTION is None:
            raise ValueError(
                "No assigned prescription on the class level to be set")

        return {
            "package_name": None,
            "match": self._PRESCRIPTION.get("match", {}),
            "run": self._PRESCRIPTION.get("run", {}),
            "prescription": {
                "run": False
            },
        }

    @classmethod
    def get_unit_name(cls) -> str:
        """Get the name of the current prescription unit.

        This method is a class method and *MUST NOT* be used when obtaining unit name on an
        instance. As part of the memory optimization we use class to get the current name of
        a prescription unit with assigned prescription. This means that the prescription unit
        instance would have different names reported with this method based on the current
        class context.
        """
        if cls._PRESCRIPTION is None:
            raise ValueError("No prescription defined")

        name: str = cls._PRESCRIPTION["name"]
        return name

    @classmethod
    def set_prescription(cls, prescription: Dict[str, Any]) -> None:
        """Set prescription to the unit."""
        cls._PRESCRIPTION = prescription

    @classmethod
    def _check_symbols(cls, unit_name: str, library_name: str,
                       symbols_expected: List[str],
                       symbols_used: List[str]) -> bool:
        """Check if symbols expected are available given the symbols used."""
        for symbol_expected in symbols_expected:
            for symbol_used in symbols_used:
                if symbol_expected.endswith(".*"):
                    if symbol_used.startswith(
                            symbol_expected[:-2]):  # Discard ending ".*"
                        _LOGGER.debug(
                            "%s: Library symbol %r matching unit requirement on symbol %r for %r",
                            unit_name,
                            symbol_used,
                            symbol_expected,
                            library_name,
                        )
                        break
                elif symbol_used == symbol_expected:
                    _LOGGER.debug(
                        "%s: Library symbol %r matching unit requirement on symbol %r for %r",
                        unit_name,
                        symbol_used,
                        symbol_expected,
                        library_name,
                    )
                    break

            else:
                _LOGGER.debug(
                    "%s: Not registering as library symbol requested %r for %r is not used",
                    unit_name,
                    symbol_expected,
                    library_name,
                )
                return False

        _LOGGER.debug("%s: All library symbols required for %r unit are used",
                      unit_name, library_name)
        return True

    @staticmethod
    def _check_version(version_present: Optional[str],
                       version_spec_declared: Optional[str]) -> bool:
        """Check that version present matches version specification."""
        if version_present is None:
            if version_spec_declared is not None:
                return False
            else:
                return True
        else:
            if version_spec_declared is None:
                return True

            return Version(version_present) in SpecifierSet(
                version_spec_declared)

    @classmethod
    def _should_include_base(
            cls, builder_context: "PipelineBuilderContext") -> bool:
        """Determine if this unit should be included."""
        if cls._PRESCRIPTION is None:
            raise ValueError("No prescription defined")

        should_include_dict = cls._PRESCRIPTION["should_include"]
        unit_name = cls.get_unit_name()

        times = should_include_dict.get(
            "times", 1)  # XXX: We allow values 0 or 1 in the schema described.
        if times == 0 or builder_context.is_included(cls):
            return False

        if not cls._should_include_base_cached(unit_name, builder_context,
                                               should_include_dict):
            # Using pre-cached results based on parts that do not change or first time run.
            return False

        # Dependencies.
        dependencies = should_include_dict.get("dependencies", {})
        for boot_name in dependencies.get("boots", []):
            if boot_name not in builder_context.get_included_boot_names():
                _LOGGER.debug(
                    "%s: Not registering as dependency on boot %r is not satisfied",
                    unit_name, boot_name)
                return False

        for pseudonym_name in dependencies.get("pseudonyms", []):
            if pseudonym_name not in builder_context.get_included_pseudonym_names():
                _LOGGER.debug(
                    "%s: Not registering as dependency on pseudonym %r is not satisfied",
                    unit_name, pseudonym_name)
                return False

        for sieve_name in dependencies.get("sieves", []):
            if sieve_name not in builder_context.get_included_sieve_names():
                _LOGGER.debug(
                    "%s: Not registering as dependency on sieve %r is not satisfied",
                    unit_name, sieve_name)
                return False

        for step_name in dependencies.get("steps", []):
            if step_name not in builder_context.get_included_step_names():
                _LOGGER.debug(
                    "%s: Not registering as dependency on step %r is not satisfied",
                    unit_name, step_name)
                return False

        for stride_name in dependencies.get("strides", []):
            if stride_name not in builder_context.get_included_stride_names():
                _LOGGER.debug(
                    "%s: Not registering as dependency on stride %r is not satisfied",
                    unit_name, stride_name)
                return False

        for wrap_name in dependencies.get("wraps", []):
            if wrap_name not in builder_context.get_included_wrap_names():
                _LOGGER.debug(
                    "%s: Not registering as dependency on stride %r is not satisfied",
                    unit_name, wrap_name)
                return False

        return True

    if TYPE_CHECKING:
        SHOULD_INCLUDE_FUNC_TYPE = Callable[[
            Type["UnitPrescription"], str, "PipelineBuilderContext", Dict[str,
                                                                          Any]
        ], bool]

    @classmethod
    @should_include_cache
    def _should_include_base_cached(
            cls, unit_name: str, builder_context: "PipelineBuilderContext",
            should_include_dict: Dict[str, Any]) -> bool:
        """Determine if this unit should be included."""
        adviser_pipeline = should_include_dict.get("adviser_pipeline", False)
        if not adviser_pipeline and builder_context.is_adviser_pipeline():
            _LOGGER.debug("%s: Not registering for adviser pipeline",
                          unit_name)
            return False
        elif adviser_pipeline and builder_context.is_adviser_pipeline():
            allowed_recommendation_types = should_include_dict.get(
                "recommendation_types")
            if (allowed_recommendation_types is not None
                    and builder_context.recommendation_type is not None
                    and builder_context.recommendation_type.name.lower()
                    not in _ValueList(allowed_recommendation_types)):
                _LOGGER.debug(
                    "%s: Not registering for adviser pipeline with recommendation type %s",
                    unit_name,
                    builder_context.recommendation_type.name,
                )
                return False

        if (not should_include_dict.get("dependency_monkey_pipeline", False)
                and builder_context.is_dependency_monkey_pipeline()):
            _LOGGER.debug("%s: Not registering for dependency monkey pipeline",
                          unit_name)
            return False
        elif (should_include_dict.get("dependency_monkey_pipeline", False)
              and builder_context.is_dependency_monkey_pipeline()):
            allowed_decision_types = should_include_dict.get("decision_types")
            if (allowed_decision_types is not None
                    and builder_context.decision_type is not None
                    and builder_context.decision_type.name.lower()
                    not in _ValueList(allowed_decision_types)):
                _LOGGER.debug(
                    "%s: Not registering for dependency monkey pipeline with decision type %s",
                    unit_name,
                    builder_context.decision_type.name,
                )
                return False

        authenticated = should_include_dict.get("authenticated")
        if authenticated is not None and authenticated is not builder_context.authenticated:
            _LOGGER.debug(
                "%s: Not registering as authentication requirements are not met",
                unit_name,
            )
            return False

        labels_expected = should_include_dict.get("labels", {})
        if labels_expected:
            for label_key, value in labels_expected.items():
                value_context = builder_context.labels.get(label_key)
                if value == value_context:
                    break
            else:
                _LOGGER.debug(
                    "%s: Not registering as labels requested %s do not match with labels supplied %s",
                    unit_name,
                    labels_expected,
                    builder_context.labels,
                )
                return False

        # Library usage.
        library_usage_expected = should_include_dict.get("library_usage", {})
        if library_usage_expected:
            if not builder_context.library_usage:
                _LOGGER.debug(
                    "%s: Not registering as no library usage supplied",
                    unit_name)
                return False

            for library_name, symbols_expected in library_usage_expected.items():
                symbols_used = builder_context.library_usage.get(library_name)

                if not symbols_used:
                    _LOGGER.debug(
                        "%s: Not registering as library %s is not used",
                        unit_name, library_name)
                    return False

                if not cls._check_symbols(unit_name, library_name,
                                          symbols_expected, symbols_used):
                    return False
            else:
                _LOGGER.debug(
                    "%s: All library symbols required present in the library usage supplied",
                    unit_name)

        runtime_environment_dict = should_include_dict.get(
            "runtime_environments", {})

        # Operating system.
        operating_systems = runtime_environment_dict.get("operating_systems")
        os_used = builder_context.project.runtime_environment.operating_system
        os_used_name = os_used.name if os_used is not None else None
        os_used_version = os_used.version if os_used is not None else None

        if operating_systems:
            for item in operating_systems:
                os_name = item.get("name")
                os_version = item.get("version")
                if (os_name is None or os_name == os_used_name) and (
                        os_version is None or os_version == os_used_version):
                    _LOGGER.debug(
                        "%s: Matching operating system %r in version %r",
                        unit_name, os_name, os_version)
                    break
            else:
                _LOGGER.debug(
                    "%s: Not matching operating system (using %r in version %r)",
                    unit_name,
                    os_used_name,
                    os_used_version,
                )
                return False

        # Hardware.
        hw_used = builder_context.project.runtime_environment.hardware

        for hardware_dict in runtime_environment_dict.get("hardware", []):
            # CPU/GPU
            cpu_families = hardware_dict.get("cpu_families")
            cpu_models = hardware_dict.get("cpu_models")
            cpu_flags = hardware_dict.get("cpu_flags") or []
            gpu_models = hardware_dict.get("gpu_models")
            if cpu_families is not None and hw_used.cpu_family not in _ValueList(
                    cpu_families):
                _LOGGER.debug("%s: Not matching CPU family used (using %r)",
                              unit_name, hw_used.cpu_family)
                return False

            if cpu_models is not None and hw_used.cpu_model not in _ValueList(
                    cpu_models):
                _LOGGER.debug("%s: Not matching CPU model used (using %r)",
                              unit_name, hw_used.cpu_model)
                return False

            if gpu_models is not None and hw_used.gpu_model not in _ValueList(
                    gpu_models):
                _LOGGER.debug("%s: Not matching GPU model used (using %r)",
                              unit_name, hw_used.gpu_model)
                return False

            if cpu_flags:
                if hw_used.cpu_family is None or hw_used.cpu_model is None:
                    _LOGGER.debug(
                        "%s: CPU family (%s) or CPU model (%s) not provided, cannot check CPU flags %r",
                        unit_name,
                        hw_used.cpu_family,
                        hw_used.cpu_model,
                        cpu_flags,
                    )
                    return False

                if isinstance(cpu_flags, dict):
                    for cpu_flag in cpu_flags["not"]:
                        if CPUDatabase.provides_flag(hw_used.cpu_family,
                                                     hw_used.cpu_model,
                                                     cpu_flag):
                            _LOGGER.debug(
                                "%s: CPU flag %r is provided by CPU family %s and CPU model %s, not registering unit",
                                unit_name,
                                cpu_flag,
                                hw_used.cpu_family,
                                hw_used.cpu_model,
                            )
                            return False
                else:
                    for cpu_flag in cpu_flags:
                        if not CPUDatabase.provides_flag(
                                hw_used.cpu_family, hw_used.cpu_model,
                                cpu_flag):
                            _LOGGER.debug(
                                "%s: Not matching CPU flag %r for CPU family %s and CPU model %s, not registering unit",
                                unit_name,
                                cpu_flag,
                                hw_used.cpu_family,
                                hw_used.cpu_model,
                            )
                            return False

                _LOGGER.debug(
                    "%s: Used CPU family %s and CPU model %s provides all CPU flags required %r",
                    unit_name,
                    hw_used.cpu_family,
                    hw_used.cpu_model,
                    cpu_flags,
                )

        # Software present.
        runtime_used = builder_context.project.runtime_environment

        python_version_spec = runtime_environment_dict.get("python_version")
        if not cls._check_version(runtime_used.python_version,
                                  python_version_spec):
            _LOGGER.debug(
                "%s: Not matching Python version used (using %r; expected %r)",
                unit_name,
                runtime_used.python_version,
                python_version_spec,
            )
            return False

        cuda_version_spec = runtime_environment_dict.get("cuda_version")
        if not cls._check_version(runtime_used.cuda_version,
                                  cuda_version_spec):
            _LOGGER.debug(
                "%s: Not matching CUDA version used (using %r; expected %r)",
                unit_name,
                runtime_used.cuda_version,
                cuda_version_spec,
            )
            return False

        platforms = runtime_environment_dict.get("platforms")
        if platforms is not None and runtime_used.platform not in _ValueList(
                platforms):
            _LOGGER.debug("%s: Not matching platform used (using %r)",
                          unit_name, runtime_used.platform)
            return False

        openblas_version_spec = runtime_environment_dict.get(
            "openblas_version")
        if not cls._check_version(runtime_used.openblas_version,
                                  openblas_version_spec):
            _LOGGER.debug(
                "%s: Not matching openblas version used (using %r; expected %r)",
                unit_name,
                runtime_used.openblas_version,
                openblas_version_spec,
            )
            return False

        openmpi_version_spec = runtime_environment_dict.get("openmpi_version")
        if not cls._check_version(runtime_used.openmpi_version,
                                  openmpi_version_spec):
            _LOGGER.debug(
                "%s: Not matching openmpi version used (using %r; expected %r)",
                unit_name,
                runtime_used.openmpi_version,
                openmpi_version_spec,
            )
            return False

        cudnn_version_spec = runtime_environment_dict.get("cudnn_version")
        if not cls._check_version(runtime_used.cudnn_version,
                                  cudnn_version_spec):
            _LOGGER.debug(
                "%s: Not matching cudnn version used (using %r; expected %r)",
                unit_name,
                runtime_used.cudnn_version,
                cudnn_version_spec,
            )
            return False

        mkl_version_spec = runtime_environment_dict.get("mkl_version")
        if not cls._check_version(runtime_used.mkl_version, mkl_version_spec):
            _LOGGER.debug(
                "%s: Not matching mkl version used (using %r; expected %r)",
                unit_name,
                runtime_used.mkl_version,
                mkl_version_spec,
            )
            return False

        base_images = runtime_environment_dict.get("base_images")
        if base_images is not None and runtime_used.base_image not in _ValueListBaseImage(
                base_images):
            _LOGGER.debug("%s: Not matching base image used (using %r)",
                          unit_name, runtime_used.base_image)
            return False

        # All the following require base image information.
        base_image = None
        if runtime_used.base_image:
            base_image = cls.get_base_image(runtime_used.base_image,
                                            raise_on_error=True)

        abi = runtime_environment_dict.get("abi")
        if abi:
            if not base_image:
                _LOGGER.debug(
                    "%s: Check on ABI present but no base image provided",
                    unit_name,
                )
                return False

            symbols_present = set(
                builder_context.graph.get_thoth_s2i_analyzed_image_symbols_all(
                    base_image[0],
                    base_image[1],
                    is_external=False,
                ))
            if not symbols_present:
                if builder_context.iteration == 0:
                    _LOGGER.warning(
                        f"%s: No symbols found for runtime environment %r",
                        unit_name, runtime_used.base_image)
                return False

            if isinstance(abi, dict) and "not" in abi:
                # Negate operation.
                if symbols_present.intersection(set(abi["not"])):
                    _LOGGER.debug(
                        "%s: Not matching ABI present in the base image",
                        unit_name)
                    return False
                else:
                    return True
            elif isinstance(abi, list):
                if set(abi).issubset(symbols_present):
                    return True
                else:
                    _LOGGER.debug(
                        "%s: Not matching ABI present in the base image",
                        unit_name)
                    return False
            else:
                _LOGGER.error(
                    "%s: Unknown ABI definition - please report this error to administrator",
                    unit_name)
                return False

        rpm_packages = runtime_environment_dict.get("rpm_packages")
        if rpm_packages:
            if not base_image:
                _LOGGER.debug(
                    "%s: Check on RPM packages present but no base image provided",
                    unit_name,
                )
                return False

            analysis_document_id = builder_context.graph.get_last_analysis_document_id(
                base_image[0],
                base_image[1],
                is_external=False,
            )

            if not analysis_document_id:
                if builder_context.iteration == 0:
                    _LOGGER.warning(
                        "%s: No analysis for base container image %r found",
                        unit_name,
                        runtime_used.base_image,
                    )
                return False

            rpm_packages_present = builder_context.graph.get_rpm_package_version_all(
                analysis_document_id)
            if not rpm_packages_present:
                _LOGGER.debug("%s: No RPM packages found for %r", unit_name,
                              runtime_used.base_image)
                return False
            if not cls._check_rpm_packages(unit_name, rpm_packages_present,
                                           rpm_packages):
                _LOGGER.debug(
                    "%s: Not matching RPM packages present in the base image %r",
                    unit_name, runtime_used.base_image)
                return False

        python_packages = runtime_environment_dict.get("python_packages")
        if python_packages:
            if not base_image:
                _LOGGER.debug(
                    "%s: Check on Python packages present but no base image provided",
                    unit_name,
                )
                return False

            analysis_document_id = builder_context.graph.get_last_analysis_document_id(
                base_image[0],
                base_image[1],
                is_external=False,
            )

            if not analysis_document_id:
                if builder_context.iteration == 0:
                    _LOGGER.warning(
                        "%s: No analysis for base container image %r found",
                        unit_name,
                        runtime_used.base_image,
                    )
                return False

            python_packages_present = builder_context.graph.get_python_package_version_all(
                analysis_document_id)
            if not python_packages_present:
                _LOGGER.debug("%s: No Python packages found for %r", unit_name,
                              runtime_used.base_image)
                return False
            if not cls._check_python_packages(
                    unit_name, python_packages_present, python_packages):
                _LOGGER.debug(
                    "%s: Not matching Python packages present in the base image %r",
                    unit_name, runtime_used.base_image)
                return False

        return True

    @classmethod
    def _index_url_check(cls, index_url_conf: Optional[Union[str, Dict[str,
                                                                       str]]],
                         index_url: str) -> bool:
        """Convert index_url to a comparable object considering "not"."""
        if index_url_conf is None:
            return True

        if isinstance(index_url_conf, dict):
            if list(index_url_conf.keys()) != ["not"]:
                raise ValueError(
                    "index_url configuration should state directly string or a 'not' value"
                )

            return index_url_conf["not"] != index_url
        else:
            return index_url_conf == index_url

    @classmethod
    def _check_python_packages(
        cls,
        unit_name: str,
        python_packages_present: List[Dict[str, str]],
        python_packages_required: List[Dict[str, str]],
    ) -> bool:
        """Check if required Python packages are present in the environment."""
        # Convert to dict to have O(1) access time.
        py_packages_present_dict: Dict[str, List[Dict[str, str]]] = {}
        for python_package_present in python_packages_present:
            package = py_packages_present_dict.get(
                python_package_present["package_name"])
            if package is None:
                py_packages_present_dict[
                    python_package_present["package_name"]] = [
                        python_package_present
                    ]
            else:
                package.append(python_package_present)

        if isinstance(python_packages_required, dict):
            if "not" not in python_packages_required:
                _LOGGER.error(
                    "%s: Unable to parse description of Python packages required",
                    unit_name)
                return False

            for not_required_python_package in python_packages_required["not"]:
                for py_package_present in py_packages_present_dict.get(
                        not_required_python_package["name"]) or []:
                    location = not_required_python_package.get("location")
                    if location is not None and not re.fullmatch(
                            location, py_package_present["location"]):
                        _LOGGER.debug(
                            "%s: Python package %r in %r is located in different location %r as expected",
                            unit_name,
                            not_required_python_package["name"],
                            py_package_present["location"],
                            location,
                        )
                        continue

                    version = not_required_python_package.get("version")
                    if version and py_package_present[
                            "package_version"] not in SpecifierSet(version):
                        _LOGGER.debug(
                            "%s: Python package '%s==%s' (in %r) matches version %r but should not",
                            unit_name,
                            not_required_python_package["name"],
                            py_package_present["package_version"],
                            py_package_present["location"],
                            version,
                        )
                        continue

                    _LOGGER.debug(
                        "%s: presence of Python package %r causes not including the pipeline unit",
                        unit_name,
                        py_package_present,
                    )
                    return False
        else:
            for required_python_package in python_packages_required:
                for py_package_present in py_packages_present_dict.get(
                        required_python_package["name"]) or []:
                    version = required_python_package.get("version")
                    if version and py_package_present[
                            "package_version"] not in SpecifierSet(version):
                        _LOGGER.debug(
                            "%s: Python package '%s==%s' (in %r) does not match required version %r",
                            unit_name,
                            required_python_package["name"],
                            py_package_present["package_version"],
                            py_package_present.get("location", "any"),
                            version,
                        )
                        continue

                    location = required_python_package.get("location")
                    if location is not None and not re.fullmatch(
                            location, py_package_present["location"]):
                        _LOGGER.debug(
                            "%s: Python package %r is located at %r but expected to be in %r",
                            unit_name,
                            required_python_package["name"],
                            py_package_present["location"],
                            location,
                        )
                        continue

                    _LOGGER.debug(
                        "%s: Python package %r in version %r (located in %r) is found in the runtime environment",
                        unit_name,
                        required_python_package["name"],
                        required_python_package.get("version", "any"),
                        py_package_present.get("location", "any"),
                    )
                    break
                else:
                    _LOGGER.debug(
                        "%s: Not including as Python package %r (in version %r) is not present in the environment",
                        unit_name,
                        required_python_package["name"],
                        required_python_package.get("version", "any"),
                    )
                    return False

        _LOGGER.debug("%s: all Python package presence checks passed",
                      unit_name)
        return True

    @staticmethod
    def _check_rpm_packages(
        unit_name: str,
        rpm_packages_present: List[Dict[str, str]],
        rpm_packages_required: Union[List[Dict[str, str]],
                                     Dict[str, List[Dict[str, str]]]],
    ) -> bool:
        """Check if required RPM packages are present."""
        # Convert RPM packages present to mapping to save some cycles and have O(1) look up.
        rpm_packages_pres = {
            i["package_name"]: i
            for i in rpm_packages_present
        }
        rpm_packages_req: List[Dict[str, str]]
        if isinstance(rpm_packages_required, dict):
            if "not" not in rpm_packages_required:
                _LOGGER.error(
                    "%s: Unable to parse description of RPM packages required",
                    unit_name)
                return False

            should_be_present = False
            rpm_packages_req = [i for i in rpm_packages_required["not"]]
        else:
            should_be_present = True
            rpm_packages_req = [i for i in rpm_packages_required]

        for rpm_package_req in rpm_packages_req:
            rpm_name = rpm_package_req["package_name"]
            rpm_present = rpm_packages_pres.get(rpm_name)

            if should_be_present:
                if not rpm_present:
                    _LOGGER.debug(
                        "%s: Not including unit as RPM %r is not present in the runtime environment",
                        unit_name,
                        rpm_name,
                    )
                    return False

                for key, value in rpm_package_req.items():
                    value_present = rpm_present.get(key)
                    if value_present != value:
                        _LOGGER.debug(
                            "%s: Not including unit as RPM %r has not matching %r - expected %r got %r",
                            unit_name,
                            rpm_name,
                            key,
                            value,
                            value_present,
                        )
                        return False
            else:
                if not rpm_present:
                    # If just one is not present, we know the unit is included.
                    return True

                for key, value in rpm_package_req.items():
                    value_present = rpm_present.get(key)
                    if value_present != value:
                        _LOGGER.debug(
                            "%s: Not including unit as RPM %s has matching %r - expected %r got %r",
                            unit_name,
                            rpm_name,
                            key,
                            value,
                            value_present,
                        )
                        return True

        if not should_be_present:
            _LOGGER.debug(
                "%s: Not including unit as all RPM are present in the runtime environment",
                unit_name)
            return False

        # Path to should be present.
        _LOGGER.debug("%s: all RPM package presence checks passed", unit_name)
        return True

    @classmethod
    def _prepare_justification_link(cls, entries: List[Dict[str,
                                                            Any]]) -> None:
        """Prepare justification links before using them."""
        for entry in entries:
            link = entry.get("link")
            if link and not link.startswith(("https://", "http://")):
                entry["link"] = jl(link)

    @property
    def name(self) -> str:
        """Get name of the prescription instance."""
        name: str = self.prescription["name"]
        return name

    def pre_run(self) -> None:
        """Prepare this pipeline unit before running it."""
        self._prepare_justification_link(
            self.run_prescription.get("stack_info", []))
        self._configuration["prescription"]["run"] = False
        super().pre_run()

    @staticmethod
    def _yield_should_include(
        unit_prescription: Dict[str, Any]
    ) -> Generator[Dict[str, Any], None, None]:
        """Yield for every entry stated in the match field."""
        match = unit_prescription.get("match", {})
        run = unit_prescription.get("run", {})
        prescription_conf = {"run": False}
        if isinstance(match, list):
            for item in match:
                yield {
                    "package_name":
                    item["package_version"].get("name") if item else None,
                    "match":
                    item,
                    "run":
                    run,
                    "prescription":
                    prescription_conf,
                }
        else:
            yield {
                "package_name":
                match["package_version"].get("name") if match else None,
                "match": match,
                "run": run,
                "prescription": prescription_conf,
            }

    @staticmethod
    def _yield_should_include_with_state(
        unit_prescription: Dict[str, Any]
    ) -> Generator[Dict[str, Any], None, None]:
        """Yield for every entry stated in the match field."""
        match = unit_prescription.get("match", {})
        prescription_conf = {"run": False}
        if isinstance(match, list):
            for item in match:
                match_resolved = item.get("state",
                                          {}).get("resolved_dependencies")
                yield {
                    # Return the first package name that should be matched to keep optimization for wrap calls.
                    "package_name":
                    match_resolved[0].get("name") if match_resolved else None,
                    "match":
                    item,
                    "run":
                    unit_prescription["run"],
                    "prescription":
                    prescription_conf,
                }
        else:
            match_resolved = match.get(
                "state", {}).get("resolved_dependencies") if match else None
            yield {
                "package_name":
                match_resolved[0].get("name") if match_resolved else None,
                "match":
                match,
                "run":
                unit_prescription["run"],
                "prescription":
                prescription_conf,
            }

    def _run_log(self) -> None:
        """Log message specified in the run prescription."""
        if self._configuration["prescription"]["run"]:
            # Noop. The prescription was already run.
            return

        log = self.run_prescription.get("log")
        if log:
            _LOGGER.log(level=getattr(logging, log["type"]),
                        msg=f"{self.name}: {log['message']}")

    def _run_stack_info(self) -> None:
        """Add stack info if any prescribed."""
        if self._configuration["prescription"]["run"]:
            # Noop. The prescription was already run.
            return

        stack_info = self.run_prescription.get("stack_info")
        if stack_info:
            self.context.stack_info.extend(stack_info)

    def _check_package_tuple_from_prescription(
            self, dependency_tuple: Tuple[str, str, str],
            dependency: Dict[str, str]) -> bool:
        """Check if the given package version tuple matches with what was written in prescription."""
        develop = dependency.get("develop")
        if develop is not None:
            package_version = self.context.get_package_version(
                dependency_tuple, graceful=True)
            if not package_version:
                return False

            if package_version.develop != develop:
                return False

        if not self._index_url_check(dependency.get("index_url"),
                                     dependency_tuple[2]):
            return False

        version = dependency.get("version")
        if version is not None:
            specifier = SpecifierSet(version)
            if dependency_tuple[1] not in specifier:
                return False

        return True

    def _run_state(self, state: State) -> bool:
        """Check state match."""
        state_prescription = self.match_prescription.get("state")
        if not state_prescription:
            # Nothing to check.
            return True

        for resolved_dependency in state_prescription.get(
                "resolved_dependencies", []):
            resolved = state.resolved_dependencies.get(
                resolved_dependency["name"])
            if not resolved:
                return False

            if not self._check_package_tuple_from_prescription(
                    resolved, resolved_dependency):
                return False

        return True

    def _run_state_with_initiator(self, state: State,
                                  package_version: PackageVersion) -> bool:
        """Check state match respecting also initiator of the give package."""
        state_prescription = self.match_prescription.get("state")
        if not state_prescription:
            # Nothing to check.
            return True

        package_version_from = state_prescription.get(
            "package_version_from") or []
        # XXX: we explicitly do not consider runtime environment as we expect to have it only one here.
        dependents = {
            i[0]
            for i in self.context.dependents.get(package_version.name, {}).get(
                package_version.to_tuple(), set())
        }

        for resolved_dependency in package_version_from:
            resolved = state.resolved_dependencies.get(
                resolved_dependency["name"])
            if not resolved:
                return False

            if not self._check_package_tuple_from_prescription(
                    resolved, resolved_dependency):
                return False

            if resolved not in dependents:
                _LOGGER.debug(
                    "Package %r stated in package_version_from not a did not introduce package %r",
                    resolved,
                    package_version.to_tuple(),
                )
                return False

            dependents.discard(resolved)

        if dependents and not state_prescription.get(
                "package_version_from_allow_other", False):
            for dependent in dependents:
                if dependent == state.resolved_dependencies.get(dependent[0]):
                    return False

        for resolved_dependency in state_prescription.get(
                "resolved_dependencies", []):
            resolved = state.resolved_dependencies.get(
                resolved_dependency["name"])
            if not resolved:
                return False

            if not self._check_package_tuple_from_prescription(
                    resolved, resolved_dependency):
                return False

        return True

    def _run_base(self) -> None:
        """Implement base routines for run part of the prescription."""
        self._run_log()
        self._run_stack_info()

        not_acceptable = self.run_prescription.get("not_acceptable")
        if not_acceptable:
            raise NotAcceptable(not_acceptable)

        eager_stop_pipeline = self.run_prescription.get("eager_stop_pipeline")
        if eager_stop_pipeline:
            raise EagerStopPipeline(eager_stop_pipeline)
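
To make the flow above concrete, here is a sketch of a minimal prescription and its registration, inferred from the attributes this class reads. It is illustrative only, not a complete document from the real prescription schema.

EXAMPLE_PRESCRIPTION = {
    "name": "ExampleUnit",
    "should_include": {
        "times": 1,
        "adviser_pipeline": True,
        "recommendation_types": ["performance", "stable"],
        "runtime_environments": {
            "operating_systems": [{"name": "rhel", "version": "8"}],
            "python_version": ">=3.8,<3.11",
        },
    },
    "match": {"package_version": {"name": "tensorflow"}},
    "run": {"log": {"type": "INFO", "message": "Considering TensorFlow"}},
}

class ExampleUnitPrescription(UnitPrescription):
    ...  # concrete pipeline hooks omitted in this sketch

ExampleUnitPrescription.set_prescription(EXAMPLE_PRESCRIPTION)
assert ExampleUnitPrescription.get_unit_name() == "ExampleUnit"
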
Code example #41
class TensorFlowAPISieve(Sieve):
    """A sieve that makes sure the right TensorFlow release is used based on user's API usage."""

    CONFIGURATION_SCHEMA: Schema = Schema(
        {Required("package_name"): SchemaAny(str)})
    CONFIGURATION_DEFAULT = {"package_name": "tensorflow"}
    _LINK_API = jl("tf_api")
    _LINK_NO_API = jl("tf_no_api")

    _messages_logged = attr.ib(type=Set[str], factory=set, init=False)
    _no_api_logged = attr.ib(type=bool, default=False, init=False)
    _acceptable_releases = attr.ib(type=Optional[Set[str]],
                                   default=None,
                                   init=False)

    def pre_run(self) -> None:
        """Initialize this pipeline unit before each run."""
        self._messages_logged.clear()
        self._acceptable_releases = None
        self._no_api_logged = False
        super().pre_run()

    def _pre_compute_releases(self) -> None:
        """Pre-compute releases that match library usage supplied by the user."""
        with open(os.path.join(self._DATA_DIR, "tensorflow", "api.json"),
                  "r") as api_file:
            known_api = json.load(api_file)

        self._acceptable_releases = set()
        tf_api_used = set(
            (self.context.library_usage.get("report") or {}).get("tensorflow") or []  # type: ignore
        )
        for tf_version, tf_api in known_api.items():
            if tf_api_used.issubset(tf_api):
                self._acceptable_releases.add(tf_version)

    @classmethod
    def should_include(
        cls, builder_context: "PipelineBuilderContext"
    ) -> Generator[Dict[str, Any], None, None]:
        """Register this pipeline unit for adviser library usage is provided."""
        if (builder_context.is_adviser_pipeline()
                and not builder_context.is_included(cls)
                and builder_context.library_usage
                and builder_context.recommendation_type
                not in (RecommendationType.LATEST, RecommendationType.TESTING)
                and "tensorflow"
                in (builder_context.library_usage.get("report") or {})):
            yield {"package_name": "tensorflow"}
            yield {"package_name": "tensorflow-gpu"}
            yield {"package_name": "intel-tensorflow"}
            yield {"package_name": "tensorflow-cpu"}
            return None

        yield from ()
        return None

    def run(
        self, package_versions: Generator[PackageVersion, None, None]
    ) -> Generator[PackageVersion, None, None]:
        """Use specific TensorFlow release based on library usage as supplied by the user."""
        if self._acceptable_releases is None:
            self._pre_compute_releases()

        if not self._acceptable_releases:
            if not self._no_api_logged:
                self._no_api_logged = True
                msg = "No TensorFlow symbols API found in the database that would match TensorFlow symbols used"
                _LOGGER.warning("%s - see %s", msg, self._LINK_NO_API)
                self.context.stack_info.append({
                    "type": "WARNING",
                    "message": msg,
                    "link": self._LINK_NO_API,
                })

            yield from package_versions
            return

        for package_version in package_versions:
            version = ".".join(
                map(str, package_version.semantic_version.release[:2]))
            if version in self._acceptable_releases:
                yield package_version
            elif version not in self._messages_logged:
                self._messages_logged.add(version)
                msg = (
                    f"Removing TensorFlow {package_version.to_tuple()!r} as it does not provide required symbols "
                    f"in the exposed API")
                _LOGGER.warning("%s - see %s", msg, self._LINK_API)
                self.context.stack_info.append({
                    "type": "WARNING",
                    "message": msg,
                    "link": self._LINK_API
                })
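
CONFIGURATION_SCHEMA above is plain voluptuous; SchemaAny is presumably voluptuous.Any imported under an alias. A standalone sketch of how the unit's default configuration validates:

from voluptuous import Any, MultipleInvalid, Required, Schema

configuration_schema = Schema({Required("package_name"): Any(str)})

# The default configuration passes through unchanged.
assert configuration_schema({"package_name": "tensorflow"}) == {"package_name": "tensorflow"}

# A value of the wrong type raises MultipleInvalid.
try:
    configuration_schema({"package_name": 42})
except MultipleInvalid as exc:
    print(exc)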
Code example #42
0
    def setUp(self):
        self.graph_schema = Schema({
            'scopes': generate_scope_validator(scopes={
                "docker-worker:cache:tc-vcs",
                "docker-worker:image:taskcluster/builder:*",
                "queue:define-task:aws-provisioner-v1/opt-linux64",
                "queue:create-task:aws-provisioner-v1/opt-linux64",
                "queue:define-task:aws-provisioner-v1/build-c4-2xlarge",
                "queue:create-task:aws-provisioner-v1/build-c4-2xlarge",
                "docker-worker:cache:build-foo-release-workspace",
                "docker-worker:cache:tooltool-cache",
                "project:releng:signing:format:gpg",
                "project:releng:signing:cert:release-signing",
                "docker-worker:relengapi-proxy:tooltool.download.public",
            })
        }, extra=True, required=True)

        self.task_schema = Schema({
            'task': {
                'scopes': generate_scope_validator(scopes={
                    "docker-worker:cache:tc-vcs",
                    "docker-worker:image:taskcluster/builder:0.5.9",
                    "queue:define-task:aws-provisioner-v1/opt-linux64",
                    "queue:create-task:aws-provisioner-v1/opt-linux64",
                    "queue:define-task:aws-provisioner-v1/build-c4-2xlarge",
                    "queue:create-task:aws-provisioner-v1/build-c4-2xlarge",
                    "docker-worker:cache:build-foo-release-workspace",
                    "docker-worker:cache:tooltool-cache",
                    "docker-worker:relengapi-proxy:tooltool.download.public",
                }),
                'provisionerId': 'aws-provisioner-v1',
                'workerType': 'opt-linux64',
                'payload': {
                    'artifacts': dict,
                    'command': list,
                    'cache': dict,
                    'image': Match(r'^rail/source-builder@sha256'),
                    'env': {
                        'MOZ_PKG_VERSION': '42.0b2',
                    }
                }
            }
        }, extra=True, required=True)

        self.signing_task_schema = Schema({
            'task': {
                'scopes': generate_scope_validator(scopes={
                    "project:releng:signing:format:gpg",
                    "project:releng:signing:cert:release-signing",
                }),
                'provisionerId': 'signing-provisioner-v1',
                'workerType': 'signing-worker-v1',
                'payload': All({
                    'signingManifest': str,
                }, Length(1))
            }
        }, extra=True, required=True)

        test_kwargs = create_firefox_test_args({
            'source_enabled': True,
            'signing_pvt_key': PVT_KEY_FILE,
            'en_US_config': EN_US_CONFIG,
        })

        self.graph = make_task_graph(**test_kwargs)
        self.task = get_task_by_name(self.graph, "foo_source")
        self.signing_task = get_task_by_name(self.graph, "foo_source_signing")
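
generate_scope_validator is a helper from the project under test, not part of voluptuous. A minimal stand-in, under the assumption that it only needs to accept lists drawn from the allowed scopes (name and behaviour are approximated here):

from voluptuous import Invalid

def generate_scope_validator(scopes):
    # Build a validator accepting any list whose items all belong to `scopes`.
    def _validate(value):
        unknown = set(value) - set(scopes)
        if unknown:
            raise Invalid("unknown scopes: %s" % sorted(unknown))
        return value
    return _validate

validator = generate_scope_validator(scopes={"docker-worker:cache:tc-vcs"})
validator(["docker-worker:cache:tc-vcs"])  # passes
# validator(["queue:create-task:other"]) would raise Invalid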
Code example #43
0
# -*- coding: utf-8 -*-
import json
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import url_concat

from autumn.torn.form import Form
from voluptuous import Schema, Required
from autumn.utils import json_hook
from .. import BaseHandler
import hashlib

schema = Schema({
    Required('username'): str,
    Required('password'): str,
    'next': str,
})


class Login(BaseHandler):
    @gen.coroutine
    def get(self):
        form = Form(self.request.arguments, schema)

        http_client = AsyncHTTPClient()
        response = yield http_client.fetch(
            "http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1")
        bg_img_url = json.loads(response.body,
                                object_hook=json_hook).images[0].url

        self.render('auth/login.html', form=form, bg_img_url=bg_img_url)
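
Form comes from the project's own autumn package; the voluptuous schema by itself behaves like this:

from voluptuous import MultipleInvalid, Required, Schema

schema = Schema({
    Required('username'): str,
    Required('password'): str,
    'next': str,  # a plain key is optional by default
})

schema({'username': 'alice', 'password': 's3cret'})  # 'next' may be omitted
try:
    schema({'username': 'alice'})  # password missing
except MultipleInvalid as exc:
    print(exc)  # required key not provided @ data['password']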
Code example #44
0
File: __init__.py Project: vkatsikaros/gecko-dev
job_description_schema = Schema({
    # The name of the job and the job's label.  At least one must be specified,
    # and the label will be generated from the name if necessary, by prepending
    # the kind.
    Optional('name'): basestring,
    Optional('label'): basestring,

    # the following fields are passed directly through to the task description,
    # possibly modified by the run implementation.  See
    # taskcluster/taskgraph/transforms/task.py for the schema details.
    Required('description'): task_description_schema['description'],
    Optional('attributes'): task_description_schema['attributes'],
    Optional('dependencies'): task_description_schema['dependencies'],
    Optional('expires-after'): task_description_schema['expires-after'],
    Optional('routes'): task_description_schema['routes'],
    Optional('scopes'): task_description_schema['scopes'],
    Optional('extra'): task_description_schema['extra'],
    Optional('treeherder'): task_description_schema['treeherder'],
    Optional('index'): task_description_schema['index'],
    Optional('run-on-projects'): task_description_schema['run-on-projects'],
    Optional('coalesce-name'): task_description_schema['coalesce-name'],
    Optional('needs-sccache'): task_description_schema['needs-sccache'],
    Optional('when'): task_description_schema['when'],

    # A description of how to run this job.
    'run': {
        # The key to a job implementation in a peer module to this one
        'using': basestring,

        # Any remaining content is verified against that job implementation's
        # own schema.
        Extra: object,
    },
    Optional('platforms'): [basestring],
    Required('worker-type'): Any(
        task_description_schema['worker-type'],
        {'by-platform': {basestring: task_description_schema['worker-type']}},
    ),
    Required('worker'): Any(
        task_description_schema['worker'],
        {'by-platform': {basestring: task_description_schema['worker']}},
    ),
})
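
The worker-type and worker keys accept either a direct value or a by-platform mapping via Any. A reduced, self-contained sketch of the same pattern, with plain str standing in for the nested task_description_schema entries:

from voluptuous import Any, Required, Schema

schema = Schema({
    Required('worker-type'): Any(str, {'by-platform': {str: str}}),
})

schema({'worker-type': 'opt-linux64'})                                # direct value
schema({'worker-type': {'by-platform': {'linux64': 'opt-linux64'}}})  # keyed by platform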
Code example #45
0
File: singletons.py Project: wendybai83/curator
def filter_schema_check(action, filter_dict):
    valid_filters = SchemaCheck(
        filter_dict, Schema(filters.Filters(action, location='singleton')),
        'filters', '{0} singleton action "filters"'.format(action)).result()
    return validate_filters(action, valid_filters)
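
SchemaCheck and filters.Filters are curator's own wrappers; the voluptuous step underneath is building a Schema and calling it on the data. A simplified stand-in (schema_check approximates, not reproduces, SchemaCheck.result()):

from voluptuous import MultipleInvalid, Schema

def schema_check(data, schema, description):
    # Validate and return the data, adding context to any failure.
    try:
        return Schema(schema)(data)
    except MultipleInvalid as exc:
        raise ValueError('Bad %s: %s' % (description, exc))

schema_check({'filtertype': 'pattern'}, {'filtertype': str}, 'singleton "filters"')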
Code example #46
0
    def setUp(self):
        # Task attributes common to each partner repack
        common_task_schema = Schema({
            'task': {
                'provisionerId': 'buildbot-bridge',
                'workerType': 'buildbot-bridge',
                'payload': {
                    'properties': {
                        'version': '42.0b2',
                        'build_number': 3,
                    }
                }
            }
        })

        self.partner_task_schema = common_task_schema.extend({
            'task': {
                'payload': {
                    'properties': {
                        'repack_manifests_url': 'git@github.com:mozilla-partners/repack-manifests.git',
                    }
                }
            }
        }, required=True, extra=True)

        self.eme_free_task_schema = common_task_schema.extend({
            'task': {
                'payload': {
                    'properties': {
                        'repack_manifests_url': 'https://github.com/mozilla-partners/mozilla-EME-free-manifest',
                    }
                }
            }
        }, required=True, extra=True)

        self.sha1_task_schema = common_task_schema.extend({
            'task': {
                'payload': {
                    'properties': {
                        'repack_manifests_url': 'https://github.com/mozilla-partners/mozilla-sha1-manifest',
                    }
                }
            }
        }, required=True, extra=True)

        test_kwargs = create_firefox_test_args({
            'push_to_candidates_enabled': True,
            'push_to_releases_enabled': True,
            'push_to_releases_automatic': True,
            'source_enabled': True,
            'signing_pvt_key': PVT_KEY_FILE,
            'partner_repacks_platforms': ['win32', 'linux'],
            'eme_free_repacks_platforms': ['win32', 'macosx64'],
            'sha1_repacks_platforms': ['win32'],
            'release_channels': ['foo', 'bar'],
            'en_US_config': {
                "platforms": {
                    "linux": {'signed_task_id': 'abc', 'unsigned_task_id': 'abc'},
                    "macosx64": {'signed_task_id': 'abc', 'unsigned_task_id': 'abc'},
                    "win32": {'signed_task_id': 'abc', 'unsigned_task_id': 'abc'},
                }
            },
            'l10n_config': {
                "platforms": {
                    "win32": {
                        "en_us_binary_url": "https://queue.taskcluster.net/something/firefox.exe",
                        "mar_tools_url": "https://queue.taskcluster.net/something/",
                        "locales": ["de", "en-GB", "zh-TW"],
                        "chunks": 1,
                    },
                    "linux": {
                        "en_us_binary_url": "https://queue.taskcluster.net/something/firefox.tar.xz",
                        "mar_tools_url": "https://queue.taskcluster.net/something/",
                        "locales": ["de", "en-GB", "zh-TW"],
                        "chunks": 1,
                    },
                    "macosx64": {
                        "en_us_binary_url": "https://queue.taskcluster.net/something/firefox.dmg",
                        "mar_tools_url": "https://queue.taskcluster.net/something/",
                        "locales": ["de", "en-GB", "zh-TW"],
                        "chunks": 1,
                    },
                },
                "changesets": {
                    "de": "default",
                    "en-GB": "default",
                    "zh-TW": "default",
                },
            },
        })

        self.graph = make_task_graph(**test_kwargs)
        self.partner_tasks = [
            get_task_by_name(self.graph, "release-foo-firefox-{}_partner_repacks".format(platform))
            for platform in ["win32", "linux"]
        ]
        self.eme_free_tasks = [
            get_task_by_name(self.graph, "release-foo-firefox-{}_eme_free_repacks".format(platform))
            for platform in ["win32", "macosx64"]
        ]
        self.sha1_tasks = [
            get_task_by_name(self.graph, "release-foo-firefox-{}_sha1_repacks".format(platform))
            for platform in ["win32"]
        ]

        self.partner_push_to_mirrors_task = get_task_by_name(self.graph, "release-foo-firefox_partner_repacks_copy_to_releases")
        self.push_to_mirrors_task = get_task_by_name(self.graph, "release-foo_firefox_push_to_releases")

        self.upstream_dependencies = [
            "release-foo_firefox_{}_complete_en-US_beetmover_candidates".format(platform)
            for platform in ["win32", "linux", "macosx64"]
        ] + [
            "release-foo_firefox_{}_l10n_repack_beetmover_candidates_1".format(platform)
            for platform in ["win32", "linux", "macosx64"]
        ]
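
Note that common_task_schema.extend above is handed nested dicts: Schema.extend merges nested dictionaries rather than replacing them, so each extended schema keeps the shared version and build_number properties while adding its own repack_manifests_url. A minimal demonstration:

from voluptuous import Schema

base = Schema({'task': {'payload': {'properties': {'version': str}}}})
extended = base.extend(
    {'task': {'payload': {'properties': {'repack_manifests_url': str}}}},
    required=True, extra=True)

# The nested subschemas are merged, not replaced.
assert extended.schema == {
    'task': {'payload': {'properties': {
        'version': str,
        'repack_manifests_url': str,
    }}}}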