# [Start Global Variables]
FILE_LOCK = False

supported_baudrates = [
    50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200,
    38400, 57600, 115200, 230400, 460800, 500000, 576000, 921600, 1000000,
    1152000, 1500000, 2000000, 2500000, 3000000, 3500000, 4000000
]  # sometimes supported; add as needed.
# [End Global Variables]

# [Start Schemas]
from schema import And, Or, Schema  # imports needed by the schemas below

# Define rules for the JSON schema to fulfill:
operation_schema = Schema(
    {
        "startadress": And(int, lambda n: 0 <= n <= 9999),
        "function_code": And(int, lambda n: 0 <= n <= 30),
        "display_name": str,
        # Either 0 (disabled) or a number of seconds in [0.05, 864001].
        "sampling_interval": Or(0, And(Or(int, float),
                                       lambda n: 0.05 <= n <= 864001)),
        # A key named "quantity_of_x" or "output_value", mapped to an
        # integer in [0, 500].
        And(str, lambda s: s in ("quantity_of_x", "output_value")):
            And(int, lambda n: 0 <= n <= 500),
    },
    name="operation_schema",
    as_reference=True)
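
# A quick sanity check with illustrative values ("quantity_of_x" is one of
# the two allowed alternative keys):
example_operation = {
    "startadress": 0,
    "function_code": 3,
    "display_name": "boiler temperature",
    "sampling_interval": 1.0,
    "quantity_of_x": 2,
}
operation_schema.validate(example_operation)  # raises SchemaError if invalid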

# The body of slave_schema is cut off in the source.  The fields below are an
# assumed minimal completion for a Modbus slave: an address, a baudrate from
# the supported list, and the operations validated by operation_schema above.
slave_schema = Schema(
    {
        "slave_address": And(int, lambda n: 0 <= n <= 247),
        "baudrate": And(int, lambda n: n in supported_baudrates),
        "operations": [operation_schema],
    },
    name="slave_schema",
    as_reference=True)
Example #2
File: task.py Project: kodless/leek
    "task-received": "RECEIVED",
    "task-started": "STARTED",
    "task-succeeded": "SUCCEEDED",
    "task-failed": "FAILED",
    "task-rejected": "REJECTED",
    "task-revoked": "REVOKED",
    "task-retried": "RETRY",
}
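
# Assumed derivation: the allowed event types are the mapping's keys, which
# matches the Or(*TASK_EVENT_TYPES) usage below.
TASK_EVENT_TYPES = tuple(TASK_STATE_MAPPING.keys())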

TaskEventSchema = Schema(
    {
        # in case of task-sent, task-received
        "type": Or(
            *TASK_EVENT_TYPES
        ),
        "uuid": And(str, len),
        "timestamp": And(float),  # Seconds to Milliseconds
        "utcoffset": And(int),
        "pid": And(int),
        "clock": And(int),
        Optional("name"): And(str, len),
        Optional("args"): And(str),
        Optional("kwargs"): And(str),
        # exchange/rq/queue are only available with task-sent events
        Optional("exchange"): And(str),
        Optional("routing_key"): And(str),
        Optional("queue"): And(str),
        # If the task has a parent task caller
        Optional("root_id"): Or(None, And(str, len)),
        Optional("parent_id"): Or(None, And(str, len)),
        # countdown: If the task is scheduled or set to be retried after failure
        Optional("countdown"): Or(None, float),  # assumed type; the remaining
        # keys and the closing bracket are cut off in the source
    }
)
Example #3
    def validate_config(loaded_config):
        """Validate pysmurf configuration dictionary.

        Validates the parameters in the configuration dictionary
        provided by the `loaded_config` argument using the 3rd party
        `schema` python library.  If the configuration data is valid,
        parameters are returned as a dictionary.

        `schema` validation does several important things to raw data
        loaded from the pysmurf configuration file:

        - Checks that all mandatory configuration variables are defined.
        - Conditions all configuration variables into the correct type
          (e.g. `float`, `int`, `str`, etc.).
        - Automatically fills in the values for missing optional
          parameters.  Optional parameters are typically parameters which
          almost never change from SMuRF system to SMuRF system.
        - Checks if parameters have valid values (e.g., some parameters
          can only be either 0 or 1, or must be in a predefined interval,
          etc.).
        - Performs validation of some known higher level configuration
          data interdependencies (e.g. prevents the user from defining an
          RTM DAC as both a TES bias and an RF amplifier bias).

        If validation fails, `schema` will raise a `SchemaError` exception
        and fail to load the configuration data, forcing the user to fix
        the cause of the `SchemaError` exception before the configuration
        file can be loaded and used.

        Args
        ----
        loaded_config : dict
            Dictionary of pysmurf configuration parameters to run
            `schema` validation on.

        Returns
        -------
        validated_config : dict
            Dictionary of validated configuration parameters.
            Parameter values are conditioned by `schema` to conform to
            the specified types.

        Raises
        ------
        SchemaError
            Raised if the configuration data fails `schema` validation.

        """
        # Import useful schema objects
        # Try to import them from the system package. If it fails, then
        # import the local copy available in this repository.
        try:
            from schema import Schema, And, Use, Optional, Regex
        except ImportError:
            from pysmurf.client.base.schema import Schema, And, Use, Optional, Regex

        # Start with an extremely limited validation to figure out
        # things that we need to validate the entire configuration
        # file, like which bands are being used.  This also gets used
        # to construct the init:bands list, from which band_# are
        # present.
        band_schema = Schema({'init': {
            Regex('band_[0-7]'): {}
        }},
                             ignore_extra_keys=True)
        band_validated = band_schema.validate(loaded_config)

        # Build list of bands from which band_[0-7] blocks are present.  Will
        # use this to build the rest of the validation schema.
        band_regexp = re.compile("band_([0-7])")
        bands = sorted([
            int(m.group(1)) for m in (band_regexp.match(s)
                                      for s in band_validated['init'].keys())
            if m
        ])
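        # e.g. with 'band_0' and 'band_3' blocks present in init, this
        # yields bands == [0, 3].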

        ###################################################
        ##### Building full validation schema
        schema_dict = {}

        # Only used if none specified in pysmurf instantiation
        schema_dict['epics_root'] = And(str, len)

        #### Start specifying init schema
        # init must be present
        schema_dict['init'] = {}

        # Explicitly sets dspEnable to this value in pysmurf setup.  Shouldn't
        # be necessary if defaults.yml is properly configured (dspEnable can
        # be set there, instead).
        schema_dict['init'][Optional('dspEnable',
                                     default=1)] = And(int, lambda n: n in
                                                       (0, 1))

        # Each band has a configuration block in init.

        # Default data_out_mux definitions.  Should match fw.
        default_data_out_mux_dict = {
            0: [2, 3], 1: [0, 1], 2: [6, 7], 3: [8, 9],
            4: [2, 3], 5: [0, 1], 6: [6, 7], 7: [8, 9],
        }

        for band in bands:
            schema_dict['init'][f'band_{band}'] = {

                # Swap IQ channels on input
                Optional('iq_swap_in', default=0): And(int, lambda n: n in (0, 1)),
                # Swap IQ channels on output
                Optional('iq_swap_out', default=0): And(int, lambda n: n in (0, 1)),

                # Global feedback enable
                Optional('feedbackEnable', default=1): And(int, lambda n: n in (0, 1)),
                # Global feedback polarity
                Optional('feedbackPolarity', default=1): And(int, lambda n: n in (0, 1)),
                # Global feedback gain (might no longer be used in dspv3).
                "feedbackGain" :  And(int, lambda n: 0 <= n < 2**16),
                # Global feedback limit, in kHz (might no longer be used in dspv3).
                "feedbackLimitkHz" : And(Use(float), lambda f: f > 0),

                # Number of cycles to delay phase reference
                'refPhaseDelay': And(int, lambda n: 0 <= n < 2**4),
                # Finer phase reference delay, 307.2MHz clock ticks.  This
                # goes in the opposite direction as refPhaseDelay.
                'refPhaseDelayFine': And(int, lambda n: 0 <= n < 2**8),

                # RF attenuator on SMuRF output.  UC=up convert.  0.5dB steps.
                'att_uc': And(int, lambda n: 0 <= n < 2**5),
                # RF attenuator on SMuRF input.  DC=down convert.  0.5dB steps.
                'att_dc': And(int, lambda n: 0 <= n < 2**5),
                # Tone amplitude.  3dB steps.
                'amplitude_scale': And(int, lambda n: 0 <= n < 2**4),

                # data_out_mux
                Optional("data_out_mux",
                         default=default_data_out_mux_dict[band]) : \
                And([Use(int)], list, lambda l: len(l) == 2 and
                    l[0] != l[1] and all(0 <= ll <= 9 for ll in l)),

                # Matches system latency for LMS feedback (9.6 MHz
                # ticks, use multiples of 52).  For dspv3 to adjust to
                # match refPhaseDelay*4 (ignore refPhaseDelayFine for
                # this).  If not provided and lmsDelay=None, sets to
                # lmsDelay = 4 x refPhaseDelay.
                Optional('lmsDelay', default=None) : And(int, lambda n: 0 <= n < 2**5),

                # Adjust trigRstDly such that the ramp resets at the flux ramp
                # glitch.  2.4 MHz ticks.
                'trigRstDly': And(int, lambda n: 0 <= n < 2**7),

                # LMS gain, powers of 2
                'lmsGain': And(int, lambda n: 0 <= n < 2**3),
            }
        #### Done specifying init schema

        #### Start specifying attenuator schema
        # Here's another one that users probably shouldn't be touching.  Just
        # doing basic validation here - not checking that they're distinct,
        # for instance.
        schema_dict["attenuator"] = {
            'att1': And(int, lambda n: 0 <= n < 4),
            'att2': And(int, lambda n: 0 <= n < 4),
            'att3': And(int, lambda n: 0 <= n < 4),
            'att4': And(int, lambda n: 0 <= n < 4)
        }

        #### Done specifying attenuator schema

        #### Start specifying cryostat card schema
        ## SHOULD MAKE IT SO THAT WE JUST LOAD AND VALIDATE A SEPARATE,
        ## HARDWARE SPECIFIC CRYOSTAT CARD CONFIG FILE.  FOR NOW, JUST DO
        ## SOME VERY BASIC VALIDATION.
        def represents_int(string):
            try:
                int(string)
                return True
            except ValueError:
                return False

        schema_dict["pic_to_bias_group"] = {And(str, represents_int): int}
        schema_dict["bias_group_to_pair"] = {
            And(str, represents_int): [int, int]
        }
        #### Done specifying cryostat card schema

        #### Start specifying amplifier
        schema_dict["amplifier"] = {
            # 4K amplifier gate voltage, in volts.
            "hemt_Vg":
            Use(float),

            # Conversion from bits (the digital value the RTM DAC is set to)
            # to volts for the 4K amplifier gate.  Units are volts/bit.  An
            # important dependency is the voltage division on the cryostat
            # card, which can be different from cryostat card to cryostat card
            "bit_to_V_hemt":
            And(Use(float), lambda f: f > 0),
            # The 4K amplifier drain current is measured before a voltage
            # regulator, which also draws current.  An accurate measurement of
            # the 4K drain current requires subtracting the current drawn by
            # that regulator.  This is the offset to subtract off the measured
            # value, in mA.
            "hemt_Id_offset":
            Use(float),
            "50k_Id_offset":
            Use(float),
            # The resistance, in Ohm, of the resistor that is inline
            # with the 4K HEMT amplifier drain voltage source which is
            # used to infer the 4K HEMT amplifier drain current.  The
            # default value of 200 Ohm is the standard value in the
            # BOM for cryostat card revision C02 (PC-248-103-02-C02).
            # The resistor on that revision of the cryostat card is
            # R44.
            Optional('hemt_Vd_series_resistor', default=200.0):
            And(float, lambda f: f > 0),
            # The resistance, in Ohm, of the resistor that is inline
            # with the 50K amplifier drain voltage source which is
            # used to infer the 50K amplifier drain current.  The
            # default value of 10 Ohm is the standard value in the BOM
            # for cryostat card revision C02 (PC-248-103-02-C02).  The
            # resistor on that revision of the cryostat card is R54.
            Optional('50K_amp_Vd_series_resistor', default=10.0):
            And(float, lambda f: f > 0),
            # 50K amplifier gate voltage, in volts.
            "LNA_Vg":
            Use(float),
            # Which RTM DAC is wired to the gate of the 50K amplifier.
            "dac_num_50k":
            And(int, lambda n: 1 <= n <= 32),
            # Conversion from bits (the digital value the RTM DAC is set to)
            # to volts for the 50K amplifier gate.  Units are volts/bit.  An
            # important dependency is the voltage division on the cryostat
            # card, which can be different from cryostat card to cryostat card
            "bit_to_V_50k":
            And(Use(float), lambda f: f > 0),
            # Software limit on the minimum gate voltage that can be set for the 4K amplifier.
            "hemt_gate_min_voltage":
            Use(float),
            # Software limit on the maximum gate voltage that can be set for the 4K amplifier.
            "hemt_gate_max_voltage":
            Use(float)
        }
        #### Done specifying amplifier

        #### Start specifying tune parameter schema
        schema_dict['tune_band'] = {
            'fraction_full_scale': And(Use(float), lambda f: 0 < f <= 1.),
            'reset_rate_khz': And(Use(float), lambda f: 0 <= f <= 100),
            Optional('default_tune', default=None): And(str, os.path.isfile)
        }

        ## Add tuning params that must be specified per band.
        per_band_tuning_params = [
            ('lms_freq', And(Use(float), lambda f: f > 0)),
            ('delta_freq', And(Use(float), lambda f: f > 0)),
            ('feedback_start_frac', And(Use(float), lambda f: 0 <= f <= 1)),
            ('feedback_end_frac', And(Use(float), lambda f: 0 <= f <= 1)),
            ('gradient_descent_gain', And(Use(float), lambda f: f > 0)),
            ('gradient_descent_averages', And(Use(int), lambda n: n > 0)),
            ('gradient_descent_converge_hz', And(Use(float), lambda f: f > 0)),
            ('gradient_descent_momentum', And(Use(int), lambda n: n >= 0)),
            ('gradient_descent_step_hz', And(Use(float), lambda f: f > 0)),
            ('gradient_descent_beta', And(Use(float), lambda f: 0 <= f <= 1)),
            ('eta_scan_averages', And(Use(int), lambda n: n > 0)),
            ('eta_scan_del_f', And(Use(int), lambda n: n > 0)),
        ]

        for band in bands:
            for (param, value) in per_band_tuning_params:
                if band == bands[0]:
                    schema_dict['tune_band'][param] = {}
                schema_dict['tune_band'][param][str(band)] = value
        ## Done adding tuning params that must be specified per band.

        #### Done specifying tune parameter schema

        #### Start specifying bad mask
        schema_dict[Optional('bad_mask', default={})] = {
            # Why are these indexed by integers that are also strings?
            # I don't think the keys here are used at all.
            Optional(str):
            And([Use(float)], list, lambda l: len(l) == 2 and l[0] < l[1] and
                all(4000 <= ll <= 8000 for ll in l))
        }
        #### Done specifying bad mask

        #### Start specifying TES-related
        # TES shunt resistance
        schema_dict["R_sh"] = And(Use(float), lambda f: f > 0)

        # Round-trip resistance on TES bias lines, in low current mode.
        # Includes the resistance on the cryostat cards, and cable resistance.
        schema_dict["bias_line_resistance"] = And(Use(float), lambda f: f > 0)

        # Ratio of the current per DAC unit in high current mode to the
        # current in low current mode.  Constrained to be greater than or
        # equal to 1 since otherwise what does high current mode EVEN MEAN.
        schema_dict["high_low_current_ratio"] = And(Use(float),
                                                    lambda f: f >= 1)

        # If 1, TES biasing will *always* be in high current mode.
        schema_dict[Optional('high_current_mode_bool',
                             default=0)] = And(int, lambda n: n in (0, 1))

        # All SMuRF bias groups with TESs connected.
        schema_dict["all_bias_groups"] = And([int], list,
                                             lambda l: all(0 <= ll < 16
                                                           for ll in l))
        #### Done specifying TES-related

        #### Start specifying flux ramp-related
        schema_dict["flux_ramp"] = {
            # 20 bits for the C0 RTMs, 32 bits for the C1 RTMs.
            "num_flux_ramp_counter_bits": And(int, lambda n: n in (20, 32))
        }
        #### Done specifying flux-ramp related

        #### Start specifying constants schema
        # If all of a schema dictionary's keys are optional, defaults must be
        # specified twice: on the Optional key for the dictionary itself, and
        # on the Optional keys inside that dictionary's schema.
        constants_default_dict = {'pA_per_phi0': 9.e6}
        cdd_key = Optional("constant", default=constants_default_dict)
        schema_dict[cdd_key] = {}
        # Assumes all constants default values are floats
        for key, value in constants_default_dict.items():
            schema_dict[cdd_key][Optional(key, default=value)] = Use(float)
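        # e.g. {'constant': {}} validates to {'constant': {'pA_per_phi0': 9e6}},
        # and a missing 'constant' block defaults to constants_default_dict.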
        #### Done specifying constants schema

        #### Start thermal schema
        # OT protection for ultrascale FPGA, in degrees C.  If None,
        # then pysmurf doesn't try to engage OT protection.  For
        # unknown reasons, enabling OT protection in the ELMA crate
        # we've been using for testing on campus at Stanford takes
        # down the carrier after the third command in the enable
        # sequence (set_ot_threshold_disable(0)), but it works in the
        # RF lab at SLAC where they've been testing with an ASIS
        # crate.  Shawn has yet to have this work for him.  Newer fw
        # versions will have OT protection enabled in the fw.
        schema_dict[Optional('ultrascale_temperature_limit_degC',
                             default=None)] = And(Use(float),
                                                  lambda f: 0 <= f <= 99)
        #### Done specifying thermal schema

        #### Start specifying timing-related schema
        schema_dict["timing"] = {
            # "ext_ref" : internal oscillator locked to an external
            #   front-panel reference, or unlocked if there is no front
            #   panel reference.  (LmkReg_0x0147 : 0x1A).  Also sets
            #   flux_ramp_start_mode=0
            # "backplane" : takes timing from timing master through
            #   backplane.  Also sets flux_ramp_start_mode=1.
            "timing_reference": And(str, lambda s: s in
                                    ('ext_ref', 'backplane'))
        }
        #### Done specifying timing-related schema

        #### Start specifying smurf2mce
        # System should be smart enough to determine fs on the fly.
        schema_dict["fs"] = And(Use(float), lambda f: f > 0)

        def user_has_write_access(dirpath):
            return os.access(dirpath, os.W_OK)

        def dir_exists_with_write_access(file_path):
            filedir_path = os.path.dirname(file_path)
            if (not os.path.isdir(filedir_path)
                    or not user_has_write_access(filedir_path)):
                return False
            return True

        #### Start specifying directories
        schema_dict[Optional("default_data_dir",
                             default="/data/smurf_data")] = And(
                                 str, os.path.isdir, user_has_write_access)
        schema_dict[Optional("smurf_cmd_dir",
                             default="/data/smurf_data/smurf_cmd")] = And(
                                 str, os.path.isdir, user_has_write_access)
        schema_dict[Optional("tune_dir",
                             default="/data/smurf_data/tune")] = And(
                                 str, os.path.isdir, user_has_write_access)
        schema_dict[Optional("status_dir",
                             default="/data/smurf_data/status")] = And(
                                 str, os.path.isdir, user_has_write_access)
        #### Done specifying directories

        ##### Done building validation schema
        ###################################################

        ###################################################
        # Validate the full config
        schema = Schema(schema_dict, ignore_extra_keys=True)
        validated_config = schema.validate(loaded_config)

        ###################################################
        # Higher level/composite validation, if schema validation
        # succeeds

        # Check that no DAC has been assigned to multiple TES bias groups
        bias_group_to_pair = validated_config['bias_group_to_pair']
        tes_bias_group_dacs = np.ndarray.flatten(
            np.array([bg2p[1] for bg2p in bias_group_to_pair.items()]))
        assert (len(
            np.unique(tes_bias_group_dacs)) == len(tes_bias_group_dacs)), (
                'Configuration failed - DACs may not be ' +
                'assigned to multiple TES bias groups.')

        # Check that the DAC specified as the 50K gate driver
        # isn't also defined as one of the DACs in a TES bias group
        # pair.
        dac_num_50k = validated_config['amplifier']['dac_num_50k']
        # Taking the first element works because we already required
        # that no DAC show up in more than one TES bias group
        # definition.
        if dac_num_50k in tes_bias_group_dacs:
            # which TES bias group is defined as using the requested
            # DAC for biasing the 50K amplifier?
            bias_group = int([
                bg2p[0] for bg2p in bias_group_to_pair.items()
                if dac_num_50k in bg2p[1]
            ][0])
            assert False, 'Configuration failed - DAC requested ' + \
                f'for driving 50K amplifier gate, {dac_num_50k}, is ' + \
                f'also assigned to TES bias group {bias_group}.'

        ##### Done with higher level/composite validation.
        ###################################################

        # Splice in the sorted init:bands key before returning
        validated_config['init']['bands'] = bands

        return validated_config
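
The heart of the pattern above, reduced to a self-contained sketch (this is
not pysmurf's real schema): Optional() fills in defaults, Use(float)
conditions types, and lambdas enforce ranges.

from schema import And, Optional, Schema, Use

demo_schema = Schema({
    'R_sh': And(Use(float), lambda f: f > 0),
    Optional('high_current_mode_bool', default=0): And(int,
                                                       lambda n: n in (0, 1)),
}, ignore_extra_keys=True)

validated = demo_schema.validate({'R_sh': '0.0004', 'comment': 'ignored'})
assert validated == {'R_sh': 0.0004, 'high_current_mode_bool': 0}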
Example #4
class Stage(object):
    STAGE_FILE = "Dvcfile"
    STAGE_FILE_SUFFIX = ".dvc"

    PARAM_MD5 = "md5"
    PARAM_CMD = "cmd"
    PARAM_WDIR = "wdir"
    PARAM_DEPS = "deps"
    PARAM_OUTS = "outs"
    PARAM_LOCKED = "locked"
    PARAM_META = "meta"
    PARAM_ALWAYS_CHANGED = "always_changed"

    SCHEMA = {
        Optional(PARAM_MD5): Or(str, None),
        Optional(PARAM_CMD): Or(str, None),
        Optional(PARAM_WDIR): Or(str, None),
        Optional(PARAM_DEPS): Or(And(list, Schema([dependency.SCHEMA])), None),
        Optional(PARAM_OUTS): Or(And(list, Schema([output.SCHEMA])), None),
        Optional(PARAM_LOCKED): bool,
        Optional(PARAM_META): object,
        Optional(PARAM_ALWAYS_CHANGED): bool,
    }

    TAG_REGEX = r"^(?P<path>.*)@(?P<tag>[^\\/@:]*)$"

    def __init__(
        self,
        repo,
        path=None,
        cmd=None,
        wdir=os.curdir,
        deps=None,
        outs=None,
        md5=None,
        locked=False,
        tag=None,
        state=None,
        always_changed=False,
    ):
        if deps is None:
            deps = []
        if outs is None:
            outs = []

        self.repo = repo
        self.path = path
        self.cmd = cmd
        self.wdir = wdir
        self.outs = outs
        self.deps = deps
        self.md5 = md5
        self.locked = locked
        self.tag = tag
        self.always_changed = always_changed
        self._state = state or {}

    def __repr__(self):
        return "Stage: '{path}'".format(
            path=self.relpath if self.path else "No path")

    @property
    def relpath(self):
        return relpath(self.path)

    @property
    def is_data_source(self):
        """Whether the DVC-file was created with `dvc add` or `dvc import`"""
        return self.cmd is None

    @staticmethod
    def is_valid_filename(path):
        return (
            # path.endswith doesn't work for encoded unicode filenames on
            # Python 2 and since Stage.STAGE_FILE_SUFFIX is ascii then it is
            # not needed to decode the path from py2's str
            path[-len(Stage.STAGE_FILE_SUFFIX):] == Stage.STAGE_FILE_SUFFIX
            or os.path.basename(path) == Stage.STAGE_FILE)

    @staticmethod
    def is_stage_file(path):
        return os.path.isfile(path) and Stage.is_valid_filename(path)

    def changed_md5(self):
        return self.md5 != self._compute_md5()

    @property
    def is_callback(self):
        """
        A callback stage is always considered as changed,
        so it runs on every `dvc repro` call.
        """
        return not self.is_data_source and len(self.deps) == 0

    @property
    def is_import(self):
        """Whether the DVC-file was created with `dvc import`."""
        return not self.cmd and len(self.deps) == 1 and len(self.outs) == 1

    @property
    def is_repo_import(self):
        if not self.is_import:
            return False

        return isinstance(self.deps[0], dependency.DependencyREPO)

    def _changed_deps(self):
        if self.locked:
            return False

        if self.is_callback:
            logger.warning(
                "DVC-file '{fname}' is a 'callback' stage "
                "(has a command and no dependencies) and thus always "
                "considered as changed.".format(fname=self.relpath))
            return True

        if self.always_changed:
            return True

        for dep in self.deps:
            status = dep.status()
            if status:
                logger.warning(
                    "Dependency '{dep}' of '{stage}' changed because it is "
                    "'{status}'.".format(dep=dep,
                                         stage=self.relpath,
                                         status=status[str(dep)]))
                return True

        return False

    def _changed_outs(self):
        for out in self.outs:
            status = out.status()
            if status:
                logger.warning(
                    "Output '{out}' of '{stage}' changed because it is "
                    "'{status}'".format(out=out,
                                        stage=self.relpath,
                                        status=status[str(out)]))
                return True

        return False

    def _changed_md5(self):
        if self.changed_md5():
            logger.warning("DVC-file '{}' changed.".format(self.relpath))
            return True
        return False

    def changed(self):
        # Short-circuit order: stage md5 is fast, deps are expected to change
        ret = (self._changed_md5() or self._changed_deps()
               or self._changed_outs())

        if ret:
            logger.warning("Stage '{}' changed.".format(self.relpath))
        else:
            logger.info("Stage '{}' didn't change.".format(self.relpath))

        return ret

    def remove_outs(self, ignore_remove=False, force=False):
        """Used mainly for `dvc remove --outs` and :func:`Stage.reproduce`."""
        for out in self.outs:
            if out.persist and not force:
                out.unprotect()
            else:
                logger.debug("Removing output '{out}' of '{stage}'.".format(
                    out=out, stage=self.relpath))
                out.remove(ignore_remove=ignore_remove)

    def unprotect_outs(self):
        for out in self.outs:
            out.unprotect()

    def remove(self, force=False, remove_outs=True):
        if remove_outs:
            self.remove_outs(ignore_remove=True, force=force)
        else:
            self.unprotect_outs()
        os.unlink(self.path)

    def reproduce(self, interactive=False, **kwargs):

        if not kwargs.get("force", False) and not self.changed():
            return None

        msg = ("Going to reproduce '{stage}'. "
               "Are you sure you want to continue?".format(stage=self.relpath))

        if interactive and not prompt.confirm(msg):
            raise DvcException("reproduction aborted by the user")

        self.run(**kwargs)

        logger.debug("'{stage}' was reproduced".format(stage=self.relpath))

        return self

    def update(self):
        if not self.is_repo_import and not self.is_import:
            raise StageUpdateError(self.relpath)

        self.deps[0].update()
        locked = self.locked
        self.locked = False
        try:
            self.reproduce()
        finally:
            self.locked = locked

    @staticmethod
    def validate(d, fname=None):
        from dvc.utils import convert_to_unicode

        try:
            Schema(Stage.SCHEMA).validate(convert_to_unicode(d))
        except SchemaError as exc:
            raise StageFileFormatError(fname, exc)

    @classmethod
    def _stage_fname(cls, outs, add):
        if not outs:
            return cls.STAGE_FILE

        out = outs[0]
        fname = out.path_info.name + cls.STAGE_FILE_SUFFIX

        if (add and out.is_in_repo
                and not contains_symlink_up_to(out.fspath, out.repo.root_dir)):
            fname = out.path_info.with_name(fname).fspath

        return fname

    @staticmethod
    def _check_stage_path(repo, path):
        assert repo is not None

        real_path = os.path.realpath(path)
        if not os.path.exists(real_path):
            raise StagePathNotFoundError(path)

        if not os.path.isdir(real_path):
            raise StagePathNotDirectoryError(path)

        proj_dir = os.path.realpath(repo.root_dir) + os.path.sep
        if not (real_path + os.path.sep).startswith(proj_dir):
            raise StagePathOutsideError(path)

    @property
    def is_cached(self):
        """
        Checks if this stage has already been run and stored
        """
        from dvc.remote.local import RemoteLOCAL
        from dvc.remote.s3 import RemoteS3

        old = Stage.load(self.repo, self.path)
        if old._changed_outs():
            return False

        # NOTE: need to save checksums for deps in order to compare them
        # with what is written in the old stage.
        for dep in self.deps:
            dep.save()

        old_d = old.dumpd()
        new_d = self.dumpd()

        # NOTE: need to remove checksums from old dict in order to compare
        # it to the new one, since the new one doesn't have checksums yet.
        old_d.pop(self.PARAM_MD5, None)
        new_d.pop(self.PARAM_MD5, None)
        outs = old_d.get(self.PARAM_OUTS, [])
        for out in outs:
            out.pop(RemoteLOCAL.PARAM_CHECKSUM, None)
            out.pop(RemoteS3.PARAM_CHECKSUM, None)

        if old_d != new_d:
            return False

        # NOTE: committing to prevent potential data duplication. For example
        #
        #    $ dvc config cache.type hardlink
        #    $ echo foo > foo
        #    $ dvc add foo
        #    $ rm -f foo
        #    $ echo foo > foo
        #    $ dvc add foo # should replace foo with a link to cache
        #
        old.commit()

        return True

    @staticmethod
    def create(repo, **kwargs):

        wdir = kwargs.get("wdir", None)
        cwd = kwargs.get("cwd", None)
        fname = kwargs.get("fname", None)
        add = kwargs.get("add", False)

        # Backward compatibility for `cwd` option
        if wdir is None and cwd is not None:
            if fname is not None and os.path.basename(fname) != fname:
                raise StageFileBadNameError(
                    "DVC-file name '{fname}' may not contain subdirectories"
                    " if '-c|--cwd' (deprecated) is specified. Use '-w|--wdir'"
                    " along with '-f' to specify DVC-file path with working"
                    " directory.".format(fname=fname))
            wdir = cwd
        elif wdir is None:
            wdir = os.curdir

        stage = Stage(
            repo=repo,
            wdir=wdir,
            cmd=kwargs.get("cmd", None),
            locked=kwargs.get("locked", False),
            always_changed=kwargs.get("always_changed", False),
        )

        Stage._fill_stage_outputs(stage, **kwargs)
        stage.deps = dependency.loads_from(stage,
                                           kwargs.get("deps", []),
                                           erepo=kwargs.get("erepo", None))

        stage._check_circular_dependency()
        stage._check_duplicated_arguments()

        if not fname:
            fname = Stage._stage_fname(stage.outs, add)
        stage._check_dvc_filename(fname)

        # Autodetecting wdir for add, we need to create outs first to do that,
        # so we start with wdir = . and remap out paths later.
        if add and kwargs.get("wdir") is None and cwd is None:
            wdir = os.path.dirname(fname)

            for out in chain(stage.outs, stage.deps):
                if out.is_in_repo:
                    out.def_path = relpath(out.path_info, wdir)

        wdir = os.path.abspath(wdir)

        if cwd is not None:
            path = os.path.join(wdir, fname)
        else:
            path = os.path.abspath(fname)

        Stage._check_stage_path(repo, wdir)
        Stage._check_stage_path(repo, os.path.dirname(path))

        stage.wdir = wdir
        stage.path = path

        ignore_build_cache = kwargs.get("ignore_build_cache", False)

        # NOTE: remove outs before we check build cache
        if kwargs.get("remove_outs", False):
            logger.warning("--remove-outs is deprecated."
                           " It is now the default behavior,"
                           " so there's no need to use this option anymore.")
            stage.remove_outs(ignore_remove=False)
            logger.warning("Build cache is ignored when using --remove-outs.")
            ignore_build_cache = True

        if os.path.exists(path) and any(out.persist for out in stage.outs):
            logger.warning("Build cache is ignored when persisting outputs.")
            ignore_build_cache = True

        if os.path.exists(path):
            if (not ignore_build_cache and stage.is_cached
                    and not stage.is_callback and not stage.always_changed):
                logger.info("Stage is cached, skipping.")
                return None

            msg = ("'{}' already exists. Do you wish to run the command and "
                   "overwrite it?".format(stage.relpath))

            if not kwargs.get("overwrite", True) and not prompt.confirm(msg):
                raise StageFileAlreadyExistsError(stage.relpath)

            os.unlink(path)

        return stage

    @staticmethod
    def _fill_stage_outputs(stage, **kwargs):
        stage.outs = output.loads_from(stage,
                                       kwargs.get("outs", []),
                                       use_cache=True)
        stage.outs += output.loads_from(stage,
                                        kwargs.get("metrics", []),
                                        use_cache=True,
                                        metric=True)
        stage.outs += output.loads_from(stage,
                                        kwargs.get("outs_persist", []),
                                        use_cache=True,
                                        persist=True)
        stage.outs += output.loads_from(stage,
                                        kwargs.get("outs_no_cache", []),
                                        use_cache=False)
        stage.outs += output.loads_from(
            stage,
            kwargs.get("metrics_no_cache", []),
            use_cache=False,
            metric=True,
        )
        stage.outs += output.loads_from(
            stage,
            kwargs.get("outs_persist_no_cache", []),
            use_cache=False,
            persist=True,
        )

    @staticmethod
    def _check_dvc_filename(fname):
        if not Stage.is_valid_filename(fname):
            raise StageFileBadNameError(
                "bad DVC-file name '{}'. DVC-files should be named"
                " 'Dvcfile' or have a '.dvc' suffix (e.g. '{}.dvc').".format(
                    relpath(fname), os.path.basename(fname)))

    @staticmethod
    def _check_file_exists(repo, fname):
        if not repo.tree.exists(fname):
            raise StageFileDoesNotExistError(fname)

    @staticmethod
    def _check_isfile(repo, fname):
        if not repo.tree.isfile(fname):
            raise StageFileIsNotDvcFileError(fname)

    @classmethod
    def _get_path_tag(cls, s):
        regex = re.compile(cls.TAG_REGEX)
        match = regex.match(s)
        if not match:
            return s, None
        return match.group("path"), match.group("tag")
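
    # e.g. _get_path_tag("train.dvc@v1") returns ("train.dvc", "v1"); with no
    # tag, _get_path_tag("train.dvc") returns ("train.dvc", None).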

    @staticmethod
    def load(repo, fname):
        fname, tag = Stage._get_path_tag(fname)

        # it raises the proper exceptions by priority:
        # 1. when the file doesn't exist
        # 2. filename is not a DVC-file
        # 3. path doesn't represent a regular file
        Stage._check_file_exists(repo, fname)
        Stage._check_dvc_filename(fname)
        Stage._check_isfile(repo, fname)

        with repo.tree.open(fname) as fd:
            d = load_stage_fd(fd, fname)
        # Making a deepcopy since the original structure
        # loses keys in deps and outs load
        state = copy.deepcopy(d)

        Stage.validate(d, fname=relpath(fname))
        path = os.path.abspath(fname)

        stage = Stage(
            repo=repo,
            path=path,
            wdir=os.path.abspath(
                os.path.join(os.path.dirname(path),
                             d.get(Stage.PARAM_WDIR, "."))),
            cmd=d.get(Stage.PARAM_CMD),
            md5=d.get(Stage.PARAM_MD5),
            locked=d.get(Stage.PARAM_LOCKED, False),
            tag=tag,
            always_changed=d.get(Stage.PARAM_ALWAYS_CHANGED, False),
            state=state,
        )

        stage.deps = dependency.loadd_from(stage, d.get(Stage.PARAM_DEPS, []))
        stage.outs = output.loadd_from(stage, d.get(Stage.PARAM_OUTS, []))

        return stage

    def dumpd(self):
        rel_wdir = relpath(self.wdir, os.path.dirname(self.path))

        wdir = pathlib.PurePath(rel_wdir).as_posix()
        wdir = wdir if wdir != "." else None

        return {
            key: value
            for key, value in {
                Stage.PARAM_MD5: self.md5,
                Stage.PARAM_CMD: self.cmd,
                Stage.PARAM_WDIR: wdir,
                Stage.PARAM_LOCKED: self.locked,
                Stage.PARAM_DEPS: [d.dumpd() for d in self.deps],
                Stage.PARAM_OUTS: [o.dumpd() for o in self.outs],
                Stage.PARAM_META: self._state.get("meta"),
                Stage.PARAM_ALWAYS_CHANGED: self.always_changed,
            }.items() if value
        }

    def dump(self):
        fname = self.path

        self._check_dvc_filename(fname)

        logger.info(
            "Saving information to '{file}'.".format(file=relpath(fname)))
        d = self.dumpd()
        apply_diff(d, self._state)
        dump_stage_file(fname, self._state)

        self.repo.scm.track_file(relpath(fname))

    def _compute_md5(self):
        from dvc.output.base import OutputBase

        d = self.dumpd()

        # Remove md5 and meta, these should not affect stage md5
        d.pop(self.PARAM_MD5, None)
        d.pop(self.PARAM_META, None)

        # Ignore the wdir default value. In this case DVC-file w/o
        # wdir has the same md5 as a file with the default value specified.
        # It's important for backward compatibility with pipelines that
        # didn't have WDIR in their DVC-files.
        if d.get(self.PARAM_WDIR) == ".":
            del d[self.PARAM_WDIR]

        # NOTE: excluding parameters that don't affect the state of the
        # pipeline. Not excluding `OutputLOCAL.PARAM_CACHE`, because if
        # it has changed, we might not have that output in our cache.
        m = dict_md5(
            d,
            exclude=[
                self.PARAM_LOCKED,
                OutputBase.PARAM_METRIC,
                OutputBase.PARAM_TAGS,
                OutputBase.PARAM_PERSIST,
            ],
        )
        logger.debug("Computed stage '{}' md5: '{}'".format(self.relpath, m))
        return m

    def save(self):
        for dep in self.deps:
            dep.save()

        for out in self.outs:
            out.save()

        self.md5 = self._compute_md5()

    @staticmethod
    def _changed_entries(entries):
        return [
            str(entry) for entry in entries
            if entry.checksum and entry.changed_checksum()
        ]

    def check_can_commit(self, force):
        changed_deps = self._changed_entries(self.deps)
        changed_outs = self._changed_entries(self.outs)

        if changed_deps or changed_outs or self.changed_md5():
            msg = ("dependencies {}".format(changed_deps)
                   if changed_deps else "")
            msg += " and " if (changed_deps and changed_outs) else ""
            msg += "outputs {}".format(changed_outs) if changed_outs else ""
            msg += "md5" if not (changed_deps or changed_outs) else ""
            msg += " of '{}' changed. Are you sure you commit it?".format(
                self.relpath)
            if not force and not prompt.confirm(msg):
                raise StageCommitError(
                    "unable to commit changed '{}'. Use `-f|--force` to "
                    "force.".format(self.relpath))
            self.save()

    def commit(self):
        for out in self.outs:
            out.commit()

    def _check_missing_deps(self):
        missing = [dep for dep in self.deps if not dep.exists]

        if any(missing):
            raise MissingDep(missing)

    @staticmethod
    def _warn_if_fish(executable):  # pragma: no cover
        if (executable is None
                or os.path.basename(os.path.realpath(executable)) != "fish"):
            return

        logger.warning(
            "DVC detected that you are using fish as your default "
            "shell. Be aware that it might cause problems by overwriting "
            "your current environment variables with values defined "
            "in '.fishrc', which might affect your command. See "
            "https://github.com/iterative/dvc/issues/1307. ")

    def _check_circular_dependency(self):
        from dvc.exceptions import CircularDependencyError

        circular_dependencies = set(d.path_info for d in self.deps) & set(
            o.path_info for o in self.outs)

        if circular_dependencies:
            raise CircularDependencyError(str(circular_dependencies.pop()))

    def _check_duplicated_arguments(self):
        from dvc.exceptions import ArgumentDuplicationError
        from collections import Counter

        path_counts = Counter(edge.path_info for edge in self.deps + self.outs)

        for path, occurrence in path_counts.items():
            if occurrence > 1:
                raise ArgumentDuplicationError(str(path))

    def _run(self):
        self._check_missing_deps()
        executable = os.getenv("SHELL") if os.name != "nt" else None
        self._warn_if_fish(executable)

        main_thread = isinstance(threading.current_thread(),
                                 threading._MainThread)
        old_handler = None
        p = None

        try:
            p = subprocess.Popen(
                self.cmd,
                cwd=self.wdir,
                shell=True,
                env=fix_env(os.environ),
                executable=executable,
                close_fds=True,
            )
            if main_thread:
                old_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
            p.communicate()
        finally:
            if old_handler:
                signal.signal(signal.SIGINT, old_handler)

        if (p is None) or (p.returncode != 0):
            raise StageCmdFailedError(self)

    def run(self, dry=False, no_commit=False, force=False):
        if (self.cmd or self.is_import) and not self.locked and not dry:
            self.remove_outs(ignore_remove=False, force=False)

        if self.locked:
            logger.info("Verifying outputs in locked stage '{stage}'".format(
                stage=self.relpath))
            if not dry:
                self.check_missing_outputs()

        elif self.is_import:
            logger.info("Importing '{dep}' -> '{out}'".format(
                dep=self.deps[0], out=self.outs[0]))
            if not dry:
                if not force and self._already_cached():
                    self.outs[0].checkout()
                else:
                    self.deps[0].download(self.outs[0])

        elif self.is_data_source:
            msg = "Verifying data sources in '{}'".format(self.relpath)
            logger.info(msg)
            if not dry:
                self.check_missing_outputs()

        else:
            logger.info("Running command:\n\t{}".format(self.cmd))
            if not dry:
                if (not force and not self.is_callback
                        and not self.always_changed
                        and self._already_cached()):
                    self.checkout()
                else:
                    self._run()

        if not dry:
            self.save()
            if not no_commit:
                self.commit()

    def check_missing_outputs(self):
        paths = [str(out) for out in self.outs if not out.exists]
        if paths:
            raise MissingDataSource(paths)

    def checkout(self, force=False, progress_callback=None):
        for out in self.outs:
            out.checkout(force=force,
                         tag=self.tag,
                         progress_callback=progress_callback)

    @staticmethod
    def _status(entries):
        ret = {}

        for entry in entries:
            ret.update(entry.status())

        return ret

    def status(self):
        ret = []

        if not self.locked:
            deps_status = self._status(self.deps)
            if deps_status:
                ret.append({"changed deps": deps_status})

        outs_status = self._status(self.outs)
        if outs_status:
            ret.append({"changed outs": outs_status})

        if self.changed_md5():
            ret.append("changed checksum")

        if self.is_callback or self.always_changed:
            ret.append("always changed")

        if ret:
            return {self.relpath: ret}

        return {}

    def _already_cached(self):
        return (not self.changed_md5()
                and all(not dep.changed() for dep in self.deps)
                and all(not out.changed_cache() if out.use_cache
                        else not out.changed() for out in self.outs))

    def get_all_files_number(self):
        return sum(out.get_files_number() for out in self.outs)
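
Every key in Stage.SCHEMA is Optional, so a DVC-file body with any subset of
these fields validates; a self-contained sketch of that behavior, with the
dependency/output sub-schemas stubbed out:

from schema import Optional, Or, Schema

REDUCED_STAGE_SCHEMA = Schema({
    Optional("md5"): Or(str, None),
    Optional("cmd"): Or(str, None),
    Optional("locked"): bool,
})

REDUCED_STAGE_SCHEMA.validate({})                         # empty body is valid
REDUCED_STAGE_SCHEMA.validate({"cmd": "python train.py"})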
Example #5
        'user': assignee.user.username,
        'reason': assignee.reason,
    }


def is_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


machine_schema = Schema({
    Optional('netboot_enabled'): bool,
    Optional('subarch'): Or(None, And(str, lambda s: validators.length(s, min=0, max=256))),
    Optional('preseed_id'): Or(None, Use(int)),
    Optional('kernel_id'): Or(None, Use(int)),
    Optional('kernel_opts'): And(str, lambda s: validators.length(s, min=0, max=1024)),
    Optional('initrd_id'): Or(None, Use(int)),
}, ignore_extra_keys=True)


machine_power_schema = Schema({
    'state': And(str, lambda s: s in ('on', 'off', 'reboot', 'pxe_reboot', 'bios_reboot', 'disk_reboot')),
})


machine_state_post_schema = Schema({
    # note the trailing comma: ('provision') without it is just a string, and
    # `s in 'provision'` would accept any substring such as 'vis'
    'state': And(str, lambda s: s in ('provision',)),
})
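
# Sketch: the power schema accepts exactly the listed states.
machine_power_schema.validate({'state': 'pxe_reboot'})   # passes
# machine_power_schema.validate({'state': 'sleep'})      # raises SchemaError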
Example #6
def test_json_schema_and_simplify():
    s = And('test')
    assert s.json_schema() == {'enum': ['test']}
Example #7
from schema import Schema, And, Use
from engine.helpers.const.service_characteristics import TYPE, REGIONS, DEPLOYMENT_TIME, LEASING_PERIOD

upload_service_schema = Schema({
    'productName': Use(str),
    'Service': Use(str),
    'logo': Use(str),
    'fileName': Use(str),
    'description': Use(str),
    'region': [And(str, Use(str.upper), lambda s: s in REGIONS)],
    'serviceType': [And(str, Use(str.upper), lambda s: s in TYPE)],
    'attackType': [Use(str.upper)],
    'deploymentTime': And(str, Use(str.upper), lambda s: s in DEPLOYMENT_TIME),
    'leasingPeriod': And(str, Use(str.upper), lambda s: s in LEASING_PERIOD),
    'price': And(Use(int), lambda n: n > 0),
    'txHash': Use(str),
    'serviceHash': Use(str)
})
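
The And(str, Use(str.upper), predicate) chain normalizes case before the
membership check; a self-contained sketch of the same idea, with REGIONS
standing in for the imported constant:

from schema import And, Schema, Use

REGIONS = {'EU', 'US'}  # stand-in for the project's constant
normalize_region = Schema(And(str, Use(str.upper), lambda s: s in REGIONS))
assert normalize_region.validate('eu') == 'EU'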
Example #8
import json
from requests.exceptions import ConnectionError
from schema import Schema, And, Use, Optional
import requests
from os.path import expanduser
import os

vul_schema = Schema(
    {
        'name': str,
        'tool': str,
        'description': str,
        'project': str,
        'target': str,
        'scan': str,
        Optional('cwe'): And(Use(int)),
        Optional('observation'): str,
        Optional('severity'): And(Use(int), lambda n: 0 <= n <= 3),
        Optional('remediation'): str,
        Optional('evidences'): list
    },
    ignore_extra_keys=False)
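
# A minimal document that satisfies vul_schema (values illustrative; the
# Optional keys may be omitted):
vul_schema.validate({
    'name': 'SQL Injection',
    'tool': 'zap',
    'description': 'user input reaches a query unescaped',
    'project': 'demo',
    'target': 'http://localhost',
    'scan': 'scan-1',
    'severity': 3,
})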

evidence_schema = Schema(
    {
        'name': str,
        'url': str,
        'vulnId': str,
        Optional('param'): str,
        Optional('log'): str,
        Optional('attack'): str,
        # the remaining keys and the closing bracket are cut off in the
        # source; closed here so the excerpt parses
    })
Example #9
            return result
        else:
            return tuple([int(s) for s in data])   
        
def percent_to_float(data):
    if isinstance(data, str) and re.match(r'-?\d+(\.\d+)?%', data):
        data = float(data.strip('%')) / 100
    if isinstance(data, int):
        data = float(data)
    else:
        assert isinstance(data, float), 'This field should be float, int or percent string'
    return data
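
# e.g. percent_to_float('12.5%') == 0.125 and percent_to_float(1) == 1.0; a
# plain float passes through unchanged.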

policy_schema = Schema({
    Optional('weights', default=None): list,
    Optional('method', default=None): And(str, lambda s: s in ["per_channel", "per_tensor"]),
    Optional('init_sparsity', default=0): And(float, lambda s: 0.0 <= s < 1.0),
    Hook('target_sparsity', handler=_valid_prune_sparsity): object,
    Optional('target_sparsity', default=0.5): float,
    Optional("start_epoch", default=0): int,
    Hook('end_epoch', handler=_valid_prune_epoch): object,
    Optional('end_epoch', default=4): int
})

ops_schema = Schema({
    Optional('weight', default=None): {
        Optional('granularity', default=None): And(
            list,
            lambda s: all(i in ['per_channel', 'per_tensor'] for i in s)),
        Optional('scheme', default=None): And(
            list,
Example #10
class Page(object):
    "Banana banana"
    meta_schema = {
        Optional('title'): And(str, len),
        Optional('symbols'): Schema([And(str, len)]),
        Optional('short-description'): And(str, len),
        Optional('render-subpages'): bool,
        Optional('auto-sort'): bool,
        Optional('full-width'): bool
    }
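
    # e.g. {'title': 'API Reference', 'render-subpages': False} validates,
    # while {'title': ''} fails the And(str, len) check.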

    # pylint: disable=too-many-arguments
    def __init__(self,
                 source_file,
                 ast,
                 output_path,
                 project_name,
                 meta=None,
                 raw_contents=None):
        "Banana banana"
        assert source_file
        basename = os.path.basename(source_file)
        name = os.path.splitext(basename)[0]
        ref = os.path.join(output_path,
                           re.sub(r'\W+', '-',
                                  os.path.splitext(basename)[0]))
        pagename = '%s.html' % ref

        self.ast = ast
        self.extension_name = None
        self.source_file = source_file
        self.raw_contents = raw_contents
        self.comment = None
        self.generated = False
        self.pre_sorted = False
        self.output_attrs = None
        self.subpages = OrderedSet()
        self.symbols = []
        self.typed_symbols = {}
        self.is_stale = True
        self.formatted_contents = None
        self.detailed_description = None
        self.build_path = None
        self.project_name = project_name
        self.cached_paths = OrderedSet()

        meta = meta or {}

        try:
            self.meta = Schema(Page.meta_schema).validate(meta)
        except SchemaError as _:
            warn('invalid-page-metadata',
                 '%s: Invalid metadata: \n%s' % (self.source_file, str(_)))
            self.meta = meta

        self.symbol_names = OrderedSet(meta.get('symbols') or [])
        self.short_description = meta.get('short-description')
        self.render_subpages = meta.get('render-subpages', True)

        self.title = None
        self.__discover_title(meta)
        self.link = Link(pagename, self.title or name, ref)

    def __getstate__(self):
        return {
            'ast': None,
            'build_path': None,
            'title': self.title,
            'raw_contents': self.raw_contents,
            'short_description': self.short_description,
            'extension_name': self.extension_name,
            'link': self.link,
            'meta': self.meta,
            'source_file': self.source_file,
            'comment': self.comment,
            'generated': self.generated,
            'is_stale': False,
            'formatted_contents': None,
            'detailed_description': None,
            'output_attrs': None,
            'symbols': [],
            'typed_symbols': {},
            'subpages': self.subpages,
            'symbol_names': self.symbol_names,
            'project_name': self.project_name,
            'pre_sorted': self.pre_sorted,
            'cached_paths': self.cached_paths,
            'render_subpages': self.render_subpages
        }

    def resolve_symbols(self, tree, database, link_resolver):
        """
        When this method is called, the page's symbol names are queried
        from `database`, and added to lists of actual symbols, sorted
        by symbol class.
        """
        typed_symbols_list = namedtuple('TypedSymbolsList',
                                        ['name', 'symbols'])

        for subclass in all_subclasses(Symbol):
            self.typed_symbols[subclass] = typed_symbols_list(
                subclass.get_plural_name(), [])

        all_syms = OrderedSet()
        for sym_name in self.symbol_names:
            sym = database.get_symbol(sym_name)
            self.__query_extra_symbols(sym, all_syms, tree, link_resolver,
                                       database)

        if tree.project.is_toplevel:
            page_path = self.link.ref
        else:
            page_path = self.project_name + '/' + self.link.ref

        for sym in all_syms:
            sym.update_children_comments()
            self.__resolve_symbol(sym, link_resolver, page_path)
            self.symbol_names.add(sym.unique_name)

        for sym_type in [
                ClassSymbol, AliasSymbol, InterfaceSymbol, StructSymbol
        ]:
            syms = self.typed_symbols[sym_type].symbols

            if not syms:
                continue

            if self.title is None:
                self.title = syms[0].display_name
            if self.comment is None:
                self.comment = Comment(name=self.source_file)
                self.comment.short_description = \
                    syms[0].comment.short_description
                self.comment.title = syms[0].comment.title
            break

    # pylint: disable=no-self-use
    def __fetch_comment(self, sym, database):
        old_comment = sym.comment
        new_comment = database.get_comment(sym.unique_name)
        sym.comment = Comment(sym.unique_name)

        if new_comment:
            sym.comment = new_comment
        elif old_comment:
            if old_comment.filename not in (ChangeTracker.all_stale_files |
                                            ChangeTracker.all_unlisted_files):
                sym.comment = old_comment

    def __format_page_comment(self, formatter, link_resolver):
        if not self.comment:
            return

        if self.comment.short_description:
            self.short_description = formatter.format_comment(
                self.comment.short_description, link_resolver).strip()
            if self.short_description.startswith('<p>'):
                self.short_description = self.short_description[3:-4]
        if self.comment.title:
            self.title = formatter.format_comment(self.comment.title,
                                                  link_resolver).strip()
            if self.title.startswith('<p>'):
                self.title = self.title[3:-4]

        if self.title:
            self.formatted_contents += '<h1>%s</h1>' % self.title

        self.formatted_contents += formatter.format_comment(
            self.comment, link_resolver)

    def format(self, formatter, link_resolver, output):
        """
        Banana banana
        """

        if not self.title and self.source_file:
            title = os.path.splitext(self.source_file)[0]
            self.title = os.path.basename(title).replace('-', ' ')

        self.formatted_contents = u''

        self.build_path = os.path.join(formatter.get_output_folder(self),
                                       self.link.ref)

        if self.ast:
            out, diags = cmark.ast_to_html(self.ast, link_resolver)
            for diag in diags:
                warn(diag.code,
                     message=diag.message,
                     filename=self.source_file)

            self.formatted_contents += out

        if not self.formatted_contents:
            self.__format_page_comment(formatter, link_resolver)

        self.output_attrs = defaultdict(lambda: defaultdict(dict))
        formatter.prepare_page_attributes(self)
        self.__format_symbols(formatter, link_resolver)
        self.detailed_description =\
            formatter.format_page(self)[0]

        if output:
            formatter.cache_page(self)

    # pylint: disable=no-self-use
    def get_title(self):
        """
        Banana banana
        """
        return self.title or 'unnamed'

    def __discover_title(self, meta):
        if meta is not None and 'title' in meta:
            self.title = meta['title']
        elif self.ast:
            self.title = cmark.title_from_ast(self.ast)

    def __format_symbols(self, formatter, link_resolver):
        for symbol in self.symbols:
            if symbol is None:
                continue
            debug(
                'Formatting symbol %s in page %s' %
                (symbol.unique_name, self.source_file), 'formatting')
            symbol.skip = not formatter.format_symbol(symbol, link_resolver)

    def __query_extra_symbols(self, sym, all_syms, tree, link_resolver,
                              database):
        if sym:
            self.__fetch_comment(sym, database)
            new_symbols = sum(tree.resolving_symbol_signal(self, sym), [])
            all_syms.add(sym)

            for symbol in new_symbols:
                self.__query_extra_symbols(symbol, all_syms, tree,
                                           link_resolver, database)

    def __resolve_symbol(self, symbol, link_resolver, page_path):
        symbol.resolve_links(link_resolver)

        symbol.link.ref = "%s#%s" % (page_path, symbol.unique_name)

        for link in symbol.get_extra_links():
            link.ref = "%s#%s" % (page_path, link.id_)

        tsl = self.typed_symbols.get(type(symbol))
        if tsl:
            tsl.symbols.append(symbol)
        self.symbols.append(symbol)

        debug(
            'Resolved symbol %s to page %s' %
            (symbol.display_name, self.link.ref), 'resolution')
Example #11
0
class Page:
    "Banana banana"
    meta_schema = {
        Optional('title'): And(str, len),
        Optional('short-description'): And(str),
        Optional('description'): And(str),
        Optional('render-subpages'): bool,
        Optional('auto-sort'): bool,
        Optional('full-width'): bool,
        Optional('see-also'): And(str, len),
        Optional('extra'): Schema({str: object}),
        Optional('thumbnail'): And(str, len),
        Optional('include'): And(str, len),
    }
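
    # A hedged note: metadata keys arrive from users with underscores and
    # mixed case; __init__ below normalizes each key to lower-kebab-case
    # before validating it against this schema, one key at a time.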

    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-locals
    def __init__(self,
                 name,
                 generated,
                 project_name,
                 extension_name,
                 source_file=None,
                 ast=None,
                 output_path='',
                 raw_contents=None,
                 comment=None,
                 meta=None,
                 pre_sorted=False,
                 symbol_names=None):
        assert name

        if not generated:
            assert source_file is not None

        self.name = name
        basename = os.path.basename(name)
        name = os.path.splitext(basename)[0]
        ref = os.path.join(output_path,
                           re.sub(r'\W+', '-',
                                  os.path.splitext(basename)[0]))
        pagename = '%s.html' % ref

        self.generated = generated
        self.project_name = project_name
        self.extension_name = extension_name
        self.source_file = source_file
        self.ast = ast
        self.raw_contents = raw_contents
        self.comment = comment
        self.pre_sorted = pre_sorted
        self.symbol_names = OrderedSet(symbol_names or [])

        self.output_attrs = None
        self.subpages = OrderedSet()
        self.symbols = []
        self.private_symbols = []
        self.typed_symbols = OrderedDict()
        self.by_parent_symbols = OrderedDict()
        self.formatted_contents = None
        self.detailed_description = None
        self.build_path = None
        self.cached_paths = OrderedSet()

        if comment:
            meta = comment.meta
        else:
            meta = meta or {}

        self.meta = {}
        for key, value in meta.items():
            try:
                self.meta.update(
                    Schema(Page.meta_schema).validate(
                        {key.replace('_', '-').lower(): value}))
            except SchemaError as err:
                warn(
                    'invalid-page-metadata',
                    '%s: Invalid metadata: \n%s, discarding metadata' %
                    (self.name, str(err)))

        if not self.meta.get('extra'):
            self.meta['extra'] = defaultdict()

        self.title = self.meta.get(
            'title',
            cmark.title_from_ast(self.ast) if ast else '')
        self.thumbnail = self.meta.get('thumbnail')
        self.short_description = self.meta.get('short-description', None)
        self.render_subpages = self.meta.get('render-subpages', True)

        self.link = Link(pagename, self.title or name, ref)

    def __repr__(self):
        return "<Page %s>" % self.name

    @staticmethod
    def __get_empty_typed_symbols():
        typed_symbols_list = namedtuple('TypedSymbolsList',
                                        ['name', 'symbols'])
        empty_typed_symbols = {}

        for subclass in all_subclasses(Symbol):
            empty_typed_symbols[subclass] = typed_symbols_list(
                subclass.get_plural_name(), [])

        return empty_typed_symbols

    def resolve_symbols(self, tree, database, link_resolver):
        """
        When this method is called, the page's symbol names are queried
        from `database`, and added to lists of actual symbols, sorted
        by symbol class.
        """
        self.typed_symbols = self.__get_empty_typed_symbols()
        all_syms = OrderedSet()
        for sym_name in self.symbol_names:
            sym = database.get_symbol(sym_name)
            self.__query_extra_symbols(sym, all_syms, tree, link_resolver,
                                       database)

        if tree.project.is_toplevel:
            page_path = self.link.ref
        else:
            page_path = self.project_name + '/' + self.link.ref

        if self.meta.get("auto-sort", True):
            all_syms = sorted(all_syms, key=lambda x: x.unique_name)
        for sym in all_syms:
            sym.update_children_comments()
            self.__resolve_symbol(sym, link_resolver, page_path)
            self.symbol_names.add(sym.unique_name)

        # Always put symbols with no parent at the end
        no_parent_syms = self.by_parent_symbols.pop(None, None)
        if no_parent_syms:
            self.by_parent_symbols[None] = no_parent_syms

        for sym_type in [
                ClassSymbol, AliasSymbol, InterfaceSymbol, StructSymbol
        ]:
            syms = self.typed_symbols[sym_type].symbols

            if not syms:
                continue

            if self.title is None:
                self.title = syms[0].display_name
            if self.comment is None:
                self.comment = Comment(name=self.name)
                self.comment.short_description = syms[
                    0].comment.short_description
                self.comment.title = syms[0].comment.title
            break

    # pylint: disable=no-self-use
    def __fetch_comment(self, sym, database):
        sym.comment = database.get_comment(sym.unique_name) or Comment(
            sym.unique_name)

        for sym in sym.get_children_symbols():
            if isinstance(sym, Symbol):
                self.__fetch_comment(sym, database)

    def __format_page_comment(self, formatter, link_resolver):
        if not self.comment:
            return

        if self.comment.short_description:
            self.short_description = formatter.format_comment(
                self.comment.short_description, link_resolver).strip()
            if self.short_description.startswith('<p>'):
                self.short_description = self.short_description[3:-4]
        if self.comment.title:
            self.title = formatter.format_comment(self.comment.title,
                                                  link_resolver).strip()
            if self.title.startswith('<p>'):
                self.title = self.title[3:-4]

        if self.title:
            self.formatted_contents += '<h1 id="%s-page">%s</h1>' % (
                id_from_text(self.title), self.title)

        self.formatted_contents += formatter.format_comment(
            self.comment, link_resolver)

    def format(self, formatter, link_resolver, output):
        """
        Banana banana
        """

        if not self.title and self.name:
            title = os.path.splitext(self.name)[0]
            self.title = os.path.basename(title).replace('-', ' ')

        self.formatted_contents = u''

        self.build_path = os.path.join(formatter.get_output_folder(self),
                                       self.link.ref)

        if self.ast:
            out, diags = cmark.ast_to_html(self.ast, link_resolver)
            for diag in diags:
                warn(diag.code,
                     message=diag.message,
                     filename=self.source_file or self.name)

            self.formatted_contents += out

        if not self.formatted_contents:
            self.__format_page_comment(formatter, link_resolver)

        self.output_attrs = defaultdict(lambda: defaultdict(dict))
        formatter.prepare_page_attributes(self)
        self.__format_symbols(formatter, link_resolver)
        self.detailed_description =\
            formatter.format_page(self)[0]

        if output:
            formatter.cache_page(self)

    # pylint: disable=no-self-use
    def get_title(self):
        """
        Banana banana
        """
        return self.title or 'unnamed'

    def __discover_title(self, meta):
        if meta is not None and 'title' in meta:
            self.title = meta['title']
        elif self.ast:
            self.title = cmark.title_from_ast(self.ast)

    def __format_symbols(self, formatter, link_resolver):
        for symbol in self.symbols:
            if symbol is None:
                continue
            debug(
                'Formatting symbol %s in page %s' %
                (symbol.unique_name, self.name), 'formatting')
            symbol.detailed_description = formatter.format_symbol(
                symbol, link_resolver)

    def __query_extra_symbols(self, sym, all_syms, tree, link_resolver,
                              database):
        if sym:
            self.__fetch_comment(sym, database)
            all_syms.add(sym)

    def __resolve_symbol(self, symbol, link_resolver, page_path):
        symbol.resolve_links(link_resolver)

        symbol.link.ref = "%s#%s" % (page_path, symbol.unique_name)

        for link in symbol.get_extra_links():
            link.ref = "%s#%s" % (page_path, link.id_)

        tsl = self.typed_symbols.get(type(symbol))
        if tsl:
            tsl.symbols.append(symbol)

            by_parent_symbols = self.by_parent_symbols.get(symbol.parent_name)
            if not by_parent_symbols:
                by_parent_symbols = self.__get_empty_typed_symbols()
                # Key on the raw parent name (possibly None); symbols with
                # no parent are moved to the end in resolve_symbols().
                self.by_parent_symbols[symbol.parent_name] = by_parent_symbols
            by_parent_symbols[type(symbol)].symbols.append(symbol)

        self.symbols.append(symbol)

        debug(
            'Resolved symbol %s to page %s' %
            (symbol.unique_name, self.link.ref), 'resolution')
    Optional("notification_endpoint"): str,
    Optional("schedule"): str,
    Optional("restart_execution_on_update"): bool,
    Optional("pipeline_type", default="default"): Or("default"),
}

AWS_ACCOUNT_ID_REGEX_STR = r"\A[0-9]{12}\Z"
AWS_ACCOUNT_ID_SCHEMA = Schema(
    And(
        Or(int, str), Use(str),
        Regex(
            AWS_ACCOUNT_ID_REGEX_STR,
            error=(
                "The specified account id is incorrect. "
                "This typically happens when an account id that starts "
                "with a zero is specified as a number: the leading zero "
                "is dropped (or the value is interpreted as an octal "
                "number), so it no longer matches the account id as "
                "specified in the deployment map. An AWS Account Id is "
                "exactly 12 digits, including any leading zeros, so wrap "
                "the account id in quotes to make it a string."))))

# CodeCommit Source
CODECOMMIT_SOURCE_PROPS = {
    "account_id": AWS_ACCOUNT_ID_SCHEMA,
    Optional("repository"): str,
    Optional("branch"): str,
    Optional("poll_for_changes"): bool,
    Optional("owner"): str,
Example #13
0

def percent_to_float(data):
    # Accept either a float or a percent string such as '12.5%'.
    if isinstance(data, str) and re.fullmatch(r'-?\d+(\.\d+)?%', data):
        data = float(data[:-1]) / 100
    else:
        assert isinstance(
            data, float), 'This field should be float or percent string'
    return data
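
# Hedged sanity checks (made-up values): percent strings are converted,
# plain floats pass through unchanged.
assert percent_to_float('12.5%') == 0.125
assert percent_to_float(0.3) == 0.3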


policy_schema = Schema({
    Optional('weights', default=None):
    list,
    Optional('method', default=None):
    And(str, lambda s: s in ["per_channel", "per_tensor"]),
    Optional('init_sparsity', default=0):
    And(float, lambda s: 0.0 <= s < 1.0),
    Hook('target_sparsity', handler=_valid_prune_sparsity):
    object,
    Optional('target_sparsity', default=0.5):
    float,
    Optional("start_epoch", default=0):
    int,
    Hook('end_epoch', handler=_valid_prune_epoch):
    object,
    Optional('end_epoch', default=4):
    int
})
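
# A sketch of the defaults (assuming the module's Hook handlers tolerate
# absent keys): validating an empty policy should fill in every Optional:
# policy_schema.validate({})
# -> {'weights': None, 'method': None, 'init_sparsity': 0,
#     'target_sparsity': 0.5, 'start_epoch': 0, 'end_epoch': 4}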

ops_schema = Schema({
Example #14
0
__credits__ = ["Costas Tyfoxylos"]
__license__ = '''MIT'''
__maintainer__ = '''Costas Tyfoxylos'''
__email__ = '''<*****@*****.**>'''
__status__ = '''Development'''  # "Prototype", "Development", "Production".


def is_valid_regex(value):
    """Validates a regex"""
    try:
        re.compile(value)
        is_valid = True
    except re.error:
        is_valid = False
    return is_valid


NAMING_SCHEMA = Schema([{
    'resource':
    str,
    'regex':
    is_valid_regex,
    Optional('fields'): [{
        'value': str,
        'regex': is_valid_regex
    }]
}])

POSITIONING_SCHEMA = Schema({And(str, lambda x: x.endswith('.tf')): [str]})
DISASTER_RECOVERY_FILENAME = 'disaster_recovery.tf'
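
# A minimal sketch with hypothetical data: each entry pairs a resource
# type with a naming regex, and the regex itself must compile.
assert NAMING_SCHEMA.validate([{'resource': 'aws_instance',
                                'regex': r'^ec2-[a-z]+$'}])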
Example #15
0
def test_schema_repr():  # what about repr with `error`s?
    schema = Schema([Or(None, And(str, Use(float)))])
    repr_ = "Schema([Or(None, And(<type 'str'>, Use(<type 'float'>)))])"
    # in Python 3 repr contains <class 'str'>, not <type 'str'>
    assert repr(schema).replace('class', 'type') == repr_
Example #16
0
import csv

from cytoolz.dicttoolz import assoc
from cytoolz.functoolz import thread_first

from schema import Schema, And, Or

###################################################################
## Schemas
barcodeSchema = Schema({
    "barcode": And(str, lambda s: 12 <= len(s) <= 30),
    "forward_barcode": And(str, lambda s: 6 <= len(s) <= 15),
    "forward_spacer": And(str, lambda s: 0 <= len(s) <= 10),
    "forward_primer": And(str, lambda s: 12 <= len(s) <= 35),
    "reverse_barcode": And(str, lambda s: 6 <= len(s) <= 15),
    "reverse_spacer": And(str, lambda s: 0 <= len(s) <= 10),
    "reverse_primer": And(str, lambda s: 12 <= len(s) <= 35)
})
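
# A hedged sanity check with made-up sequences; every key is required
# and each length must fall inside the stated window:
barcodeSchema.validate({
    "barcode": "ACGTACGTACGT",         # 12-30 chars
    "forward_barcode": "ACGTAC",       # 6-15 chars
    "forward_spacer": "",              # 0-10 chars
    "forward_primer": "ACGTACGTACGT",  # 12-35 chars
    "reverse_barcode": "TGCATG",
    "reverse_spacer": "",
    "reverse_primer": "TGCATGCATGCA",
})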

fastadataSchema = Schema({
    "forward_id": str,
    "forward_desc": str,
    "forward_sequence": str,
    "reverse_id": str,
    "reverse_desc": str,
    "reverse_sequence": str,
Example #17
0
File: desc.py Project: mcb30/pihat
    def decoder(self):
        return Or(
            And(int, Use(self.type)),
            And(str, Use(lambda x: self.type[x.upper()])),
        )
Example #18
0
from schema import Schema, And, Or, Optional, Use

states = [
    "QUEUED", "RECEIVED", "STARTED", "SUCCEEDED", "RETRY", "REVOKED", "FAILED",
    "REJECTED"
]

TriggerSchema = Schema({
    "enabled":
    And(bool),
    Optional("states", default=[]): [str],
    Optional("envs", default=[]): [str],
    Optional("runtime_upper_bound"):
    And(Use(float), lambda n: 0.000000000001 <= n <= 1000),
    Optional(Or("exclude", "include", only_one=True)): [str],
    "type":
    Or("slack"),
    "slack_wh_url":
    And(str, len),
})
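
# Sketch of a minimal valid trigger (hypothetical webhook URL); the
# Optional "states"/"envs" defaults are filled in during validation.
TriggerSchema.validate({
    "enabled": True,
    "type": "slack",
    "slack_wh_url": "https://hooks.slack.com/services/T000/B000/XXXX",
})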

ApplicationSchema = Schema({
    "app_name":
    And(str, lambda e: e.isalpha() and e.islower()),
    "app_description":
    And(str, len),
    Optional("fo_triggers", default=[]): []
})
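
# Likewise, a minimal valid application (made-up values): app_name must
# be all-lowercase letters, and fo_triggers defaults to an empty list.
ApplicationSchema.validate({"app_name": "demoapp", "app_description": "demo"})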
Example #19
0
File: desc.py Project: mcb30/pihat
    def encoder(self):
        return And(self.type, Or(
            And(lambda x: not x.name.startswith('RESERVED_'),
                Use(lambda x: x.name.lower())),
            Use(lambda x: x.value),
        ))
Example #20
0
def test_schema_repr():  # what about repr with `error`s?
    schema = Schema([Or(None, And(str, Use(float)))])
    # repr_ = "Schema([Or(None, And(<type 'str'>, Use(<type 'float'>)))])"
    repr_ = "Schema(List(Or(Schema(None), And(Schema(<type 'str'>), Use(<type 'float'>)))))"
    # in Python 3 repr contains <class 'str'>, not <type 'str'>
    assert repr(schema).replace("class", "type") == repr_
Example #21
0
File: desc.py Project: mcb30/pihat
    def decoder(self):
        return Or(
            And(int, Use(self.type)),
            And(str, Use(lambda x: self.type[re.sub(r'(\d+)\s*mA$', r'MA_\1',
                                                    x.upper(), flags=re.I)])),
        )
Example #22
0
})

MOVE_SCHEMA = Schema({
    'type': Use(str),
    'id': Use(str),
    'quantity': Use(int),
})

NETWORK_SCHEMA = Schema({
    'ssid': Use(str),
    'password': Use(str),
})
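
# Note that Use(...) coerces rather than type-checks, so (made-up data)
# MOVE_SCHEMA.validate({'type': 1, 'id': 2, 'quantity': '3'}) returns
# {'type': '1', 'id': '2', 'quantity': 3}.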

JOB_SCHEMA = Schema({
    'job_type':
    And(str, Use(str.lower), lambda s: s in ('track', 'learn', 'secure')),
    'scenario':
    Or(None, Use(str)),
    'predicted_mission':
    Use(bool),
    'recognized_persons':
    Or(None, Use(list)),
    'non_moving_target':
    Or(None, Use(bool)),
    Optional('arm_expansion'):
    Or(None, Use(bool)),
    'ai':
    Or(None, Use(str)),
})
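
# A hedged example of a minimal valid job: job_type is lower-cased by
# Use(str.lower) before the membership check, and the Or(None, ...)
# fields accept an explicit None.
JOB_SCHEMA.validate({
    'job_type': 'Track',
    'scenario': None,
    'predicted_mission': False,
    'recognized_persons': None,
    'non_moving_target': None,
    'ai': None,
})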

FACE_ENCODING_SCHEMA = Schema({
Example #23
0
File: desc.py Project: mcb30/pihat
    def encoder(self):
        return And(self.type, Or(
            And(lambda x: not x.name.startswith('RESERVED_'),
                Use(lambda x: re.sub(r'ma_(\d+)$', r'\1 mA', x.name.lower()))),
            Use(lambda x: x.value),
        ))
Example #24
0
from http import HTTPStatus

from flask import request, jsonify, Blueprint, current_app
from schema import Schema, SchemaError, And

from account_service.domain import commands
from account_service.domain.account import Account
from account_service.domain.errors import CustomerNotFound, AccountNotFound

accounts = Blueprint('accounts', __name__, url_prefix='/accounts/')

POST_ACCOUNT_PAYLOAD_SCHEMA = Schema({'customerId': And(str, len)})


@accounts.route('/<int:account_number>', methods=['GET'])
def get_account(account_number):
    account = commands.get_account(
        account_number=account_number,
        account_repository=current_app.account_repository)

    return jsonify(accountNumber=account.formatted_account_number,
                   accountStatus=account.account_status,
                   customerId=account.customer_id)


@accounts.route('/<int:account_number>', methods=['PUT'])
def update_account(account_number):
    body = request.get_json()

    commands.update_account(account_number=account_number,
                            status=body['status'],
Example #25
0
File: desc.py Project: mcb30/pihat
class EepromDescription:
    """EEPROM description"""
    # pylint: disable=too-many-instance-attributes

    data: Dict

    decoder: ClassVar[Schema] = Schema({
        Optional('uuid'): And(str, Use(UUID)),
        Optional('pid'): int,
        Optional('pver'): int,
        Optional('vstr'): And(str, Use(str.encode)),
        Optional('pstr'): And(str, Use(str.encode)),
        Optional('drive'): EepromDriveValue.decoder,
        Optional('slew'): EepromSlewValue.decoder,
        Optional('hysteresis'): EepromHysteresisValue.decoder,
        Optional('back_power'): EepromBackPowerValue.decoder,
        Optional('pins'): Or({}, {
            Or(int, And(str, Use(int))): {
                Optional('function'): EepromPinFunctionValue.decoder,
                Optional('pull'): EepromPinPullValue.decoder,
            },
        }),
    })

    encoder: ClassVar[Schema] = Schema({
        Optional('uuid'): And(UUID, Use(str)),
        Optional('pid'): int,
        Optional('pver'): int,
        Optional('vstr'): And(bytes, Use(bytes.decode)),
        Optional('pstr'): And(bytes, Use(bytes.decode)),
        Optional('drive'): EepromDriveValue.encoder,
        Optional('slew'): EepromSlewValue.encoder,
        Optional('hysteresis'): EepromHysteresisValue.encoder,
        Optional('back_power'): EepromBackPowerValue.encoder,
        Optional('pins'): Or({}, {
            int: {
                Optional('function'): EepromPinFunctionValue.encoder,
                Optional('pull'): EepromPinPullValue.encoder,
            },
        }),
    })

    json_decoder: ClassVar[JSONDecoder] = JSONDecoder()
    json_encoder: ClassVar[JSONEncoder] = JSONEncoder()

    def __init__(self, data=None, encoded=None, json=None, yaml=None):
        self.data = data if data is not None else {}
        if encoded is not None:
            self.encoded = encoded
        if json is not None:
            self.json = json
        if yaml is not None:
            self.yaml = yaml

    @property
    def encoded(self):
        """Description encoded as serializable Python objects"""
        return self.encoder.validate(self.data)

    @encoded.setter
    def encoded(self, value):
        self.data = self.decoder.validate(value)

    @property
    def json(self):
        """Description encoded as JSON"""
        return self.json_encoder.encode(self.encoded)

    @json.setter
    def json(self, value):
        self.encoded = self.json_decoder.decode(value)

    @property
    def yaml(self):
        """Description encoded as YAML"""
        return safe_dump(self.encoded, sort_keys=False)

    @yaml.setter
    def yaml(self, value):
        self.encoded = safe_load(value)

    def apply(self, eeprom):
        """Apply description to EEPROM"""
        if 'uuid' in self.data:
            eeprom.uuid = self.data['uuid']
        if 'pid' in self.data:
            eeprom.pid = self.data['pid']
        if 'pver' in self.data:
            eeprom.pver = self.data['pver']
        if 'vstr' in self.data:
            eeprom.vstr = self.data['vstr']
        if 'pstr' in self.data:
            eeprom.pstr = self.data['pstr']
        if 'drive' in self.data:
            eeprom.bank.drive = self.data['drive']
        if 'slew' in self.data:
            eeprom.bank.slew = self.data['slew']
        if 'hysteresis' in self.data:
            eeprom.bank.hysteresis = self.data['hysteresis']
        if 'back_power' in self.data:
            eeprom.power.back_power = self.data['back_power']
        for i, pin in self.data.get('pins', {}).items():
            eeprom.pins[i].used = True
            if 'function' in pin:
                eeprom.pins[i].function = pin['function']
            if 'pull' in pin:
                eeprom.pins[i].pull = pin['pull']

    def describe(self, eeprom):
        """Describe EEPROM"""
        self.data = {}
        if eeprom.uuid.int:
            self.data['uuid'] = eeprom.uuid
        if eeprom.pid:
            self.data['pid'] = eeprom.pid
        if eeprom.pver:
            self.data['pver'] = eeprom.pver
        if eeprom.vstr:
            self.data['vstr'] = eeprom.vstr
        if eeprom.pstr:
            self.data['pstr'] = eeprom.pstr
        if eeprom.bank.drive:
            self.data['drive'] = eeprom.bank.drive
        if eeprom.bank.slew:
            self.data['slew'] = eeprom.bank.slew
        if eeprom.bank.hysteresis:
            self.data['hysteresis'] = eeprom.bank.hysteresis
        if eeprom.power.back_power:
            self.data['back_power'] = eeprom.power.back_power
        for i, pin in enumerate(eeprom.pins):
            if pin.used:
                self.data.setdefault('pins', {})
                self.data['pins'][i] = {'function': pin.function}
                if pin.pull:
                    self.data['pins'][i]['pull'] = pin.pull

    @classmethod
    def from_eeprom(cls, eeprom):
        """Construct description from EEPROM"""
        self = cls()
        self.describe(eeprom)
        return self
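
# Round-trip sketch (hypothetical values): the decoder coerces
# serializable input into rich types (str -> bytes for 'vstr'), and the
# encoder reverses it for JSON output.
desc = EepromDescription(json='{"pid": 4, "vstr": "ACME"}')
assert desc.data == {'pid': 4, 'vstr': b'ACME'}
assert '"vstr": "ACME"' in desc.json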
Example #26
0
def main(argv, session):
    if six.PY2:
        args = docopt(__doc__.encode('utf-8'), argv=argv)
    else:
        args = docopt(__doc__, argv=argv)
    ERRORS = False

    # Validate args.
    s = Schema({
        str:
        Use(bool),
        '<identifier>':
        Or(
            None,
            And(str,
                validate_ia_identifier,
                error=
                ('<identifier> should be between 3 and 80 characters in length, and '
                 'can only contain alphanumeric characters, periods ".", '
                 'underscores "_", or dashes "-". However, <identifier> cannot begin '
                 'with periods, underscores, or dashes.'))),
        '<file>':
        And(
            Use(lambda l: l
                if not six.PY2 else convert_str_list_to_unicode(l)),
            And(lambda f: all(os.path.exists(x) for x in f if x != '-'),
                error='<file> should be a readable file or directory.'),
            And(lambda f: False
                if f == ['-'] and not args['--remote-name'] else True,
                error=
                '--remote-name must be provided when uploading from stdin.')),
        '--remote-name':
        Or(
            None,
            Use(lambda x: x.decode(sys.getfilesystemencoding())
                if six.PY2 else x)),
        '--spreadsheet':
        Or(None,
           os.path.isfile,
           error='--spreadsheet should be a readable file.'),
        '--file-metadata':
        Or(None,
           os.path.isfile,
           error='--file-metadata should be a readable file.'),
        '--metadata':
        Or(None,
           And(Use(get_args_dict), dict),
           error='--metadata must be formatted as --metadata="key:value"'),
        '--header':
        Or(None,
           And(Use(get_args_dict), dict),
           error='--header must be formatted as --header="key:value"'),
        '--retries':
        Use(lambda x: int(x[0]) if x else 0),
        '--sleep':
        Use(lambda l: int(l[0]), error='--sleep value must be an integer.'),
        '--size-hint':
        Or(Use(lambda l: str(l[0]) if l else None),
           int,
           None,
           error='--size-hint value must be an integer.'),
        '--status-check':
        bool,
    })
    try:
        args = s.validate(args)
    except SchemaError as exc:
        print('{0}\n{1}'.format(str(exc), printable_usage(__doc__)),
              file=sys.stderr)
        sys.exit(1)

    # Make sure the collection being uploaded to exists.
    collection_id = args['--metadata'].get('collection')
    if collection_id and not args['--no-collection-check'] and not args[
            '--status-check']:
        if isinstance(collection_id, list):
            collection_id = collection_id[0]
        collection = session.get_item(collection_id)
        if not collection.exists:
            print('You must upload to a collection that exists. '
                  '"{0}" does not exist.\n{1}'.format(
                      collection_id, printable_usage(__doc__)),
                  file=sys.stderr)
            sys.exit(1)

    # Status check.
    if args['--status-check']:
        if session.s3_is_overloaded():
            print('warning: {0} is over limit, and not accepting requests. '
                  'Expect 503 SlowDown errors.'.format(args['<identifier>']),
                  file=sys.stderr)
            sys.exit(1)
        else:
            print('success: {0} is accepting requests.'.format(
                args['<identifier>']))
            sys.exit()

    elif args['<identifier>']:
        item = session.get_item(args['<identifier>'])

    # Upload keyword arguments.
    if args['--size-hint']:
        args['--header']['x-archive-size-hint'] = args['--size-hint']
    # Upload with backups turned on by default.
    if not args['--header'].get('x-archive-keep-old-version'):
        args['--header']['x-archive-keep-old-version'] = '1'

    queue_derive = not args['--no-derive']
    verbose = not args['--quiet']

    if args['--file-metadata']:
        try:
            args['<file>'] = json.load(open(args['--file-metadata']))
        except json.decoder.JSONDecodeError:
            args['<file>'] = list()
            for line in open(args['--file-metadata']):
                j = json.loads(line.strip())
                args['<file>'].append(j)
    upload_kwargs = dict(
        metadata=args['--metadata'],
        headers=args['--header'],
        debug=args['--debug'],
        queue_derive=queue_derive,
        verbose=verbose,
        verify=args['--verify'],
        checksum=args['--checksum'],
        retries=args['--retries'],
        retries_sleep=args['--sleep'],
        delete=args['--delete'],
    )

    # Upload files.
    if not args['--spreadsheet']:
        if args['-']:
            local_file = TemporaryFile()
            # TemporaryFile is opened in binary mode, so read bytes on
            # Python 3; Python 2's stdin already yields bytes.
            local_file.write(sys.stdin.read() if six.PY2
                             else sys.stdin.buffer.read())
            local_file.seek(0)
        else:
            local_file = args['<file>']

        if isinstance(local_file,
                      (list, tuple, set)) and args['--remote-name']:
            local_file = local_file[0]
        if args['--remote-name']:
            files = {args['--remote-name']: local_file}
        else:
            files = local_file

        for _r in _upload_files(item, files, upload_kwargs):
            if args['--debug']:
                break
            if (not _r.status_code) or (not _r.ok):
                ERRORS = True
            else:
                if args['--open-after-upload']:
                    webbrowser.open_new_tab('{}//{}/details/{}'.format(
                        session.protocol, session.host, item.identifier))

    # Bulk upload using spreadsheet.
    else:
        # Use the same session for each upload request.
        with io.open(args['--spreadsheet'], 'rU', newline='',
                     encoding='utf-8') as csvfp:
            spreadsheet = csv.DictReader(csvfp)
            prev_identifier = None
            for row in spreadsheet:
                upload_kwargs_copy = deepcopy(upload_kwargs)
                local_file = row['file']
                identifier = row.get('item', row.get('identifier'))
                # Blank identifier cells inherit the previous row's item.
                if not identifier and prev_identifier:
                    identifier = prev_identifier
                if not identifier:
                    print('error: no identifier column on spreadsheet!')
                    sys.exit(1)
                del row['file']
                if 'identifier' in row:
                    del row['identifier']
                elif 'item' in row:
                    del row['item']
                item = session.get_item(identifier)
                # TODO: Clean up how indexed metadata items are coerced
                # into metadata.
                md_args = [
                    '{0}:{1}'.format(k.lower(), v) for (k, v) in row.items()
                    if v
                ]
                metadata = get_args_dict(md_args)
                upload_kwargs_copy['metadata'].update(metadata)
                r = _upload_files(item, local_file, upload_kwargs_copy,
                                  prev_identifier, session)
                for _r in r:
                    if args['--debug']:
                        break
                    if (not _r) or (not _r.ok):
                        ERRORS = True
                    else:
                        if args['--open-after-upload']:
                            webbrowser.open_new_tab('{}//{}/details/{}'.format(
                                session.protocol, session.host, identifier))
                prev_identifier = identifier

    if ERRORS:
        sys.exit(1)
Example #27
0
def main(argv, session: ArchiveSession) -> None:
    args = docopt(__doc__, argv=argv)

    # Validation error messages.
    invalid_id_msg = (
        '<identifier> should be between 3 and 80 characters in length, and '
        'can only contain alphanumeric characters, underscores ( _ ), or '
        'dashes ( - )')

    # Validate args.
    s = Schema({
        str:
        Use(bool),
        '<file>':
        list,
        '--format':
        list,
        '--header':
        Or(None,
           And(Use(get_args_dict), dict),
           error='--header must be formatted as --header="key:value"'),
        '--glob':
        list,
        'delete':
        bool,
        '--retries':
        Use(lambda i: int(i[0])),
        '<identifier>':
        str,
    })
    try:
        args = s.validate(args)
    except SchemaError as exc:
        print(f'{exc}\n{printable_usage(__doc__)}', file=sys.stderr)
        sys.exit(1)

    verbose = not args['--quiet']
    item = session.get_item(args['<identifier>'])
    if not item.exists:
        print(f'{item.identifier}: skipping, item doesn\'t exist.',
              file=sys.stderr)

    # Files that cannot be deleted via S3.
    no_delete = ['_meta.xml', '_files.xml', '_meta.sqlite']

    # Add keep-old-version by default.
    if not args['--header'].get(
            'x-archive-keep-old-version') and not args['--no-backup']:
        args['--header']['x-archive-keep-old-version'] = '1'

    if verbose:
        print(f'Deleting files from {item.identifier}', file=sys.stderr)

    if args['--all']:
        files = list(item.get_files())
        args['--cascade'] = True
    elif args['--glob']:
        files = item.get_files(glob_pattern=args['--glob'])
    elif args['--format']:
        files = item.get_files(formats=args['--format'])
    else:
        fnames = []
        if args['<file>'] == ['-']:
            fnames = [f.strip() for f in sys.stdin]
        else:
            fnames = [f.strip() for f in args['<file>']]

        files = list(item.get_files(fnames))

    if not files:
        print(' warning: no files found, nothing deleted.', file=sys.stderr)
        sys.exit(1)

    errors = False

    for f in files:
        if not f:
            if verbose:
                print(' error: a requested file does not exist',
                      file=sys.stderr)
            errors = True
            continue
        if any(f.name.endswith(s) for s in no_delete):
            continue
        if args['--dry-run']:
            print(f' will delete: {item.identifier}/{f.name}', file=sys.stderr)
            continue
        try:
            resp = f.delete(verbose=verbose,
                            cascade_delete=args['--cascade'],
                            headers=args['--header'],
                            retries=args['--retries'])
        except requests.exceptions.RetryError:
            print(f' error: max retries exceeded for {f.name}',
                  file=sys.stderr)
            errors = True
            continue

        if resp.status_code != 204:
            errors = True
            msg = get_s3_xml_text(resp.content)
            print(f' error: {msg} ({resp.status_code})', file=sys.stderr)
            continue

    if errors:
        sys.exit(1)
Example #28
0
 def get_options(cls):
     return {
         "name":
         str,
         ConfigOption("description", default=None):
         Or(str, None),
         ConfigOption("logger_level", default=logger.TEST_INFO):
         int,
         ConfigOption("file_log_level", default=logger.DEBUG):
         Or(int, None),
         ConfigOption("runpath", default=default_runpath):
         Or(None, str, lambda x: callable(x)),
         ConfigOption("path_cleanup", default=True):
         bool,
         ConfigOption("all_tasks_local", default=False):
         bool,
         ConfigOption("shuffle", default=[]):
         list,  # list of string choices
         ConfigOption("shuffle_seed",
                      default=float(random.randint(1, 9999))):
         float,
         ConfigOption("exporters", default=None):
         Use(get_exporters),
         ConfigOption("stdout_style", default=defaults.STDOUT_STYLE):
         Style,
         ConfigOption("report_dir", default=defaults.REPORT_DIR):
         Or(str, None),
         ConfigOption("xml_dir", default=None):
         Or(str, None),
         ConfigOption("pdf_path", default=None):
         Or(str, None),
         ConfigOption("json_path", default=None):
         Or(str, None),
         ConfigOption("http_url", default=None):
         Or(str, None),
         ConfigOption("pdf_style", default=defaults.PDF_STYLE):
         Style,
         ConfigOption("report_tags", default=[]):
         [Use(tagging.validate_tag_value)],
         ConfigOption("report_tags_all", default=[]):
         [Use(tagging.validate_tag_value)],
         ConfigOption("merge_scheduled_parts", default=False):
         bool,
         ConfigOption("browse", default=False):
         bool,
         ConfigOption("ui_port", default=None):
         Or(None, int),
         ConfigOption(
             "web_server_startup_timeout",
             default=defaults.WEB_SERVER_TIMEOUT,
         ):
         int,
         ConfigOption("test_filter", default=filtering.Filter()):
         filtering.BaseFilter,
         ConfigOption("test_sorter", default=ordering.NoopSorter()):
         ordering.BaseSorter,
         # Test lister is None by default, otherwise Testplan would
         # list tests, not run them
         ConfigOption("test_lister", default=None):
         Or(None, listing.BaseLister),
         ConfigOption("verbose", default=False):
         bool,
         ConfigOption("debug", default=False):
         bool,
         ConfigOption("timeout", default=defaults.TESTPLAN_TIMEOUT):
         Or(None, And(int, lambda t: t >= 0)),
         ConfigOption("abort_wait_timeout", default=60):
         int,
         ConfigOption("interactive_handler", default=TestRunnerIHandler):
         object,
         ConfigOption("reset_report_uid", default=True):
         bool,
         ConfigOption("extra_deps", default=[]):
         list,
     }
Example #29
0
from schema import Schema, And, Use, Optional, SchemaError
import os
from src.models import LiteModel

from .defaults import default_dir

model_schema = Schema(
    And(str, os.path.exists, Use(LiteModel.load_from_filepath)))
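
# Usage sketch (hypothetical path): validation both checks that the file
# exists and returns the loaded LiteModel instead of the path string.
# model = model_schema.validate('models/characters.tflite')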


def compare_shapes(s1, s2):
    return list(s1) == list(s2)


def get_model_shape_schema(input_shape, output_shape):
    input_schema = And(
        lambda model: compare_shapes(model.input_shape, input_shape),
        error=f"Got input shape {{0.input_shape}} expected {input_shape}")
    output_schema = And(
        lambda model: compare_shapes(model.output_shape, output_shape),
        error=f"Got input shape {{0.output_shape}} expected {output_shape}")
    return And(input_schema, output_schema)


models_schema = Schema({
    "characters":
    And(model_schema, get_model_shape_schema((1, 36, 36, 3), (1, 26))),
    "bonuses":
    And(model_schema, get_model_shape_schema((1, 15, 22, 3), (1, 5))),
    "values":
    And(model_schema, get_model_shape_schema((1, 17, 22, 3), (1, 22))),
Example #30
0
                )


_python_version = Schema(_pyversion, error="Invalid python version")


_requires_python = Schema(
        lambda x: version.VersionPredicate("python (%s)" % x),
        error="Invalid requires_python,"
    )


_version_predicate = Schema(_pred_validate, error="Invalid version predicate")


_name = And(basestring, len, _no_slashes)


_version = And(basestring, len)


_action = Schema(And(basestring, len, Or(
        "new release",
        "remove",
        "create",
        "docupdate",
        "update",
        lambda x: bool(
                    x.split(" ", 1)[0] == "update" and
                    [y.strip() for y in x.split(" ", 1)[1].split(",") if y],
                ),
Example #31
0
from .adapters import ADAPTERS
from ipaddress import ip_address, IPv4Address, IPv6Address


def is_ipv4(s):
    return isinstance(ip_address(s), IPv4Address)


def is_ipv6(s):
    return isinstance(ip_address(s), IPv6Address)


replace_schema = {
    'from': str,
    'to': str,
    Optional('ttl'): And(int, lambda n: n >= 120),
}
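
# replace_schema is the per-record base shape; config_schema below
# re-binds 'from' per record type (e.g. is_ipv4 for 'a' records) by
# spreading the dict and overriding that key.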

config_schema = Schema({
    'provider': lambda p: p in ADAPTERS.keys(),
    'auth': {
        Optional('cloudflare'): {
            Optional('token'): str,
            Optional('email'): str,
            Optional('certtoken'): str,
            Optional('debug'): bool,
        },
    },
    'replace': {
        Optional('a'): [{
            **replace_schema, 'from': is_ipv4,