Example #1
 def __init__(self, potential_string, variables=None, diag=False):
     """Initialize Potential."""
     # Accept either a single expression string or a list of strings
     if isinstance(potential_string, str):
         self.potential_strings = [potential_string]
     else:
         self.potential_strings = potential_string
     self.num_v = len(self.potential_strings)
     # Avoid sharing a mutable default argument between instances
     self.variables = variables if variables is not None else {}
     # Check if potential is static in time
     for pot_string in self.potential_strings:
         potential_nex = ne.NumExpr(pot_string)
         try:
             potential_nex.input_names.index("t")
             self.static = False
         except ValueError:
             self.static = True
         if self.static is False:
             break
     # Check if potential is linear (independent of psi).
     # If it depends on psi, it also depends on t and
     # is therefore not static
     for i in range(len(self.potential_strings)):
         for pot_string in self.potential_strings:
             potential_nex = ne.NumExpr(pot_string)
             try:
                 potential_nex.input_names.index("psi" + str(i))
                 self.linear = False
             except ValueError:
                 self.linear = True
             if self.linear is False:
                 self.static = False
                 break
         if self.linear is False:
             self.static = False
             break
     # Check that the number of matrix elements given matches the
     # number of diagonal or non-diagonal Hermitian matrix elements.
     # For a diagonal matrix the number of matrix elements equals the
     # number of internal states; if it is non-diagonal, one gives the
     # lower triangular part of V.
     self.diag = diag
     if diag is False:
         self.num_int_dim = 1 / 2 * (np.sqrt(8 * self.num_v + 1) - 1)
         assert (
             self.num_int_dim.is_integer()
         ), "Number of potential matrix elements incorrect"
         self.num_int_dim = int(self.num_int_dim)
     if diag is True:
         self.num_int_dim = len(self.potential_strings)
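The non-diagonal branch relies on the triangular-number relation between matrix elements and internal states; a minimal sketch of that check, with a made-up element count:

    import math

    # A non-diagonal Hermitian V is given by its lower triangle, so
    # num_v = n * (n + 1) / 2 and therefore n = (sqrt(8 * num_v + 1) - 1) / 2.
    num_v = 3                                  # e.g. ["x**2", "x*y", "y**2"]
    n = (math.sqrt(8 * num_v + 1) - 1) / 2
    assert n.is_integer()                      # 3 matrix elements -> 2 internal states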
Example #2
 def _func(self):
     '''
     The compiled numexpr function, built lazily on first access and cached.
     '''
     if self._func_cache is None:
         self._func_cache = ne.NumExpr(self.expr)
     return self._func_cache
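Once compiled, the cached object is called with positional arguments in the order of its input_names; a standalone sketch with an invented expression:

    import numpy as np
    import numexpr as ne

    compiled = ne.NumExpr("2*a + b")      # the kind of object _func_cache would hold
    a = np.arange(5.0)
    b = np.ones(5)
    print(compiled(a, b))                 # positional args follow compiled.input_names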
Example #3
    def __init__(self,
                 dist_expr,
                 poni1_expr,
                 poni2_expr,
                 rot1_expr,
                 rot2_expr,
                 rot3_expr,
                 param_names,
                 pos_names=None,
                 constants=None,
                 content=None):
        """Constructor of the class

        :param dist_expr: formula (as a string) providing the dist
        :param poni1_expr: formula (as a string) providing the poni1
        :param poni2_expr: formula (as a string) providing the poni2
        :param rot1_expr: formula (as a string) providing the rot1
        :param rot2_expr: formula (as a string) providing the rot2
        :param rot3_expr: formula (as a string) providing the rot3
        :param param_names: list of names of the parameters used in the model
        :param pos_names: list of motor names for gonio with >1 degree of freedom
        :param constants: a dictionary with some constants the user may want to use
        :param content: Should be None or the name of the class (may be used
                        in the future to dispatch to multiple derivative classes)
        """
        if content is not None:
            # Ensures we use the constructor of the right class
            assert content in (self.__class__.__name__,
                               "GeometryTransformation")
        if numexpr is None:
            raise RuntimeError(
                "Geometry translation requires the *numexpr* package")
        self.dist_expr = dist_expr
        self.poni1_expr = poni1_expr
        self.poni2_expr = poni2_expr
        self.rot1_expr = rot1_expr
        self.rot2_expr = rot2_expr
        self.rot3_expr = rot3_expr

        self.variables = {"pi": numpy.pi}
        if constants is not None:
            self.variables.update(constants)

        self.param_names = tuple(param_names)
        if pos_names is not None:
            self.pos_names = tuple(pos_names)
        else:
            self.pos_names = ("pos", )
        for key in self.param_names + self.pos_names:
            if key in self.variables:
                raise RuntimeError(
                    f"The keyword `{key}` is already defined, please chose another variable name"
                )
            self.variables[key] = numpy.NaN

        self.codes = [
            numexpr.NumExpr(expr)
            for expr in (self.dist_expr, self.poni1_expr, self.poni2_expr,
                         self.rot1_expr, self.rot2_expr, self.rot3_expr)
        ]
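The compiled codes are presumably evaluated later by pulling each input from self.variables in the order given by input_names; a reduced, self-contained sketch of that pattern (expression and values invented):

    import numpy
    import numexpr

    variables = {"pi": numpy.pi, "dist": 0.1, "offset": 0.002}
    code = numexpr.NumExpr("dist + offset * pi")
    value = code(*(variables[name] for name in code.input_names))
    print(value)    # 0.1 + 0.002 * pi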
Example #4
    def neEval(self, s, **kwargs):

        # Compile and cache the expression on first use (setdefault is avoided
        # because it would compile the expression even on a cache hit):
        # e = self.neCache.setdefault(s, numexpr.NumExpr(s))
        if s in self.neCache:
            e = self.neCache[s]
        else:
            e = self.neCache[s] = numexpr.NumExpr(s)

        return e.run(*(kwargs[k] for k in e.input_names))
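A self-contained version of the same cache-then-run pattern, shown here as a free function (names and values invented):

    import numpy as np
    import numexpr as ne

    _cache = {}

    def ne_eval(s, **kwargs):
        # compile once per expression string, then feed the inputs
        # in the order the compiled expression expects them
        e = _cache.get(s)
        if e is None:
            e = _cache[s] = ne.NumExpr(s)
        return e.run(*(kwargs[k] for k in e.input_names))

    print(ne_eval("a*b + c", a=np.arange(3.0), b=np.full(3, 2.0), c=np.ones(3)))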
Example #5
    def __init__(self, dist_expr=None, poni1_expr=None, poni2_expr=None,
                 rot1_expr=None, rot2_expr=None, rot3_expr=None, wavelength_expr=None,
                 param_names=None, pos_names=None, constants=None,
                 content=None):
        """Constructor of the class

        :param dist_expr: formula (as a string) providing the dist
        :param poni1_expr: formula (as a string) providing the poni1
        :param poni2_expr: formula (as a string) providing the poni2
        :param rot1_expr: formula (as a string) providing the rot1
        :param rot2_expr: formula (as a string) providing the rot2
        :param rot3_expr: formula (as a string) providing the rot3
        :param wavelength_expr: formula (as a string) to calculate the wavelength, in angstrom
        :param param_names: list of names of the parameters used in the model
        :param pos_names: list of motor names for gonio with >1 degree of freedom
        :param constants: a dictionary with some constants the user may want to use
        :param content: Should be None or the name of the class (may be used
            in the future to dispatch to multiple derivative classes)
        """
        if content is not None:
            # Ensures we use the constructor of the right class
            assert content in (self.__class__.__name__, "ExtendedTransformation")
        if numexpr is None:
            raise RuntimeError("This Transformation requires the *numexpr* package")
        self.expressions = OrderedDict()

        if dist_expr is not None:
            self.expressions["dist"] = dist_expr
        if poni1_expr is not None:
            self.expressions["poni1"] = poni1_expr
        if poni2_expr is not None:
            self.expressions["poni2"] = poni2_expr
        if rot1_expr is not None:
            self.expressions["rot1"] = rot1_expr
        if rot2_expr is not None:
            self.expressions["rot2"] = rot2_expr
        if rot3_expr is not None:
            self.expressions["rot3"] = rot3_expr
        if wavelength_expr is not None:
            self.expressions["wavelength"] = wavelength_expr
        self.ParamNT = namedtuple("ParamNT", list(self.expressions.keys()))
        self.variables = {"pi": numpy.pi,
                          "hc": CONST_hc,
                          "q": CONST_q}
        if constants is not None:
            self.variables.update(constants)
        self.param_names = tuple(param_names) if param_names is not None else tuple()
        if pos_names is not None:
            self.pos_names = tuple(pos_names)
        else:
            self.pos_names = ("pos",)
        for key in self.param_names + self.pos_names:
            if key in self.variables:
                raise RuntimeError("The keyword %s is already defined, please chose another variable name")
            self.variables[key] = numpy.NaN

        self.codes = OrderedDict(((name, numexpr.NumExpr(expr)) for name, expr in self.expressions.items()))
Example #6
def numexpr_pipe(f):
    if not have_numexpr:
        raise RuntimeError("Numexpr is not installed")

    # Compile once, then evaluate each batch of inputs received from the
    # upstream stage and hand the result downstream (written here as a
    # generator coroutine; the original used the framework's await()/send()).
    vm = numexpr.NumExpr(f)
    outputs = None
    while True:
        inputs = (yield outputs)
        outputs = vm(inputs)
Example #7
    def __init__(self, thickness=None, efficiency=None):
        """Class to simulate the decay of the parallax effect

        :param thickness: thickness of the sensitive layer, in meter
        :param efficiency: efficiency for the sensor material between 0 and 1
        """
        self.thickness = None
        self.efficiency = None
        if thickness is not None:
            self.thickness = float(thickness)
        if efficiency is not None:
            self.efficiency = float(efficiency)
        if self.thickness and self.efficiency:
            BaseSensor.__init__(self, -log(1.0 - self.efficiency) / self.thickness)
        else:
            BaseSensor.__init__(self, None)
        self.formula = numexpr.NumExpr("where(x<0, 0.0, mu*exp(-mu*x))")
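Evaluating the compiled decay formula is straightforward; a sketch with invented numbers (mu would be -log(1-efficiency)/thickness as above):

    import numpy as np
    import numexpr

    formula = numexpr.NumExpr("where(x<0, 0.0, mu*exp(-mu*x))")
    x = np.linspace(-1e-4, 2e-3, 5)       # depth in the sensor, in meter
    args = {"x": x, "mu": 1000.0}         # invented absorption coefficient, 1/m
    print(formula(*(args[name] for name in formula.input_names)))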
Example #8
        def cb(formula):
            symbols = list(ne.NumExpr(formula).input_names)
            curvenames = [curvecorr[x] for x in symbols]
            sers = [self.curves[c].series for c in curvenames]
            if not len(sers):
                return

            argsdf = pd.concat(sers, axis=1, keys=symbols)
            argsdf = argsdf.interpolate()
            args = argsdf.to_dict(orient='series')

            newvals = ne.evaluate(formula, local_dict=args)
            newname = self.validateNewCurveName(formula, True)
            newseries = pd.Series(newvals,
                                  index=argsdf.index,
                                  name=newname,
                                  dtype='float64')
            self.addSeriesAsCurve(newseries)
Example #9
def evaluate_form(form, parameters, global_dict=None, out=None, evaluate=True):
    """
    Evaluates a functional form from a string.

    """

    known_vals = {"PI": np.pi}

    if global_dict is None:
        global_dict = known_vals
    else:
        global_dict = global_dict.copy()
        global_dict.update(known_vals)

    if evaluate:
        return ne.evaluate(form, local_dict=parameters, global_dict=global_dict, out=out)
    else:
        return ne.NumExpr(form)
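A possible call of evaluate_form, with an invented harmonic form and parameter values:

    import numpy as np

    r = np.linspace(0.9, 1.1, 5)
    # evaluate=True: run ne.evaluate with `parameters` as the local dict
    energies = evaluate_form("0.5 * k * (r - r0)**2",
                             {"k": 450.0, "r0": 1.0, "r": r})
    # evaluate=False: only compile and return the ne.NumExpr object
    compiled = evaluate_form("0.5 * k * (r - r0)**2", {}, evaluate=False)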
Example #10
class CompiledFlux(object):
    """
	An efficient pre-compiled form of a multi-component flux. For single-element evalutions
	this is ~2 times faster than switching on the primary type with an if statement; for 1e5
	samples it is 2000 times faster than operating on masked slices for each primary type.
	"""
    # build_lookup (shown below) must already be defined when this class
    # attribute is evaluated, e.g. as a module-level helper in the full source.
    pdg_to_corsika = numexpr.NumExpr(
        build_lookup([(int(PDGCode.from_corsika(v)), v)
                      for v in ParticleType.values.keys()]))

    def __init__(self, expr):
        self.expr = numexpr.NumExpr(expr)
        # by default, assume PDG codes
        self._translator = CompiledFlux.pdg_to_corsika

    def to_PDG(self):
        """
		Convert to a form that takes PDG codes rather than CORSIKA codes.
		"""
        new = copy.copy(self)
        new._translator = CompiledFlux.pdg_to_corsika
        return new

    def __call__(self, E, ptype):
        """
		:param E: particle energy in GeV
		:param ptype: particle type code
		:type ptype: int
		"""
        if self._translator:
            ptype = self._translator(ptype)
        return self.expr(E, ptype)

    @staticmethod
    def build_lookup(mapping, var='ptype', default=0.):
        """
        Build an expression equivalent to a lookup table: a chain of
        nested where() clauses mapping each code to its partner.
        """
        if len(mapping) > 0:
            return 'where(%s==%s, %s, %s)' % (
                var, mapping[0][0], mapping[0][1],
                CompiledFlux.build_lookup(mapping[1:], var, default))
        else:
            return str(default)
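What build_lookup generates is just a chain of nested where() clauses; a sketch with a made-up two-entry PDG-to-CORSIKA mapping, written as a module-level helper:

    import numpy
    import numexpr

    def build_lookup(mapping, var='ptype', default=0.):
        # same recursion as the staticmethod above
        if len(mapping) > 0:
            return 'where(%s==%s, %s, %s)' % (
                var, mapping[0][0], mapping[0][1],
                build_lookup(mapping[1:], var, default))
        return str(default)

    expr = build_lookup([(2212, 14), (13, 6)])
    # expr == 'where(ptype==2212, 14, where(ptype==13, 6, 0.0))'
    translator = numexpr.NumExpr(expr)
    print(translator(numpy.array([2212, 13, 211])))   # [14.  6.  0.]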
Example #11
def nonbonded_eval(coords, atom_types, form, parameters):
    """
    Evaluates the nonbonded energy of the internal box
    """

    params = {k: np.atleast_2d(v) for k, v in parameters.items()}

    # Find parameter types; this function will supply the "r" (distance) parameter
    ptypes = [(p, np.double) for p in parameters.keys()]
    ptypes.append(("r", np.double))

    # Compile the NumExpr
    form = "sum(" + form + ")"
    try:
        expr = ne.NumExpr(form, signature=ptypes)
    except ValueError:
        raise KeyError(
            "nb_eval: Not all parameters for form %s resolved, found keys %s" %
            (form, [p[0] for p in ptypes]))

    local_params = {}
    energy = 0.0
    for i in range(1, coords.shape[0]):

        # Compute distances
        tmp = coords[:i] - coords[i]
        dR = np.sqrt(np.einsum('ij,ij->i', tmp, tmp))
        local_params["r"] = dR

        # Organize NB data
        local_atom_type = atom_types[i]
        for key, data in parameters.items():
            local_params[key] = np.take(data[local_atom_type], atom_types[:i])

        # Evaluate!
        energy += expr.run(*(local_params[key] for key in expr.input_names))

    return energy
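A toy invocation of nonbonded_eval with a Lennard-Jones form and a single atom type (all values invented):

    import numpy as np

    coords = np.array([[0.0, 0.0, 0.0],
                       [3.5, 0.0, 0.0],
                       [0.0, 3.8, 0.0],
                       [3.5, 3.8, 0.0]])
    atom_types = np.zeros(4, dtype=int)
    parameters = {"epsilon": np.array([0.25]), "sigma": np.array([3.4])}
    form = "4*epsilon*((sigma/r)**12 - (sigma/r)**6)"
    print(nonbonded_eval(coords, atom_types, form, parameters))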
Example #12
    def common_headers(self):
        headers = {
            # SPECIAL_CCD_1
            "delectronsperadu": 1,
            "ldarkcorrectionswitch": 0,
            "lfloodfieldcorrectionswitch/mode": 0,
            "dsystemdcdb2gain": 1.0,
            "ddarksignal": 0,
            "dreadnoiserms": 0,
            # SPECIAL_CCD_2
            "ioverflowflag": 0,
            "ioverflowafterremeasureflag": 0,
            "inumofdarkcurrentimages": 0,
            "inumofmultipleimages": 0,
            "loverflowthreshold": 1000000,
            # SPECIAL_CCD_3
            # SPECIAL_CCD_4
            # SPECIAL_CCD_5
            # TIME
            # "dexposuretimeinsec": 0.2,
            "doverflowtimeinsec": 0,
            "doverflowfilter": 0,
            # MONITOR
            # PIXELSIZE
            # "drealpixelsizex": 0.075,
            # "drealpixelsizey": 0.075,
            "dsithicknessmmforpixeldetector": 1,
            # TIMESTAMP
            "timestampstring": get_isotime(),
            # GRIDPATTERN
            # STARTANGLESINDEG
            #             "dom_s":-180 + i,
            #             "dth_s":0,
            #             "dka_s":0,
            #             "dph_s":0,
            # ENDANGLESINDEG
            #             "dom_e":-179 + i,
            #             "dth_e": 0,
            #             "dka_e": 0,
            #             "dph_e": 0,
            # GONIOMODEL_1
            "dbeam2indeg": 0,
            "dbeam3indeg": 0,
            "detectorrotindeg_x": 0,
            "detectorrotindeg_y": 0,
            "detectorrotindeg_z": 0,
            #             "dxorigininpix":  img.data.shape[1] - (img.data.shape[1] - data.shape[1]) / 2 - center_x,
            #             "dyorigininpix": img.data.shape[0] - center_y,
            "dalphaindeg": 50,
            "dbetaindeg": 0,
            #                 "ddistanceinmm": 117,
            # GONIOMODEL_2
            # WAVELENGTH
            # "dalpha1": wl,
            # "dalpha2": wl,
            # "dalpha12": wl,
            # "dbeta1": wl,
            # MONOCHROMATOR
            "ddvalue-prepolfac": 0.98,
            "orientation-type": "SYNCHROTRON",
            # ABSTORUN
        }

        with fabio_open(self.options.images[0]) as source:
            shape = source.data.shape
            dtype = source.data.dtype
            if self.progress is not None:
                self.progress.max_value = source.nframes * len(
                    self.options.images)
            if isinstance(source, limaimage.LimaImage):
                # Populate the Pilatus header from the Lima
                entry_name = source.h5.attrs.get("default")
                if entry_name:
                    entry = source.h5.get(entry_name)
                    if entry:
                        data_name = entry.attrs["default"]
                        if data_name:
                            data_grp = entry.get(data_name)
                            if data_grp:
                                nxdetector = data_grp.parent
                                try:
                                    headers["drealpixelsizex"] = nxdetector[
                                        "detector_information/pixel_size/xsize"][
                                            ()] * 1e3
                                    headers["drealpixelsizey"] = nxdetector[
                                        "detector_information/pixel_size/ysize"][
                                            ()] * 1e3
                                except Exception as e:
                                    logger.warning(
                                        "Error in searching for pixel size (%s): %s",
                                        type(e), e)
                                try:
                                    t1 = nxdetector[
                                        "acquisition/exposure_time"][()]
                                    headers["dexposuretimeinsec"] = t1
                                except Exception as e:
                                    logger.warning(
                                        "Error in searching for exposure time (%s): %s",
                                        type(e), e)
            elif isinstance(source, eigerimage.EigerImage):
                raise NotImplementedError(
                    "Please implement Eiger detector data format parsing or at least open an issue"
                )
            else:
                raise NotImplementedError("Unsupported format: %s" %
                                          source.__class__.__name__)
        if self.mask is None:
            self.mask = numpy.zeros(shape, dtype=dtype)
        # Parse option for headers
        if self.options.energy:
            wavelength = CONST_hc / self.options.energy
        elif self.options.wavelength:
            wavelength = self.options.wavelength
        headers["dalpha1"] = headers["dalpha2"] = headers[
            "dalpha12"] = headers["dbeta1"] = wavelength
        if self.options.distance:
            headers["ddistanceinmm"] = self.options.distance
        if self.options.beam:
            x, y = self.options.beam
            x, y = self.new_beam_center(x, y, shape)
            headers["dxorigininpix"] = x
            headers["dyorigininpix"] = y
        if self.options.alpha:
            headers["dalphaindeg"] = self.options.alpha
        if self.options.kappa is not None:
            try:
                value = float(self.options.kappa)
            except ValueError:  # Handle the string
                value = numexpr.NumExpr(self.options.kappa)
            headers["dka_s"] = headers["dka_e"] = value
        if self.options.theta is not None:
            try:
                value = float(self.options.theta)
            except ValueError:  # Handle the string
                value = numexpr.NumExpr(self.options.theta)
            headers["dth_s"] = headers["dth_e"] = value
        if self.options.phi is not None:
            try:
                value = float(self.options.phi)
            except ValueError:  # Handle the string
                value = numexpr.NumExpr(self.options.phi)
            headers["dph_s"] = headers["dph_e"] = value
        if self.options.omega is not None:
            try:
                value = float(self.options.omega)
            except ValueError:
                # Handle the string
                value = numexpr.NumExpr(self.options.omega)
            headers["dom_s"] = headers["dom_e"] = value
        return headers
Example #13
    def setup(self, kwargs=None):
        """
        see class documentation
        """
        Plugin.setup(self, kwargs)
        if "output_dir" not in self.input:
            self.log_error("output_dir not in input")
        self.dest = os.path.abspath(self.input["output_dir"])

        if "unit" in self.input:
            self.unit = self.input.get("unit")

        if "metadata_job" in self.input:
            job_id = int(self.input.get("metadata_job"))
            status = Job.synchronize_job(job_id, self.TIMEOUT)
            abort_time = time.time() + self.TIMEOUT
            while status == Job.STATE_UNINITIALIZED:
                # Wait for job to start
                time.sleep(1)
                status = Job.synchronize_job(job_id, self.TIMEOUT)
                if time.time() > abort_time:
                    self.log_error(
                        "Timeout while waiting for the metadata plugin to finish")
                    break
            if status == Job.STATE_SUCCESS:
                self.metadata_plugin = Job.getJobFromId(job_id)
            else:
                self.log_error("Metadata plugin ended in %s: aborting myself" %
                               status)
        if not os.path.isdir(self.dest):
            os.makedirs(self.dest)
        c216_filename = os.path.abspath(self.input.get("c216_filename", ""))

        if (os.path.dirname(c216_filename) != self.dest) and (
                os.path.basename(c216_filename) not in os.listdir(self.dest)):
            self.output_hdf5["metadata"] = os.path.join(
                self.dest, os.path.basename(c216_filename))
            m = threading.Thread(target=shutil.copy,
                                 name="copy metadata",
                                 args=(c216_filename, self.dest))
            m.start()

        if "to_save" in self.input:
            to_save = self.input["to_save"][:]
            if type(to_save) in StringTypes:
                # fix a bug from spec ...
                self.to_save = [i.strip('[\\] ",') for i in to_save.split()]
                self.log_warning("processing planned: " +
                                 " ".join(self.to_save))
            else:
                self.to_save = to_save
        if "image_file" not in self.input:
            self.log_error("image_file not in input")
        self.image_file = self.input["image_file"]
        if not os.path.exists(self.image_file):
            if not self.image_file.startswith("/"):
                # prepend the dirname of the c216
                image_file = os.path.join(os.path.dirname(c216_filename),
                                          self.image_file)
                if os.path.exists(image_file):
                    self.image_file = image_file
                else:
                    self.log_error("image_file %s does not exist" %
                                   self.image_file)

        self.dark_filename = self.input.get("dark_filename")
        if "raw" in self.to_save:
            if os.path.dirname(self.image_file) != self.dest:
                t = threading.Thread(target=shutil.copy,
                                     name="copy raw",
                                     args=(self.image_file, self.dest))
                t.start()
            self.output_hdf5["raw"] = os.path.join(
                self.dest, os.path.basename(self.image_file))
            if type(self.dark_filename) in StringTypes and os.path.exists(
                    self.dark_filename):
                if os.path.dirname(self.dark_filename) != self.dest:
                    d = threading.Thread(target=shutil.copy,
                                         name="copy dark",
                                         args=(self.dark_filename, self.dest))
                    d.start()
                self.output_hdf5["dark"] = os.path.join(
                    self.dest, os.path.basename(self.dark_filename))
        self.scaling_factor = float(self.input.get("scaling_factor", 1.0))
        self.correct_solid_angle = bool(
            self.input.get("correct_solid_angle", True))
        self.correct_I1 = bool(self.input.get("correct_I1", True))
        self.I1, self.t = self.load_I1_t(c216_filename)

        # Variance formula: calculation of the function
        if "variance_formula" in self.input:
            self.variance_formula = self.input.get("variance_formula")
            if self.variance_formula:
                self.variance_function = numexpr.NumExpr(
                    self.variance_formula, [("data", numpy.float64),
                                            ("dark", numpy.float64)])
Example #14
        grid.subnetwork_length, state.subnetwork_cross_section_area)
    state.subnetwork_depth = calculate_subnetwork_depth(
        base_condition, length_condition, state.subnetwork_cross_section_area,
        parameters.tiny_value, grid.subnetwork_width, state.subnetwork_depth)
    state.subnetwork_wetness_perimeter = calculate_subnetwork_wetness_perimeter(
        base_condition, length_condition, state.subnetwork_depth,
        parameters.tiny_value, grid.subnetwork_width,
        state.subnetwork_wetness_perimeter)
    state.subnetwork_hydraulic_radii = calculate_subnetwork_hydraulic_radii(
        base_condition, length_condition, state.subnetwork_wetness_perimeter,
        parameters.tiny_value, state.subnetwork_cross_section_area,
        state.subnetwork_hydraulic_radii)


calculate_length_condition = ne.NumExpr(
    '(subnetwork_length > 0) & (subnetwork_storage > 0)',
    (('subnetwork_length', np.float64), ('subnetwork_storage', np.float64)))

calculate_subnetwork_cross_section_area = ne.NumExpr(
    'where('
    'base_condition,'
    'where('
    'length_condition,'
    'subnetwork_storage / subnetwork_length,'
    '0'
    '),'
    'subnetwork_cross_section_area'
    ')',
    (('base_condition', np.bool), ('length_condition', np.bool),
     ('subnetwork_storage', np.float64), ('subnetwork_length', np.float64),
     ('subnetwork_cross_section_area', np.float64)))
Example #15
import numpy as np
import numexpr as ne

from mosartwmpy.state.state import State

def update_hillslope_state(state: State, base_condition: np.ndarray) -> None:
    """Updates the depth of water remaining in the hillslope.

    Args:
        state (State): the current model state; will be mutated
        base_condition (np.ndarray): a boolean array representing where the update should occur in the state
    """
    
    state.hillslope_depth = calculate_hillslope_depth(base_condition, state.hillslope_storage, state.hillslope_depth)

calculate_hillslope_depth = ne.NumExpr(
    'where('
        'base_condition,'
        'hillslope_storage,'
        'hillslope_depth'
    ')',
    (('base_condition', np.bool), ('hillslope_storage', np.float64), ('hillslope_depth', np.float64))
)
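With an explicit signature like this, the compiled object is called positionally in signature order; a toy call of calculate_hillslope_depth defined above (arrays invented):

    import numpy as np

    condition = np.array([True, False, True])
    storage = np.array([1.5, 2.0, 0.0])
    old_depth = np.array([9.0, 9.0, 9.0])
    # where the condition holds, depth is refreshed from storage; otherwise kept
    print(calculate_hillslope_depth(condition, storage, old_depth))   # [1.5 9.  0. ]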
Example #16
def process(options):
    """Perform actually the processing

    :param options: The arguments parsed by argparse.
    :return: EXIT_SUCCESS or EXIT_FAILURE
    """
    if options.verbose:
        pb = None
    else:
        pb = ProgressBar("Sparsification", 100, 30)

    logger.debug("Count the number of frames")
    if pb:
        pb.update(0, message="Count the number of frames")
    dense = [fabio.open(f) for f in options.images]
    nframes = sum([f.nframes for f in dense])

    logger.debug("Initialize the geometry")
    if pb:
        pb.update(0, message="Initialize the geometry", max_value=nframes)
    ai = load(options.poni)
    if options.mask is not None:
        mask = fabio.open(options.mask).data
        ai.detector.mask = mask
    else:
        mask = ai.detector.mask
    shape = dense[0].shape

    unit = to_unit(options.unit)
    if options.radial_range is not None:
        rrange = [float(i) for i in options.radial_range]
    else:
        rrange = None

    integrator = ai.setup_CSR(shape,
                              options.bins,
                              mask=mask,
                              pos0_range=rrange,
                              unit=unit,
                              split="no",
                              scale=False)

    logger.debug("Initialize the OpenCL device")
    if pb:
        pb.update(0, message="Initialize the OpenCL device")

    if options.device is not None:
        ctx = ocl.create_context(
            platformid=options.device[0],
            deviceid=options.device[1],
        )
    else:
        ctx = ocl.create_context(devicetype=options.device_type)

    logger.debug("Initialize the sparsificator")
    pf = OCL_PeakFinder(integrator.lut,
                        image_size=shape[0] * shape[1],
                        empty=options.dummy,
                        unit=unit,
                        bin_centers=integrator.bin_centers,
                        radius=ai._cached_array[unit.name.split("_")[0] +
                                                "_center"],
                        mask=mask,
                        ctx=ctx,
                        profile=options.profile,
                        block_size=options.workgroup)

    logger.debug("Start sparsification")
    frames = []

    cnt = 0
    if "I" in options.error_model:
        variance = numexpr.NumExpr(options.error_model)
        error_model = None
    else:
        error_model = options.error_model
        variance = None

    parameters = OrderedDict([("dummy", options.dummy),
                              ("delta_dummy", options.delta_dummy),
                              ("safe", False), ("error_model", error_model),
                              ("cutoff_clip", options.cutoff_clip),
                              ("cycle", options.cycle),
                              ("noise", options.noise),
                              ("cutoff_pick", options.cutoff_pick),
                              ("radial_range", rrange)])
    if options.solidangle:
        parameters["solidangle"], parameters[
            "solidangle_checksum"] = ai.solidAngleArray(with_checksum=True)
    if options.polarization is not None:
        parameters["polarization"], parameters[
            "polarization_checksum"] = ai.polarization(
                factor=options.polarization, with_checksum=True)
    t0 = time.perf_counter()
    for fabioimage in dense:
        for frame in fabioimage:
            intensity = frame.data
            current = pf.sparsify(
                intensity,
                variance=None if variance is None else variance(intensity),
                **parameters)
            frames.append(current)
            if pb:
                pb.update(cnt,
                          message="%s: %i pixels" % (os.path.basename(
                              fabioimage.filename), current.intensity.size))
            else:
                print("%s frame #%d, found %d intense pixels" %
                      (fabioimage.filename, fabioimage.currentframe,
                       current.intensity.size))
            cnt += 1
    t1 = time.perf_counter()
    if pb:
        pb.update(nframes, message="Saving: " + options.output)
        pb.clear()
    else:
        print("Saving: " + options.output)
    logger.debug("Save data")

    parameters["unit"] = unit.name.split("_")[0]
    parameters["error_model"] = options.error_model

    if options.polarization is not None:
        parameters.pop("polarization")
        parameters.pop("polarization_checksum")
        parameters["polarization_factor"] = options.polarization
    if options.solidangle:
        parameters.pop("solidangle")
        parameters.pop("solidangle_checksum")
        parameters["correctSolidAngle"] = True
    save_sparse(options.output,
                frames,
                beamline=options.beamline,
                ai=ai,
                source=options.images if options.save_source else None,
                extra=parameters)

    if options.profile:
        try:
            pf.log_profile(True)
        except Exception:
            pf.log_profile()
    if pb:
        pb.clear()
    logger.info(f"Total sparsification time: %.3fs \t (%.3f fps)", t1 - t0,
                cnt / (t1 - t0))

    return EXIT_SUCCESS
Example #17
    flow_volume = calculate_flow_volume(depth_condition, tiny_condition, volume_condition, flow_volume, state.grid_cell_unmet_demand)
    
    state.grid_cell_unmet_demand = calculate_reservoir_demand(depth_condition, tiny_condition, volume_condition, state.grid_cell_unmet_demand, parameters.irrigation_extraction_maximum_fraction, flow_volume)
    
    flow_volume = update_flow_volume(depth_condition, tiny_condition, volume_condition, parameters.irrigation_extraction_maximum_fraction, flow_volume)
    
    state.channel_storage = calculate_channel_storage(depth_condition, tiny_condition, flow_volume, state.channel_storage)
    
    # TODO ? fortran mosart appears to do some more math with temp_erout and TRunoff%erout that looks to me to always be zero
    
    update_main_channel_state(state, grid, parameters, depth_condition)

calculate_depth_condition = ne.NumExpr(
    '(mosart_mask > 0) &'
    'euler_mask &'
    '(tracer == LIQUID_TRACER) &'
    '(channel_depth >= irrigation_extraction_condition)',
    (('mosart_mask', np.int64), ('euler_mask', np.bool), ('tracer', np.int64), ('LIQUID_TRACER', np.int64), ('channel_depth', np.float64), ('irrigation_extraction_condition', np.float64))
)

calculate_tiny_condition = ne.NumExpr(
    '(channel_storage > tinier_value) &'
    '(reservoir_demand > tinier_value) &'
    '(channel_length > tinier_value)',
    (('channel_storage', np.float64), ('tinier_value', np.float64), ('reservoir_demand', np.float64), ('channel_length', np.float64))
)

calculate_volume_condition = ne.NumExpr(
    'irrigation_extraction_maximum_fraction * flow_volume >= reservoir_demand',
    (('irrigation_extraction_maximum_fraction', np.float64), ('flow_volume', np.float64), ('reservoir_demand', np.float64))
)
Example #18
            iteration_condition, state.channel_lateral_flow_hillslope,
            state.subnetwork_discharge)

    # average lateral flow over substeps
    state.channel_lateral_flow_hillslope = average_channel_lateral_flow_hillslope(
        base_condition, state.channel_lateral_flow_hillslope,
        grid.iterations_subnetwork)


calculate_subnetwork_flow_velocity = ne.NumExpr(
    'where('
    'base_condition & length_condition,'
    'where('
    'subnetwork_hydraulic_radii > 0,'
    '(subnetwork_hydraulic_radii ** (2/3)) * sqrt(subnetwork_slope) / subnetwork_manning,'
    '0'
    '),'
    'subnetwork_flow_velocity'
    ')', (('base_condition', np.bool), ('length_condition', np.bool),
          ('subnetwork_hydraulic_radii', np.float64),
          ('subnetwork_slope', np.float64), ('subnetwork_manning', np.float64),
          ('subnetwork_flow_velocity', np.float64)))

calculate_subnetwork_discharge = ne.NumExpr(
    'where('
    'base_condition,'
    'where('
    'length_condition,'
    '-subnetwork_flow_velocity * subnetwork_cross_section_area,'
    '-subnetwork_lateral_inflow'
    '),'
Example #19
    tmp_delta_runoff = calculate_tmp_delta_runoff(
        base_condition, state.hillslope_wetland_runoff, grid.area,
        grid.drainage_fraction)
    tmp_delta_runoff = update_tmp_delta_runoff(base_condition,
                                               tmp_delta_runoff,
                                               parameters.tiny_value)
    state.channel_delta_storage = calculate_channel_delta_storage(
        base_condition, state.channel_lateral_flow_hillslope,
        state.channel_inflow_upstream, state.channel_outflow_downstream,
        tmp_delta_runoff)


calculate_channel_inflow_upstream = ne.NumExpr(
    'where('
    'base_condition,'
    '-channel_outflow_sum_upstream_instant,'
    'channel_inflow_upstream'
    ')', (('base_condition', np.bool),
          ('channel_outflow_sum_upstream_instant', np.float64),
          ('channel_inflow_upstream', np.float64)))

calculate_channel_flow_velocity = ne.NumExpr(
    'where('
    'base_condition,'
    'where('
    '(channel_length > 0) & (channel_hydraulic_radii > 0),'
    '(channel_hydraulic_radii ** (2/3)) * sqrt(channel_slope) / channel_manning,'
    '0'
    '),'
    'channel_flow_velocity'
    ')',
    (('base_condition', np.bool), ('channel_length', np.float64),
Example #20
    def __init__(self,
                 name,
                 scale=1,
                 label=None,
                 equation=None,
                 formula=None,
                 center=None,
                 corner=None,
                 delta=None,
                 short_name=None,
                 unit_symbol=None):
        """Constructor of a unit.

        :param str name: name of the unit
        :param float scale: scale of the unit to go to SI
        :param str label: label for nice representation in matplotlib,
                                can use latex representation
        :param func equation: equation to calculate the value from coordinates
                                 (x,y,z) in detector space.
                                 Parameters of the function are `x`, `y`, `z`, `wavelength`
        :param str formula: string with the mathematical formula.
                       Valid variable names are `x`, `y`, `z`, `λ` and the constant `π`
        :param str center: name of the fast-path function
        :param str unit_symbol: symbol used to display values of this unit
        """
        self.name = name
        self.scale = scale
        self.label = label if label is not None else name
        self.corner = corner
        self.center = center
        self.delta = delta
        self._equation = equation
        self.formula = formula
        if (numexpr is not None) and isinstance(formula, str):
            signature = [
                ("x", numpy.float64),
                ("y", numpy.float64),
            ]
            if "z" in formula:
                signature.append(("z", numpy.float64))
            if "λ" in formula:
                signature.append(("λ", numpy.float64))
            if "π" in formula:
                signature.append(("π", numpy.float64))
            ne_formula = numexpr.NumExpr(formula, signature)

            def ne_equation(x,
                            y,
                            z=None,
                            wavelength=None,
                            ne_formula=ne_formula):
                π = numpy.pi
                λ = wavelength
                ldict = locals()
                args = tuple(ldict[i] for i in ne_formula.input_names)
                return ne_formula(*args)

            self.equation = ne_equation
        else:
            self.equation = self._equation
        self.short_name = short_name
        self.unit_symbol = unit_symbol
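A reduced sketch of the signature-building logic above, using an invented radial formula that only needs x, y and z:

    import numpy
    import numexpr

    formula = "sqrt(x * x + y * y) / z"
    signature = [("x", numpy.float64), ("y", numpy.float64)]
    if "z" in formula:
        signature.append(("z", numpy.float64))
    ne_formula = numexpr.NumExpr(formula, signature)

    x = numpy.array([0.03])
    y = numpy.array([0.04])
    z = numpy.array([1.0])
    values = {"x": x, "y": y, "z": z}
    print(ne_formula(*(values[n] for n in ne_formula.input_names)))   # [0.05]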
Example #21
    state.grid_cell_unmet_demand = subtract(state.grid_cell_unmet_demand,
                                            supplied)

    # add the residual flow volume back
    state.channel_outflow_downstream[:] -= pd.DataFrame(
        grid.reservoir_id, columns=['reservoir_id']).merge(
            reservoir_demand_flow.flow_volume,
            how='left',
            left_on='reservoir_id',
            right_index=True).flow_volume.fillna(0).values / delta_t


calculate_flow_volume = ne.NumExpr(
    'where('
    'has_reservoir,'
    '-(reservoir_flow_volume_ratio * delta_t * channel_outflow_downstream),'
    '0'
    ')',
    (('has_reservoir', np.bool), ('reservoir_flow_volume_ratio', np.float64),
     ('delta_t', np.float64), ('channel_outflow_downstream', np.float64)))

remove_flow = ne.NumExpr(
    'where('
    'has_reservoir,'
    'channel_outflow_downstream + flow_volume / delta_t,'
    'channel_outflow_downstream'
    ')',
    (('has_reservoir', np.bool), ('channel_outflow_downstream', np.float64),
     ('flow_volume', np.float64), ('delta_t', np.float64)))

divide = ne.NumExpr('a / b', (('a', np.float64), ('b', np.float64)))
Example #22
        depth_condition, volume_condition, state.grid_cell_unmet_demand,
        flow_volume)

    flow_volume = update_flow_volume(depth_condition, volume_condition,
                                     flow_volume)

    state.subnetwork_storage = calculate_subnetwork_storage(
        depth_condition, flow_volume, state.subnetwork_storage)

    update_subnetwork_state(state, grid, parameters, depth_condition)


calculate_depth_condition = ne.NumExpr(
    '(mosart_mask > 0) &'
    'euler_mask &'
    '(tracer == LIQUID_TRACER) &'
    '(subnetwork_depth >= irrigation_extraction_condition)',
    (('mosart_mask', np.int64), ('euler_mask', np.bool), ('tracer', np.int64),
     ('LIQUID_TRACER', np.int64), ('subnetwork_depth', np.float64),
     ('irrigation_extraction_condition', np.float64)))

calculate_volume_condition = ne.NumExpr('flow_volume >= reservoir_demand',
                                        (('flow_volume', np.float64),
                                         ('reservoir_demand', np.float64)))

calculate_reservoir_supply = ne.NumExpr(
    'where('
    'depth_condition,'
    'where('
    'volume_condition,'
    'reservoir_supply + reservoir_demand,'
    'reservoir_supply + flow_volume'
Example #23
    state.hillslope_storage = calculate_hillslope_storage(
        base_condition, state.hillslope_storage, delta_t,
        state.hillslope_delta_storage)

    update_hillslope_state(state, base_condition)

    state.subnetwork_lateral_inflow = calculate_subnetwork_lateral_inflow(
        base_condition, state.hillslope_subsurface_runoff,
        state.hillslope_overland_flow, grid.drainage_fraction, grid.area,
        state.subnetwork_lateral_inflow)


calculate_velocity_hillslope = ne.NumExpr(
    'where('
    'base_condition & (hillslope_depth > 0),'
    '(hillslope_depth ** (2/3)) * sqrt(hillslope) / hillslope_manning,'
    '0'
    ')', (('base_condition', np.bool), ('hillslope_depth', np.float64),
          ('hillslope', np.float64), ('hillslope_manning', np.float64)))

calculate_base_hillslope_overland_flow = ne.NumExpr(
    'where('
    'base_condition,'
    '-hillslope_depth * velocity_hillslope * drainage_density,'
    'hillslope_overland_flow'
    ')', (('base_condition', np.bool), ('hillslope_depth', np.float64),
          ('velocity_hillslope', np.float64), ('drainage_density', np.float64),
          ('hillslope_overland_flow', np.float64)))

calculate_hillslope_overland_flow = ne.NumExpr(
    'where('
Example #24
                                        parameters.slope_1_def)
    state.channel_wetness_perimeter = calculate_channel_wetness_perimeter(
        base_condition, condition, state.channel_depth, parameters.tiny_value,
        not_flooded, grid.channel_width, delta_depth, grid.grid_channel_depth,
        grid.channel_floodplain_width, parameters.slope_1_def,
        parameters.inverse_sin_atan_slope_1_def,
        state.channel_wetness_perimeter)

    state.channel_hydraulic_radii = calculate_channel_hydraulic_radii(
        base_condition, condition, state.channel_wetness_perimeter,
        parameters.tiny_value, state.channel_cross_section_area,
        state.channel_hydraulic_radii)


calculate_storage_condition = ne.NumExpr(
    '(channel_length > 0) & (channel_storage > 0)',
    (('channel_length', np.float64), ('channel_storage', np.float64)))

calculate_channel_cross_section_area = ne.NumExpr(
    'where('
    'base_condition,'
    'where('
    'storage_condition,'
    'channel_storage / channel_length,'
    '0'
    '),'
    'channel_cross_section_area'
    ')', (('base_condition', np.bool), ('storage_condition', np.bool),
          ('channel_storage', np.float64), ('channel_length', np.float64),
          ('channel_cross_section_area', np.float64)))
Example #25
 def __init__(self, expr):
     self.expr = numexpr.NumExpr(expr)
     # by default, assume PDG codes
     self._translator = CompiledFlux.pdg_to_corsika
Example #26
def convert_one(input_filename, options, start_at=0):
    """
    Convert a single file using options

    :param str input_filename: The input filename
    :param object options: List of options provided from the command line
    :param start_at: index to start at for given file
    :rtype: int
    :returns: the number of frames processed
    """
    flip = bool((options.rotation // 90) % 2)
    if options.transpose:
        flip = not flip

    input_filename = os.path.abspath(input_filename)
    input_exists = os.path.exists(input_filename)

    if options.verbose:
        print("Converting file '%s'" % (input_filename))

    if not input_exists:
        logger.error("Input file '%s' do not exists. Conversion skipped.",
                     input_filename)
        return -1

    try:
        logger.debug("Load '%s'", input_filename)
        source = fabio.open(input_filename)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        logger.error(
            "Loading input file '%s' failed because: \"%s\". Conversion skipped.",
            input_filename, e)
        logger.debug("Backtrace", exc_info=True)
        return -1

    shape = select_detecor((source.shape[-1],
                            source.shape[-2]) if flip else source.shape)
    pilatus_headers = fabio.cbfimage.PilatusHeader(
        "Silicon sensor, thickness 0.001 m")
    if isinstance(source, fabio.limaimage.LimaImage):
        # Populate the Pilatus header from the Lima
        entry_name = source.h5.attrs.get("default")
        if entry_name:
            entry = source.h5.get(entry_name)
            if entry:
                data_name = entry.attrs["default"]
                if data_name:
                    data_grp = entry.get(data_name)
                    if data_grp:
                        nxdetector = data_grp.parent
                        try:
                            detector = "%s, S/N %s" % (
                                nxdetector["detector_information/model"][()],
                                nxdetector["detector_information/name"][()])
                            pilatus_headers["Detector"] = detector
                        except Exception as e:
                            logger.warning(
                                "Error in searching for detector definition (%s): %s",
                                type(e), e)
                        try:
                            pilatus_headers["Pixel_size"] = (nxdetector[
                                "detector_information/pixel_size/xsize"][(
                                )], nxdetector[
                                    "detector_information/pixel_size/ysize"][(
                                    )])
                        except Exception as e:
                            logger.warning(
                                "Error in searching for pixel size (%s): %s",
                                type(e), e)
                        try:
                            t1 = nxdetector["acquisition/exposure_time"][()]
                            t2 = nxdetector["acquisition/latency_time"][()]
                            pilatus_headers["Exposure_time"] = t1
                            pilatus_headers["Exposure_period"] = t1 + t2
                        except Exception as e:
                            logger.warning(
                                "Error in searching for exposure time (%s): %s",
                                type(e), e)
    elif isinstance(source, fabio.eigerimage.EigerImage):
        raise NotImplementedError(
            "Please implement Eiger detector data format parsing or at least open an issue"
        )
    else:
        raise NotImplementedError("Unsupported format: %s" %
                                  source.__class__.__name__)

    # Parse option for Pilatus headers
    if options.energy:
        pilatus_headers["Wavelength"] = CONST_hc / options.energy
    elif options.wavelength:
        pilatus_headers["Wavelength"] = options.wavelength
    if options.distance:
        pilatus_headers["Detector_distance"] = options.distance
    if options.beam:
        pilatus_headers["Beam_xy"] = options.beam
    if options.alpha:
        pilatus_headers["Alpha"] = options.alpha
    if options.kappa:
        pilatus_headers["Kappa"] = options.kappa
    formula = None
    destination = None
    if options.chi is not None:
        try:
            value = float(options.chi)
        except ValueError:
            # Handle the string
            formula = numexpr.NumExpr(options.chi)
            destination = "Chi"
            pilatus_headers["Oscillation_axis"] = "CHI"
        else:
            pilatus_headers["Chi"] = value
            pilatus_headers["Chi_increment"] = 0.0

    if options.phi is not None:
        try:
            value = float(options.phi)
        except ValueError:
            # Handle the string
            formula = numexpr.NumExpr(options.phi)
            destination = "Phi"
            pilatus_headers["Oscillation_axis"] = "PHI"
        else:
            pilatus_headers["Phi"] = value
            pilatus_headers["Phi_increment"] = 0.0
    if options.omega is not None:
        try:
            value = float(options.omega)
        except ValueError:
            # Handle the string
            formula = numexpr.NumExpr(options.omega)
            destination = "Omega"
            pilatus_headers["Oscillation_axis"] = "OMEGA"
        else:
            pilatus_headers["Omega"] = value
            pilatus_headers["Omega_increment"] = 0.0

    for i, frame in enumerate(source):
        idx = i + start_at
        data = numpy.empty(shape, dtype=numpy.int32)
        data.fill(options.dummy)
        input_data = frame.data.astype(numpy.int32)
        if options.rotation:
            input_data = numpy.rot90(input_data, k=options.rotation // 90)
        if options.transpose:
            input_data = input_data.T
        if options.flip_ud:
            input_data = numpy.flipud(input_data)
        if options.flip_lr:
            input_data = numpy.fliplr(input_data)

        data[:input_data.shape[0], :input_data.shape[1]] = input_data

        mask = numpy.where(input_data == numpy.iinfo(frame.data.dtype).max)
        data[mask] = options.dummy
        converted = fabio.cbfimage.CbfImage(data=data)

        if formula and destination:
            position = formula(idx)
            delta = (formula(idx + 1) - position)
            pilatus_headers["Start_angle"] = pilatus_headers[
                destination] = position
            pilatus_headers["Angle_increment"] = pilatus_headers[
                destination + "_increment"] = delta
        converted.pilatus_headers = pilatus_headers

        output_filename = options.output.format(index=((idx + options.offset)))
        os.makedirs(os.path.dirname(output_filename), exist_ok=True)
        try:
            logger.debug("Write '%s'", output_filename)
            if not options.dry_run:
                converted.write(output_filename)
        except KeyboardInterrupt:
            raise
        except Exception as e:
            logger.error(
                "Saving output file '%s' failed because: \"%s: %s\". Conversion skipped.",
                output_filename, type(e), e)
            logger.debug("Backtrace", exc_info=True)
            return -1
    return source.nframes
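The goniometer options accept either a plain number or an expression of the frame index; a minimal sketch of how such a formula behaves (expression invented, variable name arbitrary):

    import numexpr

    formula = numexpr.NumExpr("-180 + 0.1*index")
    position = formula(5)            # angle written to the Start_angle header
    delta = formula(6) - position    # per-frame increment, e.g. Angle_increment
    print(position, delta)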