Example #1
    def to_xarray(
            self,
            r_dict: Dict[str, bytes],
            geometry: Union[Polygon, MultiPolygon, None] = None) -> xr.Dataset:
        """Convert response to xarray.DataArray."""
        if isinstance(geometry, (Polygon, MultiPolygon)):
            gtiff2xarray = tlz.partial(geoutils.gtiff2xarray,
                                       geometry=geometry,
                                       geo_crs=self.crs)
        else:
            gtiff2xarray = tlz.partial(geoutils.gtiff2xarray)

        try:
            _ds = gtiff2xarray(r_dict=r_dict)
        except rio.RasterioIOError as ex:
            raise ServiceUnavailable(self.wms.url) from ex

        ds: xr.Dataset = _ds.to_dataset() if isinstance(_ds,
                                                        xr.DataArray) else _ds
        ds.attrs = _ds.attrs
        for lyr in self.layers:
            name = [n for n in self.units if n in lyr.lower()][-1]
            lyr_name = f"{name}_{lyr.split('_')[1]}"
            ds = ds.rename({lyr: lyr_name})
            ds[lyr_name].attrs["units"] = self.units[name]
            ds[lyr_name] = ds[lyr_name].astype(self.types[name])
            ds[lyr_name].attrs["nodatavals"] = (self.nodata[name], )
        return ds
Example #2
def pipeline(chip, tx, ty, date, acquired, cfg):

    ctx = {
        'tx': tx,
        'ty': ty,
        'cx': first(chip),
        'cy': second(chip),
        'date': date,
        'acquired': acquired
    }

    return thread_first(
        ctx,
        partial(segments, cfg=cfg),
        segments_filter,
        partial(segaux.aux, cfg=cfg),
        segaux.aux_filter,
        segaux.combine,
        segaux.unload_segments,
        segaux.unload_aux,
        segaux.add_training_dates,
        add_average_reflectance,
        segaux.training_format,
        #segaux.log_chip,
        segaux.exit_pipeline)
Example #3
def create_routes(app, redis):
    route = partial(add_route, app)
    handler = lambda fn: partial(fn, redis)

    route('GET', '/api/services', handler(get_services))
    route('POST', '/api/services', handler(create_service))
    route('GET', '/api/services/{s_id}', handler(get_service))
    route('DELETE', '/api/services/{s_id}', handler(delete_service))
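A small stand-alone sketch of the same pattern, with a toy add_route and a fake redis handle in place of the project's real API:

from functools import partial

def add_route(app, method, path, handler):
    # toy registry; the real add_route presumably wires the handler into the web framework
    app.setdefault('routes', []).append((method, path, handler))

def get_services(redis, request=None):
    return ('services from', redis)

app = {}
route = partial(add_route, app)            # pre-bind the app argument
handler = lambda fn: partial(fn, 'redis')  # pre-bind a shared dependency

route('GET', '/api/services', handler(get_services))
method, path, bound = app['routes'][0]
assert (method, path) == ('GET', '/api/services')
assert bound() == ('services from', 'redis')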
Example #4
def ssebopeta_bygeom(
    geometry: GTYPE,
    dates: Union[Tuple[str, str], Union[int, List[int]]],
    geo_crs: str = DEF_CRS,
) -> xr.DataArray:
    """Get daily actual ET for a region from SSEBop database.

    Notes
    -----
    Since there is no web service available for subsetting SSEBop yet, the data
    first needs to be downloaded for the requested period and is then masked by
    the region of interest locally. Therefore, it is not as fast as other
    functions and the bottleneck could be the download speed.

    Parameters
    ----------
    geometry : shapely.geometry.Polygon or tuple
        The geometry used for downloading and clipping the data. For a tuple bbox,
        the order should be (west, south, east, north).
    dates : tuple or list
        Start and end dates as a tuple (start, end) or a list of years [2001, 2010, ...].
    geo_crs : str, optional
        The CRS of the input geometry, defaults to epsg:4326.

    Returns
    -------
    xarray.DataArray
        Daily actual ET within a geometry in mm/day at 1 km resolution
    """
    f_list = helpers.get_ssebopeta_urls(dates)
    if isinstance(geometry, (Polygon, MultiPolygon)):
        gtiff2xarray = tlz.partial(geoutils.gtiff2xarray,
                                   geometry=geometry,
                                   geo_crs=geo_crs)
    else:
        gtiff2xarray = tlz.partial(geoutils.gtiff2xarray)

    session = RetrySession()

    with patch("socket.has_ipv6", False):

        def _ssebop(t: pd.Timestamp, url: str) -> xr.DataArray:
            resp = session.get(url)
            zfile = zipfile.ZipFile(io.BytesIO(resp.content))
            content = zfile.read(zfile.filelist[0].filename)
            ds: xr.DataArray = gtiff2xarray(r_dict={"eta": content})
            return ds.expand_dims({"time": [t]})

        data = xr.merge(_ssebop(t, url) for t, url in f_list)
    eta: xr.DataArray = data.where(
        data.eta < data.eta.nodatavals[0]).eta.copy() * 1e-3
    eta.attrs.update({
        "units": "mm/day",
        "nodatavals": (np.nan, ),
        "crs": DEF_CRS,
        "long_name": "Actual ET"
    })
    return eta
Example #5
def load_data(ctx, cfg):
    return assoc(
        ctx, 'data',
        thread_first(
            ctx, partial(segments, cfg=cfg), partial(segaux.aux,
                                                     cfg=cfg), segaux.combine,
            segaux.unload_segments, segaux.unload_aux, extract_segments,
            partial(segaux.prediction_dates,
                    month=get("month", ctx),
                    day=get("day",
                            ctx)), segaux.average_reflectance, reformat))
Example #6
def test_gaussian_GFE_entropy_gradient():
    num_units = 5
    lay = layers.GaussianLayer(num_units)

    lay.params.loc[:] = be.rand_like(lay.params.loc)
    lay.params.log_var[:] = be.randn(be.shape(lay.params.loc))

    from cytoolz import compose
    sum_square = compose(be.tsum, be.square)

    for itr in range(10):
        mag = lay.get_random_magnetization()
        lms = lay.lagrange_multipliers_analytic(mag)
        entropy = lay.TAP_entropy(mag)
        lr = 0.001
        gogogo = True
        grad = lay.TAP_magnetization_grad(mag, [], [], [])
        grad_mag = math.sqrt(be.float_scalar(be.accumulate(sum_square, grad)))
        normit = partial(be.tmul_, be.float_scalar(1.0/grad_mag))
        be.apply_(normit, grad)
        rand_grad = lay.get_random_magnetization()
        grad_mag = math.sqrt(be.float_scalar(be.accumulate(sum_square, rand_grad)))
        normit = partial(be.tmul_, be.float_scalar(1.0/grad_mag))
        be.apply_(normit, rand_grad)
        while gogogo:
            cop1_mag = deepcopy(mag)
            cop1_lms = deepcopy(lms)
            cop2_mag = deepcopy(mag)
            cop2_lms = deepcopy(lms)

            cop1_mag.mean[:] = mag.mean + lr * grad.mean
            cop2_mag.mean[:] = mag.mean + lr * rand_grad.mean
            cop1_mag.variance[:] = mag.variance + lr * grad.variance
            cop2_mag.variance[:] = mag.variance + lr * rand_grad.variance
            lay.clip_magnetization_(cop1_mag)
            lay.clip_magnetization_(cop2_mag)
            cop1_lms = lay.lagrange_multipliers_analytic(cop1_mag)
            cop2_lms = lay.lagrange_multipliers_analytic(cop2_mag)

            entropy_1 = lay.TAP_entropy(cop1_mag)
            entropy_2 = lay.TAP_entropy(cop2_mag)

            regress = entropy_1 - entropy_2 < 0.0
            #print(itr, "[",lr, "] ", entropy, entropy_1, entropy_2, regress)
            if regress:
                #print(grad, rand_grad)
                if lr < 1e-6:
                    assert False, "Gaussian GFE magnetization gradient is wrong"
                else:
                    lr *= 0.5
            else:
                break
Example #7
def tf_ngrams_pipeline(input_col: str, output_col: str, df: DataFrame, n=3):
    """Calculate the term frequency vectors for the character-wise trigrams of an input string"""
    filled_col = input_col + "_filled"
    trigrams_col = input_col + "_character_ngrams"

    return pipe(
        df,
        partial(fill_nulls_with_empty_string, input_col, filled_col),
        partial(character_ngrams, filled_col, trigrams_col, n=n),
        partial(rm_empty_strings_from_tokens, trigrams_col, trigrams_col),
        partial(term_frequency_vectors, trigrams_col, output_col),
        partial(drop_cols, [filled_col, trigrams_col]),
    )
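These helpers all take their column names first and the DataFrame last, which is what lets partial pre-bind the columns while pipe threads the frame through; here is a minimal sketch of the same shape using a plain dict instead of a Spark DataFrame (the step functions are invented):

from functools import partial
from toolz import pipe

def fill_empty(col, out_col, row):
    return {**row, out_col: row.get(col) or ''}

def upper_col(col, out_col, row):
    return {**row, out_col: row[col].upper()}

# the data argument comes last, so partial pre-binds the column names and
# pipe threads the row through each configured step
row = pipe({'name': None},
           partial(fill_empty, 'name', 'name_filled'),
           partial(upper_col, 'name_filled', 'name_upper'))
assert row == {'name': None, 'name_filled': '', 'name_upper': ''}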
Example #8
def tfidf_vectors_pipeline(input_col: str,
                           output_col: str,
                           df: DataFrame,
                           n=3):
    """Calculate the tfidf vectors for the character-wise trigrams of an input string"""
    tf_vectors = input_col + "_tf_vectors"
    tfidf_col = input_col + "_tfidf_vectors"

    return pipe(
        tf_ngrams_pipeline(input_col, tf_vectors, df),
        partial(tfidf_vectors, tf_vectors, tfidf_col),
        partial(normalize_vectors, tfidf_col, output_col),
        partial(drop_cols, [tf_vectors, tfidf_col]),
    )
Example #9
def normalize_compilation_result(compilation_result):
    """
    Take the result from the --standard-json compilation and flatten it into an
    iterable of contract data dictionaries.
    """
    for source_path, file_contracts in compilation_result['contracts'].items():
        for contract_name, raw_contract_data in file_contracts.items():
            contract_data = normalize_standard_json_contract_data(
                raw_contract_data)
            yield pipe(
                contract_data,
                partial(assoc, key='source_path', value=source_path),
                partial(assoc, key='name', value=contract_name),
            )
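assoc here is toolz.assoc, which returns a new dict rather than mutating; a short sketch of how pipe supplies the dict as the only positional argument while key and value stay pre-bound (the contract data is made up for illustration):

from functools import partial
from toolz import assoc, pipe

contract_data = {'abi': []}
enriched = pipe(
    contract_data,
    partial(assoc, key='source_path', value='contracts/Token.sol'),
    partial(assoc, key='name', value='Token'),
)
assert enriched == {'abi': [], 'source_path': 'contracts/Token.sol', 'name': 'Token'}
assert contract_data == {'abi': []}  # the original dict is left untouched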
Example #10
def create(x, y, acquired, cfg):
    """Create a timeseries.

    Args:
        x (int): x coordinate
        y (int): y coordinate
        acquired (string): iso8601 date range
        cfg (dict): A Merlin configuration

    Returns:
        tuple - Results of format_fn applied to results of chips_fn
    """

    x, y = get_in(['chip', 'proj-pt'], cfg['snap_fn'](x=x, y=y))

    # get specs
    specmap = cfg['specs_fn'](specs=cfg['registry_fn']())

    # get function that will return chipmap.
    # Don't create state with a realized variable to preserve memory
    chipmap = partial(chips.mapped,
                      x=x,
                      y=y,
                      acquired=acquired,
                      specmap=specmap,
                      chips_fn=cfg['chips_fn'])

    # calculate chip locations.  There's another function
    # here to be split out and organized.

    grid = first(filter(lambda x: x['name'] == 'chip', cfg['grid_fn']()))

    cw, ch = specs.refspec(specmap).get('data_shape')

    locations = partial(chips.locations,
                        x=x,
                        y=y,
                        cw=cw,
                        ch=ch,
                        rx=grid.get('rx'),
                        ry=grid.get('ry'),
                        sx=grid.get('sx'),
                        sy=grid.get('sy'))

    return cfg['format_fn'](x=x,
                            y=y,
                            locations=locations(),
                            dates_fn=cfg['dates_fn'],
                            specmap=specmap,
                            chipmap=chipmap())
Example #11
def map_abi_data(normalizers, types, data):
    '''
    This function will apply normalizers to your data, in the
    context of the relevant types. Each normalizer is in the format:

    def normalizer(datatype, data):
        # Conditionally modify data
        return (datatype, data)

    Where datatype is a valid ABI type string, like "uint".

    In case of an array, like "bool[2]", normalizer will receive `data`
    as an iterable of typed data, like `[("bool", True), ("bool", False)]`.

    Internals
    ---

    This is accomplished by:

    1. Decorating the data tree with types
    2. Recursively mapping each of the normalizers to the data
    3. Stripping the types back out of the tree
    '''
    pipeline = itertools.chain(
        [abi_data_tree(types)],
        map(data_tree_map, normalizers),
        [partial(recursive_map, strip_abi_type)],
    )

    return pipe(data, *pipeline)
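The notable move is building the pipeline as a lazy iterable of callables with itertools.chain and then unpacking it into pipe; a toy version of that construction with invented string-processing steps (not the ABI helpers):

import itertools
from toolz import pipe

def strip_ws(s):
    return s.strip()

extra_steps = [str.lower, lambda s: s.replace('-', '_')]

# chain a fixed first step, a variable middle section, and a fixed last step
# into one iterable of callables, then unpack it into pipe
pipeline = itertools.chain([strip_ws], extra_steps, [lambda s: s + '!'])
assert pipe('  Foo-Bar  ', *pipeline) == 'foo_bar!'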
Example #12
def paths_ratio(graph, links_map, ppp, pool_size=4):
    p = Pool(pool_size)
    pmap = p.map
    pairs = [(n1, n2) for n1 in graph.nodes() for n2 in graph.nodes()
             if n1 != n2]
    fn = partial(_all_simple_paths_dfs, graph, links_map, ppp)
    simple_paths = merge(pmap(fn, pairs))
    all_paths = 0.0
    ok_paths = 0.0
    count = 0.0
    summ = 0.0
    averages = []
    for path_key, path_tuples in simple_paths.items():
        count += 1
        ok_paths += len(path_tuples[0])
        all_paths += path_tuples[3]
        summ += len(path_tuples[0]) / float(path_tuples[3])
        print("--", path_key, len(path_tuples[0]) / float(path_tuples[3]))
        averages.append(len(path_tuples[0]) / float(path_tuples[3]))

    print(">>>>>>>>", ok_paths / all_paths)
    print("Average of average", summ / count)
    print("MEAN", statistics.mean(averages))
    print("variance", statistics.variance(averages))
    p.close()
    return simple_paths
Example #13
def chexists(dictionary, keys, check_fn):
    """applies check_fn against dictionary minus keys then ensures the items
    returned from check_fn exist in dictionary[keys]

    Args:
        dictionary (dict): {key: [v1, v3, v2]}
        keys (sequence): A sequence of keys in dictionary
        check_fn (function): Function that accepts dict and returns
                             sequence of items or Exception

    Returns:
        A sequence of items that are returned from check_fn and exist in
        dictionary[keys] or Exception
    """

    def exists_in(superset, subset):
        if issubset(subset, second(superset)):
            return True
        else:
            msg =  '{} is missing data.'.format(first(superset))
            msg2 = '{} is not a subset of {}'.format(subset, second(superset))
            raise Exception('\n\n'.join([msg, msg2]))

    popped  = {k: dictionary[k] for k in keys}
    checked = check_fn({k: dictionary[k] for k in difference(dictionary, keys)})
    all(map(partial(exists_in, subset=checked), popped.items()))
    return checked
Example #14
def map_abi_data(normalizers, types, data):
    '''
    This function will apply normalizers to your data, in the
    context of the relevant types. Each normalizer is in the format:

    def normalizer(datatype, data):
        # Conditionally modify data
        return (datatype, data)

    Where datatype is a valid ABI type string, like "uint".

    In case of an array, like "bool[2]", normalizer will receive `data`
    as an iterable of typed data, like `[("bool", True), ("bool", False)]`.

    Internals
    ---

    This is accomplished by:

    1. Decorating the data tree with types
    2. Recursively mapping each of the normalizers to the data
    3. Stripping the types back out of the tree
    '''
    pipeline = itertools.chain(
        [abi_data_tree(types)],
        map(data_tree_map, normalizers),
        [partial(recursive_map, strip_abi_type)],
    )

    return pipe(data, *pipeline)
Example #15
    def update(self, model, v_data, v_model, epoch):
        """
        Update the model parameters with a gradient step.

        Notes:
            Changes parameters of model in place.

        Args:
            model: a Model object to optimize
            v_data (tensor): observations
            v_model (tensor): samples from the model
            epoch (int): the current epoch

        Returns:
            None

        """
        self.scheduler.increment(epoch)
        lr_ = partial(be.tmul_,
                      be.float_scalar(self.scheduler.get_lr() * self.stepsize))

        grad = model.gradient(v_data, v_model)
        self.memory.update(grad)
        self.delta = self.memory.normalize(self.memory.mean_gradient,
                                           unbiased=True)
        hidden.grad_apply_(lr_, self.delta)
        model.parameter_update(self.delta)
Example #16
def commodity_in_sphere(center_system, commodity, radius=10):
    systems = systems_in_sphere(center_system, radius)
    system_names = [system["name"] for system in systems]
    this_commodity_from_system = partial(commodity_from_system,
                                         commodity=commodity)
    with ThreadPoolExecutor(max_workers=16) as exe:
        commodities_batched = exe.map(this_commodity_from_system, system_names)
        return list(itertools.chain.from_iterable(commodities_batched))
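executor.map hands exactly one item from the iterable to the callable, so the commodity keyword has to be pre-bound with partial; a self-contained sketch with a fake lookup function standing in for the real network call:

import itertools
from concurrent.futures import ThreadPoolExecutor
from functools import partial

def commodity_from_system(system_name, commodity):
    # stand-in for the real request: return a list of listings for the system
    return [f'{commodity}@{system_name}']

fetch_gold = partial(commodity_from_system, commodity='Gold')

with ThreadPoolExecutor(max_workers=4) as exe:
    batches = exe.map(fetch_gold, ['Sol', 'Alpha Centauri'])
    listings = list(itertools.chain.from_iterable(batches))

assert listings == ['Gold@Sol', 'Gold@Alpha Centauri']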
Example #17
def generate_data(n, k, d, std_noise):
    X = np.random.normal(size=(n, d))
    betas = np.random.normal(size=(d, k))
    gate_betas = np.random.normal(size=(d, k))
    noise = np.random.normal(size=n) * std_noise
    true_model = tz.partial(predict_noiseless, betas=betas, gate_betas=gate_betas)
    y = true_model(X) + noise
    return X, y, true_model
Example #18
def token_vectors_pipeline(input_col: str,
                           output_col: str,
                           df: DataFrame,
                           stemmer_func=None):
    """Convert a string into an array of integer token ids"""
    filled_col = input_col + "_filled"
    tokenised_col = input_col + "_tokenised"
    tf_vectors = input_col + "_tf_vectors"

    transforms = [
        # note that the tokenizer completely breaks given null input values
        partial(fill_nulls_with_empty_string, input_col, filled_col),
        partial(tokenize_words, filled_col, tokenised_col),
    ]

    # optionally stem the tokens
    if stemmer_func:
        transforms += [partial(stemmer_func, tokenised_col, tokenised_col)]

    transforms += [
        partial(rm_empty_strings_from_tokens, tokenised_col, tokenised_col),
        partial(term_frequency_vectors, tokenised_col, tf_vectors),
        partial(sparse_vector_indices, tf_vectors, output_col),
        partial(drop_cols, [filled_col, tokenised_col, tf_vectors]),
    ]
    return pipe(df, *transforms)
Example #19
def hash_key(args, kwargs):
    # return (args, hash(frozenset(kwargs.items())))
    # return (map(make_hashable, args), frozenset(kwargs.items()))
    args = tuple(map(make_hashable, args))
    kwargs = frozenset(
        map(compose(tuple, partial(map, make_hashable)), kwargs.items()))
    # print('args', args)
    # print('kwargs', kwargs)
    return (args, kwargs)
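compose(tuple, partial(map, make_hashable)) reads right to left: map over each element first, then collect into a tuple; a minimal sketch with a toy make_hashable:

from functools import partial
from toolz import compose

def make_hashable(value):
    # toy version: freeze lists into tuples so they can live inside a frozenset
    return tuple(value) if isinstance(value, list) else value

freeze_pair = compose(tuple, partial(map, make_hashable))

kwargs = {'ids': [1, 2], 'name': 'x'}
key = frozenset(map(freeze_pair, kwargs.items()))
assert ('ids', (1, 2)) in key and ('name', 'x') in key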
Example #20
def segments():

    return thread_first(request.json,
                        partial(exception_handler, http_status=500, name='log_request', fn=log_request),
                        partial(exception_handler, http_status=400, name='parameters', fn=parameters),
                        partial(exception_handler, http_status=500, name='timeseries', fn=partial(timeseries, cfg=cfg)),
                        partial(exception_handler, http_status=500, name='nodata', fn=partial(nodata, cfg=cfg)),
                        partial(exception_handler, http_status=500, name='detection', fn=partial(detection, cfg=cfg)),
                        partial(exception_handler, http_status=500, name='delete', fn=partial(delete, cfg=cfg)),
                        partial(exception_handler, http_status=500, name='save', fn=partial(save, cfg=cfg)),
                        respond)
Example #21
def tags_at(run: int,
            *other_runs: int,
            beamline: int = None) -> Tuple[int, Sequence[int]]:
    """
    Example:
        hightag, tags = tags_at(509700, beamline=3)  # from single run
        hightag, tags = tags_at(509700, 509701, 509702, beamline=3)  # from multiple runs
    """
    if beamline is None:
        raise ValueError("Keyword argument 'beamline' must be given!")
    runs = run, *other_runs
    hightag_at_the_beamline = partial(hightag, beamline)
    taglist_at_the_beamline = partial(taglist, beamline)
    hightags: ndarray = pipe(runs, partial(map, hightag_at_the_beamline),
                             partial(fromiter, dtype='int'))
    if not (hightags == hightags[0]).all():
        raise ValueError('Not all the runs have a single hightag!')
    tags = pipe(runs, partial(map, taglist_at_the_beamline), concat, tuple)
    return hightags[0], tags
Example #22
def find_background_illumination(fns,
                                 radius=None,
                                 input_bitdepth=None,
                                 quantile=0.5,
                                 stretch_quantile=0.):
    """Use a set of related images to find uneven background illumination.

    Parameters
    ----------
    fns : list of string
        A list of image file names
    radius : int, optional
        The radius of the structuring element used to find background.
        default: The width or height of the input images divided by 4,
        whichever is smaller.
    input_bitdepth : int, optional
        The bit-depth of the input images. Should be specified if non-standard
        bitdepth images are used in a 16-bit image file, e.g. 12-bit images.
        Default is the dtype of the input image.
    quantile : float in [0, 1], optional
        The desired quantile to find background. default: 0.5 (median)
    stretch_quantile : float in [0, 1], optional
        Stretch image to full dtype limit, saturating above this quantile.

    Returns
    -------
    illum : np.ndarray, float, shape (M, N)
        The estimated illumination over the image field.

    See Also
    --------
    `correct_image_illumination`, `correct_multiimage_illumination`.
    """
    # this function follows the "PyToolz" streaming data model to
    # obtain the illumination estimate.
    # first, define the functions for each individual step:
    in_range = ('image' if input_bitdepth is None else
                (0, 2**input_bitdepth - 1))
    rescale = tz.curry(exposure.rescale_intensity)
    normalize = (tz.partial(stretchlim, bottom=stretch_quantile)
                 if stretch_quantile > 0 else skimage.img_as_float)

    # produce a stream of properly-scaled images
    ims = (tz.pipe(fn, io.imread, rescale(in_range=in_range), normalize)
           for fn in fns)

    # take the mean of that stream
    mean_image = mean(ims)

    # return the median filter of that mean
    radius = radius or min(mean_image.shape) // 4
    illum = ndi.percentile_filter(mean_image,
                                  percentile=(quantile * 100),
                                  footprint=morphology.disk(radius))
    return illum
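tz.curry is what lets rescale(in_range=...) return a new callable that still waits for the image; a tiny sketch of that curry behavior with a toy function (not the scikit-image call):

from toolz import curry

@curry
def rescale(image, in_range=(0, 255)):
    lo, hi = in_range
    return [(px - lo) / (hi - lo) for px in image]

# calling with only keyword arguments returns a curried callable that still
# expects the positional image, which is exactly how the pipeline step is built
to_unit = rescale(in_range=(0, 10))
assert to_unit([0, 5, 10]) == [0.0, 0.5, 1.0]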
Example #23
def pyccd(x, y, locations, dates_fn, specmap, chipmap):
    """Builds inputs for the pyccd algorithm.

    Args:
        x: x projection coordinate of chip
        y: y projection coordinate of chip
        locations: chip shaped 2d array of projection coordinates
        dates_fn (fn): returns dates that should be included in time series
        specmap (dict): mapping of keys to specs
        chipmap (dict): mapping of keys to chips

    Returns:
        A tuple of tuples.

    The pyccd format key is ```(chip_x, chip_y, x, y)``` with a
    dictionary of sorted numpy arrays representing each spectra plus an
    additional sorted dates array.

    >>> pyccd(*args)
        (((chip_x, chip_y, x1, y1), {"dates":  [],  "reds": [],
                                     "greens": [],  "blues": [],
                                     "nirs1":  [],  "swir1s": [],
                                     "swir2s": [],  "thermals": [],
                                     "qas":    []}),
         ((chip_x, chip_y, x1, y2), {"dates":  [],  "reds": [],
                                     "greens": [],  "blues": [],
                                     "nirs1":  [],  "swir1s": [],
                                     "swir2s": [],  "thermals": [],
                                     "qas":    []}))
        ...
    """
    
    _index   = specs.index(list(functions.flatten(specmap.values())))
    _dates   = dates_fn(datemap=dates.mapped(chipmap))
    _creator = partial(rods.create, x=x, y=y, dateseq=_dates, locations=locations, spec_index=_index)
    _flipped = partial(functions.flip_keys, {k: _creator(chipseq=v) for k, v in chipmap.items()})
    
    _rods = functions.insert_into_every(key='dates',
                                        value=list(map(dates.to_ordinal, dates.rsort(_dates))),
                                        dods=_flipped())
                                 
    return tuple((k, v) for k, v in _rods.items())
Example #24
def grad_normalize_(grad):
    """
    Normalize the gradient vector with respect to the L2 norm

    Args:
        grad (Gradient)

    Return:
        None
    """
    nrm = grad_norm(grad)
    grad_apply_(partial(be.tmul_, be.float_scalar(1.0/nrm)), grad)
Example #25
def find_background_illumination(fns, radius=None, input_bitdepth=None,
                                 quantile=0.5, stretch_quantile=0.):
    """Use a set of related images to find uneven background illumination.

    Parameters
    ----------
    fns : list of string
        A list of image file names
    radius : int, optional
        The radius of the structuring element used to find background.
        default: The width or height of the input images divided by 4,
        whichever is smaller.
    input_bitdepth : int, optional
        The bit-depth of the input images. Should be specified if non-standard
        bitdepth images are used in a 16-bit image file, e.g. 12-bit images.
        Default is the dtype of the input image.
    quantile : float in [0, 1], optional
        The desired quantile to find background. default: 0.5 (median)
    stretch_quantile : float in [0, 1], optional
        Stretch image to full dtype limit, saturating above this quantile.

    Returns
    -------
    illum : np.ndarray, float, shape (M, N)
        The estimated illumination over the image field.

    See Also
    --------
    `correct_image_illumination`, `correct_multiimage_illumination`.
    """
    # this function follows the "PyToolz" streaming data model to
    # obtain the illumination estimate.
    # first, define the functions for each individual step:
    in_range = ('image' if input_bitdepth is None
                else (0, 2**input_bitdepth - 1))
    rescale = tz.curry(exposure.rescale_intensity)
    normalize = (tz.partial(stretchlim, bottom=stretch_quantile)
                 if stretch_quantile > 0
                 else skimage.img_as_float)

    # produce a stream of properly-scaled images
    ims = (tz.pipe(fn, io.imread, rescale(in_range=in_range), normalize)
           for fn in fns)

    # take the mean of that stream
    mean_image = mean(ims)

    # return the median filter of that mean
    radius = radius or min(mean_image.shape) // 4

    mean_image = img_as_ubyte(stretchlim(mean_image))
    illum = imfilter.rank.median(mean_image, selem=morphology.disk(radius))
    return illum
Example #26
def data(ctx, cfg):
    '''Retrieve training data for all chips in parallel'''
    
    p = partial(pipeline,
                tx=ctx['tx'],
                ty=ctx['ty'],
                date=ctx['date'],
                acquired=ctx['acquired'],
                cfg=cfg)

    with workers(cfg) as w:
        return assoc(ctx, 'data', numpy.array(list(flatten(w.map(p, ctx['chips']))), dtype=numpy.float32))
Example #27
def create_routes(app):
    route = partial(add_route, app)

    route('GET', '/', handlers.index)
    route('GET', '/api/recipes', handlers.get_recipes)
    route('GET', '/api/recipes/{r_id}', handlers.get_recipe)
    route('POST', '/api/recipes', handlers.create_recipe)
    route('DELETE', '/api/recipes/{r_id}', handlers.delete_recipe)

    route('GET', '/api/ingredients', handlers.get_ingredients)
    route('GET', '/api/ingredients/{keyword}', handlers.get_ingredient)
    route('POST', '/api/ingredients', handlers.create_ingredient)
    route('DELETE', '/api/ingredients/{keyword}', handlers.delete_ingredient)
Example #28
def serializable_unsigned_transaction_from_dict(transaction_dict):
    assert_valid_fields(transaction_dict)
    filled_transaction = pipe(
        transaction_dict,
        dict,
        partial(merge, TRANSACTION_DEFAULTS),
        chain_id_to_v,
        apply_formatters_to_dict(TRANSACTION_FORMATTERS),
    )
    if 'v' in filled_transaction:
        serializer = Transaction
    else:
        serializer = UnsignedTransaction
    return serializer.from_dict(filled_transaction)
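partial(merge, TRANSACTION_DEFAULTS) works because toolz.merge lets later dictionaries override earlier ones, so user-supplied fields win over the defaults; a small sketch with invented default values:

from functools import partial
from toolz import merge, pipe

DEFAULTS = {'gasPrice': 1, 'value': 0}
fill_defaults = partial(merge, DEFAULTS)

# keys from the right-most dict win, so the explicit value=5 overrides the default
tx = pipe({'value': 5, 'to': '0xabc'}, dict, fill_defaults)
assert tx == {'gasPrice': 1, 'value': 5, 'to': '0xabc'}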
Example #29
def tif_to_json(tifpath,
                outpath,
                nodata=None,
                fill=None,
                partition_size=10000):
    """Converts TIF files to JSON suitable for reading as DataFrames

    Args:
        tifpath (str): Full path to input tif file
        outpath (str): Full path to output json file
        nodata: Nodata value.  Used to exclude values which shouldn't be
                included in the conversion.
        fill: Fill value.  Used to exclude values from the conversion
        partition_size (int): How big each file partition should be

    Returns:
        str: Full path of output json file
    """

    reader = partial(read, filepath=tifpath)
    writer = partial(write, filepath=outpath)

    return pipe(tifpath, genspec, locations, reader, compact, csv, writer)
Example #30
def locations(spec):
    """Generator for all locations represented by a spec

    Args:
        spec (dict): xsize, ysize, pixel_x, pixel_y, ulx, uly keys

    Returns:
        Generator yielding spec plus x, y, x_index and y_index keys
    """

    locator = partial(locate, spec=spec)
    indices = ((y, x) for y in range(spec['ysize'])
               for x in range(spec['xsize']))
    return map(locator, indices)
Example #31
    def __init__(self, mean_weight=0.9, mean_square_weight=0.0):
        """
        Create a gradient memory object to keep track of the first two
        moments of the gradient.

        Args:
            mean_weight (float \in (0,1); optional):
                how strongly to weight the previous gradient
            mean_square_weight (float \in (0,1); optional)
                how strongly to weight the square of the previous gradient

        Returns:
            GradientMemory

        """
        self.mean_weight = be.float_scalar(mean_weight)
        self.mean_square_weight = be.float_scalar(mean_square_weight)

        self.mean_gradient = None
        self.mean_square_gradient = None

        self.mixer_ = partial(be.mix_, self.mean_weight)
        self.square_mixer_ = partial(be.square_mix_, self.mean_square_weight)
Example #32
def test_pyccd():
    c = cfg.get('chipmunk-ard', env=test.env)

    x, y = get_in(['chip', 'proj-pt'], c['snap_fn'](x=test.x, y=test.y))

    # get specs
    specmap = c['specs_fn'](specs=c['registry_fn']())

    # get function that will return chipmap.
    # Don't create state with a realized variable to preserve memory
    chipmap = partial(chips.mapped,
                      x=test.x,
                      y=test.y,
                      acquired=test.acquired,
                      specmap=specmap,
                      chips_fn=c['chips_fn'])

    # calculate chip locations.  There's another function
    # here to be split out and organized.

    grid = first(filter(lambda x: x['name'] == 'chip', c['grid_fn']()))

    cw, ch = specs.refspec(specmap).get('data_shape')

    locations = chips.locations(x=x,
                                y=y,
                                cw=cw,
                                ch=ch,
                                rx=grid.get('rx'),
                                ry=grid.get('ry'),
                                sx=grid.get('sx'),
                                sy=grid.get('sy'))

    data = c['format_fn'](x=x,
                          y=y,
                          locations=locations,
                          dates_fn=c['dates_fn'],
                          specmap=specmap,
                          chipmap=chipmap())

    # we are only testing the structure of the response here.
    # Full data validation is being done in the test for merlin.create()
    assert type(data) is tuple
    assert len(data) == 10000
    assert type(first(data)) is tuple
    assert type(first(first(data))) is tuple
    assert type(second(first(data))) is dict
    assert type(second(second(first(data)))) in (tuple, list)
    assert len(second(second(first(data)))) > 0
Example #33
def find_background_illumination(fns, radius=51, quantile=0.05,
                                 stretch_quantile=0., method='mean'):
    """Use a set of related images to find uneven background illumination.

    Parameters
    ----------
    fns : list of string
        A list of image file names
    radius : int, optional
        The radius of the structuring element used to find background.
        default: 51
    quantile : float in [0, 1], optional
        The desired quantile to find background. default: 0.05
    stretch_quantile : float in [0, 1], optional
        Stretch image to full dtype limit, saturating above this quantile.
    method : 'mean', 'average', 'median', or 'histogram', optional
        How to combine the smoothed intensities of the input images
        to infer the illumination field:

        - 'mean' or 'average': Use the mean value of the smoothed
        images at each pixel as the illumination field.
        - 'median': use the median value. Since all images need to be
        in-memory to compute this, use only for small sets of images.
        - 'histogram': use the median value approximated by a
        histogram. This can be computed on-line for large sets of
        images.

    Returns
    -------
    illum : np.ndarray, float, shape (M, N)
        The estimated illumination over the image field.

    See Also
    --------
    ``correct_image_illumination``.
    """
    # This function follows the "PyToolz" streaming data model to
    # obtain the illumination estimate. First, define each processing
    # step:
    read = io.imread
    normalize = (tlz.partial(stretchlim, bottom=stretch_quantile)
                 if stretch_quantile > 0
                 else skimage.img_as_float)
    rescale = rescale_to_11bits
    pad = fun.partial(skimage.util.pad, pad_width=radius, mode='reflect')
    rank_filter = fun.partial(rank.percentile, selem=skmorph.disk(radius),
                              p0=quantile)
    _unpad = fun.partial(unpad, pad_width=radius)
    unscale = rescale_from_11bits

    # Next, compose all the steps, apply to all images (streaming)
    bg = (tlz.pipe(fn, read, normalize, rescale, pad, rank_filter, _unpad,
                   unscale)
          for fn in fns)

    # Finally, reduce all the images and compute the estimate
    if method == 'mean' or method == 'average':
        illum, count = _reduce_with_count(np.add, bg)
        illum = skimage.img_as_float(illum) / count
    elif method == 'median':
        illum = np.median(list(bg), axis=0)
    elif method == 'histogram':
        raise NotImplementedError('histogram background illumination method '
                                  'not yet implemented.')
    else:
        raise ValueError('Method "%s" of background illumination finding '
                         'not recognised.' % method)

    return illum
Example #34
FILTER_PARAMS_MAPPINGS = {
    # (mapping entries truncated in the original excerpt)
}

filter_params_remapper = apply_key_map(FILTER_PARAMS_MAPPINGS)

FILTER_PARAMS_FORMATTERS = {
    'fromBlock': to_integer_if_hex,
    'toBlock': to_integer_if_hex,
}

filter_params_formatter = apply_formatters_to_dict(FILTER_PARAMS_FORMATTERS)

filter_params_transformer = compose(filter_params_remapper, filter_params_formatter)


TRANSACTION_FORMATTERS = {
    'to': apply_formatter_if(partial(operator.eq, b''), static_return(None)),
}


transaction_formatter = apply_formatters_to_dict(TRANSACTION_FORMATTERS)


RECEIPT_FORMATTERS = {
    'logs': apply_formatter_to_array(log_key_remapper),
}


receipt_formatter = apply_formatters_to_dict(RECEIPT_FORMATTERS)

transaction_params_transformer = compose(transaction_params_remapper, transaction_params_formatter)
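compose applies its arguments right to left, so the formatter runs before the remapper; a toy sketch of that ordering with hand-written dict helpers (not the eth_utils ones used above):

from toolz import compose

def format_values(params):
    # convert hex-string block numbers to ints
    return {k: int(v, 16) if isinstance(v, str) and v.startswith('0x') else v
            for k, v in params.items()}

def remap_keys(params):
    mapping = {'fromBlock': 'from_block', 'toBlock': 'to_block'}
    return {mapping.get(k, k): v for k, v in params.items()}

# the right-most function runs first: format the values, then remap the keys
transform = compose(remap_keys, format_values)
assert transform({'fromBlock': '0x10'}) == {'from_block': 16}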