Example #1
def make_subset_data(data, random_subset=None, pixels=None, return_selection=False):
    if random_subset is None and pixels is None:
        return data
    if random_subset is not None and pixels is not None:
        raise ValueError("You can only specify one of pixels or random_subset")

    # Total number of pixels and the number to select, either given
    # directly or as a fraction of the image.
    tot_pix = len(data.x) * len(data.y)
    if pixels is not None:
        n_sel = pixels
    else:
        n_sel = int(np.ceil(tot_pix * random_subset))
    selection = np.random.choice(tot_pix, n_sel, replace=False)
    subset = flat(data).isel(flat=selection)
    subset = copy_metadata(data, subset, do_coords=False)

    # Record the original grid geometry (shape, pixel spacing, origin,
    # extra coordinates) alongside the subset.
    shape = (len(data.x), len(data.y))
    spacing = get_spacing(data)
    start = (data.x[0].item(), data.y[0].item())
    coords = {key: val.values for key, val in
              dict_without(dict(data.coords), ['x', 'y', 'z']).items()}
    subset.attrs['original_dims'] = yaml.dump((shape, spacing, start, coords))

    if return_selection:
        return subset, selection
    else:
        return subset
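
The heart of the example above is picking a random set of flat pixel indices and then flattening the image so they can be selected in one step. A minimal standalone sketch of that idea using only numpy and xarray (the toy image and the 10% fraction are made up for illustration; xarray's stack stands in for holopy's flat here):

import numpy as np
import xarray as xr

# a small fake image on an x/y grid
image = xr.DataArray(np.random.rand(4, 5), dims=['x', 'y'],
                     coords={'x': np.arange(4) * 0.1, 'y': np.arange(5) * 0.1})

tot_pix = image.x.size * image.y.size
n_sel = int(np.ceil(tot_pix * 0.10))                    # keep ~10% of the pixels
selection = np.random.choice(tot_pix, n_sel, replace=False)

# collapse x and y into a single 'flat' dimension, then pick the chosen pixels
flat_image = image.stack(flat=('x', 'y'))
subset = flat_image.isel(flat=selection)
print(subset.sizes)                                     # Frozen({'flat': 2})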
Example #2
def fit(model, data, minimizer=Nmpfit, random_subset=None):
    """
    fit a model to some data

    Parameters
    ----------
    model : :class:`~holopy.fitting.model.Model` object
        A model describing the scattering system which leads to your data and
        the parameters to vary to fit it to the data
    data : :class:`~holopy.core.marray.Marray` object
        The data to fit
    minimizer : (optional) :class:`~holopy.fitting.minimizer.Minimizer`
        The minimizer to use to do the fit
    random_subset : float (optional)
        Fit only a randomly selected fraction of the data points in data

    Returns
    -------
    result : :class:`FitResult`
        an object containing the best fit parameters and information about the fit
    """
    time_start = time.time()

    if not isinstance(minimizer, Minimizer):
        if issubclass(minimizer, Minimizer):
            minimizer = minimizer()
        else:
            raise InvalidMinimizer(
                "Object supplied as a minimizer could not be "
                "interpreted as a minimizer")

    if random_subset is None:
        data = flat(data)
    else:
        data = make_subset_data(data, random_subset)

    def residual(par_vals):
        return model.residual(par_vals, data)

    try:
        fitted_pars, minimizer_info = minimizer.minimize(
            model.parameters, residual)
        converged = True
    except MinimizerConvergenceFailed as cf:
        warnings.warn("Minimizer Convergence Failed, your results may not be "
                      "correct")
        # we still return the data even if the minimizer fails to converge
        # because often the data is of some value, and may in fact be what the
        # user wants if they have set low iteration limits for a "rough fit"
        fitted_pars, minimizer_info = cf.result, cf.details
        converged = False

    fitted_scatterer = model.scatterer.make_from(fitted_pars)

    time_stop = time.time()
    fitted = model._calc(fitted_pars, data)

    return FitResult(fitted_pars, fitted_scatterer, chisq(fitted, data),
                     rsq(fitted, data), converged, time_stop - time_start,
                     model, minimizer, minimizer_info)
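
One detail of fit() worth calling out: the minimizer argument accepts either a Minimizer subclass (like the default Nmpfit) or an already-configured instance, and the first if-block normalizes the two cases. A standalone sketch of that class-or-instance pattern with stand-in classes (these are not holopy's Minimizer or Nmpfit):

class Minimizer:
    """Stand-in base class, for illustration only."""

class Nmpfit(Minimizer):
    def __init__(self, maxiter=100):
        self.maxiter = maxiter

def normalize_minimizer(minimizer):
    # accept an instance as-is; instantiate a class with its defaults
    if isinstance(minimizer, Minimizer):
        return minimizer
    if isinstance(minimizer, type) and issubclass(minimizer, Minimizer):
        return minimizer()
    raise TypeError("Object supplied as a minimizer could not be "
                    "interpreted as a minimizer")

print(normalize_minimizer(Nmpfit).maxiter)              # class -> default instance
print(normalize_minimizer(Nmpfit(maxiter=5)).maxiter)   # instance passed through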
Example #3
def fit(model, data, minimizer=Nmpfit, random_subset=None):
    """
    fit a model to some data

    Parameters
    ----------
    model : :class:`~holopy.fitting.model.Model` object
        A model describing the scattering system which leads to your data and
        the parameters to vary to fit it to the data
    data : :class:`~holopy.core.marray.Marray` object
        The data to fit
    minimizer : (optional) :class:`~holopy.fitting.minimizer.Minimizer`
        The minimizer to use to do the fit
    random_subset : float (optional)
        Fit only a randomly selected fraction of the data points in data

    Returns
    -------
    result : :class:`FitResult`
        an object containing the best fit parameters and information about the fit
    """
    time_start = time.time()

    if not isinstance(minimizer, Minimizer):
        if issubclass(minimizer, Minimizer):
            minimizer = minimizer()
        else:
            raise InvalidMinimizer("Object supplied as a minimizer could not be"
                                   "interpreted as a minimizer")

    if random_subset is None:
        data = flat(data)
    else:
        data = make_subset_data(data, random_subset)

    def residual(par_vals):
        return model.residual(par_vals, data)

    try:
        fitted_pars, minimizer_info = minimizer.minimize(model.parameters, residual)
        converged = True
    except MinimizerConvergenceFailed as cf:
        warnings.warn("Minimizer Convergence Failed, your results may not be "
                      "correct")
        # we still return the data even if the minimizer fails to converge
        # because often the data is of some value, and may in fact be what the
        # user wants if they have set low iteration limits for a "rough fit"
        fitted_pars, minimizer_info = cf.result, cf.details
        converged = False

    fitted_scatterer = model.scatterer.make_from(fitted_pars)

    time_stop = time.time()
    fitted = model._calc(fitted_pars, data)

    return FitResult(fitted_pars, fitted_scatterer, chisq(fitted, data),
                     rsq(fitted, data), converged, time_stop - time_start,
                     model, minimizer, minimizer_info)
Example #4
def residfunct(p, fjac=None):
    # nmpfit calls residfunct w/fjac as a kwarg, we ignore

    sphere = Sphere(n=p[0] + n_particle_imag * 1j, r=p[1], center=p[2:5])
    thry = Mie(False)
    calculated = calc_holo(holo, sphere, scaling=p[5], theory=thry)

    status = 0
    derivates = holo - calculated

    return [status, get_values(flat(derivates))]
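
residfunct follows the nmpfit calling convention: it receives the current parameter vector (plus an fjac keyword it ignores) and returns a two-element list of a status flag and the residual array. A toy version of the same convention, fitting a straight line to made-up data instead of a hologram:

import numpy as np

# toy data: a noisy line y = 2x + 0.5 (nothing to do with holograms)
xvals = np.linspace(0, 1, 20)
yvals = 2.0 * xvals + 0.5 + 0.01 * np.random.randn(xvals.size)

def toy_residfunct(p, fjac=None):
    # fjac is supplied by nmpfit for analytic derivatives; ignored here
    model = p[0] * xvals + p[1]
    status = 0            # zero or positive means OK; negative aborts the fit
    return [status, yvals - model]

status, resid = toy_residfunct([2.0, 0.5])
print(resid.shape)        # (20,)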
Example #5
def residfunct(p, fjac=None):
    # nmpfit calls residfunct w/fjac as a kwarg, we ignore

    sphere = Sphere(n=p[0] + n_particle_imag * 1j, r=p[1], center=p[2:5])
    thry = Mie(False)
    calculated = calc_holo(holo, sphere, scaling=p[5], theory=thry)

    status = 0
    derivates = holo - calculated

    return [status, get_values(flat(derivates))]
Example #6
    def test_calc_field_keeps_same_coords_as_flattened_input_schema(self):
        theory = MockTheory()
        fields = theory.calculate_scattered_field(SPHERE, XSCHEMA)
        flat_schema = flat(XSCHEMA)
        self.assertTrue(np.all(flat_schema.x.shape == fields.x.shape))
        self.assertTrue(np.all(flat_schema.y.shape == fields.y.shape))
        self.assertTrue(np.all(flat_schema.z.shape == fields.z.shape))

        self.assertTrue(np.all(flat_schema.x.values == fields.x.values))
        self.assertTrue(np.all(flat_schema.y.values == fields.y.values))
        self.assertTrue(np.all(flat_schema.z.values == fields.z.values))
Example #7
def make_subset_data(data,
                     random_subset=None,
                     pixels=None,
                     return_selection=False):
    if random_subset is None and pixels is None:
        return data
    if random_subset is not None and pixels is not None:
        raise ValueError("You can only specify one of pixels or random_subset")
    if pixels is not None:
        n_sel = pixels
    else:
        n_sel = int(np.ceil(data.size * random_subset))
    selection = np.random.choice(data.size, n_sel, replace=False)
    subset = flat(data)[selection]
    subset = copy_metadata(data, subset, do_coords=False)
    if return_selection:
        return subset, selection
    else:
        return subset
Example #8
    def _pack_scattering_matrix_into_xarray(self, scat_matrs, r_theta_phi,
                                            schema):
        flattened_schema = flat(schema)
        point_or_flat = self._is_detector_view_point_or_flat(flattened_schema)
        dims = [point_or_flat, 'Epar', 'Eperp']

        coords = {point_or_flat: flattened_schema.coords[point_or_flat]}
        coords.update({
            'r': (point_or_flat, r_theta_phi[0]),
            'theta': (point_or_flat, r_theta_phi[1]),
            'phi': (point_or_flat, r_theta_phi[2]),
            'Epar': ['S2', 'S3'],
            'Eperp': ['S4', 'S1'],
        })

        packed = xr.DataArray(scat_matrs,
                              dims=dims,
                              coords=coords,
                              attrs=schema.attrs)
        return packed
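
The packing step above only attaches the flattened detector coordinates and the 2x2 amplitude-matrix labels to a plain numpy array. A self-contained sketch of the same xr.DataArray construction with fabricated values (the 'point' dimension name and the r/theta/phi numbers are invented for illustration):

import numpy as np
import xarray as xr

n_points = 3
# fabricated 2x2 scattering matrices, one per detector point
scat_matrs = np.zeros((n_points, 2, 2), dtype=complex)
r, theta, phi = np.ones(n_points), np.zeros(n_points), np.zeros(n_points)

packed = xr.DataArray(
    scat_matrs,
    dims=['point', 'Epar', 'Eperp'],
    coords={
        'point': np.arange(n_points),
        'r': ('point', r),
        'theta': ('point', theta),
        'phi': ('point', phi),
        'Epar': ['S2', 'S3'],
        'Eperp': ['S4', 'S1'],
    })
print(packed.sel(Epar='S2', Eperp='S1').shape)   # (3,)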
Example #9
    def _transform_to_desired_coordinates(cls, detector, origin, wavevec=1):
        if hasattr(detector, 'theta') and hasattr(detector, 'phi'):
            original_coordinate_system = 'spherical'
            original_coordinate_values = [
                (detector.r.values * wavevec if hasattr(detector, 'r') else
                 np.full(detector.theta.values.shape, np.inf)),
                detector.theta.values,
                detector.phi.values,
            ]
        else:
            original_coordinate_system = 'cartesian'
            f = flat(detector)  # 1.6 ms
            original_coordinate_values = [
                wavevec * (f.x.values - origin[0]),
                wavevec * (f.y.values - origin[1]),
                wavevec * (origin[2] - f.z.values),
                # z is defined opposite light propagation, so we invert
            ]
        method = find_transformation_function(original_coordinate_system,
                                              cls.desired_coordinate_system)
        return method(original_coordinate_values)
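
In the cartesian branch, find_transformation_function ultimately has to map the (x, y, z) offsets from the origin to the spherical (r, theta, phi) coordinates the scattering calculations use. A standalone numpy sketch of that conversion, using the textbook formula rather than holopy's exact implementation:

import numpy as np

def cartesian_to_spherical(x, y, z):
    # textbook convention: theta is the polar angle from +z, phi the azimuth;
    # holopy's own transformation may differ in sign and origin conventions
    r = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arctan2(np.hypot(x, y), z)
    phi = np.arctan2(y, x) % (2 * np.pi)
    return r, theta, phi

r, theta, phi = cartesian_to_spherical(np.array([1.0]), np.array([1.0]), np.array([0.0]))
print(r, theta, phi)   # [1.4142...] [1.5707...] [0.7853...]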
Example #10
    def _pack_field_into_xarray(self, scattered_field, schema):
        """Packs the numpy.ndarray, shape (N, 3) ``scattered_field`` into
        an xr.DataArray, shape (N, 3). This function needs to pack the
        fields [flat or point, vector], with the coordinates the
        same as that of the schema."""
        flattened_schema = flat(schema)  # now either point or flat
        point_or_flat = self._is_detector_view_point_or_flat(flattened_schema)
        coords = {
            key: (point_or_flat, val.values)
            for key, val in flattened_schema[point_or_flat].coords.items()
        }

        coords.update({
            point_or_flat: flattened_schema[point_or_flat],
            vector: ['x', 'y', 'z']
        })
        scattered_field = xr.DataArray(scattered_field,
                                       dims=[point_or_flat, vector],
                                       coords=coords,
                                       attrs=schema.attrs)
        return scattered_field
Example #11
    def fit(self, model, data):
        """
        fit a model to some data

        Parameters
        ----------
        model : :class:`~holopy.fitting.model.Model` object
            A model describing the scattering system which leads to your
            data and the parameters to vary to fit it to the data
        data : xarray.DataArray
            The data to fit

        Returns
        -------
        result : :class:`FitResult`
            Contains the best fit parameters and information about the fit
        """
        # timing decorator...
        time_start = time.time()

        parameters = model._parameters
        if len(parameters) == 0:
            raise MissingParameter('at least one parameter to fit')

        if self.npixels is None:
            data = flat(data)
        else:
            data = make_subset_data(data, pixels=self.npixels)
        guess_lnprior = model.lnprior(
            {par.name:par.guess for par in parameters})

        def residual(rescaled_values):
            unscaled_values = self.unscale_pars_from_minimizer(
                parameters, rescaled_values)
            noise = model._find_noise(unscaled_values, data)
            residuals = model._residuals(unscaled_values, data, noise)
            ln_prior = model.lnprior(unscaled_values) - guess_lnprior
            zscore_prior = np.sqrt(2 * -ln_prior)
            # append the prior's z-score as an extra residual element so the
            # least-squares cost also penalizes departures from the prior
            return np.append(residuals, zscore_prior)

        # the actual minimization happens here
        fitted_pars, minimizer_info = self.minimize(parameters, residual)

        if not minimizer_info.success:
            warnings.warn("Minimizer Convergence Failed, your results "
                          "may not be correct.")

        unit_errors = self._calculate_unit_noise_errors_from_fit(minimizer_info)
        noise = model._find_noise(fitted_pars, data)
        errors_scaled = noise * unit_errors
        errors = self.unscale_pars_from_minimizer(parameters, errors_scaled)
        intervals = [
            UncertainValue(
                fitted_pars[par.name], errors[par.name], name=par.name)
            for par in parameters]

        # timing decorator...
        d_time = time.time() - time_start
        kwargs = {'intervals': intervals, 'minimizer_info': minimizer_info}
        return FitResult(data, model, self, d_time, kwargs)
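
The residual function in this last fit() folds the model's prior into an ordinary least-squares problem by appending a "z-score" term, sqrt(-2 * (lnprior(p) - lnprior(guess))), to the data residuals. A minimal standalone demonstration of that trick with scipy.optimize.least_squares and a Gaussian prior on a line's slope (all names and numbers are made up; this is not holopy's fitter):

import numpy as np
from scipy.optimize import least_squares

# toy data: the data alone favor a slope of about 2.0
xvals = np.linspace(0, 1, 50)
yvals = 2.0 * xvals + 0.05 * np.random.randn(xvals.size)

# Gaussian prior on the slope: mean 1.5, sigma 0.1
prior_mean, prior_sigma = 1.5, 0.1

def lnprior(slope):
    return -0.5 * ((slope - prior_mean) / prior_sigma) ** 2

def residual(p):
    data_resid = yvals - p[0] * xvals
    # one extra residual element whose squared contribution to the
    # least-squares cost equals -lnprior(p) (up to a constant)
    zscore_prior = np.sqrt(2 * -(lnprior(p[0]) - lnprior(prior_mean)))
    return np.append(data_resid, zscore_prior)

result = least_squares(residual, x0=[1.8])
print(result.x)   # slope pulled between the data's 2.0 and the prior's 1.5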
Example #12
    def test_is_detector_view_point_or_flat_when_flat(self):
        theory = MockTheory()
        flattened = flat(XSCHEMA)
        point_or_flat = theory._is_detector_view_point_or_flat(flattened)
        self.assertTrue(point_or_flat == 'flat')