def from_data_fit(
        cls,
        x: u.Quantity,
        y: u.Quantity,
        amplitude_guess: u.Quantity,
        offset_x_guess: u.Quantity,
        slope_guess: u.Quantity,
        axis_fit: int = 0,
):
    """Fit one instance of this model to every 1D slice of ``y(x)``.

    ``x`` and ``y`` are broadcast against each other, ``axis_fit`` is moved
    to the front, and each remaining element gets an independent
    ``(amplitude, offset_x, slope)`` fit by minimizing the RMS residual
    between ``cls(...)(x)`` and ``y`` with `scipy.optimize.minimize`.

    Parameters
    ----------
    x, y
        Input and output samples; broadcast against each other.
    amplitude_guess, offset_x_guess, slope_guess
        Initial guesses; their units are attached to the fitted values.
    axis_fit
        Axis of the broadcast arrays along which each 1D fit is performed.

    Returns
    -------
    An instance of ``cls`` whose parameters have the broadcast shape with
    ``axis_fit`` removed.
    """
    amplitude_unit = amplitude_guess.unit
    offset_x_unit = offset_x_guess.unit
    slope_unit = slope_guess.unit

    x, y = np.broadcast_arrays(x, y, subok=True)

    x = np.moveaxis(x, axis_fit, 0)
    y = np.moveaxis(y, axis_fit, 0)

    # BUG FIX: the output shape must be taken *after* the fit axis is moved
    # to the front.  The original used ``shape[1:]`` of the pre-moveaxis
    # array, which is wrong whenever ``axis_fit != 0``.
    shape_out = x.shape[1:]

    # Flatten everything except the fit axis into a single loop axis.
    x = x.reshape(x.shape[0], -1)
    y = y.reshape(y.shape[0], -1)

    amplitude = []
    offset_x = []
    slope = []
    for i in range(x.shape[~0]):

        def objective(params: np.ndarray) -> float:
            # RMS residual of a candidate model against the i-th slice.
            self_test = cls(
                amplitude=params[0] * amplitude_unit,
                offset_x=params[1] * offset_x_unit,
                slope=params[2] * slope_unit,
            )
            residual = self_test(x[..., i]) - y[..., i]
            return np.sqrt(np.mean(np.square(residual))).value

        params_i = scipy.optimize.minimize(
            fun=objective,
            x0=np.array([
                amplitude_guess.value,
                offset_x_guess.value,
                slope_guess.value,
            ]),
        ).x

        a_i, x0_i, k_i = params_i
        amplitude.append(a_i)
        offset_x.append(x0_i)
        slope.append(k_i)

    return cls(
        amplitude=(amplitude * amplitude_unit).reshape(shape_out),
        offset_x=(offset_x * offset_x_unit).reshape(shape_out),
        slope=(slope * slope_unit).reshape(shape_out),
    )
def concat_apertures_from_intercept(
        self,
        intercept: kgpy.vectors.Cartesian3D,
        mask: u.Quantity,
        hull_axes: typ.Optional[typ.Sequence[int]] = None,
        color: str = 'black',
) -> 'Baffle':
    """Build convex-hull apertures from ray-intercept positions and append them.

    The intercepts are grouped over ``hull_axes``; for each group the convex
    hull of the masked, finite points is computed and converted to an
    `surfaces.apertures.IrregularPolygon`, and all resulting apertures are
    concatenated onto this baffle via ``self.concat_apertures``.

    Parameters
    ----------
    intercept
        Ray intercept positions.
    mask
        Boolean mask selecting valid intercepts.  ``None`` is treated as
        all-valid (despite the annotation — NOTE(review): annotation says
        ``u.Quantity`` but ``None`` is explicitly handled; confirm intent).
    hull_axes
        Axes of ``intercept`` to flatten into each hull's point set.
        ``None`` means all axes except the last.
    color
        Color assigned to each created aperture.
    """
    sh = intercept.shape
    if mask is None:
        mask = True
    # Discard intercepts with non-finite positions.
    mask = mask & np.isfinite(intercept.length_l1)
    # All axes except the last are candidate hull axes.
    num_axes = len(sh[:~0])
    if hull_axes is None:
        hull_axes = tuple(range(num_axes))
    else:
        # Normalize negative axis indices.
        hull_axes = [ax % num_axes for ax in hull_axes]
    num_hull_axes = len(hull_axes)
    hull_axes_dest = list(range(num_hull_axes))
    # Move the hull axes to the front and collapse them into one axis.
    intercept = np.moveaxis(intercept, hull_axes, hull_axes_dest)
    mask = np.moveaxis(mask, hull_axes, hull_axes_dest)
    intercept = intercept.reshape((-1, ) + intercept.shape[num_hull_axes:])
    mask = mask.reshape((-1, ) + mask.shape[num_hull_axes:])
    # Move the collapsed hull axis to the back and flatten the remaining
    # (per-aperture) axes into a single loop axis.
    intercept = np.moveaxis(intercept, 0, ~0)
    mask = np.moveaxis(mask, 0, ~0)
    intercept = intercept.reshape((-1, ) + intercept.shape[~0:])
    mask = mask.reshape((-1, ) + mask.shape[~0:])
    apertures = []
    for i in range(intercept.shape[0]):
        points = intercept[i, mask[i]]
        # A polygon needs at least 3 points; skip degenerate groups.
        if points.shape[0] > 2:
            hull = scipy.spatial.ConvexHull(points.xy.quantity)
            aper = surfaces.apertures.IrregularPolygon(
                vertices=points[hull.vertices].copy())
            aper.color = color
            apertures.append(aper)
    return self.concat_apertures(apertures)
def data(self, data):
    """Set the data array.

    The value is coerced to `~astropy.units.Quantity` and reshaped to
    ``self.axes.shape``; the cached interpolator is invalidated so it is
    rebuilt lazily for the new data.

    Parameters
    ----------
    data : `~astropy.units.Quantity`, array-like
        Data array
    """
    quantity = Quantity(data)
    # Invalidate the cached interpolator to avoid stale lookups.
    self._regular_grid_interp = None
    self._data = quantity.reshape(self.axes.shape)
def from_lstsq_fit(
        cls,
        data_input: vectors.Vector3D,
        data_output: u.Quantity,
        mask: typ.Optional[np.ndarray] = None,
        degree: int = 1,
        input_names: typ.Optional[typ.List[str]] = None,
        output_name: typ.Optional[str] = None,
) -> 'Polynomial3D':
    """Fit a 3D polynomial to ``data_output(data_input)`` by linear least squares.

    The inputs are broadcast together; the last three axes are treated as the
    sample grid and every combination of the leading (batch) axes gets an
    independent fit against the Vandermonde matrix built by
    ``cls._vandermonde``.

    Parameters
    ----------
    data_input
        Input coordinates of the samples.
    data_output
        Values to fit; may be a `vectors.Vector` or a plain quantity.
    mask
        Boolean mask selecting samples to include; ``None`` means all.
    degree
        Polynomial degree passed to ``cls._vandermonde``.
    input_names, output_name
        Forwarded to the returned `Polynomial3D`.
    """
    if mask is None:
        mask = np.array([True])

    data_input, data_output, mask = np.broadcast_arrays(data_input, data_output, mask, subok=True)
    grid_shape = data_input.shape
    # Leading (batch) axes; the trailing three axes form the sample grid.
    shape = grid_shape[:~2]

    # Collapse the trailing grid axes into a single sample axis ...
    data_input = data_input.reshape(shape + (-1, ))
    data_output = data_output.reshape(shape + (-1, ))
    mask = mask.reshape(shape + (-1, ))

    # ... and the leading batch axes into a single loop axis.
    data_input = data_input.reshape((-1, ) + data_input.shape[~0:])
    data_output = data_output.reshape((-1, ) + data_output.shape[~0:])
    mask = mask.reshape((-1, ) + mask.shape[~0:])

    coefficients = []
    for i in range(len(data_output)):
        m = mask[i]
        vander = cls._vandermonde(vector_input=data_input[i][m], degree=degree)
        # Separate values from units: lstsq works on dimensionless arrays,
        # and each column's unit is divided back out of its coefficient.
        vander_value = [v.value for v in vander]
        vander_unit = [v.unit for v in vander]
        b = data_output[i][m]
        if isinstance(b, vectors.Vector):
            b = b.quantity
        coeffs = np.linalg.lstsq(
            a=np.stack(vander_value, axis=~0),
            b=b,
            rcond=None,
        )[0]
        coefficients.append(
            [c / unit for c, unit in zip(coeffs, vander_unit)])

    if isinstance(data_output, vectors.Vector):
        coefficients_factory = type(data_output).from_quantity
    else:
        # Identity: plain quantities need no conversion.
        def coefficients_factory(q):
            return q

    # Transpose per-fit coefficient lists into per-coefficient arrays.
    coefficients = [
        coefficients_factory(u.Quantity(c)) for c in zip(*coefficients)
    ]
    # Restore the batch shape and add three trailing broadcast axes
    # (one per grid dimension).
    coefficients = [
        c.reshape(shape)[..., np.newaxis, np.newaxis, np.newaxis]
        for c in coefficients
    ]

    return Polynomial3D(
        degree=degree,
        coefficients=coefficients,
        input_names=input_names,
        output_name=output_name,
    )
def generic_fit(
        self,
        observed_images: u.Quantity,
        target_images: u.Quantity,
        target_images_min: kgpy.vectors.Cartesian2D,
        target_images_max: kgpy.vectors.Cartesian2D,
        factory: typ.Callable[[typ.List[u.Quantity], 'System', int], 'System'],
        params_guess: typ.Optional[typ.List[u.Quantity]] = None,
        params_min: typ.Optional[typ.List[u.Quantity]] = None,
        params_max: typ.Optional[typ.List[u.Quantity]] = None,
        use_correlate: bool = False,
        x_axis: int = ~2,
        y_axis: int = ~1,
        w_axis: int = ~0,
) -> 'System':
    """Fit system parameters so forward-modeled images match ``target_images``.

    For every channel (each combination of the leading axes of
    ``observed_images``) a brute-force search over
    ``[params_min, params_max]`` minimizes the RMS difference between the
    inverse-projected observed images and the targets; ``factory`` applies a
    candidate parameter set to a copy of the system.

    Parameters
    ----------
    observed_images, target_images
        Images to compare; both are median-normalized over the fit axes.
    target_images_min, target_images_max
        Spatial bounds of the target image grid.
    factory
        Callable applying a parameter list to a system for a given channel.
    params_guess, params_min, params_max
        Parameter lists; units are taken from ``params_guess`` when given,
        otherwise from ``params_min``.
    use_correlate
        If True, align test and target images by cross-correlation before
        computing the residual.
    x_axis, y_axis, w_axis
        Axes of the spatial x, spatial y, and wavelength dimensions.
    """
    axes_all = x_axis, y_axis, w_axis

    # Give the observed images a wavelength axis matching the targets.
    # NOTE(review): reshape cannot replicate data along the inserted size-1
    # axis, so this assumes the wavelength samples are already packed into
    # another axis of ``observed_images`` -- confirm with callers.
    observed_images = np.expand_dims(observed_images, w_axis)
    observed_images_shape = list(observed_images.shape)
    observed_images_shape[w_axis] = target_images.shape[w_axis]
    observed_images = observed_images.reshape(observed_images_shape)

    # Median-normalize both image sets over the fit axes.
    observed_images = observed_images / np.median(observed_images, axis=axes_all, keepdims=True)
    target_images = target_images / np.median(target_images, axis=axes_all, keepdims=True)

    if params_guess is not None:
        params_unit = [q.unit for q in params_guess]
    else:
        params_unit = [q.unit for q in params_min]

    def factory_value(params: np.ndarray, other: 'System', chan_index: int) -> 'System':
        # Re-attach units to the dimensionless optimizer parameters.
        params = [param * unit for param, unit in zip(params, params_unit)]
        return factory(params, other, chan_index)

    # BUG FIX: ``other`` was missing from the objective's signature, so the
    # extra positional arguments supplied by ``scipy.optimize.brute`` via
    # ``args=(other, index)`` did not match, and the inner call to
    # ``factory_value`` omitted its required ``other`` argument.
    def objective(params: np.ndarray, other: 'System', chan_index: int) -> float:
        other_test = factory_value(params=params, other=other, chan_index=chan_index)
        test_images = other_test(
            data=observed_images,
            wavelength=other_test.wavelength,
            spatial_input_min=kgpy.vectors.Vector2D(x=0 * u.pix, y=0 * u.pix),
            spatial_input_max=kgpy.vectors.Vector2D(
                x=observed_images.shape[x_axis],
                y=observed_images.shape[y_axis],
            ),
            spatial_output_min=target_images_min,
            spatial_output_max=target_images_max,
            spatial_samples_output=kgpy.vectors.Vector2D(
                x=target_images.shape[x_axis],
                y=target_images.shape[y_axis],
            ),
            inverse=True,
        )
        if use_correlate:
            # Align test and target images via cross-correlation lag.
            corr = scipy.signal.correlate(
                in1=np.nan_to_num(test_images[chan_index]),
                in2=target_images,
                mode='same',
            )
            corr = np.prod(corr, axis=w_axis)
            lag = np.array(np.unravel_index(np.argmax(corr), corr.shape)) - np.array(corr.shape) // 2
            test_images[chan_index] = np.roll(test_images[chan_index], -lag, axis=(x_axis, y_axis))
        diff = test_images[chan_index] - target_images
        norm = np.sqrt(np.mean(np.square(diff)))
        return norm

    other = self.copy_shallow()
    shape = observed_images.shape[:~2]
    for i in range(np.prod(shape)):
        index = np.unravel_index(i, shape)
        # Strip units so the optimizer works on plain floats.
        params_converted_min = [param[index].to(unit).value for param, unit in zip(params_min, params_unit)]
        params_converted_max = [param[index].to(unit).value for param, unit in zip(params_max, params_unit)]
        params_converted_min = np.array(params_converted_min)
        params_converted_max = np.array(params_converted_max)
        x0 = scipy.optimize.brute(
            func=objective,
            ranges=np.stack([params_converted_min, params_converted_max], axis=~0),
            args=(other, index, ),
            disp=True,
        )
        # Apply the best parameters for this channel before fitting the next.
        other = factory_value(params=x0, other=other, chan_index=index)
    return other
def _calc_intensity_avg(cls, intensity: u.Quantity) -> u.Quantity:
    """Return the trimmed mean of ``intensity`` over its last two axes.

    The final two axes are flattened into one and a 25%-trimmed mean is
    taken along it; the input's unit is re-attached to the result.
    """
    flattened = intensity.reshape(intensity.shape[:~1] + (-1, ))
    trimmed = scipy.stats.trim_mean(flattened, proportiontocut=0.25, axis=~0)
    return trimmed << intensity.unit
def __call__(
        self,
        cube: u.Quantity,
        wavelength: u.Quantity,
        spatial_input_min: vectors.Vector2D,
        spatial_input_max: vectors.Vector2D,
        spatial_output_min: vectors.Vector2D,
        spatial_output_max: vectors.Vector2D,
        spatial_samples_output: typ.Union[int, vectors.Vector2D],
        inverse: bool = False,
        interp_order: int = 1,
        interp_prefilter: bool = False,
        fill_value: float = np.nan,
) -> np.ndarray:
    """Resample ``cube`` onto a new spatial/spectral grid using this model.

    An output grid is built from the spatial bounds and samples plus the
    given wavelengths, mapped through ``self.model`` (inverted when
    ``inverse`` is False -- note the model is called with ``inverse=not
    inverse``), converted to fractional pixel coordinates of ``cube``, and
    interpolated with `scipy.ndimage.map_coordinates`.

    Parameters
    ----------
    cube
        Data to resample; the last three axes are (x, y, wavelength)
        (inferred from the ``shape[~2]``/``shape[~1]`` pixel scaling below).
    wavelength
        Wavelength samples of the output grid.
    spatial_input_min, spatial_input_max
        Spatial bounds corresponding to the edges of ``cube``.
    spatial_output_min, spatial_output_max
        Spatial bounds of the output grid.
    spatial_samples_output
        Number of output samples per spatial axis.
        NOTE(review): the annotation allows a plain ``int`` but the body
        accesses ``.x``/``.y`` -- an int argument would fail; confirm.
    inverse
        Direction of the model mapping.
    interp_order, interp_prefilter, fill_value
        Forwarded to `scipy.ndimage.map_coordinates`.

    Returns
    -------
    The resampled cube with the unit of ``cube`` attached.
    """
    output_grid = vectors.Vector3D()
    output_grid.x = np.linspace(spatial_output_min.x, spatial_output_max.x, spatial_samples_output.x)
    output_grid.y = np.linspace(spatial_output_min.y, spatial_output_max.y, spatial_samples_output.y)
    # Orient x, y, and wavelength along separate trailing axes so the three
    # 1D sample arrays broadcast into a full 3D grid.
    output_grid.x = output_grid.x[..., :, np.newaxis, np.newaxis]
    output_grid.y = output_grid.y[..., np.newaxis, :, np.newaxis]
    wavelength = wavelength[..., np.newaxis, np.newaxis, :]
    output_grid.z = wavelength
    # Map output-grid positions back to input-plane coordinates.
    model = self.model(inverse=not inverse)
    coordinates = model(output_grid)
    # Normalize to [0, 1] over the input bounds, then scale to pixel indices.
    coordinates = (coordinates - spatial_input_min) / (spatial_input_max - spatial_input_min)
    coordinates = coordinates * vectors.Vector2D(x=cube.shape[~2] * u.pix, y=cube.shape[~1] * u.pix)
    coordinates = coordinates.to_3d(wavelength)
    # Broadcast the coordinate grid over the leading (batch) axes of the cube
    # and flatten those axes into a single loop axis.
    sh = cube.shape[:~2]
    coordinates = np.broadcast_to(coordinates, sh + coordinates.shape[~2:])
    coordinates_flat = coordinates.reshape((-1, ) + coordinates.shape[~2:])
    cube_flat = cube.reshape((-1, ) + cube.shape[~2:])
    # Output keeps the cube's wavelength-axis length; only x and y change.
    new_cube_flat_shape = list(cube_flat.shape)
    new_cube_flat_shape[~2] = spatial_samples_output.x
    new_cube_flat_shape[~1] = spatial_samples_output.y
    new_cube_flat = np.empty(new_cube_flat_shape)
    for i in range(cube_flat.shape[0]):
        coords = coordinates_flat[i]
        new_cube_flat[i] = scipy.ndimage.map_coordinates(
            input=cube_flat[i],
            coordinates=np.stack([coords.x, coords.y, coords.z]),
            order=interp_order,
            prefilter=interp_prefilter,
            cval=fill_value,
        )
    # Restore the batch axes and re-attach the cube's unit.
    return new_cube_flat.reshape(cube.shape[:~2] + new_cube_flat.shape[~2:]) << cube.unit