Example #1
def getTransform(xvar=None, x=None, y=None, lcheck=True):
    ''' generate an affine transformation from xarray coordinate axes '''
    from rasterio.transform import Affine # to generate Affine transform
    
    if isinstance(xvar,(xr.DataArray,xr.Dataset)):
        x,y = getGeoCoords(xvar, lraise=True)
    elif xvar:
        raise TypeError('Can only infer GeoTransform from xarray Dataset or DataArray - not from {}.'.format(xvar))
    
    # check X-axis
    if isinstance(x,xr.DataArray): x = x.data
    if not isinstance(x,np.ndarray): 
        raise TypeError(x)
    diff_x = np.diff(x); dx = diff_x.min()
    if lcheck and not np.isclose(dx, diff_x.max(), rtol=1.e-2): 
        raise ValueError("X-axis is not regular: {} - {}".format(dx, diff_x.max()))
    
    # check Y-axis
    if isinstance(y,xr.DataArray): y = y.data
    if not isinstance(y,np.ndarray): 
        raise TypeError(y)
    diff_y = np.diff(y); dy = diff_y.min()
    if lcheck and not np.isclose(dy, diff_y.max(), rtol=1.e-2): 
        raise ValueError("Y-axis is not regular. {} - {}".format(dy, diff_y.max()))
    
    # generate transform
    return Affine.from_gdal(x[0]-dx/2.,dx,0.,y[0]-dy/2.,0.,dy), (len(x),len(y))
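A minimal usage sketch (illustrative values; the original module is assumed to provide `import numpy as np`, `import xarray as xr`, and the `getGeoCoords` helper):

import numpy as np

x = np.arange(-120.0, -110.0, 0.5)   # regular longitude axis (20 points)
y = np.arange(50.0, 40.0, -0.5)      # regular latitude axis (20 points)
transform, (nx, ny) = getTransform(x=x, y=y, lcheck=True)
print(transform)   # affine mapping from pixel indices to map coordinates
print(nx, ny)      # 20 20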
Example #2
 def test_X_normalization_transformer(self):
   """Tests normalization transformer."""
   solubility_dataset = dc.data.tests.load_solubility_data()
   normalization_transformer = dc.trans.NormalizationTransformer(
       transform_X=True, dataset=solubility_dataset)
   X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
                   solubility_dataset.w, solubility_dataset.ids)
   solubility_dataset = normalization_transformer.transform(solubility_dataset)
   X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
                           solubility_dataset.w, solubility_dataset.ids)
   # Check ids are unchanged.
   for id_elt, id_t_elt in zip(ids, ids_t):
     assert id_elt == id_t_elt
    # Check y is unchanged since this is an X transformer
   np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is an X transformer
   np.testing.assert_allclose(w, w_t)
   # Check that X_t has zero mean, unit std.
   #np.set_printoptions(threshold='nan')
   mean = X_t.mean(axis=0)
   assert np.amax(np.abs(mean-np.zeros_like(mean))) < 1e-7
   orig_std_array = X.std(axis=0)
   std_array = X_t.std(axis=0)
   # Entries with zero std are not normalized
   for orig_std, std in zip(orig_std_array, std_array):
     if not np.isclose(orig_std, 0):
       assert np.isclose(std, 1)
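For reference, the behaviour being tested amounts to column-wise standardization; a standalone NumPy sketch of that idea (not DeepChem's actual implementation):

import numpy as np

X = np.random.default_rng(0).normal(loc=3.0, scale=2.0, size=(100, 5))
X_t = (X - X.mean(axis=0)) / X.std(axis=0)   # zero mean, unit std per feature
assert np.allclose(X_t.mean(axis=0), 0.0, atol=1e-7)
assert np.allclose(X_t.std(axis=0), 1.0)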
Example #3
    def __test(n, pre_max, post_max, pre_avg, post_avg, delta, wait):

        # Generate a test signal
        x = np.random.randn(n)**2

        peaks = librosa.util.peak_pick(x,
                                       pre_max, post_max,
                                       pre_avg, post_avg,
                                       delta, wait)

        for i in peaks:
            # Test 1: is it a peak in this window?
            s = i - pre_max
            if s < 0:
                s = 0
            t = i + post_max

            diff = x[i] - np.max(x[s:t])
            assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)

            # Test 2: is it a big enough peak to count?
            s = i - pre_avg
            if s < 0:
                s = 0
            t = i + post_avg

            diff = x[i] - (delta + np.mean(x[s:t]))
            assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)

        # Test 3: peak separation
        assert not np.any(np.diff(peaks) <= wait)
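A hedged usage sketch of `librosa.util.peak_pick` with concrete window parameters (values chosen only for illustration; recent librosa versions expect these arguments by keyword):

import numpy as np
import librosa

x = np.random.default_rng(0).standard_normal(1000) ** 2
peaks = librosa.util.peak_pick(x, pre_max=30, post_max=30,
                               pre_avg=30, post_avg=50,
                               delta=0.5, wait=10)
print(peaks)   # indices that satisfy all three criteria checked in the test above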
Example #4
 def compareOutputGeometry(self, orientedImageData, spacing, origin, directions):
   if orientedImageData is None:
     logging.error('Invalid input oriented image data')
     return False
   if (not isinstance(spacing, list) and not isinstance(spacing, tuple)) \
       or (not isinstance(origin, list) and not isinstance(origin, tuple)) \
       or not isinstance(directions, list):
     logging.error('Invalid baseline object types - need lists')
     return False
   if len(spacing) != 3 or len(origin) != 3 or len(directions) != 3 \
       or len(directions[0]) != 3 or len(directions[1]) != 3 or len(directions[2]) != 3:
     logging.error('Baseline lists need to contain 3 elements each, the directions 3 lists of 3')
     return False
   import numpy
   tolerance = 0.0001
   actualSpacing = orientedImageData.GetSpacing()
   actualOrigin = orientedImageData.GetOrigin()
   actualDirections = [[0]*3,[0]*3,[0]*3]
   orientedImageData.GetDirections(actualDirections)
   for i in [0,1,2]:
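      # note: the third positional argument of numpy.isclose is rtol, so `tolerance` acts as a relative tolerance here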
     if not numpy.isclose(spacing[i], actualSpacing[i], tolerance):
       logging.warning('Spacing discrepancy: ' + str(spacing) + ' != ' + str(actualSpacing))
       return False
     if not numpy.isclose(origin[i], actualOrigin[i], tolerance):
       logging.warning('Origin discrepancy: ' + str(origin) + ' != ' + str(actualOrigin))
       return False
     for j in [0,1,2]:
       if not numpy.isclose(directions[i][j], actualDirections[i][j], tolerance):
         logging.warning('Directions discrepancy: ' + str(directions) + ' != ' + str(actualDirections))
         return False
   return True
Example #5
def test_init():
  alm = {
    'gps': {
        'a': 8,
        'af0': 9,
        'af1': 10,
        'argp': 7,
        'ecc': 8,
        'inc': 3,
        'ma': 8,
        'raaw': 6,
        'rora': 4,
        'toa': 2,
        'week': 11
    },
    'healthy': 1,
    'sid': {
        'band': 0,
        'constellation': 0,
        'sat': 1
    },
    'valid': 1,
  }

  satAlmanac = swiftnav.almanac.Almanac(**alm)
  assert np.isclose(alm['gps']['a'], satAlmanac.gps['a'])
  assert np.isclose(alm['gps']['ecc'], satAlmanac.gps['ecc'])
Example #6
def compare_fields(uni0, uni1, rtol=5e-5, atol=1e-12, signed=True, verbose=True):
    """Compare field values of differenct universes.
    It is expected that fields are in the same order.

    Args:
        uni0 (:class:`exatomic.core.universe.Universe`): first universe
        uni1 (:class:`exatomic.core.universe.Universe`): second universe
        rtol (float): relative tolerance passed to numpy.isclose
        atol (float): absolute tolerance passed to numpy.isclose
        signed (bool): opposite signs are counted as different (default True)
        verbose (bool): print how close the fields are to each other numerically (default True)

    Returns:
        fracs (list): list of fractions measuring closeness of fields
    """
    fracs, kws = [], {'rtol': rtol, 'atol': atol}
    for i, (f0, f1) in enumerate(zip(uni0.field.field_values,
                                     uni1.field.field_values)):
        n = np.isclose(f0, f1, **kws).sum()
        if not signed: n = max(n, np.isclose(f0, -f1, **kws).sum())
        fracs.append(n / f0.shape[0])
    if verbose:
        fmt = '{{:<{}}}:{{:>9}}'.format(len(str(len(fracs))) + 1)
        print(fmt.format(len(fracs), 'Fraction'))
        fmt = fmt.replace('9', '9.5f')
        for i, f in enumerate(fracs):
            print(fmt.format(i, f))
    return fracs
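The per-field fraction computed above can be illustrated with plain NumPy arrays (a standalone sketch, independent of exatomic universes):

import numpy as np

f0 = np.array([0.10, 0.20, 0.30, 0.40])
f1 = np.array([0.10, 0.20, 0.31, -0.40])
frac = np.isclose(f0, f1, rtol=5e-5, atol=1e-12).sum() / f0.shape[0]
print(frac)   # 0.5 -- two of the four grid values agree within tolerance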
Example #7
def test_point_setitem():
    p = Point()

    p[0] = 6.0
    assert p[0] == 6.0

    p[1] = 16.0
    p[1] += 600.0
    assert np.isclose(p[1], 616.0)

    p[2] = 111.0
    p[2] *= 12.0
    p[2] /= 2
    assert np.isclose(p[2], 666.0)

    with pytest.raises(IndexError):
        p[3] = 6666.0

    p[:] = (0, 0, 0)
    assert np.all(p[:] == 0)

    p[:] = (1, 2, 3)
    assert np.all(p[:] == (1, 2, 3))

    p[:] += np.array((1, 2, 3))
    assert np.all(p[:] == (2, 4, 6))

    p[:] /= 2
    assert np.all(p[:] == (1, 2, 3))

    p[:] *= np.array((2., 2., 2.))
    assert np.all(p[:] == (2, 4, 6))
Example #8
    def _calculate_phase_correction(self):
        """Calculate a new phase correction value for the new frequency."""
        self.dprint('Calculating frequency phase correction...')
        self.dprint('  Looking for sinusoid value %s' %
                    self.wavedata[self.last_frame])
        # phasor for new frequency at the last frame
        new_phasor = self._phasor(self.last_frame, self.frequency, 0)
        new_phasor_arg = self._phasor_argument(new_phasor)
        phase_correction = self.last_phase - new_phasor_arg

        corrected_phasor = self._phasor(self.last_frame, self.frequency,
                                        phase_correction)
        self.dprint('  First try at correction: %s' %
                    corrected_phasor.real)
        # Check whether we have the correct solution or if we need another half
        # period for the phase correction to match up
        if not numpy.isclose(self.wavedata[self.last_frame],
                            corrected_phasor.real,
                            rtol=self.cmp_precision):
            self.dprint('  Not close enough, adding 1/2 a period.')
            phase_correction += math.pi
            corrected_phasor = self._phasor(self.last_frame, self.frequency,
                                            phase_correction)
            self.dprint('  New correction: %s' % corrected_phasor.real)
            if not numpy.isclose(self.wavedata[self.last_frame],
                                corrected_phasor.real,
                                rtol=self.cmp_precision):
                raise Exception('Something is wrong, the correction does not '
                                'match up.')
        self.phase = phase_correction
        self.dprint('  New phase correction for freq %s set to %s' %
                    (self.frequency, self.phase))
Example #9
 def test_fill_empty(self):
     empty = HistogramData(bins = np.linspace(0, 10, 11))
     empty.fill(5)
     assert empty.total == 1
     empty.fill(5, 0.4)
     assert np.isclose(empty.total_weight, 1.4)
     assert np.isclose(empty.total, 2)
Example #10
    def test_linear_moma_sanity(self, model):
        """Test optimization criterion and optimality."""
        sol = model.optimize()
        with model:
            model.reactions.PFK.knock_out()
            knock_sol = model.optimize()
            sabs = (knock_sol.fluxes - sol.fluxes).abs().sum()

        with model:
            add_moma(model, linear=True)
            model.reactions.PFK.knock_out()
            moma_sol = model.optimize()
            moma_sabs = (moma_sol.fluxes - sol.fluxes).abs().sum()

        # Use normal FBA as reference solution.
        with model:
            add_moma(model, solution=sol, linear=True)
            model.reactions.PFK.knock_out()
            moma_ref_sol = model.optimize()
            moma_ref_sabs = (moma_ref_sol.fluxes - sol.fluxes).abs().sum()

        assert numpy.allclose(moma_sol.objective_value, moma_sabs)
        assert moma_sabs < sabs
        assert numpy.isclose(moma_sol.objective_value,
                             moma_ref_sol.objective_value)
        assert numpy.isclose(moma_sabs, moma_ref_sabs)

        with model:
            add_moma(model, linear=True)
            with pytest.raises(ValueError):
                add_moma(model)
Example #11
def _isclosemod(a, b, atol=1E-5, mod=2*pi):
    """
    Return whether two numbers (or arrays) are within atol of each other
    in the modulo space determined by mod.
    """
    return (isclose(a%mod, b%mod, atol=atol) 
            or isclose((a+atol)%mod, (b+atol)%mod, atol=atol))
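A quick usage sketch (the helper assumes `isclose` and `pi` are imported from numpy in the enclosing module); the second isclose call is what lets values straddling the wrap-around point compare equal:

from numpy import pi

print(_isclosemod(2*pi - 1e-6, 1e-6))   # True: the values differ only across the 0/2*pi boundary
print(_isclosemod(0.0, pi))             # False: half a period apart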
Example #12
    def _create_D_matrix(self):
        N = self.level_counts_cs[-1]
        D = np.zeros((N, N, len(self.laser_intensity)), dtype='object')
        bxrho = BxRho_Voigt if self.shape.lower() == 'voigt' else BxRho_Lorentzian

        self.indices = []
        for laser_index, laser in enumerate(self.laser_intensity):
            for i, j in itertools.combinations(range(len(self.level_counts)), 2):
                for k, (fe, mze) in enumerate(zip(self.Flist[i], self.MFlist[i])):
                    for l, (fg, mzg) in enumerate(zip(self.Flist[j], self.MFlist[j])):
                        x = self.level_counts_cs[i] - self.level_counts[i] + k
                        y = self.level_counts_cs[j] - self.level_counts[j] + l
                        if np.isclose(self.A_array[i, j], 0) or np.isclose(self.partial_A[x, y], 0):
                            continue
                        purity = self._params['Purity'].value
                        frac = purity if self.mode[laser_index] == (mze - mzg) else (1.0 - purity) if self.mode[laser_index] == -(mze - mzg) else 0
                        if frac == 0:
                            pass
                        else:
                            intensity = frac * self._params['Laser_intensity_' + str(laser_index)].value
                            A = self._params['Transition_strength_' + str(i) + '_to_' + str(j)].value
                            mu = (self.energies[k] + self.energy_change[k]) - (self.energies[l] + self.energy_change[l])
                            kwargs = {'A': A, 'mu': mu, 'laser': intensity}
                            if self.shape.lower() == 'voigt':
                                kwargs['fwhmG'] = self._params['FWHMG_' + str(i) + '_to_' + str(j)].value * 1e6
                                kwargs['fwhmL'] = self._params['FWHML_' + str(i) + '_to_' + str(j)].value * 1e6
                            else:
                                kwargs['fwhm'] = self._params['FWHML_' + str(i) + '_to_' + str(j)].value * 1e6
                            D[x, y, laser_index] = bxrho(**kwargs)
                            self.indices.append((x, y, laser_index, i, j, mze, mzg))

        self.D = D
Example #13
    def test_moma_sanity(self, model):
        """Test optimization criterion and optimality."""
        try:
            solver = sutil.get_solver_name(qp=True)
            model.solver = solver
        except sutil.SolverNotFound:
            pytest.skip("no qp support")

        sol = model.optimize()
        with model:
            model.reactions.PFK.knock_out()
            knock_sol = model.optimize()
            ssq = (knock_sol.fluxes - sol.fluxes).pow(2).sum()

        with model:
            add_moma(model)
            model.reactions.PFK.knock_out()
            moma_sol = model.optimize()
            moma_ssq = (moma_sol.fluxes - sol.fluxes).pow(2).sum()

        # Use normal FBA as reference solution.
        with model:
            add_moma(model, solution=sol)
            model.reactions.PFK.knock_out()
            moma_ref_sol = model.optimize()
            moma_ref_ssq = (moma_ref_sol.fluxes - sol.fluxes).pow(2).sum()

        assert numpy.isclose(moma_sol.objective_value, moma_ssq)
        assert moma_ssq < ssq
        assert numpy.isclose(moma_sol.objective_value,
                             moma_ref_sol.objective_value)
        assert numpy.isclose(moma_ssq, moma_ref_ssq)
Example #14
def peak_effects(energies):
    """
    This function checks whether a given list of peak energies contains any
    possible single escape peaks, double escape peaks, and sum peaks.
    """

    single_escape_peak = []
    single_escape_peak_index = []
    double_escape_peak = []
    double_escape_peak_index = []

    for i in range(len(energies)):
        #checks to see if condition for escape peaks is fulfilled.
        if energies[i] >= 1022:
            for j in range(len(energies)):
                # checks to see if peak is a single escape peak.
                if np.isclose(energies[i], energies[j]+511, atol=1):
                    single_escape_peak.append(energies[j])
                    single_escape_peak_index.append(j)
                # checks to see if peak is a double escape peak.
                if np.isclose(energies[i], energies[j]+1022, atol=1):
                    double_escape_peak.append(energies[j])
                    double_escape_peak_index.append(j)
                    """
                    for m in range(len(energies)):
                        if np.isclose(energies[j],energies[m]+511,atol=1) == True:
                            double_escape_peak.append(energies[m])
                            double_escape_peak_index.append(m)
                    """
    # remove any single escape peak that has the same value as a double
    # escape peak.
    double_count = []
    double_count_index = []

    for i in range(len(single_escape_peak)):
        for j in range(len(double_escape_peak)):
            if single_escape_peak[i] == double_escape_peak[j]:
                double_count.append(single_escape_peak[i])
                double_count_index.append(energies.index(single_escape_peak[i]))

    single_escape_peak = [x for x in single_escape_peak if x not in double_count]
    single_escape_peak_index = [x for x in single_escape_peak_index if x not in double_count_index]

    #finds the index of the original energy that is responsible for the single
    #escape peaks and the double escape peaks.
    origin_index_se = []
    origin_index_de = []
    for i in range(len(energies)):
        for j in range(len(single_escape_peak)):
            if np.isclose(energies[i], single_escape_peak[j]+511, atol=1):
                origin_index_se.append(i)
        for k in range(len(double_escape_peak)):
            if np.isclose(energies[i], double_escape_peak[k]+1022, atol=1):
                origin_index_de.append(i)

    peak_effects = {'single_escape_peak':single_escape_peak,'double_escape_peak':double_escape_peak,
                    'single_escape_peak_index':single_escape_peak_index,'double_escape_peak_index':double_escape_peak_index,
                    'origin_index_se':origin_index_se,'origin_index_de':origin_index_de}

    return(peak_effects)
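A small worked example (assuming `numpy` is imported as `np` at module level): a 1460 keV peak produces a single escape peak at 1460 - 511 = 949 keV and a double escape peak at 1460 - 1022 = 438 keV:

energies = [1460.0, 949.0, 438.0]
result = peak_effects(energies)
print(result['single_escape_peak'])   # [949.0]
print(result['double_escape_peak'])   # [438.0]
print(result['origin_index_se'])      # [0] -- the 1460 keV peak is the origin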
Example #15
    def test_monopole_fluxpoints(self):
        """Tests monopole flux points."""

        field = ElectricField([PointCharge(2, [0, 0])])
        circle = GaussianCircle([0, 0], 10)

        fluxpoints = circle.fluxpoints(field, 4)
        self.assertEqual(len(fluxpoints), 4)
        self.assertTrue(isclose(fluxpoints,
                                [[10, 0], [0, 10], [-10, 0], [0, -10]]).all())

        fluxpoints = circle.fluxpoints(field, 14)
        self.assertEqual(len(fluxpoints), 14)
        self.assertTrue(isclose(fluxpoints[0], [10, 0]).all())
        self.assertTrue(isclose(fluxpoints[7], [-10, 0]).all())

        x1 = fluxpoints[1:7]
        x2 = fluxpoints[-1:7:-1]
        x2[:, 1] = fabs(x2[:, 1])
        self.assertTrue(isclose(x1, x2).all())

        x1 = append(fluxpoints[-3:], fluxpoints[:4], axis=0)
        x2 = fluxpoints[-4:3:-1]
        x2[:, 0] = fabs(x2[:, 0])
        self.assertEqual(len(x1), len(x2))
        self.assertTrue(isclose(x1, x2).all())
Example #16
def test_wigner():
    p = datasets.COLORS.v2p2_WL_wigner
    data = wt.data.from_COLORS(p)
    data.ai0.normalize()
    assert np.isclose(data.ai0.null, 0.)
    assert np.isclose(data.ai0.max(), 1.)
    data.close()
Example #17
def _get_intersection_bound_vector_plane(bound_vector, plane):
    distance_to_plane = dot(
        plane.point_in_plane - bound_vector.initial_point,
        plane.normal_vector)
    projected_vector_length = dot(
        bound_vector.free_vector,
        plane.normal_vector)

    distance_to_plane_close_to_zero = isclose(
        distance_to_plane,
        0,
        **config['numbers_close_kwargs'])
    projected_vector_length_close_to_zero = isclose(
        projected_vector_length,
        0,
        **config['numbers_close_kwargs'])
    if (
            distance_to_plane_close_to_zero and
            projected_vector_length_close_to_zero):
        return bound_vector

    with errstate(divide='ignore'):
        param = nan_to_num(distance_to_plane / projected_vector_length)

    # TODO: add distinction for included and excluded initial and terminal
    # points
    if 0 <= param <= 1:
        intersection = (
            bound_vector.initial_point +
            param*(bound_vector.terminal_point - bound_vector.initial_point))
    else:
        intersection = None
    return intersection
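The parametric test at the end reduces to standard segment-plane intersection; a standalone NumPy sketch with made-up points (independent of the BoundVector/Plane classes and the `config` tolerances used above):

import numpy as np

p0 = np.array([0.0, 0.0, -1.0])                 # segment start
p1 = np.array([0.0, 0.0, 3.0])                  # segment end
plane_point = np.array([0.0, 0.0, 0.0])
plane_normal = np.array([0.0, 0.0, 1.0])

distance_to_plane = np.dot(plane_point - p0, plane_normal)   # 1.0
projected_length = np.dot(p1 - p0, plane_normal)             # 4.0
param = distance_to_plane / projected_length                 # 0.25
intersection = p0 + param * (p1 - p0) if 0 <= param <= 1 else None
print(intersection)   # [0. 0. 0.]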
Example #18
def isrhombohedral(box, rtol=1e-05, atol=1e-08):
    """
    Tests if a box is consistent with a standard rhombohedral cell:
    a = b = c
    alpha = beta = gamma != 90
    
    Parameters
    ----------
    box : atomman.Box
        The box object to test.
    rtol : float, optional
        Relative tolerance for testing box parameters. Default value is 1e-5.
    atol : float, optional
        Absolute tolerance for testing box parameters. Default value is 1e-8.
        
    Returns
    -------
    bool
        True if box is a standard rhombohedral cell, False otherwise.
    """
    return (np.isclose(box.a, box.b, atol=atol, rtol=rtol)
            and np.isclose(box.a, box.c, atol=atol, rtol=rtol)
            and np.isclose(box.alpha, box.beta, atol=atol, rtol=rtol)
            and np.isclose(box.alpha, box.gamma, atol=atol, rtol=rtol)
            and not np.isclose(box.alpha, 90.0, atol=atol, rtol=rtol))
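A minimal usage sketch with a hypothetical stand-in for the box object (a namedtuple exposing the same attributes); in practice an atomman.Box would be passed:

import numpy as np
from collections import namedtuple

FakeBox = namedtuple('FakeBox', ['a', 'b', 'c', 'alpha', 'beta', 'gamma'])

rhombo = FakeBox(a=3.0, b=3.0, c=3.0, alpha=75.0, beta=75.0, gamma=75.0)
cubic = FakeBox(a=3.0, b=3.0, c=3.0, alpha=90.0, beta=90.0, gamma=90.0)

print(isrhombohedral(rhombo))   # True
print(isrhombohedral(cubic))    # False -- all angles are 90 degrees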
Example #19
def test_get_fetch_size_accuracy():
    """
    Does it give the right answer?
    """
    # By hand for stat
    dat = fetch.MSID('aopcadmd', '2010:001', '2011:001', stat='5min')
    fetch_bytes = sum(getattr(dat, attr).nbytes for attr in dat.colnames)

    fetch_mb, out_mb = get_fetch_size('aopcadmd', '2010:001', '2011:001', stat='5min',
                                      interpolate_dt=328 * 2, fast=False)
    assert np.isclose(fetch_mb, fetch_bytes / 1e6, rtol=0.0, atol=0.01)

    # Now interpolate to 10 minute intervals
    dat.interpolate(328.0 * 2)
    fetch_bytes = sum(getattr(dat, attr).nbytes for attr in dat.colnames)
    assert np.isclose(out_mb, fetch_bytes / 1e6, rtol=0.0, atol=0.01)

    # By hand for full resolution
    dat = fetch.MSID('aopcadmd', '2011:001', '2011:010')
    fetch_bytes = sum(getattr(dat, attr).nbytes for attr in dat.colnames)

    fetch_mb, out_mb = get_fetch_size('aopcadmd', '2011:001', '2011:010',
                                      interpolate_dt=328 * 2, fast=False)
    assert np.isclose(fetch_mb, fetch_bytes / 1e6, rtol=0.0, atol=0.01)

    # Now interpolate to 10 minute intervals
    dat.interpolate(328.0 * 2)
    fetch_bytes = sum(getattr(dat, attr).nbytes for attr in dat.colnames)
    assert np.isclose(out_mb, fetch_bytes / 1e6, rtol=0.0, atol=0.01)
Example #20
File: SN.py  Project: cbischoff/CosmoMC
    def inverse_covariance_matrix(self, alpha=0, beta=0):
        if 'mag' in self.covs:
            invcovmat = self.covs['mag'].copy()
        else:
            invcovmat = 0
        if self.alphabeta_covmat:
            if np.isclose(alpha, self._last_alpha) and np.isclose(beta, self._last_beta):
                return self.invcov
            self._last_alpha = alpha
            self._last_beta = beta

            alphasq = alpha * alpha
            betasq = beta * beta
            alphabeta = alpha * beta
            if 'stretch' in self.covs:
                invcovmat += alphasq * self.covs['stretch']
            if 'colour' in self.covs:
                invcovmat += betasq * self.covs['colour']
            if 'mag_stretch' in self.covs:
                invcovmat += 2 * alpha * self.covs['mag_stretch']
            if 'mag_colour' in self.covs:
                invcovmat -= 2 * beta * self.covs['mag_colour']
            if 'stretch_colour' in self.covs:
                invcovmat -= 2 * alphabeta * self.covs['stretch_colour']

            delta = self.pre_vars + alphasq * self.stretch_var \
                    + betasq * self.colour_var + 2.0 * alpha * self.cov_mag_stretch \
                    - 2.0 * beta * self.cov_mag_colour \
                    - 2.0 * alphabeta * self.cov_stretch_colour
        else:
            delta = self.pre_vars
        np.fill_diagonal(invcovmat, invcovmat.diagonal() + delta)
        self.invcov = np.linalg.inv(invcovmat)
        return self.invcov
Example #21
def test_field_initialized_to_zero(bates):
    for name in bates.grid["node"].keys():
        field = bates.grid["node"][name]
        if name != "surface_water__depth":
            assert np.all(np.isclose(field, 0.))
        else:
            assert np.all(np.isclose(field, 0.001))
Example #22
def test_Cacciato09Sats2():
    """
    Check that the model behavior is altered in the expected way by changing
    param_dict values.
    """
    model = Cacciato09Sats(threshold=9.5)
    nsat_exp = model.mean_occupation(prim_haloprop=5e13)
    # Increasing b_0 by x should increase the occupation by exactly 10**x.
    model.param_dict['b_0'] += 0.1
    nsat_exp_new = model.mean_occupation(prim_haloprop=5e13)
    assert np.isclose(nsat_exp_new, nsat_exp * 10**0.1, rtol=1e-2, atol=1.e-2)

    # Increasing b_1 by x should increase the occupation by exactly
    # 10**(x * (log prim_haloprop - 12.0)).
    model.param_dict['b_0'] -= 0.1
    model.param_dict['b_1'] += 0.1
    nsat_exp_new = model.mean_occupation(prim_haloprop=5e13)
    assert np.isclose(nsat_exp_new, nsat_exp * 10**(
        0.1 * (np.log10(5e13) - 12.0)), rtol=1e-2, atol=1.e-2)

    # Increasing b_2 by x should increase the occupation by exactly
    # 10**(x * (log prim_haloprop - 12.0)**2).
    model.param_dict['b_1'] -= 0.1
    model.param_dict['b_2'] += 0.1
    nsat_exp_new = model.mean_occupation(prim_haloprop=5e13)
    assert np.isclose(nsat_exp_new, nsat_exp * 10 ** (
        0.1 * (np.log10(5e13) - 12.0)**2), rtol=1e-2, atol=1.e-2)
Example #23
    def get_vanadium(self, detector_mask, m1, colltrans, exp, indir):
        """
        This function returns either (vanadium_count, vanadium_monitor, None) or
        (None, None, vcorr) depending on what type of file is provided by getProperty("Vanadium")
        """
        if not self.getProperty("Normalise").value:
            return None, None, np.ones(44)[detector_mask]

        vanadium_filename = self.getProperty("Vanadium").value
        if vanadium_filename:
            if vanadium_filename.split('.')[-1] == 'dat':
                vanadium = np.genfromtxt(vanadium_filename)
                vanadium_count = vanadium[:, 5:49].sum(axis=0)[detector_mask]
                vanadium_monitor = vanadium[:, 3].sum()
                logger.notice("Using vanadium data file: {}".format(vanadium_filename))
                return vanadium_count, vanadium_monitor, None
            else:
                vcorr_filename = vanadium_filename
        else: # Find adjacent vcorr file
            # m1 is the monochromator angle
            # m1 = 0 -> Ge 115, 1.54A
            # m1 = 9.45 -> Ge 113, 2.41A
            # colltrans is the collimator position, whether in or out of the beam
            # colltrans = 0 -> IN
            # colltrans = +/-80 -> OUT
            vcorr_filename = 'HB2A_{}__Ge_{}_{}_vcorr.txt'.format(exp,
                                                                  115 if np.isclose(m1, 0, atol=0.1) else 113,
                                                                  "IN" if np.isclose(colltrans, 0, atol=0.1) else "OUT")
        vcorr_filename = os.path.join(indir, vcorr_filename)
        logger.notice("Using vcorr file: {}".format(vcorr_filename))
        if not os.path.isfile(vcorr_filename):
            raise RuntimeError("Vanadium file {} does not exist".format(vcorr_filename))

        return None, None, np.genfromtxt(vcorr_filename)[detector_mask]
Example #24
def likelihood_check(obs_distns,trans_matrix,init_distn,data,target_val):
    for cls in [m.HMMPython, m.HMM]:
        hmm = cls(alpha=6.,init_state_concentration=1, # placeholders
                obs_distns=obs_distns)
        hmm.trans_distn.trans_matrix = trans_matrix
        hmm.init_state_distn.weights = init_distn
        hmm.add_data(data)

        # test default log_likelihood method

        assert np.isclose(target_val, hmm.log_likelihood())

        # manual tests of the several message passing methods

        states = hmm.states_list[-1]

        states.clear_caches()
        states.messages_forwards_normalized()
        assert np.isclose(target_val,states._normalizer)

        states.clear_caches()
        states.messages_forwards_log()
        assert np.isinf(target_val) or np.isclose(target_val,states._normalizer)

        states.clear_caches()
        states.messages_backwards_log()
        assert np.isinf(target_val) or np.isclose(target_val,states._normalizer)

        # test held-out vs in-model

        assert np.isclose(target_val, hmm.log_likelihood(data))
Example #25
def test_colorbar_renorm():
    x, y = np.ogrid[-4:4:31j, -4:4:31j]
    z = 120000*np.exp(-x**2 - y**2)

    fig, ax = plt.subplots()
    im = ax.imshow(z)
    cbar = fig.colorbar(im)
    assert np.allclose(cbar.ax.yaxis.get_majorticklocs(),
                       np.arange(0, 120000.1, 15000))

    cbar.set_ticks([1, 2, 3])
    assert isinstance(cbar.locator, FixedLocator)

    norm = LogNorm(z.min(), z.max())
    im.set_norm(norm)
    assert isinstance(cbar.locator, _ColorbarLogLocator)
    assert np.allclose(cbar.ax.yaxis.get_majorticklocs(),
                       np.logspace(-8, 5, 14))
    # note that set_norm removes the FixedLocator...
    assert np.isclose(cbar.vmin, z.min())
    cbar.set_ticks([1, 2, 3])
    assert isinstance(cbar.locator, FixedLocator)
    assert np.allclose(cbar.ax.yaxis.get_majorticklocs(),
                       [1.0, 2.0, 3.0])

    norm = LogNorm(z.min() * 1000, z.max() * 1000)
    im.set_norm(norm)
    assert np.isclose(cbar.vmin, z.min() * 1000)
    assert np.isclose(cbar.vmax, z.max() * 1000)
Example #26
    def test_case1_vs_npss(self):


        component  =  pod_mach.PodMach()

        prob = create_problem(component)

        prob.setup()

        prob['comp.gam'] = 1.4
        prob['comp.R'] = 287.0
        prob['comp.BF'] = .9
        prob['comp.A_pod'] = 1.4
        prob['comp.L'] = 22.0
        prob['comp.prc'] = 12.5
        prob['comp.p_tube'] = 850.0
        prob['comp.T_ambient'] = 298.0
        prob['comp.mu'] = 1.846e-5
        prob['comp.M_duct'] = .95
        prob['comp.M_diff'] = .6
        prob['comp.cp'] = 1009.0
        prob['comp.delta_star'] = .07
        prob['comp.M_pod'] = .8

        prob.run()

        assert np.isclose(prob['comp.Re'], 3278799.304354, rtol=0.1)
        assert np.isclose(prob['comp.A_tube'], 18.600833, rtol=0.1)
Example #27
File: test_utils.py  Project: TuKo/brainiak
def test_gen_design():
    from brainiak.utils.utils import gen_design
    import numpy as np
    import os.path
    files = {'FSL1': 'example_stimtime_1_FSL.txt',
             'FSL2': 'example_stimtime_2_FSL.txt',
             'AFNI1': 'example_stimtime_1_AFNI.txt'}
    for key in files.keys():
        files[key] = os.path.join(os.path.dirname(__file__), files[key])
    design1 = gen_design(stimtime_files=files['FSL1'], scan_duration=[48, 20],
                         TR=2, style='FSL')
    assert design1.shape == (34, 1), 'Returned design matrix has wrong shape'
    assert design1[24] == 0, (
        "gen_design should generated design matrix for each run separately "
        "and concatenate them.")
    design2 = gen_design(stimtime_files=[files['FSL1'], files['FSL2']],
                         scan_duration=[48, 20], TR=2, style='FSL')
    assert design2.shape == (34, 2), 'Returned design matrix has wrong shape'
    design3 = gen_design(stimtime_files=files['FSL1'], scan_duration=68, TR=2,
                         style='FSL')
    assert design3[24] != 0, (
        'design matrix should be non-zero 8 seconds after an event onset.')
    design4 = gen_design(stimtime_files=[files['FSL2']],
                         scan_duration=[48, 20], TR=2, style='FSL')
    assert np.all(np.isclose(design1 * 0.5, design4)), (
        'gen_design does not treat missing values correctly')
    design5 = gen_design(stimtime_files=[files['FSL2']],
                         scan_duration=[48, 20], TR=1)
    assert np.all(np.isclose(design4, design5[::2])), (
        'design matrices sampled at different frequency do not match'
        ' at corresponding time points')
    design6 = gen_design(stimtime_files=[files['AFNI1']],
                         scan_duration=[48, 20], TR=2, style='AFNI')
    assert np.all(np.isclose(design1, design6)), (
        'design matrices generated from AFNI style and FSL style do not match')
Example #28
    def test(self):
        global filename
        if filename is None: filename = corsika.example_data_dir + '/DAT000002-32'
        assert os.path.exists(filename)

        raw = corsika.RawStream(filename)
        block = corsika.Block()

        # get the run header, event header and first particle block
        raw.get_next_block(block)
        assert block.ID == 'RUNH'
        raw.get_next_block(block)
        assert block.ID == 'EVTH'
        raw.get_next_block(block)
        assert numpy.all(numpy.isclose(reference, block.data.reshape((-1,7))))
        n_blocks = 3
        while raw.get_next_block(block):
            n_blocks += 1

        # check total number of blocks
        assert n_blocks == 4725
        raw.close()

        # check particle iteration
        raw = corsika.RawStream(filename)
        raw.get_next_block(block)
        raw.get_next_block(block)
        particles = raw.particles() # this works because it is positioned right before the particle block
        for i,p in enumerate(raw.particles()):
            if i >= reference.shape[0]: break
            assert numpy.all(numpy.isclose([p.px, p.py, p.pz, p.x, p.y, p.t_or_z], reference[i][1:], 3))
Example #29
def test_ISFC():
    curr_dir = os.path.dirname(__file__)

    mask_fname = os.path.join(curr_dir, 'mask.nii.gz')
    mask = io.load_boolean_mask(mask_fname)
    fnames = [os.path.join(curr_dir, 'subj1.nii.gz'),
              os.path.join(curr_dir, 'subj2.nii.gz')]
    masked_images = image.mask_images(io.load_images(fnames), mask)

    D = image.MaskedMultiSubjectData.from_masked_images(masked_images,
                                                        len(fnames))

    assert D.shape == (4, 5, 2), "Loaded data has incorrect shape"

    (ISFC, p) = brainiak.isfc.isfc(D, return_p=True, num_perm=100,
                                   two_sided=True, random_state=0)

    ground_truth = \
        [[1, 1, 0, -1],
         [1, 1, 0, -1],
         [0, 0, 1,  0],
         [-1, -1, 0, 1]]

    ground_truth_p = 1 - np.abs(ground_truth)

    assert np.isclose(ISFC, ground_truth).all(), \
        "Calculated ISFC does not match ground truth"

    assert np.isclose(p, ground_truth_p).all(), \
        "Calculated p values do not match ground truth"
Example #30
def test_Cacciato09Cens6():
    """Check that the median primary galaxy property behave accordingly.
    """
    model = Cacciato09Cens(threshold=9.5)
    prim_galprop_1 = model.median_prim_galprop(prim_haloprop=1e14)
    model.param_dict['log_M_1'] += 0.1
    prim_galprop_2 = model.median_prim_galprop(prim_haloprop=1e14*10**0.1)
    assert np.isclose(prim_galprop_1, prim_galprop_2, rtol=1e-6, atol=1.e-2)

    model.param_dict['log_L_0'] += 0.1
    prim_galprop_3 = model.median_prim_galprop(prim_haloprop=1e14*10**0.1)
    assert np.isclose(prim_galprop_2 * 10**0.1, prim_galprop_3, rtol=1e-6,
                      atol=1.e-2)

    model.param_dict['gamma_1'] += 0.1
    prim_galprop_4 = model.median_prim_galprop(prim_haloprop=1e14*10**0.1)
    assert prim_galprop_3 != prim_galprop_4

    model.param_dict['gamma_2'] += 0.1
    prim_galprop_5 = model.median_prim_galprop(prim_haloprop=1e14 * 10 ** 0.1)
    assert prim_galprop_4 != prim_galprop_5

    model.param_dict['sigma'] += 0.1
    prim_galprop_6 = model.median_prim_galprop(prim_haloprop=1e14 * 10 ** 0.1)
    assert np.isclose(prim_galprop_5, prim_galprop_6, rtol=1e-6, atol=1.e-2)
Example #31
File: constraint.py  Project: wuyou33/torq
    def cost_grad_curv(self, state, seg=0, doGrad=True, doCurv=False):
        """
        Computes the cost, cost gradient and cost curvature of the cylinder constraint, returned as a dictionary per dimension.

        Cost gradient and curvature are returned for use in optimisation steps (e.g. linesearch)

        Args:
            state: The trajectory to evaluate for obstacle costs. np.array with dimensions (nder,nsamp,nseg).
                    A dict for each dimension
            seg:    The segment to calculate the cost and gradient for
            doGrad: boolean to select whether or not to evaluate the gradient
            doCurv: boolean to select whether or not to evaluate the curvature

        Uses:
            self.
            moving: Flag to indicate if the obstacle is moving
            x1: position of the centre of one end of the cylinder obstacle (np.array, 3 by nsamp)
            x2: position of the centre of the other end of the cylinder obstacle (np.array, 3 by nsamp)
            der: Derivative to operate on
            in_out_scale: +1 or -1 to indicate if the obstacle is a keep out (-1) or keep in (+1) obstacle
            A: Shape matrix for the ellipsoid end caps on the cylinders
            r: the radius of the cylinder
            a: the vector between end caps
            c: the norm squared of the vector a

        Outputs:
            cost: the cost for the constraint (single number)
            grad: the cost gradient for the constraint. A dict for each dimension,
                            in each an np.array of length N (number of coefficients for each dimension)
            curv: the cost curvature for the constraint. A dict for each dimension,
                            in each an np.array of length N (number of coefficients for each dimension)
            max_ID: Index of the maximum violation

        """

        # Number of samples in a segment
        nder = state['x'].shape[0]
        nsamp = state['x'].shape[1]

        # Initialise grad: df/dx at the maximum, so for each dimension is an array of the number of derivatives (x includes the derivatives)
        # grad = dict(x=np.zeros((nder,1)),y=np.zeros((nder,1)),z=np.zeros((nder,1)),yaw=np.zeros((nder,1)))
        # curv = dict(x=np.zeros((nder,1)),y=np.zeros((nder,1)),z=np.zeros((nder,1)),yaw=np.zeros((nder,1)))

        grad = dict(x=np.zeros((nder, nsamp)),
                    y=np.zeros((nder, nsamp)),
                    z=np.zeros((nder, nsamp)),
                    yaw=np.zeros((nder, nsamp)))
        curv = dict(x=np.zeros((nder, 3, nsamp)),
                    y=np.zeros((nder, 3, nsamp)),
                    z=np.zeros((nder, 3, nsamp)),
                    yaw=np.zeros((nder, 3, nsamp)))

        # Extend vectors for number of states
        x1 = np.repeat(np.reshape(self.x1, (3, 1)), nsamp, axis=1)
        x2 = np.repeat(np.reshape(self.x2, (3, 1)), nsamp, axis=1)
        a = np.repeat(np.reshape(self.a, (3, 1)), nsamp, axis=1)

        # Vectors
        # Vector between end points
        x1_x = np.matrix(np.zeros((3, nsamp)))
        x1_x[0, :] = state['x'][self.der, :, seg] - x1[0, :]
        x1_x[1, :] = state['y'][self.der, :, seg] - x1[1, :]
        x1_x[2, :] = state['z'][self.der, :, seg] - x1[2, :]
        # Vector between end points
        x2_x = np.matrix(np.zeros((3, nsamp)))
        x2_x[0, :] = state['x'][self.der, :, seg] - x2[0, :]
        x2_x[1, :] = state['y'][self.der, :, seg] - x2[1, :]
        x2_x[2, :] = state['z'][self.der, :, seg] - x2[2, :]

        # Determine which cost to apply for each part of the path
        dot_bot = self.long_dot(a, x1_x)
        dot_top = self.long_dot(
            -a, x2_x
        )  # negative to reverse the direction of a so it comes from the same point as x2_x

        x_endcap1 = x1_x[:, dot_bot < 0]
        x_endcap2 = x2_x[:, dot_top < 0]
        x_cylinder = x1_x[:, (dot_bot >= 0) * (dot_top >= 0)]

        ### COSTS ###
        # Ellipsoid endcap costs
        cost_tmp_bot = np.zeros(np.shape(x_endcap1)[1])
        cost_tmp_top = np.zeros(np.shape(x_endcap2)[1])
        for i in range(np.shape(x_endcap1)[1]):
            cost_tmp_bot[i] = self.in_out_scale * (
                x_endcap1[:, i].T * self.A * x_endcap1[:, i] - 1)[0, 0]
        for i in range(np.shape(x_endcap2)[1]):
            cost_tmp_top[i] = self.in_out_scale * (
                x_endcap2[:, i].T * self.A * x_endcap2[:, i] - 1)[0, 0]

        # Cylinder
        a2 = np.repeat(np.reshape(self.a, (3, 1)),
                       np.shape(x_cylinder)[1],
                       axis=1)  #a*ones(1,length(x(1,:))); # Repeat in matrix
        b = self.long_cross(
            a2, x_cylinder
        )  #This gives |a||x1_x|sin(theta), for each set of points

        #Distance to the line squared is |b|^2 / |a|^2, which leaves
        # |x2_x|^2*sin^2(theta) = d^2
        # Cost function is then d^2 - radius^2 (positive if outside)
        cost_tmp_mid = self.in_out_scale * (self.long_dot(b, b) / self.c -
                                            self.r**2 * np.ones(
                                                (1, np.shape(b)[1])))
        # cost_tmp_mid = self.in_out_scale*(self.long_dot(b,b)/self.c/self.r**2  - np.ones((1,np.shape(b)[1])))
        # cost_tmp_mid = in_out_scale.*(dot(b,b)./constraint.c./constraint.r^2 - ones(1,length(b(1,:))));

        # Combine costs
        # Initialise
        cost_tmp = np.zeros(np.shape(x1_x)[1])

        # Add ellipsoid endcap costs
        cost_tmp[dot_bot < 0] = cost_tmp_bot
        cost_tmp[dot_top < 0] = cost_tmp_top

        # Add cylinder cost
        cost_tmp[(dot_bot >= 0) * (dot_top >= 0)] = cost_tmp_mid

        # Get out the max cost or summed cost
        if self.sum_func:
            # Sum all costs
            if self.keep_out is True:
                cost_tmp[cost_tmp < 0.0] = 0.0
                max_ID = np.where(cost_tmp > 0.0)[0]
            else:
                max_ID = np.where(cost_tmp > -np.inf)[0]
            max_cost = np.sum(cost_tmp)

        else:
            # Maximum violation
            max_cost = np.amax(cost_tmp)
            if np.sum(np.isclose(cost_tmp, max_cost)) < 1:
                print("ERROR - can't match max in cost_tmp")
            max_ID = np.atleast_1d(
                np.where(np.isclose(cost_tmp, max_cost))[0][0])

        # if np.size(max_ID) > 1:
        #     max_ID = max_ID[0]
        if max_cost <= 0:
            # Constraint not active - no need to compute gradient.
            # Set max ID to negative as a flag
            max_ID = -1
            max_cost = 0.0
            return max_cost, grad, curv, np.atleast_1d(max_ID)

        # Compute the gradient and curvature
        if doGrad:
            a = self.a
            for ID in max_ID:
                if np.dot(self.a, x1_x[:, ID]) < 0:  # bottom ellipsoid
                    grad_tmp = (self.in_out_scale * 2 * self.A * x1_x[:, ID]).T

                    if doCurv:
                        # crv_tmp = np.diagonal(self.in_out_scale*2*self.A)
                        crv_tmp = self.in_out_scale * 2 * self.A
                elif np.dot(-self.a, x2_x[:, ID]) < 0:  # top ellipsoid
                    grad_tmp = (self.in_out_scale * 2 * self.A * x2_x[:, ID]).T

                    if doCurv:
                        # crv_tmp = np.diagonal(self.in_out_scale*2*self.A)
                        crv_tmp = self.in_out_scale * 2 * self.A
                else:  # Cylinder
                    b = np.cross(a, x1_x[:, ID].T)
                    grad_tmp = self.in_out_scale * 2 * np.cross(b, a) / self.c
                    if doCurv:
                        # crv_tmp = self.in_out_scale*2/self.c*np.array([a[1]**2+a[2]**2,a[0]**2+a[2]**2,a[0]**2+a[1]**2])
                        crv_tmp = self.in_out_scale * 2 / self.c * np.array(
                            [[a[1]**2 + a[2]**2, -a[0] * a[1], -a[0] * a[2]],
                             [-a[0] * a[1], a[0]**2 + a[2]**2, -a[1] * a[2]],
                             [-a[0] * a[2], -a[1] * a[2], a[0]**2 + a[1]**2]])

                grad['x'][self.der, ID] = grad_tmp[0, 0]
                grad['y'][self.der, ID] = grad_tmp[0, 1]
                grad['z'][self.der, ID] = grad_tmp[0, 2]

                if doCurv:
                    curv['x'][self.der, :, ID] = crv_tmp[0, :]
                    curv['y'][self.der, :, ID] = crv_tmp[1, :]
                    curv['z'][self.der, :, ID] = crv_tmp[2, :]

        return max_cost, grad, curv, np.atleast_1d(max_ID)
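The cylinder-body part of the cost can be illustrated in isolation: the squared distance from a point to the cylinder axis is |a x (p - x1)|^2 / |a|^2, and the unscaled cost is that value minus r^2 (a standalone sketch with invented numbers, not the class method itself):

import numpy as np

x1 = np.array([0.0, 0.0, 0.0])      # one end of the cylinder axis
x2 = np.array([0.0, 0.0, 4.0])      # other end of the cylinder axis
r = 1.0                             # cylinder radius
a = x2 - x1
c = np.dot(a, a)                    # |a|^2

p = np.array([2.0, 0.0, 2.0])       # sample point beside the cylinder
b = np.cross(a, p - x1)
dist_sq = np.dot(b, b) / c          # squared distance to the axis: 4.0
cost = dist_sq - r**2               # positive outside the cylinder; the class scales this by in_out_scale
print(cost)                         # 3.0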
Example #32
 def phi_z(self, value):
     if self.__phi_z.size != value.size or not np.all(
             np.isclose(self.__phi_z, value)):
         self.__phi_z = value
         self.phi_s = value.copy()
         self.reset()
Example #33
 def theta_z(self, value):
     if self.__theta_z.size != value.size or not np.all(
             np.isclose(self.__theta_z, value)):
         self.__theta_z = value
         self.theta_s = value.copy()
         self.reset()
def ppt_distinguishability(states: List[np.ndarray],
                           probs: List[float] = None) -> float:
    r"""
    Compute probability of distinguishing a state via PPT measurements.

    Implements the semidefinite program (SDP) whose optimal value is equal to
    the maximum probability of perfectly distinguishing orthogonal maximally
    entangled states using any PPT measurement; a measurement whose operators
    are positive under partial transpose. This SDP was explicitly provided in
    [1].

    Specifically, the function implements the dual problem (as this is
    computationally more efficient) and is defined as:

    .. math::

        \begin{equation}
            \begin{aligned}
                \text{minimize:} \quad & \frac{1}{k} \text{Tr}(Y) \\
                \text{subject to:} \quad & Y \geq \text{T}_{\mathcal{A}}
                                          (\rho_j), \quad j = 1, \ldots, k, \\
                                         & Y \in \text{Herm}(\mathcal{A} \otimes
                                          \mathcal{B}).
            \end{aligned}
        \end{equation}

    References:
        [1] Cosentino, Alessandro.
        "Positive-partial-transpose-indistinguishable states via semidefinite
        programming."
        Physical Review A 87.1 (2013): 012321.
        https://arxiv.org/abs/1205.1031

    :param states: A list of density operators (matrices) corresponding to
                   quantum states.
    :param probs: A list of probabilities where `probs[i]` corresponds to the
                  probability that `states[i]` is selected by Alice.
    :return: The optimal probability with which the states can be distinguished
             via PPT measurements.
    """
    # Assume that at least one state is provided.
    if states is None or states == []:
        raise ValueError("InvalidStates: There must be at least one state "
                         "provided.")

    # Assume uniform probability if no specific distribution is given.
    if probs is None:
        probs = [1 / len(states)] * len(states)
    if not np.isclose(sum(probs), 1):
        raise ValueError("Invalid: Probabilities must sum to 1.")

    dim_x, dim_y = states[0].shape

    # The variable `states` is provided as a list of vectors. Transform them
    # into density matrices.
    if dim_y == 1:
        for i, state_ket in enumerate(states):
            states[i] = state_ket * state_ket.conj().T

    constraints = []
    y_var = cvxpy.Variable((dim_x, dim_x), hermitian=True)
    objective = 1 / len(states) * cvxpy.Minimize(cvxpy.trace(
        cvxpy.real(y_var)))

    dim = int(np.log2(dim_x))
    dim_list = [2] * int(np.log2(dim_x))
    sys_list = list(range(1, dim, 2))

    for i, _ in enumerate(states):
        constraints.append(
            cvxpy.real(y_var) >> partial_transpose(
                states[i], sys=sys_list, dim=dim_list))

    problem = cvxpy.Problem(objective, constraints)
    sol_default = problem.solve()

    return sol_default
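A usage sketch (assuming, as in the original module, that `numpy as np`, `cvxpy`, and the `partial_transpose` helper are available and an SDP solver is installed). Two orthogonal maximally entangled states can be distinguished perfectly even by LOCC, so the returned value should be close to 1:

import numpy as np

# two Bell states given as column vectors (kets)
bell_0 = np.array([[1], [0], [0], [1]]) / np.sqrt(2)
bell_1 = np.array([[1], [0], [0], [-1]]) / np.sqrt(2)

opt = ppt_distinguishability([bell_0, bell_1])
print(opt)   # expected to be ~1.0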
# SOLUTION #
yulp = BlockFunction(W)
(y, u, l, p) = block_split(yulp)

# FUNCTIONAL #
J = 0.5*inner(y - y_d, y - y_d)*dx + 0.5*alpha*inner(u, u)*ds(2)

# UNCONTROLLED FUNCTIONAL VALUE #
A_state = assemble(a[3][0])
F_state = assemble(f[3])
bc_state = [DirichletBC(W.sub(0), Constant(0.), boundaries, idx) for idx in (2, 4)]
[bc_state_.apply(A_state) for bc_state_ in bc_state]
[bc_state_.apply(F_state) for bc_state_ in bc_state]
solve(A_state, y.vector(), F_state)
print("Uncontrolled J =", assemble(J))
assert isclose(assemble(J), 0.5038976)
plt.figure()
plot(y, title="uncontrolled state")
plt.show()

# OPTIMAL CONTROL #
A = block_assemble(a, keep_diagonal=True)
F = block_assemble(f)
bc.apply(A)
bc.apply(F)
block_solve(A, yulp.block_vector(), F)
print("Optimal J =", assemble(J))
assert isclose(assemble(J), 0.1281223)
plt.figure()
plot(y, title="state")
plt.figure()
 def boundary_left_full(x, on_boundary):
     return on_boundary and not (np.isclose(x[0], v_ru[0])
             or (np.isclose(x[1], v_ru[1]) and x[0]>-0.5)
             or (np.isclose(x[1], v_ld[1]) and x[0]>-0.5))
 def boundary_left_free(x, on_boundary):
     return on_boundary and (np.isclose(x[0], v_ld[0])
             or (np.isclose(x[1], v_ru[1]) and x[0]<-0.5)
             or (np.isclose(x[1], v_ld[1]) and x[0]<-0.5))
Example #38
def test_full_model():

    shape = (50, 50, 50)
    spacing = [10. for _ in shape]
    nbl = 10

    # Create two-layer model from preset
    model = demo_model(preset='layers-isotropic', vp_top=1., vp_bottom=2.,
                       spacing=spacing, shape=shape, nbl=nbl)

    # Test Model pickling
    pkl_model = pickle.dumps(model)
    new_model = pickle.loads(pkl_model)
    assert np.isclose(np.linalg.norm(model.vp.data[:]-new_model.vp.data[:]), 0)

    f0 = .010
    dt = model.critical_dt
    t0 = 0.0
    tn = 350.0
    time_range = TimeAxis(start=t0, stop=tn, step=dt)

    # Test TimeAxis pickling
    pkl_time_range = pickle.dumps(time_range)
    new_time_range = pickle.loads(pkl_time_range)
    assert np.isclose(np.linalg.norm(time_range.time_values),
                      np.linalg.norm(new_time_range.time_values))

    # Test Class Constant pickling
    pkl_origin = pickle.dumps(model.grid.origin_symbols)
    new_origin = pickle.loads(pkl_origin)

    for a, b in zip(model.grid.origin_symbols, new_origin):
        assert a.compare(b) == 0

    # Test Class TimeDimension pickling
    time_dim = TimeDimension(name='time', spacing=Constant(name='dt', dtype=np.float32))
    pkl_time_dim = pickle.dumps(time_dim)
    new_time_dim = pickle.loads(pkl_time_dim)
    assert time_dim.spacing._value == new_time_dim.spacing._value

    # Test Class SteppingDimension
    stepping_dim = SteppingDimension(name='t', parent=time_dim)
    pkl_stepping_dim = pickle.dumps(stepping_dim)
    new_stepping_dim = pickle.loads(pkl_stepping_dim)
    assert stepping_dim.is_Time == new_stepping_dim.is_Time

    # Test Grid pickling
    pkl_grid = pickle.dumps(model.grid)
    new_grid = pickle.loads(pkl_grid)
    assert model.grid.shape == new_grid.shape

    assert model.grid.extent == new_grid.extent
    assert model.grid.shape == new_grid.shape
    for a, b in zip(model.grid.dimensions, new_grid.dimensions):
        assert a.compare(b) == 0

    ricker = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range)

    pkl_ricker = pickle.dumps(ricker)
    new_ricker = pickle.loads(pkl_ricker)
    assert np.isclose(np.linalg.norm(ricker.data), np.linalg.norm(new_ricker.data))
Example #39
    def test_convergence_mvem_2d_ani_simplex(self):

        rhs_ex = lambda pt: 14
        p_ex = (
            lambda pt: 2 * np.power(pt[0, :], 2)
            - 6 * np.power(pt[1, :], 2)
            + np.multiply(pt[0, :], pt[1, :])
        )
        u_ex_0 = lambda pt: -9 * pt[0, :] + 10 * pt[1, :] + 4
        u_ex_1 = lambda pt: -6 * pt[0, :] + 23 * pt[1, :] + 5

        p_errs_known = np.array(
            [
                0.2411784823808065,
                0.13572349427526526,
                0.08688469978140642,
                0.060345813825004285,
                0.044340156291519606,
            ]
        )
        u_errs_known = np.array(
            [
                1.7264059760345327,
                1.3416423116340397,
                1.0925566034251672,
                0.9198698104736416,
                0.7936243780450764,
            ]
        )

        for i, p_err_known, u_err_known in zip(
            np.arange(5), p_errs_known, u_errs_known
        ):
            g = pp.StructuredTriangleGrid([3 + i] * 2, [1, 1])
            g.compute_geometry()

            kxx = 2 * np.ones(g.num_cells)
            kxy = np.ones(g.num_cells)
            perm = pp.SecondOrderTensor(kxx=kxx, kyy=kxx, kxy=kxy, kzz=1)
            bf = g.get_boundary_faces()
            bc = pp.BoundaryCondition(g, bf, bf.size * ["dir"])
            bc_val = np.zeros(g.num_faces)
            bc_val[bf] = p_ex(g.face_centers[:, bf])
            # Minus sign to move to rhs
            source = np.multiply(g.cell_volumes, rhs_ex(g.cell_centers))
            vect = np.vstack(
                (g.cell_volumes, 2 * g.cell_volumes, np.zeros(g.num_cells))
            ).ravel(order="F")

            solver = pp.MVEM(keyword="flow")
            solver_rhs = pp.DualScalarSource(keyword="flow")

            specified_parameters = {
                "bc": bc,
                "bc_values": bc_val,
                "second_order_tensor": perm,
                "source": source,
                "vector_source": vect,
            }
            data = pp.initialize_default_data(g, {}, "flow", specified_parameters)

            solver.discretize(g, data)
            solver_rhs.discretize(g, data)
            M, rhs_bc = solver.assemble_matrix_rhs(g, data)
            _, rhs = solver_rhs.assemble_matrix_rhs(g, data)

            up = sps.linalg.spsolve(M, rhs_bc + rhs)
            p = solver.extract_pressure(g, up, data)
            err = np.sqrt(
                np.sum(
                    np.multiply(g.cell_volumes, np.power(p - p_ex(g.cell_centers), 2))
                )
            )
            self.assertTrue(np.isclose(err, p_err_known))

            P = data[pp.DISCRETIZATION_MATRICES]["flow"][solver.vector_proj_key]
            u = solver.extract_flux(g, up, data)
            P0u = solver.project_flux(g, u, data)
            uu_ex_0 = u_ex_0(g.cell_centers)
            uu_ex_1 = u_ex_1(g.cell_centers)
            uu_ex_2 = np.zeros(g.num_cells)
            uu_ex = np.vstack((uu_ex_0, uu_ex_1, uu_ex_2))
            err = np.sqrt(
                np.sum(
                    np.multiply(
                        g.cell_volumes, np.sum(np.power(P0u - uu_ex, 2), axis=0)
                    )
                )
            )
            self.assertTrue(np.isclose(err, u_err_known))
Example #40
File: constraint.py  Project: wuyou33/torq
    def cost_grad_curv(self, state, seg=0, doGrad=True, doCurv=False):
        """
        Computes the cost, cost gradient and cost curvature of the ellipsoid constraint, returned as a dictionary per dimension.

        Cost gradient and curvature are returned for use in optimisation steps (e.g. linesearch)

        Args:
            state: The trajectory to evaluate for obstacle costs. np.array with dimensions (nder,nsamp,nseg).
                    A dict for each dimension
            seg:    The segment to calculate the cost and gradient for
            doGrad: boolean to select whether or not to evaluate the gradient
            doCurv: boolean to select whether or not to evaluate the curvature

        Uses:
            self.
            moving: Flag to indicate if the obstacle is moving
            x0: position of the centre of the ellipsoid obstacle (np.array, 3 by nsamp)
            der: Derivative to operate on
            in_out_scale: +1 or -1 to indicate if the obstacle is a keep out (-1) or keep in (+1) obstacle
            A: Shape matrix for the ellipsoid

        Outputs:
            cost: the cost for the constraint (single number)
            grad: the cost gradient for the constraint. A dict for each dimension,
                            in each an np.array of length N (number of coefficients for each dimension)
            curv: the cost curvature for the constraint. A dict for each dimension,
                            in each an np.array of length N (number of coefficients for each dimension)
            max_ID: Index of the maximum violation

        """

        # Number of samples in a segment
        nder = state['x'].shape[0]
        nsamp = state['x'].shape[1]

        # Initialise grad: df/dx at the maximum, so for each dimension is an array of the number of derivatives (x includes the derivatives)
        # grad = dict(x=np.zeros((nder,1)),y=np.zeros((nder,1)),z=np.zeros((nder,1)),yaw=np.zeros((nder,1)))
        # curv = dict(x=np.zeros((nder,1)),y=np.zeros((nder,1)),z=np.zeros((nder,1)),yaw=np.zeros((nder,1)))

        grad = dict(x=np.zeros((nder, nsamp)),
                    y=np.zeros((nder, nsamp)),
                    z=np.zeros((nder, nsamp)),
                    yaw=np.zeros((nder, nsamp)))
        curv = dict(x=np.zeros((nder, 3, nsamp)),
                    y=np.zeros((nder, 3, nsamp)),
                    z=np.zeros((nder, 3, nsamp)),
                    yaw=np.zeros((nder, 3, nsamp)))

        if self.moving:
            if self.x0.shape[1] == nsamp:
                x0 = self.x0
            else:
                print(
                    "Error: need centre to be defined as a trajectory the same size as state"
                )
                return
        else:
            # Copy centre for the computations
            x0 = np.repeat(np.reshape(self.x0, (3, 1)), nsamp, axis=1)

        # Shift the state relative to the ellipsoid centre
        x = np.matrix(np.zeros((3, nsamp)))
        x[0, :] = state['x'][self.der, :, seg] - x0[0, :]
        x[1, :] = state['y'][self.der, :, seg] - x0[1, :]
        x[2, :] = state['z'][self.der, :, seg] - x0[2, :]

        # Calculate cost
        cost_tmp = np.zeros(nsamp)
        for i in range(nsamp):
            cost_tmp[i] = self.in_out_scale * (x[:, i].T * self.A * x[:, i] -
                                               1)[0, 0]

        if self.sum_func:
            # Sum all costs
            if self.keep_out is True:
                cost_tmp[cost_tmp < 0.0] = 0.0
                max_ID = np.where(cost_tmp > 0.0)[0]
            else:
                max_ID = np.where(cost_tmp > -np.inf)[0]

            max_cost = np.sum(cost_tmp)

        else:
            # Maximum violation
            max_cost = np.amax(cost_tmp)
            if np.sum(np.isclose(cost_tmp, max_cost)) < 1:
                print("ERROR - can't match max in cost_tmp")
            max_ID = np.where(np.isclose(cost_tmp, max_cost))[0][0]

        # if np.size(max_ID) > 1:
        #     max_ID = max_ID[0]
        if max_cost <= 0:
            # Constraint not active - no need to compute gradient.
            # Set max ID to negative as a flag
            max_ID = -1
            max_cost = 0.0
            return max_cost, grad, curv, np.atleast_1d(max_ID)

        # if not self.keep_out:
        #     print("max cost from accel constraint is >0: {}".format(max_cost))#,np.linalg.norm(x[:,max_ID])))

        # Compute the gradient
        if doGrad:
            # grad_tmp = self.in_out_scale*2*self.A*x[:,max_ID]
            grad_tmp = self.in_out_scale * 2 * self.A * x[:, max_ID]
            grad['x'][self.der, max_ID] = grad_tmp[0, :]
            grad['y'][self.der, max_ID] = grad_tmp[1, :]
            grad['z'][self.der, max_ID] = grad_tmp[2, :]

        if doCurv:
            # crv_tmp = np.diagonal(self.in_out_scale*2*self.A)
            crv_tmp = self.in_out_scale * 2 * self.A
            # crv_tmp = np.diagonal(self.in_out_scale*2*self.A)
            curv['x'][self.der, :, :] = np.dot(crv_tmp[0, :].reshape((3, 1)),
                                               np.ones((1, nsamp)))
            curv['y'][self.der, :, :] = np.dot(crv_tmp[1, :].reshape((3, 1)),
                                               np.ones((1, nsamp)))
            curv['z'][self.der, :, :] = np.dot(crv_tmp[2, :].reshape((3, 1)),
                                               np.ones((1, nsamp)))
            # np.repeat(crv_tmp[0,:].reshape((3,1)),np.size(max_ID),axis=1)#np.dot(crv_tmp[0,:].reshape((3,1)),np.ones((1,np.size(max_ID))))

            # curv['x'][self.der,:] = crv_tmp[0]
            # curv['y'][self.der,:] = crv_tmp[1]
            # curv['z'][self.der,:] = crv_tmp[2]

        return max_cost, grad, curv, np.atleast_1d(max_ID)
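For orientation, the quadratic form evaluated above (cost = in_out_scale * (x^T A x - 1), gradient 2 * in_out_scale * A x, constant curvature 2 * in_out_scale * A) can be reproduced in a few lines of standalone NumPy. The shape matrix, centre and sample points below are made-up illustration values, not taken from the torq project.

import numpy as np

# Standalone sketch of the ellipsoid-constraint math used in cost_grad_curv above.
# A, x0, in_out_scale and the sample points are made-up illustration values.
A = np.diag([1.0, 4.0, 2.25])            # shape matrix of the ellipsoid
x0 = np.array([1.0, 0.0, -0.5])          # centre of the ellipsoid
in_out_scale = -1                        # -1: keep-out obstacle, +1: keep-in region

samples = np.array([[1.2, 0.1, -0.4],    # hypothetical trajectory samples (nsamp x 3)
                    [3.0, 2.0, 1.0]])

x = samples - x0                                            # offsets from the centre
cost = in_out_scale * (np.einsum('ij,jk,ik->i', x, A, x) - 1.0)
grad = in_out_scale * 2.0 * (x @ A)                         # d(cost)/dx per sample
curv = in_out_scale * 2.0 * A                               # constant Hessian
print(cost, grad, curv, sep='\n')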
Example #41
0
File: srf.py Project: x-malet/GSTools
    def __call__(
        self, pos, seed=np.nan, point_volumes=0.0, mesh_type="unstructured"
    ):
        """Generate the spatial random field.

        The field is saved as `self.field` and is also returned.

        Parameters
        ----------
        pos : :class:`list`
            the position tuple, containing main direction and transversal
            directions
        seed : :class:`int`, optional
            seed for RNG for resetting. Default: keep the seed from the generator
        point_volumes : :class:`float` or :class:`numpy.ndarray`
            If your evaluation points for the field are coming from a mesh,
            they are probably representing a certain element volume.
            This volume can be passed by `point_volumes` to apply the
            given variance upscaling. If `point_volumes` is ``0`` nothing
            is changed. Default: ``0``
        mesh_type : :class:`str`
            'structured' / 'unstructured'

        Returns
        -------
        field : :class:`numpy.ndarray`
            the SRF
        """
        # internal conversion
        x, y, z = pos2xyz(pos, max_dim=self.model.dim)
        self.pos = xyz2pos(x, y, z)
        self.mesh_type = mesh_type
        # update the model/seed in the generator if any changes were made
        self.generator.update(self.model, seed)
        # format the positional arguments of the mesh
        check_mesh(self.model.dim, x, y, z, mesh_type)
        mesh_type_changed = False
        if self.model.do_rotation:
            if mesh_type == "structured":
                mesh_type_changed = True
                mesh_type_old = mesh_type
                mesh_type = "unstructured"
                x, y, z, axis_lens = reshape_axis_from_struct_to_unstruct(
                    self.model.dim, x, y, z
                )
            x, y, z = unrotate_mesh(self.model.dim, self.model.angles, x, y, z)
        y, z = make_isotropic(self.model.dim, self.model.anis, y, z)

        # generate the field
        self.raw_field = self.generator.__call__(x, y, z, mesh_type)

        # reshape field if we got an unstructured mesh
        if mesh_type_changed:
            mesh_type = mesh_type_old
            self.raw_field = reshape_field_from_unstruct_to_struct(
                self.model.dim, self.raw_field, axis_lens
            )

        # apply given conditions to the field
        if self.condition:
            (
                cond_field,
                krige_field,
                err_field,
                krigevar,
                info,
            ) = self.cond_func(self)
            # store everything in the class
            self.field = cond_field
            self.krige_field = krige_field
            self.err_field = err_field
            self.krige_var = krigevar
            if "mean" in info:  # ordinary krging estimates mean
                self.mean = info["mean"]
        else:
            self.field = self.raw_field + self.mean

        # upscaled variance
        if not np.isscalar(point_volumes) or not np.isclose(point_volumes, 0):
            scaled_var = self.upscaling_func(self.model, point_volumes)
            self.field -= self.mean
            self.field *= np.sqrt(scaled_var / self.model.sill)
            self.field += self.mean

        return self.field
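A typical call of this method in GSTools looks roughly like the sketch below; the covariance model, grid and seed are arbitrary placeholder choices, not taken from the project.

import numpy as np
import gstools as gs

# Rough usage sketch for SRF.__call__ above; model, grid and seed are arbitrary.
x = y = np.linspace(0.0, 10.0, 50)
model = gs.Gaussian(dim=2, var=1.0, len_scale=2.0)
srf = gs.SRF(model, mean=0.0)

# pos is the tuple of axes for a structured mesh; the seed makes the field reproducible
field = srf((x, y), seed=19770928, mesh_type="structured")
print(field.shape)  # (50, 50)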
Example #42
0
def test_get_global_observation():
    number_of_agents = 20

    stochastic_data = {
        'prop_malfunction': 1.,  # Percentage of defective agents
        'malfunction_rate': 30,  # Rate of malfunction occurrence
        'min_duration': 3,  # Minimal duration of malfunction
        'max_duration': 20  # Max duration of malfunction
    }

    speed_ration_map = {
        1.: 0.25,  # Fast passenger train
        1. / 2.: 0.25,  # Fast freight train
        1. / 3.: 0.25,  # Slow commuter train
        1. / 4.: 0.25
    }  # Slow freight train

    env = RailEnv(
        width=50,
        height=50,
        rail_generator=sparse_rail_generator(max_num_cities=6,
                                             max_rails_between_cities=4,
                                             seed=15,
                                             grid_mode=False),
        schedule_generator=sparse_schedule_generator(speed_ration_map),
        number_of_agents=number_of_agents,
        obs_builder_object=GlobalObsForRailEnv())
    env.reset()

    obs, all_rewards, done, _ = env.step(
        {i: RailEnvActions.MOVE_FORWARD
         for i in range(number_of_agents)})
    for i in range(len(env.agents)):
        agent: EnvAgent = env.agents[i]
        print("[{}] status={}, position={}, target={}, initial_position={}".
              format(i, agent.status, agent.position, agent.target,
                     agent.initial_position))

    for i, agent in enumerate(env.agents):
        obs_agents_state = obs[i][1]
        obs_targets = obs[i][2]

        # test first channel of obs_targets: own target
        nr_agents = np.count_nonzero(obs_targets[:, :, 0])
        assert nr_agents == 1, "agent {}: something wrong with own target, found {}".format(
            i, nr_agents)

        # test second channel of obs_targets: other agent's target
        for r in range(env.height):
            for c in range(env.width):
                _other_agent_target = 0
                for other_i, other_agent in enumerate(env.agents):
                    if other_agent.target == (r, c):
                        _other_agent_target = 1
                        break
                assert obs_targets[(
                    r, c
                )][1] == _other_agent_target, "agent {}: at {} expected to be other agent's target = {}".format(
                    i, (r, c), _other_agent_target)

        # test first channel of obs_agents_state: direction at own position
        for r in range(env.height):
            for c in range(env.width):
                if (agent.status == RailAgentStatus.ACTIVE or agent.status
                        == RailAgentStatus.DONE) and (r, c) == agent.position:
                    assert np.isclose(obs_agents_state[(r, c)][0], agent.direction), \
                        "agent {} in status {} at {} expected to contain own direction {}, found {}" \
                            .format(i, agent.status, (r, c), agent.direction, obs_agents_state[(r, c)][0])
                elif (agent.status == RailAgentStatus.READY_TO_DEPART) and (
                        r, c) == agent.initial_position:
                    assert np.isclose(obs_agents_state[(r, c)][0], agent.direction), \
                        "agent {} in status {} at {} expected to contain own direction {}, found {}" \
                            .format(i, agent.status, (r, c), agent.direction, obs_agents_state[(r, c)][0])
                else:
                    assert np.isclose(obs_agents_state[(r, c)][0], -1), \
                        "agent {} in status {} at {} expected contain -1 found {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][0])

        # test second channel of obs_agents_state: direction at other agents position
        for r in range(env.height):
            for c in range(env.width):
                has_agent = False
                for other_i, other_agent in enumerate(env.agents):
                    if i == other_i:
                        continue
                    if other_agent.status in [
                            RailAgentStatus.ACTIVE, RailAgentStatus.DONE
                    ] and (r, c) == other_agent.position:
                        assert np.isclose(obs_agents_state[(r, c)][1], other_agent.direction), \
                            "agent {} in status {} at {} should see other agent with direction {}, found = {}" \
                                .format(i, agent.status, (r, c), other_agent.direction, obs_agents_state[(r, c)][1])
                    has_agent = True
                if not has_agent:
                    assert np.isclose(obs_agents_state[(r, c)][1], -1), \
                        "agent {} in status {} at {} should see no other agent direction (-1), found = {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][1])

        # test third and fourth channel of obs_agents_state: malfunction and speed of own or other agent in the grid
        for r in range(env.height):
            for c in range(env.width):
                has_agent = False
                for other_i, other_agent in enumerate(env.agents):
                    if other_agent.status in [
                            RailAgentStatus.ACTIVE, RailAgentStatus.DONE
                    ] and other_agent.position == (r, c):
                        assert np.isclose(obs_agents_state[(r, c)][2], other_agent.malfunction_data['malfunction']), \
                            "agent {} in status {} at {} should see agent malfunction {}, found = {}" \
                                .format(i, agent.status, (r, c), other_agent.malfunction_data['malfunction'],
                                        obs_agents_state[(r, c)][2])
                        assert np.isclose(obs_agents_state[(r, c)][3],
                                          other_agent.speed_data['speed'])
                        has_agent = True
                if not has_agent:
                    assert np.isclose(obs_agents_state[(r, c)][2], -1), \
                        "agent {} in status {} at {} should see no agent malfunction (-1), found = {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][2])
                    assert np.isclose(obs_agents_state[(r, c)][3], -1), \
                        "agent {} in status {} at {} should see no agent speed (-1), found = {}" \
                            .format(i, agent.status, (r, c), obs_agents_state[(r, c)][3])

        # test fifth channel of obs_agents_state: number of agents ready to depart into this cell
        for r in range(env.height):
            for c in range(env.width):
                count = 0
                for other_i, other_agent in enumerate(env.agents):
                    if other_agent.status == RailAgentStatus.READY_TO_DEPART and other_agent.initial_position == (
                            r, c):
                        count += 1
                assert np.isclose(obs_agents_state[(r, c)][4], count), \
                    "agent {} in status {} at {} should see {} agents ready to depart, found{}" \
                        .format(i, agent.status, (r, c), count, obs_agents_state[(r, c)][4])
Example #43
0
    train_label = np.load('./Index/TrainLabel{}.npy'.format(k + 1))

    test_label = np.load('./Index/TestLabel{}.npy'.format(k + 1))

    X_dgm0 = [d['h1'] for d in Data]

    X_dgm1 = [d['h1'] for d in Data]

    # Let's change the birth-death coordinates to birth-persistence
    for j in range(len(X_dgm1)):
        temp_dgm = X_dgm1[j]

        temp_dgm[:, 1] = temp_dgm[:, 1] - temp_dgm[:, 0]

        temp_dgm[np.isclose(temp_dgm, 0, rtol=1e-05, atol=1e-05)] = 1e-05

        temp_dgm = np.log(temp_dgm)

        X_dgm1[j] = temp_dgm

    X_train_0 = [X_dgm0[ind] for ind in train_index]
    X_test_0 = [X_dgm0[ind] for ind in test_index]

    X_train_1 = [X_dgm1[ind] for ind in train_index]
    X_test_1 = [X_dgm1[ind] for ind in test_index]

    F_train = train_label
    F_test = test_label

    d = 5
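The loop above maps each H1 diagram from (birth, death) to (birth, persistence), clips values near zero so the following log is finite, and then takes the log. A self-contained illustration of the same transform on a made-up diagram:

import numpy as np

# Toy (birth, death) diagram; the values are made up purely to illustrate the transform.
dgm = np.array([[0.10, 0.10],   # zero-persistence point -> clipped to 1e-05 below
                [0.20, 0.75],
                [0.05, 1.30]])

dgm[:, 1] = dgm[:, 1] - dgm[:, 0]                          # death -> persistence
dgm[np.isclose(dgm, 0, rtol=1e-05, atol=1e-05)] = 1e-05    # avoid log(0)
dgm = np.log(dgm)                                          # log(birth), log(persistence)
print(dgm)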
Example #44
0
    def test_convergence_mvem_2d_iso_simplex(self):

        a = 8 * np.pi ** 2
        rhs_ex = lambda pt: np.multiply(
            np.sin(2 * np.pi * pt[0, :]), np.sin(2 * np.pi * pt[1, :])
        )
        p_ex = lambda pt: rhs_ex(pt) / a
        u_ex_0 = (
            lambda pt: np.multiply(
                -np.cos(2 * np.pi * pt[0, :]), np.sin(2 * np.pi * pt[1, :])
            )
            * 2
            * np.pi
            / a
            + 1
        )
        u_ex_1 = (
            lambda pt: np.multiply(
                -np.sin(2 * np.pi * pt[0, :]), np.cos(2 * np.pi * pt[1, :])
            )
            * 2
            * np.pi
            / a
        )

        p_errs_known = np.array(
            [
                0.007347293666843033,
                0.004057878042430692,
                0.002576479539795832,
                0.0017817307824819935,
                0.0013057660031758425,
            ]
        )

        u_errs_known = np.array(
            [
                0.024425617686195774,
                0.016806807988931565,
                0.012859109258624922,
                0.010445238111710832,
                0.00881184436169123,
            ]
        )

        for i, p_err_known, u_err_known in zip(
            np.arange(5), p_errs_known, u_errs_known
        ):
            g = pp.StructuredTriangleGrid([3 + i] * 2, [1, 1])
            g.compute_geometry()

            kxx = np.ones(g.num_cells)
            perm = pp.SecondOrderTensor(kxx=kxx, kyy=kxx, kzz=1)
            bf = g.get_boundary_faces()
            bc = pp.BoundaryCondition(g, bf, bf.size * ["dir"])
            bc_val = np.zeros(g.num_faces)
            bc_val[bf] = p_ex(g.face_centers[:, bf])
            # Minus sign to move to rhs
            source = np.multiply(g.cell_volumes, rhs_ex(g.cell_centers))
            vect = np.vstack(
                (g.cell_volumes, np.zeros(g.num_cells), np.zeros(g.num_cells))
            ).ravel(order="F")

            solver = pp.MVEM(keyword="flow")
            solver_rhs = pp.DualScalarSource(keyword="flow")

            specified_parameters = {
                "bc": bc,
                "bc_values": bc_val,
                "second_order_tensor": perm,
                "source": source,
                "vector_source": vect,
            }
            data = pp.initialize_default_data(g, {}, "flow", specified_parameters)

            solver.discretize(g, data)
            solver_rhs.discretize(g, data)

            M, rhs_bc = solver.assemble_matrix_rhs(g, data)
            _, rhs = solver_rhs.assemble_matrix_rhs(g, data)

            up = sps.linalg.spsolve(M, rhs_bc + rhs)
            p = solver.extract_pressure(g, up, data)
            err = np.sqrt(
                np.sum(
                    np.multiply(g.cell_volumes, np.power(p - p_ex(g.cell_centers), 2))
                )
            )
            self.assertTrue(np.isclose(err, p_err_known))

            _ = data[pp.DISCRETIZATION_MATRICES]["flow"][solver.vector_proj_key]
            u = solver.extract_flux(g, up, data)
            P0u = solver.project_flux(g, u, data)
            uu_ex_0 = u_ex_0(g.cell_centers)
            uu_ex_1 = u_ex_1(g.cell_centers)
            uu_ex_2 = np.zeros(g.num_cells)
            uu_ex = np.vstack((uu_ex_0, uu_ex_1, uu_ex_2))
            err = np.sqrt(
                np.sum(
                    np.multiply(
                        g.cell_volumes, np.sum(np.power(P0u - uu_ex, 2), axis=0)
                    )
                )
            )
            self.assertTrue(np.isclose(err, u_err_known))
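As a side note, the manufactured solution used in this test can be checked symbolically: with a = 8π², the pressure p = sin(2πx)·sin(2πy)/a satisfies -Δp = rhs_ex. A small SymPy sketch (not part of the test suite) verifying this:

import sympy as sp

# Verify that the exact pressure used above satisfies -Laplacian(p) = rhs_ex.
x, y = sp.symbols("x y")
a = 8 * sp.pi ** 2
p = sp.sin(2 * sp.pi * x) * sp.sin(2 * sp.pi * y) / a
lap_p = sp.diff(p, x, 2) + sp.diff(p, y, 2)
rhs = sp.sin(2 * sp.pi * x) * sp.sin(2 * sp.pi * y)
assert sp.simplify(-lap_p - rhs) == 0

Example #45
0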
def run_maxwell_filter(subject, session=None):
    if config.proc and 'sss' in config.proc and config.use_maxwell_filter:
        raise ValueError(f'You cannot set use_maxwell_filter to True '
                         f'if the data have already been processed with a '
                         f'Maxwell filter. Got proc={config.proc}.')

    bids_path_in = BIDSPath(subject=subject,
                            session=session,
                            task=config.get_task(),
                            acquisition=config.acq,
                            processing=config.proc,
                            recording=config.rec,
                            space=config.space,
                            suffix=config.get_datatype(),
                            datatype=config.get_datatype(),
                            root=config.bids_root)
    bids_path_out = bids_path_in.copy().update(suffix='raw',
                                               root=config.deriv_root,
                                               check=False)

    if not bids_path_out.fpath.parent.exists():
        os.makedirs(bids_path_out.fpath.parent)

    # Load dev_head_t and digitization points from reference run.
    # Re-use in all runs and for processing empty-room recording.
    reference_run = config.get_mf_reference_run()
    # XXX Loading info would suffice!
    bids_path_in.update(run=reference_run)
    raw = load_data(bids_path_in)
    dev_head_t = raw.info['dev_head_t']
    dig = raw.info['dig']
    del reference_run, raw

    for run_idx, run in enumerate(config.get_runs()):
        bids_path_in.update(run=run)
        bids_path_out.update(run=run)
        raw = load_data(bids_path_in)

        # Fix stimulation artifact
        if config.fix_stim_artifact:
            events, _ = mne.events_from_annotations(raw)
            raw = mne.preprocessing.fix_stim_artifact(
                raw,
                events=events,
                event_id=None,
                tmin=config.stim_artifact_tmin,
                tmax=config.stim_artifact_tmax,
                mode='linear')

        # Auto-detect bad channels.
        if config.find_flat_channels_meg or config.find_noisy_channels_meg:
            find_bad_channels(raw=raw,
                              subject=subject,
                              session=session,
                              task=config.get_task(),
                              run=run)

        # Maxwell-filter experimental data.
        if config.use_maxwell_filter:
            msg = 'Applying Maxwell filter to experimental data.'
            logger.info(
                gen_log_message(message=msg,
                                step=1,
                                subject=subject,
                                session=session))

            # Warn if no bad channels are set before Maxwell filter
            if not raw.info['bads']:
                msg = '\nFound no bad channels. \n '
                logger.warning(
                    gen_log_message(message=msg,
                                    subject=subject,
                                    step=1,
                                    session=session))

            if config.mf_st_duration:
                msg = '    st_duration=%d' % (config.mf_st_duration)
                logger.info(
                    gen_log_message(message=msg,
                                    step=1,
                                    subject=subject,
                                    session=session))

            # Keyword arguments shared between Maxwell filtering of the
            # experimental and the empty-room data.
            common_mf_kws = dict(calibration=config.mf_cal_fname,
                                 cross_talk=config.mf_ctc_fname,
                                 st_duration=config.mf_st_duration,
                                 origin=config.mf_head_origin,
                                 coord_frame='head',
                                 destination=dev_head_t)

            raw_sss = mne.preprocessing.maxwell_filter(raw, **common_mf_kws)
            raw_out = raw_sss
            raw_fname_out = (bids_path_out.copy().update(processing='sss',
                                                         extension='.fif'))
        elif config.ch_types == ['eeg']:
            msg = 'Not applying Maxwell filter to EEG data.'
            logger.info(
                gen_log_message(message=msg,
                                step=1,
                                subject=subject,
                                session=session))
            raw_out = raw
            raw_fname_out = bids_path_out.copy().update(extension='.fif')
        else:
            msg = ('Not applying Maxwell filter.\nIf you wish to apply it, '
                   'set use_maxwell_filter=True in your configuration.')
            logger.info(
                gen_log_message(message=msg,
                                step=1,
                                subject=subject,
                                session=session))
            raw_out = raw
            raw_fname_out = bids_path_out.copy().update(extension='.fif')

        # Save only the channel types we wish to analyze (including the
        # channels marked as "bad").
        # We do not run `raw_out.pick()` here because it uses too much memory.
        chs_to_include = config.get_channels_to_analyze(raw_out.info)
        raw_out.save(raw_fname_out,
                     picks=chs_to_include,
                     overwrite=True,
                     split_naming='bids')
        del raw_out
        if config.interactive:
            # Load the data we have just written, because it contains only
            # the relevant channels.
            raw = mne.io.read_raw_fif(raw_fname_out, allow_maxshield=True)
            raw.plot(n_channels=50, butterfly=True)

        # Empty-room processing.
        #
        # We pick the empty-room recording closest in time to the first run
        # of the experimental session.
        if run_idx == 0 and config.process_er:
            msg = 'Processing empty-room recording …'
            logger.info(
                gen_log_message(step=1,
                                subject=subject,
                                session=session,
                                message=msg))

            bids_path_er_in = bids_path_in.find_empty_room()
            raw_er = load_data(bids_path_er_in)
            raw_er.info['bads'] = [
                ch for ch in raw.info['bads'] if ch.startswith('MEG')
            ]

            # Maxwell-filter empty-room data.
            if config.use_maxwell_filter:
                msg = 'Applying Maxwell filter to empty-room recording'
                logger.info(
                    gen_log_message(message=msg,
                                    step=1,
                                    subject=subject,
                                    session=session))

                # We want to ensure we use the same coordinate frame origin in
                # empty-room and experimental data processing. To do this, we
                # inject the sensor locations and the head <> device transform
                # into the empty-room recording's info, and leave all other
                # parameters the same as for the experimental data. This is not
                # very clean, as we normally should not alter info manually,
                # except for info['bads']. Will need improvement upstream in
                # MNE-Python.
                raw_er.info['dig'] = dig
                raw_er.info['dev_head_t'] = dev_head_t
                raw_er_sss = mne.preprocessing.maxwell_filter(
                    raw_er, **common_mf_kws)

                # Perform a sanity check: empty-room rank should match the
                # experimental data rank after Maxwell filtering.
                rank_exp = mne.compute_rank(raw, rank='info')['meg']
                rank_er = mne.compute_rank(raw_er, rank='info')['meg']
                if not np.isclose(rank_exp, rank_er):
                    msg = (f'Experimental data rank {rank_exp:.1f} does not '
                           f'match empty-room data rank {rank_er:.1f} after '
                           f'Maxwell filtering. This indicates that the data '
                           f'were processed differently.')
                    raise RuntimeError(msg)

                raw_er_out = raw_er_sss
                raw_er_fname_out = bids_path_out.copy().update(
                    processing='sss')
            else:
                raw_er_out = raw_er
                raw_er_fname_out = bids_path_out.copy()

            raw_er_fname_out = raw_er_fname_out.update(task='noise',
                                                       extension='.fif',
                                                       run=None)

            # Save only the channel types we wish to analyze
            # (same as for experimental data above).
            raw_er_out.save(raw_er_fname_out,
                            picks=chs_to_include,
                            overwrite=True,
                            split_naming='bids')
            del raw_er_out
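Example #46
0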
def test_weighted_median_meta_analysis():
    # np.random.seed(1337)

    num_invalid_ivs = 80
    num_valid_ivs = 100
    num_replicates = 30
    z_scores = []
    for i in range(num_replicates):
        true_beta = np.random.normal()
        good_exposure_betas = np.random.normal(0, 1, size=num_valid_ivs)
        good_exposure_se = abs(np.random.normal(0, 0.2, size=num_valid_ivs))
        good_outcome_betas = true_beta * good_exposure_betas + np.random.normal(
            0, 0.1, size=num_valid_ivs)
        good_outcome_se = abs(np.random.normal(0, 0.1, size=num_valid_ivs))

        bad_beta = true_beta
        # Ensure some difference between the good and bad betas
        while abs(bad_beta - true_beta) < 0.2:
            bad_beta = np.random.normal()

        bad_exposure_betas = np.random.normal(0, 1, size=num_invalid_ivs)
        bad_exposure_se = abs(np.random.normal(0, 0.2, size=num_invalid_ivs))
        bad_outcome_betas = bad_beta * bad_exposure_betas + np.random.normal(
            0, 0.1, size=num_invalid_ivs)
        bad_outcome_se = abs(np.random.normal(0, 0.1, size=num_invalid_ivs))

        exposure_betas = np.append(good_exposure_betas, bad_exposure_betas)
        exposure_ses = np.append(good_exposure_se, bad_exposure_se)
        exposure_mat = np.append([exposure_betas], [exposure_ses], axis=0).T

        outcome_betas = np.append(good_outcome_betas, bad_outcome_betas)
        outcome_ses = np.append(good_outcome_se, bad_outcome_se)
        outcome_mat = np.append([outcome_betas], [outcome_ses], axis=0).T

        this_result = causal_inference.MendelianRandomization()
        beta_wm, se_wm, p_wm = this_result.do_weighted_median_meta_analysis_on_estimate_vectors(
            exposure_mat, outcome_mat)
        print(beta_wm, se_wm, p_wm)

        rel_path = '/'.join(('test_resources', 'weighted_median_output'))

        if len(__file__.split("/")) > 1:
            out_loc = "{}/{}".format("/".join(__file__.split("/")[:-1]),
                                     rel_path)
        else:
            out_loc = rel_path

        script_loc = "{}/{}".format(
            "/".join(out_loc.split("/")[:-1]),
            'weighted_median_reference_implementation.R')

        with open(out_loc, 'w') as f:
            f.write("beta_exposure\tse_exposure\tbeta_outcome\tse_outcome\n")
            for i in range(outcome_mat.shape[0]):
                f.write(
                    f"{exposure_mat[i,0]}\t{exposure_mat[i,1]}\t{outcome_mat[i,0]}\t{outcome_mat[i,1]}\n"
                )

        proc = subprocess.run(["Rscript", script_loc, out_loc],
                              stdout=subprocess.PIPE,
                              bufsize=1,
                              universal_newlines=True)
        lines = proc.stdout.split("\n")
        ref_implementation_beta = float(lines[0])
        ref_implementation_se = float(lines[1])

        subprocess.run(['rm', out_loc], check=True)

        assert (np.isclose(beta_wm, ref_implementation_beta))
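The R reference implementation called above computes the standard weighted-median MR estimate (Bowden et al., 2016). A simplified, self-contained sketch of that core calculation (inverse-variance weights only, no bootstrap standard error, made-up summary statistics) is shown below.

import numpy as np

def weighted_median(ratio_estimates, weights):
    # Interpolated weighted median as used in weighted-median MR (Bowden et al., 2016).
    order = np.argsort(ratio_estimates)
    betas = ratio_estimates[order]
    w = weights[order] / np.sum(weights)
    cum = np.cumsum(w) - 0.5 * w               # standardised cumulative weights
    lo = np.max(np.where(cum < 0.5)[0])
    frac = (0.5 - cum[lo]) / (cum[lo + 1] - cum[lo])
    return betas[lo] + frac * (betas[lo + 1] - betas[lo])

# Made-up per-IV summary statistics with true causal effect 0.3 and no invalid IVs.
rng = np.random.default_rng(0)
beta_exp = rng.normal(0.0, 1.0, 50)
beta_out = 0.3 * beta_exp + rng.normal(0.0, 0.05, 50)
se_out = np.full(50, 0.05)

ratios = beta_out / beta_exp                   # per-IV Wald ratio estimates
weights = (beta_exp / se_out) ** 2             # approximate inverse-variance weights
print(weighted_median(ratios, weights))        # should land close to 0.3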
Example #47
0
    def read_cst_beam(
        self,
        filename,
        beam_type="power",
        feed_pol="x",
        rotate_pol=True,
        frequency=None,
        telescope_name=None,
        feed_name=None,
        feed_version=None,
        model_name=None,
        model_version=None,
        history="",
        x_orientation=None,
        reference_impedance=None,
        extra_keywords=None,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """
        Read in data from a CST file.

        Parameters
        ----------
        filename : str
            The cst file to read from.
        beam_type : str
            What beam_type to read in ('power' or 'efield').
        feed_pol : str
            The feed or polarization or list of feeds or polarizations the
            files correspond to.
            Defaults to 'x' (meaning x for efield or xx for power beams).
        rotate_pol : bool
            If True, assume the structure in the simulation is symmetric under
            90 degree rotations about the z-axis (so that the y polarization can be
            constructed by rotating the x polarization or vice versa).
            Default: True if feed_pol is a single value or a list with all
            the same values in it, False if it is a list with varying values.
        frequency : float or list of float
            The frequency or list of frequencies corresponding to the filename(s).
            This is assumed to be in the same order as the files.
            If not passed, the code attempts to parse it from the filenames.
        telescope_name : str
            The name of the telescope corresponding to the filename(s).
        feed_name : str
            The name of the feed corresponding to the filename(s).
        feed_version : str
            The version of the feed corresponding to the filename(s).
        model_name : str
            The name of the model corresponding to the filename(s).
        model_version : str
            The version of the model corresponding to the filename(s).
        history : str
            A string detailing the history of the filename(s).
        x_orientation : str, optional
            Orientation of the physical dipole corresponding to what is
            labelled as the x polarization. Options are "east" (indicating
            east/west orientation) and "north" (indicating north/south orientation)
        reference_impedance : float, optional
            The reference impedance of the model(s).
        extra_keywords : dict, optional
            A dictionary containing any extra_keywords.
        run_check : bool
            Option to check for the existence and proper shapes of
            required parameters after reading in the file.
        check_extra : bool
            Option to check optional parameters as well as
            required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of
            required parameters after reading in the file.

        """
        self.telescope_name = telescope_name
        self.feed_name = feed_name
        self.feed_version = feed_version
        self.model_name = model_name
        self.model_version = model_version
        self.history = history
        if not uvutils._check_history_version(self.history,
                                              self.pyuvdata_version_str):
            self.history += self.pyuvdata_version_str

        if x_orientation is not None:
            self.x_orientation = x_orientation
        if reference_impedance is not None:
            self.reference_impedance = float(reference_impedance)
        if extra_keywords is not None:
            self.extra_keywords = extra_keywords

        if beam_type == "power":
            self.Naxes_vec = 1

            if feed_pol == "x":
                feed_pol = "xx"
            elif feed_pol == "y":
                feed_pol = "yy"

            if rotate_pol:
                rot_pol_dict = {"xx": "yy", "yy": "xx", "xy": "yx", "yx": "xy"}
                pol2 = rot_pol_dict[feed_pol]
                self.polarization_array = np.array(
                    [uvutils.polstr2num(feed_pol),
                     uvutils.polstr2num(pol2)])
            else:
                self.polarization_array = np.array(
                    [uvutils.polstr2num(feed_pol)])

            self.Npols = len(self.polarization_array)
            self._set_power()
        else:
            self.Naxes_vec = 2
            self.Ncomponents_vec = 2
            if rotate_pol:
                if feed_pol == "x":
                    self.feed_array = np.array(["x", "y"])
                else:
                    self.feed_array = np.array(["y", "x"])
            else:
                if feed_pol == "x":
                    self.feed_array = np.array(["x"])
                else:
                    self.feed_array = np.array(["y"])
            self.Nfeeds = self.feed_array.size
            self._set_efield()

        self.data_normalization = "physical"
        self.antenna_type = "simple"

        self.Nfreqs = 1
        self.Nspws = 1
        self.freq_array = np.zeros((self.Nspws, self.Nfreqs))
        self.bandpass_array = np.zeros((self.Nspws, self.Nfreqs))

        self.spw_array = np.array([0])
        self.pixel_coordinate_system = "az_za"
        self._set_cs_params()

        out_file = open(filename, "r")
        line = out_file.readline().strip()  # Get the first line
        out_file.close()
        raw_names = line.split("]")
        raw_names = [raw_name for raw_name in raw_names if not raw_name == ""]
        column_names = []
        units = []
        for raw_name in raw_names:
            column_name, unit = tuple(raw_name.split("["))
            column_names.append("".join(column_name.lower().split(" ")))
            units.append(unit.lower().strip())

        data = np.loadtxt(filename, skiprows=2)

        theta_col = np.where(np.array(column_names) == "theta")[0][0]
        phi_col = np.where(np.array(column_names) == "phi")[0][0]

        if "deg" in units[theta_col]:
            theta_data = np.radians(data[:, theta_col])
        else:
            theta_data = data[:, theta_col]
        if "deg" in units[phi_col]:
            phi_data = np.radians(data[:, phi_col])
        else:
            phi_data = data[:, phi_col]

        theta_axis = np.sort(np.unique(theta_data))
        phi_axis = np.sort(np.unique(phi_data))
        if not theta_axis.size * phi_axis.size == theta_data.size:
            raise ValueError("Data does not appear to be on a grid")

        theta_data = theta_data.reshape((theta_axis.size, phi_axis.size),
                                        order="F")
        phi_data = phi_data.reshape((theta_axis.size, phi_axis.size),
                                    order="F")

        delta_theta = np.diff(theta_axis)
        if not np.isclose(np.max(delta_theta), np.min(delta_theta)):
            raise ValueError(
                "Data does not appear to be regularly gridded in zenith angle")
        delta_theta = delta_theta[0]

        delta_phi = np.diff(phi_axis)
        if not np.isclose(np.max(delta_phi), np.min(delta_phi)):
            raise ValueError(
                "Data does not appear to be regularly gridded in azimuth angle"
            )
        delta_phi = delta_phi[0]

        self.axis1_array = phi_axis
        self.Naxes1 = self.axis1_array.size
        self.axis2_array = theta_axis
        self.Naxes2 = self.axis2_array.size

        if self.beam_type == "power":
            # type depends on whether cross pols are present
            # (if so, complex, else float)
            self.data_array = np.zeros(
                self._data_array.expected_shape(self),
                dtype=self._data_array.expected_type,
            )
        else:
            self.data_array = np.zeros(self._data_array.expected_shape(self),
                                       dtype=complex)

        if frequency is not None:
            self.freq_array[0] = frequency
        else:
            self.freq_array[0] = self.name2freq(filename)

        if rotate_pol:
            # for second polarization, rotate by pi/2
            rot_phi = phi_data + np.pi / 2
            rot_phi[np.where(rot_phi >= 2 * np.pi)] -= 2 * np.pi
            roll_rot_phi = np.roll(rot_phi,
                                   int((np.pi / 2) / delta_phi),
                                   axis=1)
            if not np.allclose(roll_rot_phi, phi_data):
                raise ValueError("Rotating by pi/2 failed")

            # theta is not affected by the rotation

        # get beam
        if self.beam_type == "power":

            data_col_enum = ["abs(e)", "abs(v)"]
            data_col = []
            for name in data_col_enum:
                this_col = np.where(np.array(column_names) == name)[0]
                if this_col.size > 0:
                    data_col = data_col + this_col.tolist()
            if len(data_col) == 0:
                raise ValueError(
                    "No power column found in file: {}".format(filename))
            elif len(data_col) > 1:
                raise ValueError(
                    "Multiple possible power columns found in file: {}".format(
                        filename))
            data_col = data_col[0]
            power_beam1 = (data[:, data_col].reshape(
                (theta_axis.size, phi_axis.size), order="F")**2.0)

            self.data_array[0, 0, 0, 0, :, :] = power_beam1

            if rotate_pol:
                # rotate by pi/2 for second polarization
                power_beam2 = np.roll(power_beam1,
                                      int((np.pi / 2) / delta_phi),
                                      axis=1)
                self.data_array[0, 0, 1, 0, :, :] = power_beam2
        else:
            self.basis_vector_array = np.zeros(
                (self.Naxes_vec, self.Ncomponents_vec, self.Naxes2,
                 self.Naxes1))
            self.basis_vector_array[0, 0, :, :] = 1.0
            self.basis_vector_array[1, 1, :, :] = 1.0

            theta_mag_col = np.where(
                np.array(column_names) == "abs(theta)")[0][0]
            theta_phase_col = np.where(
                np.array(column_names) == "phase(theta)")[0][0]
            phi_mag_col = np.where(np.array(column_names) == "abs(phi)")[0][0]
            phi_phase_col = np.where(
                np.array(column_names) == "phase(phi)")[0][0]

            theta_mag = data[:, theta_mag_col].reshape(
                (theta_axis.size, phi_axis.size), order="F")
            phi_mag = data[:, phi_mag_col].reshape(
                (theta_axis.size, phi_axis.size), order="F")
            if "deg" in units[theta_phase_col]:
                theta_phase = np.radians(data[:, theta_phase_col])
            else:
                theta_phase = data[:, theta_phase_col]
            if "deg" in units[phi_phase_col]:
                phi_phase = np.radians(data[:, phi_phase_col])
            else:
                phi_phase = data[:, phi_phase_col]
            theta_phase = theta_phase.reshape((theta_axis.size, phi_axis.size),
                                              order="F")
            phi_phase = phi_phase.reshape((theta_axis.size, phi_axis.size),
                                          order="F")

            theta_beam = theta_mag * np.exp(1j * theta_phase)
            phi_beam = phi_mag * np.exp(1j * phi_phase)

            self.data_array[0, 0, 0, 0, :, :] = phi_beam
            self.data_array[1, 0, 0, 0, :, :] = theta_beam

            if rotate_pol:
                # rotate by pi/2 for second polarization
                theta_beam2 = np.roll(theta_beam,
                                      int((np.pi / 2) / delta_phi),
                                      axis=1)
                phi_beam2 = np.roll(phi_beam,
                                    int((np.pi / 2) / delta_phi),
                                    axis=1)
                self.data_array[0, 0, 1, 0, :, :] = phi_beam2
                self.data_array[1, 0, 1, 0, :, :] = theta_beam2

        self.bandpass_array[0] = 1

        if frequency is None:
            warnings.warn("No frequency provided. Detected frequency is: "
                          "{freqs} Hz".format(freqs=self.freq_array))

        if run_check:
            self.check(check_extra=check_extra,
                       run_check_acceptability=run_check_acceptability)
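A rough usage sketch for a reader like the one above; in pyuvdata the call is typically made through a UVBeam object, and the file name, frequency and metadata below are placeholders rather than values from the project.

from pyuvdata import UVBeam

# Placeholder file name and metadata; adjust to your own CST text export.
beam = UVBeam()
beam.read_cst_beam(
    "my_cst_export.txt",      # hypothetical CST export file
    beam_type="efield",
    feed_pol="x",
    rotate_pol=True,          # construct the y feed by rotating the x feed by 90 deg
    frequency=150e6,          # Hz; parsed from the file name if omitted
    telescope_name="TEST",
    feed_name="dipole",
    feed_version="1.0",
    model_name="toy model",
    model_version="1.0",
)
print(beam.data_array.shape)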
Example #48
0
def test_image_selectivity(responses, expected):
    img_sel = image_selectivity(responses)
    assert (np.isclose(img_sel, expected, equal_nan=True))
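Example #49
0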
        ap1 = np.concatenate((1 - np.sum(ap1, axis=1).reshape(-1, 1), ap1), axis=1)
        ap2 = trrecord.format['AP2']
        ap2 = np.concatenate((1 - np.sum(ap2, axis=1).reshape(-1, 1), ap2), axis=1)

    # TODO this needs better testing
    subset_summed_dosages = {}
    for aidx1, len_allele1 in enumerate(len_alleles):
        for aidx2, len_allele2 in enumerate(len_alleles):
            summed_len = len_allele1 + len_allele2
            if summed_len not in subset_summed_dosages:
                subset_summed_dosages[summed_len] = 0
            subset_summed_dosages[summed_len] += np.sum(np.multiply(
                ap1[unfiltered_subset, aidx1], ap2[unfiltered_subset, aidx2]
            ))

    assert np.isclose(sum(subset_summed_dosages.values()), n_samples)
    alleles = list(subset_summed_dosages.keys())
    alleles_copy = alleles.copy()
    for allele in alleles_copy:
        if subset_summed_dosages[allele] < args.dosage_cutoff:
            alleles.remove(allele)
    alleles = sorted(alleles)

    mean_per_dosage = {float(allele): val for allele, val in ast.literal_eval(result[f'{stat_name}_{args.phenotype}_per_single_dosage'].to_numpy()[0]).items()}
    ci5e_2 = {float(allele): val for allele, val in ast.literal_eval(result['0.05_significance_CI'].to_numpy()[0]).items()}
    ci5e_8 = {float(allele): val for allele, val in ast.literal_eval(result['5e-8_significance_CI'].to_numpy()[0]).items()}
    #y_min = min(ci5e_8[allele][0] for allele in alleles)
    #y_max = max(ci5e_8[allele][1] for allele in alleles)

    fig_kws = dict(
        width = 600,
Example #50
0
    def test_model_serving(self):
        """
        Train a simple model and test serving flow by loading the SavedModel
        """

        # Test model training on TFRecord SequenceExample data
        data_dir = os.path.join(self.root_data_dir, "tfrecord")
        feature_config: FeatureConfig = self.get_feature_config()

        metrics_keys = ["categorical_accuracy"]

        def get_dataset(parse_tfrecord):
            return RelevanceDataset(
                data_dir=data_dir,
                data_format=DataFormatKey.TFRECORD,
                feature_config=feature_config,
                tfrecord_type=self.args.tfrecord_type,
                max_sequence_size=self.args.max_sequence_size,
                batch_size=self.args.batch_size,
                preprocessing_keys_to_fns={},
                train_pcent_split=self.args.train_pcent_split,
                val_pcent_split=self.args.val_pcent_split,
                test_pcent_split=self.args.test_pcent_split,
                use_part_files=self.args.use_part_files,
                parse_tfrecord=parse_tfrecord,
                file_io=self.file_io,
                logger=self.logger,
            )

        # Get raw TFRecord dataset
        raw_dataset = get_dataset(parse_tfrecord=False)

        # Parse the raw TFRecord dataset
        parsed_dataset = get_dataset(parse_tfrecord=True)

        model: RankingModel = self.get_ranking_model(
            loss_key=self.args.loss_key, feature_config=feature_config, metrics_keys=metrics_keys
        )

        model.fit(dataset=parsed_dataset, num_epochs=1, models_dir=self.output_dir)

        model.save(
            models_dir=self.args.models_dir,
            preprocessing_keys_to_fns={},
            postprocessing_fn=None,
            required_fields_only=not self.args.use_all_fields_at_inference,
            pad_sequence=self.args.pad_sequence_at_inference,
        )

        # Load SavedModel and get the right serving signature
        default_model = kmodels.load_model(
            os.path.join(self.output_dir, "final", "default"), compile=False
        )
        assert ServingSignatureKey.DEFAULT in default_model.signatures
        default_signature = default_model.signatures[ServingSignatureKey.DEFAULT]

        tfrecord_model = kmodels.load_model(
            os.path.join(self.output_dir, "final", "tfrecord"), compile=False
        )
        assert ServingSignatureKey.TFRECORD in tfrecord_model.signatures
        tfrecord_signature = tfrecord_model.signatures[ServingSignatureKey.TFRECORD]

        # Fetch a single batch for testing
        sequence_example_protos = next(iter(raw_dataset.test))
        parsed_sequence_examples = next(iter(parsed_dataset.test))[0]
        parsed_dataset_batch = parsed_dataset.test.take(1)

        # Use the loaded serving signatures for inference
        model_predictions = model.predict(parsed_dataset_batch)[self.args.output_name].values
        default_signature_predictions = default_signature(**parsed_sequence_examples)[
            self.args.output_name
        ]

        # Since we do not pad dummy records in tfrecord serving signature,
        # we can only predict on a single record at a time
        tfrecord_signature_predictions = [
            tfrecord_signature(protos=tf.gather(sequence_example_protos, [i]))[
                self.args.output_name
            ]
            for i in range(self.args.batch_size)
        ]

        def _flatten_records(x):
            """Collapse first two dimensions of a tensor -> [batch_size, max_num_records]"""
            return tf.reshape(x, tf.concat([[-1], tf.shape(x)[2:]], axis=0))

        def _filter_records(x, mask):
            """
            Filter records that were padded in each query

            Input shape: [batch_size, num_features]

            Output shape: [batch_size, num_features]
            """
            return tf.squeeze(tf.gather_nd(x, tf.where(tf.not_equal(mask, 0))))

        # Get mask for padded values
        mask = _flatten_records(parsed_sequence_examples["mask"])

        # Flatten scores to each record and filter out scores from padded records
        default_signature_predictions = _filter_records(
            _flatten_records(default_signature_predictions), mask
        )
        tfrecord_signature_predictions = tf.squeeze(
            tf.concat(tfrecord_signature_predictions, axis=1)
        )

        # Compare the scores from the different versions of the model
        assert np.isclose(model_predictions, default_signature_predictions, rtol=0.01,).all()

        assert np.isclose(model_predictions, tfrecord_signature_predictions, rtol=0.01,).all()

        assert np.isclose(
            default_signature_predictions, tfrecord_signature_predictions, rtol=0.01,
        ).all()
Example #51
0
def differ(dframe_l, dframe_r, left_on, right_on, fields_l=None, fields_r=None, show_diff=False, show_ratio=False, show_data=True, tol_pct=0.0, tol_abs=0.0, depict=False):
    '''Compare two dataframes field by field after an outer merge on key columns.

    Numeric field pairs are compared with np.isclose (tol_pct / tol_abs); string pairs
    fall back to a Levenshtein-distance match. Optionally adds difference and ratio
    columns and a per-row fraction of matched pairs.'''
    
    # Quick check that input arguments are valid
    assert fields_l is not None and fields_r is not None, 'Comparison field lists must not be None.'
    assert len(fields_l) == len(fields_r), 'Comparison lists not of equal length.'
    assert left_on in dframe_l.columns, '{} is not a column in dframe_l'.format(left_on)
    assert right_on in dframe_r.columns, '{} is not a column in dframe_r'.format(right_on)
    for each in fields_l:
        assert each in dframe_l.columns, '{} is not a column in dframe_l'.format(each)
    for each in fields_r:
        assert each in dframe_r.columns, '{} is not a column in dframe_r'.format(each)

    
    # Prepare working dfs
    df_l = dframe_l.rename(columns={left_on: 'compid'}).copy()
    df_r = dframe_r.rename(columns={right_on: 'compid'}).copy()
    fields = list(zip(fields_l, fields_r))

    print(fields)
    
    final_fields = []
    for each in fields:
        if each[0] == each[1]:
            final_fields.append((each[0] + '_l', each[1] + '_r'))
        else:
            final_fields.append(each)

    ordered_fields = list(chain(*final_fields))
    df_out = pd.merge(df_l[['compid'] + fields_l], df_r[['compid'] + fields_r], how='outer', left_on='compid', right_on='compid', suffixes=['_l', '_r'], indicator='found')
    df_out = df_out[['compid', 'found'] + ordered_fields]

    # Do comparison
    vs_fields = []
    for comparison_pair in final_fields:
        lbl = comparison_pair[0] + ' vs ' + comparison_pair[1]
        vs_fields.append(lbl)
        try:
            df_out[lbl] = np.isclose(df_out[comparison_pair[0]], df_out[comparison_pair[1]], rtol=tol_pct, atol=tol_abs)
            
        except TypeError:
#             traceback.print_exc()
#             print('Looks like these are not numbers.')
            if df_out[comparison_pair[0]].dtype.kind == df_out[comparison_pair[1]].dtype.kind == 'O':
                df_out[lbl + ' leven_dist'] = df_out.apply(lambda x: leven_dist(x[comparison_pair[0]], x[comparison_pair[1]]), axis=1)
                df_out.loc[df_out[lbl + ' leven_dist']==0, lbl] = True
                df_out[lbl].fillna(value=False, inplace=True)
        except:
            print('Cannot diff:', lbl, '.', comparison_pair[0],  type(comparison_pair[0]), comparison_pair[1], type(comparison_pair[1]))

    # Calc diff
    if show_diff:
        for comparison_pair in final_fields:
            lbl = comparison_pair[0] + ' - ' + comparison_pair[1]
            try:
                df_out[lbl] = df_out[comparison_pair[0]] - df_out[comparison_pair[1]]
            except:
                print('Cannot calc:', lbl, '.', comparison_pair[0],  type(comparison_pair[0]), comparison_pair[1], type(comparison_pair[1]))

                
    # Calc ratio
    if show_ratio:
        for comparison_pair in final_fields:
            lbl2 = comparison_pair[0] + ' / ' + comparison_pair[1]
            try:
                df_out[lbl2] = df_out[comparison_pair[0]] / df_out[comparison_pair[1]]
            except:
                print('Cannot calc:', lbl2, '.', comparison_pair[0],  type(comparison_pair[0]), comparison_pair[1], type(comparison_pair[1]))

                
    # Summary Results: Percentage of pairs matched
    df_out['pct_pairs_matched'] = sum([df_out[each] for each in vs_fields]) / len(final_fields)
    
    # Check if need to return data
    if not show_data:
        df_out = df_out.drop(ordered_fields, axis=1)


    if depict:
        display(dframe_l.dtypes)
    

    return df_out.sort_values(['found','compid'], ascending=[False,True]).reset_index(drop=True)
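A minimal usage sketch for differ with toy DataFrames (numeric columns only, so the np.isclose comparison path is exercised); column names and tolerances are arbitrary.

import pandas as pd

left = pd.DataFrame({'id': [1, 2, 3], 'amount': [10.0, 20.0, 30.0]})
right = pd.DataFrame({'id': [1, 2, 4], 'amount': [10.0, 20.5, 40.0]})

result = differ(left, right,
                left_on='id', right_on='id',
                fields_l=['amount'], fields_r=['amount'],
                show_diff=True, tol_pct=0.01)
print(result[['compid', 'found', 'amount_l vs amount_r', 'pct_pairs_matched']])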
Example #52
0
def process_gecsx(procstatus, dscfg, radar_list=None):
    """
    Computes ground clutter RCS, radar visibility and many other quantities using
    the GECSX algorithm, translated from IDL into Python.

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted Configuration Keywords::

        datatype : list of string. Dataset keyword
            The input data types

    radar_list : list of Radar objects
        Optional. list of radar objects

    Returns
    -------
    new_dataset : list of dict
        list of dictionaries containing the polar data output and the
        Cartesian data output in this order
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    for datatypedescr in dscfg['datatype']:
        radarnr, _, _, _, _ = get_datatype_fields(datatypedescr)

    ind_rad = int(radarnr[5:8]) - 1

    fname = dscfg['dempath'][ind_rad] + dscfg['demfile']

    demproj = None  # default projection if 'demproj' is not specified
    if 'demproj' in dscfg.keys():
        demproj = dscfg['demproj']
        try:
            demproj = int(demproj)
        except ValueError:
            # demproj is not an EPSG int
            pass

    dem_data = read_dem(fname, projparams=demproj)

    # If no radar data is provided we create empty radar object from user
    # specification
    if len(radar_list) == 0:
        ranges = np.arange(dscfg['range_resolution'] / 2, dscfg['rmax'],
                           dscfg['range_resolution'])
        azimuths = np.arange(dscfg['azmin'], dscfg['azmax'],
                             dscfg['anglestep'])
        elevations = dscfg['antenna_elevations']

        radar = pyart.testing.make_empty_ppi_radar(len(ranges), len(azimuths),
                                                   len(elevations))
        radar.latitude['data'] = np.array(dscfg['RadarPosition']['latitude'])
        radar.longitude['data'] = np.array(dscfg['RadarPosition']['longitude'])
        radar.altitude['data'] = np.array(dscfg['RadarPosition']['altitude'])
        radar.azimuth['data'] = np.array(list(azimuths) * len(elevations))
        radar.range['data'] = ranges
        radar.fixed_angle['data'] = np.array(elevations)
        radar.elevation['data'] = np.array(
            [len(azimuths) * [e] for e in elevations]).ravel()
        # change radar name
        radar.metadata['instrument_name'] = dscfg['RadarName']
    else:
        radar = radar_list[0]
        if 'antenna_elevations' in dscfg:
            # Intersection between radar elevation angles and config choice
            # using a certain numerical tolerance as radar angles are
            # sometimes coded as 0.699996 for 0.7 degrees in the radar files
            # for example
            el1 = radar.fixed_angle['data'].astype(float)
            el2 = dscfg['antenna_elevations'].astype(float)
            idx_to_process = [
                i for i in range(len(el1)) if np.any(np.isclose(el1[i], el2))
            ]

            print('Radar elevations angles redefined in config file')
            print('Elevation angles {:s} will be processed'.format(
                str([el1[i] for i in idx_to_process])))
            radar = radar.extract_sweeps(idx_to_process)

    # Create dict with radar specifications
    radar_specs = {}
    radar_specs['frequency'] = dscfg['frequency'][ind_rad]
    radar_specs['loss'] = dscfg['lrxh'][ind_rad] + dscfg['mflossh'][ind_rad]
    radar_specs['power'] = dscfg['txpwrh'][ind_rad]
    radar_specs['tau'] = dscfg['pulse_width'][ind_rad]
    radar_specs['beamwidth'] = dscfg['radar_beam_width_h'][ind_rad]
    radar_specs['gain'] = dscfg['AntennaGainH'][ind_rad]

    # Optional per-radar parameters: fall back to scalar defaults when absent
    az_conv = dscfg['AzimTol'][ind_rad] if 'AzimTol' in dscfg else 0
    ke = dscfg['refcorr'][ind_rad] if 'refcorr' in dscfg else 4 / 3.
    atm_att = dscfg['attg'][ind_rad] if 'attg' in dscfg else 0.012
    mosotti_kw = dscfg['mosotti_factor'][0] if 'mosotti_factor' in dscfg else 0.9644
    sigma0_method = dscfg.get('sigma0_method', 'Gabella')
    raster_oversampling = dscfg.get('raster_oversampling', 1)
    verbose = dscfg.get('verbose', 1)
    clip = dscfg.get('clip', 1)
    daz = dscfg.get('az_discretization', 0.2)
    dr = dscfg.get('range_discretization', 100)

    gecsx_grid, gecsx_radar = pyart.retrieve.gecsx(
        radar,
        radar_specs,
        dem_data,
        fill_value=None,
        az_conv=az_conv,
        dr=dr,
        daz=daz,
        ke=ke,
        atm_att=atm_att,
        mosotti_kw=mosotti_kw,
        raster_oversampling=raster_oversampling,
        sigma0_method=sigma0_method,
        clip=clip,
        return_pyart_objects=True,
        verbose=verbose)
    new_dataset = [{'radar_out': gecsx_grid}, {'radar_out': gecsx_radar}]

    return new_dataset, ind_rad
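For orientation, the sketch below collects the dscfg keywords read by the function above (for the case where radar_list is empty) into one dictionary. All values, and the datatype descriptor string, are illustrative placeholders rather than a validated pyrad configuration.

import numpy as np

dscfg_sketch = {
    'datatype': ['RADAR001:dBZ'],               # placeholder; descriptor format is pyrad-specific
    'dempath': ['/path/to/dem/'], 'demfile': 'dem_file',
    'demproj': 21781,                           # optional EPSG code of the DEM projection (placeholder)
    'range_resolution': 500., 'rmax': 50000.,   # gate spacing and maximum range [m]
    'azmin': 0., 'azmax': 360., 'anglestep': 1.,
    'antenna_elevations': np.array([1.0, 3.0]),
    'RadarPosition': {'latitude': [46.0], 'longitude': [8.0], 'altitude': [1600.]},
    'RadarName': 'DEMO',
    # per-radar hardware specifications (one entry per radar)
    'frequency': [5.6e9], 'lrxh': [1.0], 'mflossh': [1.0],
    'txpwrh': [500.], 'pulse_width': [0.5e-6],
    'radar_beam_width_h': [1.0], 'AntennaGainH': [45.0],
}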
Example #53
0
def test_add_wcs_with_db_fsmcorr_v1(eng_db_ngas, data_file):
    """Test using the database with original FSM correction"""
    stp.add_wcs(data_file, fsmcorr_version='v1', siaf_path=siaf_db, j2fgs_transpose=False)

    model = datamodels.Level1bModel(data_file)
    assert np.isclose(model.meta.pointing.ra_v1, 348.9278669)
    assert np.isclose(model.meta.pointing.dec_v1, -38.749239)
    assert np.isclose(model.meta.pointing.pa_v3, 50.1767077)
    assert model.meta.wcsinfo.wcsaxes == 2
    assert model.meta.wcsinfo.crpix1 == 693.5
    assert model.meta.wcsinfo.crpix2 == 512.5
    assert np.isclose(model.meta.wcsinfo.crval1, 348.8776709)
    assert np.isclose(model.meta.wcsinfo.crval2, -38.854159)
    assert model.meta.wcsinfo.ctype1 == "RA---TAN"
    assert model.meta.wcsinfo.ctype2 == "DEC--TAN"
    assert model.meta.wcsinfo.cunit1 == 'deg'
    assert model.meta.wcsinfo.cunit2 == 'deg'
    assert np.isclose(model.meta.wcsinfo.cdelt1, 3.0555555e-5)
    assert np.isclose(model.meta.wcsinfo.cdelt2, 3.0555555e-5)
    assert np.isclose(model.meta.wcsinfo.pc1_1, 0.0385309)
    assert np.isclose(model.meta.wcsinfo.pc1_2, 0.9992574)
    assert np.isclose(model.meta.wcsinfo.pc2_1, 0.9992574)
    assert np.isclose(model.meta.wcsinfo.pc2_2, -0.0385309)
    assert model.meta.wcsinfo.v2_ref == 200.0
    assert model.meta.wcsinfo.v3_ref == -350.0
    assert model.meta.wcsinfo.vparity == -1
    assert model.meta.wcsinfo.v3yangle == 42.0
    assert np.isclose(model.meta.wcsinfo.ra_ref, 348.8776709)
    assert np.isclose(model.meta.wcsinfo.dec_ref, -38.854159)
    assert np.isclose(model.meta.wcsinfo.roll_ref, 50.20832726650)
    assert word_precision_check(
        model.meta.wcsinfo.s_region,
        (
            'POLYGON ICRS'
            ' 348.8563379013152 -38.874810886750495'
            ' 348.85810582665334 -38.84318773861823'
            ' 348.8982592685148 -38.84439628911871'
            ' 348.89688051688233 -38.876020020321164'
        )
    )
Example #54
0
def test_ada_exp_factory_no_asympt(test_f: Callable[[float], float]):
    """Test of the adaptive exponential extrapolator."""
    seeded_f = apply_seed_to_func(test_f, SEED)
    fac = AdaExpFactory(steps=4, scale_factor=2.0, asymptote=None)
    fac.iterate(seeded_f)
    assert np.isclose(fac.reduce(), seeded_f(0, err=0), atol=CLOSE_TOL)
Example #55
0
def set_elastic_params(elas_prm_names, elas_prm_dflt_vals,
                       elas_prm_order, defaulted, blk_dbg_prm, **kwargs):
    r"""Calculate and check a full set of isotropic, linear elastic parameters.

    An isotropic, linear elastic solid is defined by specifying, via the kwargs
    dict, any **two** of the **six** elastic parameters defined in class Blake.
    This function computes the remaining four elastic parameters and performs
    some sanity checking.  In particular we require that,

    1. Each user-specified modulus parameter is positive.
    2. Each pair of user-specified parameters define a material which has a
       positive-definite (PD) strain energy function.
    3. If Poisson's Ratio is negative when calculated from user-specified
       moduli, a non-fatal warning message to that effect is issued.  A
       negative value is uncommon for materials in their linear elastic range.
       Moreover, if the user *intends* that Poisson's Ratio be negative, then
       it should be passed to the Blake constructor *explicitly*.  When
       Poisson's Ratio is inferred from moduli values, a square root sign
       ambiguity may prevent the correct sign being set.

    If conditions 1 or 2 aren't satisfied, an error message is issued and
    *ExactPack* terminates.  Otherwise, the full parameter set is returned as a
    dictionary of the material parameter names and values.  When True is passed
    as the value of argument "defaulted", a dictionary of default values is
    constructed from the first two arguments and returned.  This is the *only*
    default case accepted by this function.

    **NOTE** : It is the caller's responsibility to ensure that the order of
    elements in elas_prm_names and elas_prm_dflt_vals agrees with each other
    and with the ordinality specified in elas_prm_order.  Even if defaulted is
    *False*, the elas_prm_names and elas_prm_order arguments must still agree.
    In particular, the elas_prm_names list *must* contain the same six strings
    as the corresponding attribute of :class:`Blake`.
    
    The blk_dbg_prm argument is True/False and enables available debugging
    code throughout this module when True.
    """

    # Sanity check defaults, construct default dict & return if
    # defaulted == True.
    nelprm = 6          # number of elastic parameters
    if (len(elas_prm_names) == nelprm and
        len(elas_prm_dflt_vals) == nelprm and
        len(elas_prm_order) == nelprm):
        elas_prm_dflts = dict(zip(elas_prm_names, elas_prm_dflt_vals))
        if defaulted:
            return elas_prm_dflts
    else:
        raise ValueError('Length of default argument(s) is invalid!')

    #           Setup for generic case

    # Internal param vars (same order as elas_prm_names):
    int_var_names = ('plda', 'pg', 'pe', 'pnu', 'pk', 'pm')
    # make a reverse dict from internal var names to external param names.
    ivar_pnms = dict(zip(int_var_names, elas_prm_names))

    # Create local elastic param vars with a COMPLEX init. value so that
    # "not yet set" can be detected later via isinstance(v, complex).
    # ipr, ips are intermediate variables (sometimes called R, S)
    # which are computed in some cases.
    # NOTE: exec() cannot create function locals in Python 3, so assign directly.
    ipr = ips = 1j
    plda = pg = pe = pnu = pk = pm = 1j

    #          Generic case

    # Store the valid param names from kwargs.key().
    elas_prm_args = {}
    for ky in elas_prm_dflts:
        if ky in kwargs:
            elas_prm_args[ky] = float(kwargs[ky])

    if len(elas_prm_args) != 2:
        raise ValueError(
            """Invalid number of elastic parameters specified!
            EXACTLY *two* of the six possible elastic parameters must be
            specified to create a non-default Blake instance!
            Misspelled parameter name(s) can also raise this error.
            """)

    #   Parameter Restrictions
    # Here we check only that: any GIVEN moduli are positive and
    # a GIVEN poisson_ratio (pnu) lies in the PD strain energy range:
    # -1.0 < pnu < 0.5.
    for ky in elas_prm_args:
        if (ky == 'poisson_ratio'):
            if not (-1.0 < elas_prm_args[ky] < 0.5):
                raise(
                    ValueError(
                        'Specified value of ' + ky +
                        ' is not in the open interval: (-1.0, 0.5).')
                )
        else:
            # all other elas params are moduli
            if elas_prm_args[ky] <= 0.0:
                raise(
                    ValueError(
                        'Specified value of ' + ky +
                        ' is non-positive.')
                )

    # Ensure elas_prm_order[eky0] < elas_prm_order[eky1]:
    # coding below depends on this.
    eprmkys = list(elas_prm_args.keys())
    if elas_prm_order[eprmkys[0]] < elas_prm_order[eprmkys[1]]:
        [eky0, eky1] = eprmkys
    else:
        if blk_dbg_prm:
            print('\neprmkys reversed per elas_prm_order!')
        eprmkys.reverse()
        [eky0, eky1] = eprmkys

    # Set prmcase and local param vars.
    if eky0 == 'lame_mod':
        plda = elas_prm_args[eky0]
        if eky1 == 'shear_mod':
            prmcase = 0
            pg = elas_prm_args[eky1]
        elif eky1 == 'youngs_mod':
            prmcase = 1
            pe = elas_prm_args[eky1]
        elif eky1 == 'poisson_ratio':
            prmcase = 2
            pnu = elas_prm_args[eky1]
        elif eky1 == 'bulk_mod':
            prmcase = 3
            pk = elas_prm_args[eky1]
        elif eky1 == 'long_mod':
            prmcase = 4
            pm = elas_prm_args[eky1]

    elif eky0 == 'shear_mod':
        pg = elas_prm_args[eky0]
        if eky1 == 'youngs_mod':
            prmcase = 5
            pe = elas_prm_args[eky1]
        elif eky1 == 'poisson_ratio':
            prmcase = 6
            pnu = elas_prm_args[eky1]
        elif eky1 == 'bulk_mod':
            prmcase = 7
            pk = elas_prm_args[eky1]
        elif eky1 == 'long_mod':
            prmcase = 8
            pm = elas_prm_args[eky1]

    elif eky0 == 'youngs_mod':
        pe = elas_prm_args[eky0]
        if eky1 == 'poisson_ratio':
            prmcase = 9
            pnu = elas_prm_args[eky1]
        elif eky1 == 'bulk_mod':
            prmcase = 10
            pk = elas_prm_args[eky1]
        elif eky1 == 'long_mod':
            prmcase = 11
            pm = elas_prm_args[eky1]

    elif eky0 == 'poisson_ratio':
        pnu = elas_prm_args[eky0]
        if eky1 == 'bulk_mod':
            prmcase = 12
            pk = elas_prm_args[eky1]
        elif eky1 == 'long_mod':
            prmcase = 13
            pm = elas_prm_args[eky1]

    elif eky0 == 'bulk_mod':
        pk = elas_prm_args[eky0]
        if eky1 == 'long_mod':
            prmcase = 14
            pm = elas_prm_args[eky1]

    if blk_dbg_prm:
        print('prmcase = ', prmcase)

    # POSITIVE-DEFINITENESS (PD) of STRAIN ENERGY FUNCTION
    # Calculation and checking of full elas param set.
    #
    # There are four PD strain energy test functions at top of file.
    # We apply one of these tests in each prmcase.
    # RECALL: we have already verified that each GIVEN *modulus* is pos.
    # and, if GIVEN, pnu (poisson's ratio) satisfies: -1.0 < pnu < 0.5.
    # We ASSUME THESE CONDITIONS here.
    #
    # When positive moduli *could* yield a neg. value for pnu (Poisson) we
    # invoke warn_negative_poisson() which issues a non-fatal warning if pnu
    # actually *is* neg.  See the docstring for detail.
    #
    # When the given moduli: 1) will yield a complex param value; 2) are
    # sufficiently close to yielding a NaN param value, we invoke one of the
    # term_*_*() functions which issues a ValueError().  We invoke these
    # checking functions as early as possible in each prmcase block to avoid
    # possible un-trapped floating point exceptions (FPEs).

    # Tolerances for numpy.isclose() to control termination using
    # term_nan_lame() or term_nan_poisson().
    abstol = 0.0
    reltol = 1.0e-13

    #
    # eky0 == 'lame_mod'
    if prmcase == 0:
        # given -- lame_mod, shear_mod
        # Neg. poisson and FPEs not possible.
        check_ii(prmcase, eky0, plda, eky1, pg, blk_dbg_prm)
        pe = pg * (3 * plda + 2 * pg) / (plda + pg)
        pnu = plda / (2 * (plda + pg))
    elif prmcase == 1:
        # given -- lame_mod, youngs_mod
        # Neg. poisson and FPEs not possible.
        ipr = pow(pe**2 + 9*plda**2 + 2*pe*plda, 0.5)
        pnu = 2 * plda / (pe + plda + ipr)
        pg = 0.25 * (pe - 3 * plda + ipr)
        check_ii(prmcase, eky0, plda, ivar_pnms['pg'], pg, blk_dbg_prm)
    elif prmcase == 2:
        # given -- lame_mod, poisson_ratio
        # Neg. poisson and FPEs not possible.
        pe = plda * (1 + pnu) * (1 - 2 * pnu) / pnu
        pg = plda * (1 - 2 * pnu) / (2 * pnu)
        check_ii(prmcase, eky0, plda, ivar_pnms['pg'], pg, blk_dbg_prm)
    elif prmcase == 3:
        # given -- lame_mod, bulk_mod
        # check for poisson_mod = NaN
        pnames = (eky0, eky1)
        pvals = (plda, pk)
        icb = 3*pk
        if np.isclose(plda, icb, rtol=reltol, atol=abstol):
            term_nan_poisson(prmcase, pnames, pvals, blk_dbg_prm)
        pnu = plda / (3 * pk - plda)
        warn_negative_poisson(prmcase, pnu, pnames, pvals, blk_dbg_prm)
        pg = 3 * (pk - plda) / 2
        check_ii(prmcase, eky0, plda, ivar_pnms['pg'], pg, blk_dbg_prm)
        pe = 9 * pk * (pk - plda) / (3 * pk - plda)
    elif prmcase == 4:
        # given -- lame_mod, long_mod
        # Neg. poisson and FPEs not possible.
        pg = (pm - plda) / 2
        check_ii(prmcase, eky0, plda, ivar_pnms['pg'], pg, blk_dbg_prm)
        pe = (pm - plda) * (pm + 2 * plda) / (pm + plda)
        pnu = plda / (pm + plda)
    #
    # eky0 == 'shear_mod'
    elif prmcase == 5:
        # given -- shear_mod, youngs_mod
        # check for lame_mod = NaN
        pnames = (eky0, eky1)
        pvals = (pg, pe)
        icb = 3 * pg            # (pe = icb) --> div-zero
        if np.isclose(pe, icb, rtol=reltol, atol=abstol):
            term_nan_lame(prmcase, pnames, pvals, blk_dbg_prm)
        plda = pg * (pe - 2 * pg) / (3 * pg - pe)
        pnu = (pe / (2 * pg)) - 1
        warn_negative_poisson(prmcase, pnu, pnames, pvals, blk_dbg_prm)
        check_iv(prmcase, eky0, pg, ivar_pnms['pnu'], pnu, blk_dbg_prm)
    elif prmcase == 6:
        # given -- shear_mod, poisson_ratio
        # Neg. poisson and FPEs not possible.
        check_iv(prmcase, eky0, pg, eky1, pnu, blk_dbg_prm)
        plda = 2 * pg * pnu / (1 - 2 * pnu)
        pe = 2 * pg * (1 + pnu)
    elif prmcase == 7:
        # given -- shear_mod, bulk_mod
        pnames = (eky0, eky1)
        pvals = (pg, pk)  # given moduli are shear_mod and bulk_mod
        pnu = (3 * pk - 2 * pg) / (6 * pk + 2 * pg)
        warn_negative_poisson(prmcase, pnu, pnames, pvals, blk_dbg_prm)
        check_iii(prmcase, eky0, pg, eky1, pk, blk_dbg_prm)
        plda = pk - 2 * pg / 3
        pe = 9 * pk * pg / (3 * pk + pg)
    elif prmcase == 8:
        # given -- shear_mod, long_mod
        # check for poisson_mod = NaN
        pnames = (eky0, eky1)
        pvals = (pg, pm)
        #                       # (pm = pg) --> div-zero
        if np.isclose(pm, pg, rtol=reltol, atol=abstol):
            term_nan_poisson(prmcase, pnames, pvals, blk_dbg_prm)
        pnu = (pm - 2 * pg) / (2 * pm - 2 * pg)
        warn_negative_poisson(prmcase, pnu, pnames, pvals, blk_dbg_prm)
        check_iv(prmcase, eky0, pg, ivar_pnms['pnu'], pnu, blk_dbg_prm)
        plda = pm - 2 * pg
        pe = pg * (3 * pm - 4 * pg) / (pm - pg)
    #
    # eky0 == 'youngs_mod'
    elif prmcase == 9:
        # given -- youngs_mod, poisson_ratio
        # Neg. poisson and FPEs not possible.
        check_v(prmcase, eky0, pe, eky1, pnu, blk_dbg_prm)
        plda = pe * pnu / ((1 + pnu) * (1 - 2 * pnu))
        pg = 0.5 * pe / (1 + pnu)
    elif prmcase == 10:
        # given -- youngs_mod, bulk_mod
        # check for lame_mod = NaN
        pnames = (eky0, eky1)
        pvals = (pe, pk)
        icb = 9 * pk            # (pe = icb) --> div-zero
        if np.isclose(pe, icb, rtol=reltol, atol=abstol):
            term_nan_lame(prmcase, pnames, pvals, blk_dbg_prm)
        plda = 3 * pk * (3 * pk - pe) / (9 * pk - pe)
        pnu = (3 * pk - pe) / (6 * pk)
        warn_negative_poisson(prmcase, pnu, pnames, pvals, blk_dbg_prm)
        check_v(prmcase, eky0, pe, ivar_pnms['pnu'], pnu, blk_dbg_prm)
        pg = 3 * pk * pe / (9 * pk - pe)
    elif prmcase == 11:
        # given -- youngs_mod, long_mod
        # check for poisson = Complex
        # NOTE: sign(pnu) == sign(ips)
        #
        # Because materials with (pnu < 0) are unusual, we use the
        # pos. root here.  To handle materials with an intended NEGATIVE
        # poisson_ratio, the user should explicitly pass poisson_ratio
        # to the constructor.
        #
        # SIGN of f(pe, pm) = pe**2 + 9*pm**2 - 10*pe*pm.
        # In terms of pm and pg, (pm - pe) = (pm - 2*pg)**2 / (pm - pg).
        # The numerator has min value 0 on line pm = 2*pg and denom. > 0,
        # so pm > pe for all pm > pg > 0, which is the case for real matls.
        # Positivity of f(pe, pm) follows directly by considering
        # f(pe, alpha*pe) with alpha > 1.
        # Thus ips is real provided only that pm > pg.
        pnames = (eky0, eky1)
        pvals = (pe, pm)
        ips2 = pe**2 + 9 * pm**2 - 10*pe*pm
        if ips2 < 0.0:
            term_cmplx_poisson(prmcase, pnames, pvals, blk_dbg_prm)
        ips = pow(ips2, 0.5)
        pnu = 0.25 * (pe - pm + ips) / pm
        warn_negative_poisson(prmcase, pnu, pnames, pvals, blk_dbg_prm)
        check_v(prmcase, eky0, pe, ivar_pnms['pnu'], pnu, blk_dbg_prm)
        plda = 0.25 * (pm - pe + ips)
        pg = 0.125 * (3 * pm + pe - ips)
    #
    # eky0 == 'poisson_ratio'
    elif prmcase == 12:
        # given -- poisson_ratio, bulk_mod
        # Neg. poisson and FPEs not possible.
        pg = 3 * pk * (1 - 2 * pnu) / (2 * (1 + pnu))
        check_iv(prmcase, ivar_pnms['pg'], pg, eky0, pnu, blk_dbg_prm)
        plda = 3 * pk * pnu / (1 + pnu)
        pe = 3 * pk * (1 - 2 * pnu)
    elif prmcase == 13:
        # given -- poisson_ratio, long_mod
        # Neg. poisson and FPEs not possible.
        pg = 0.5 * pm * (1 - 2 * pnu) / (1 - pnu)
        check_iv(prmcase, ivar_pnms['pg'], pg, eky0, pnu, blk_dbg_prm)
        plda = pm * pnu / (1 - pnu)
        pe = 2 * (1 + pnu) * pg
    #
    # eky0 == 'bulk_mod'
    elif prmcase == 14:
        # given -- bulk_mod, long_mod
        pnames = (eky0, eky1)
        pvals = (pk, pm)
        pg = 0.75 * (pm - pk)
        check_iii(prmcase, ivar_pnms['pg'], pg, eky0, pk, blk_dbg_prm)
        pnu = (3 * pk - pm) / (3 * pk + pm)
        warn_negative_poisson(prmcase, pnu, pnames, pvals, blk_dbg_prm)
        plda = 0.5 * (3 * pk - pm)
        pe = 9 * pk * (pm - pk) / (3 * pk + pm)

    # end if prmcase

    # Verify that (plda, pg, pe, pnu) have been set.
    allset = not any(isinstance(v, complex) for v in (plda, pg, pe, pnu))
    errmsg = (
        """Internal: one of the internal params has not yet been set!
        Contact ExactPack team!
        """ )
    assert allset, errmsg

    # One or both of bulk_mod, long_mod may not yet have been set.
    # plda and pg have been set.
    if isinstance(pk, complex):
        pk = plda + 2 * pg / 3
    if isinstance(pm, complex):
        pm = plda + 2 * pg

    # Provided inputs are as described in docstring, this dict contains the
    # correct name-value mapping.
    return dict(zip(elas_prm_names, [plda, pg, pe, pnu, pk, pm]))
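As a concrete check of the prmcase == 9 relations above (Young's modulus plus Poisson's ratio given), the following standalone sketch, independent of the ExactPack helpers, reproduces the same closed-form arithmetic with illustrative steel-like values.

import numpy as np

def elastic_from_E_nu(E, nu):
    # Same relations as prmcase == 9 plus the closing pk/pm formulas above.
    lam = E * nu / ((1 + nu) * (1 - 2 * nu))   # Lame's first parameter
    G = 0.5 * E / (1 + nu)                     # shear modulus
    K = lam + 2 * G / 3                        # bulk modulus
    M = lam + 2 * G                            # longitudinal modulus
    return lam, G, K, M

lam, G, K, M = elastic_from_E_nu(200.0e9, 0.3)   # illustrative steel-like values
assert np.isclose(G, 76.92e9, rtol=1e-3)
assert np.isclose(K, 166.67e9, rtol=1e-3)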
Example #56
0
def print_fail(a1_name, a1, a2_name, a2):
  close = np.isclose(a1, a2)
  print('  Index  %s  %s   Difference' % (a1_name, a2_name))
  for i in range(len(close)):
    if not close[i]:
      print(' ', i, a1[i], a2[i], abs(a1[i] - a2[i]))
Example #57
0
def convert_dds_to_cirq_circuit(dynamic_decoupling_sequence,
                                target_qubits=None,
                                gate_time=0.1,
                                add_measurement=True,
                                algorithm=INSTANT_UNITARY):
    """Converts a Dynamic Decoupling Sequence into quantum circuit
    as defined in cirq

    Parameters
    ----------
    dynamic_decoupling_sequence : DynamicDecouplingSequence
        The dynamic decoupling sequence
    target_qubits : list, optional
        List of target qubits for the sequence operation; the qubits must be
        cirq.Qid type; defaults to None in which case a 1-D lattice of one
        qubit is used (indexed as 0).
    gate_time : float, optional
        Time (in seconds) delay introduced by a gate; defaults to 0.1
    add_measurement : bool, optional
        If True, the circuit contains a measurement operation for each of the
        target qubits. Measurement from each of the qubits is associated
        with a string as key. The string is formatted as 'qubit-X' where
        X is a number between 0 and len(target_qubits).
    algorithm : str, optional
        One of 'fixed duration unitary' or 'instant unitary'; In the case of
        'fixed duration unitary', the sequence operations are assumed to be
        taking the amount of gate_time while 'instant unitary' assumes the sequence
        operations are instantaneous (and hence does not contribute to the delay between
        offsets). Defaults to 'instant unitary'.

    Returns
    -------
    cirq.Circuit
        The circuit containing gates corresponding to sequence operation.

    Raises
    ------
    ArgumentsValueError
        If any of the input parameters result in an invalid operation.

    Notes
    -----

    Dynamic Decoupling Sequences (DDS) consist of idealized pulse operations.
    Theoretically, these operations (pi-pulses in X, Y or Z) occur
    instantaneously. In practice, however, pulses require time. Therefore, this
    method of converting an idealized sequence results in a circuit that is
    only an approximate implementation of the idealized sequence.

    In the idealized definition of a DDS, `offsets` gives the instants within
    the sequence `duration` at which a pulse occurs instantaneously. A series
    of appropriate circuit components is placed to represent these pulses.

    In the 'standard circuit', the gaps (idle time) between active pulses are
    filled with `identity` gates. Each identity gate introduces a delay of
    `gate_time`. In this implementation, the number of identity gates is
    determined by :math:`np.int(np.floor(offset_distance / gate_time))`. As a
    consequence, the duration of the real circuit is :math:`gate_time \\times
    number_of_identity_gates + pulse_gate_time \\times number_of_pulses`.

    Q-CTRL Open Controls supports operations resulting in a rotation around at
    most one axis at any offset.
    """

    if dynamic_decoupling_sequence is None:
        raise ArgumentsValueError(
            'No dynamic decoupling sequence provided.',
            {'dynamic_decoupling_sequence': dynamic_decoupling_sequence})

    if not isinstance(dynamic_decoupling_sequence, DynamicDecouplingSequence):
        raise ArgumentsValueError(
            'Dynamic decoupling sequence is not recognized. '
            'Expected DynamicDecouplingSequence instance.', {
                'type(dynamic_decoupling_sequence)':
                type(dynamic_decoupling_sequence)
            })

    if gate_time <= 0:
        raise ArgumentsValueError(
            'Time delay of gates must be greater than zero.',
            {'gate_time': gate_time})

    if target_qubits is None:
        target_qubits = [cirq.LineQubit(0)]

    if algorithm not in [FIX_DURATION_UNITARY, INSTANT_UNITARY]:
        raise ArgumentsValueError(
            'Algorithm must be one of {} or {}'.format(INSTANT_UNITARY,
                                                       FIX_DURATION_UNITARY),
            {'algorithm': algorithm})

    unitary_time = 0.
    if algorithm == FIX_DURATION_UNITARY:
        unitary_time = gate_time

    rabi_rotations = dynamic_decoupling_sequence.rabi_rotations
    azimuthal_angles = dynamic_decoupling_sequence.azimuthal_angles
    detuning_rotations = dynamic_decoupling_sequence.detuning_rotations

    if len(rabi_rotations.shape) == 1:
        rabi_rotations = rabi_rotations[np.newaxis, :]
    if len(azimuthal_angles.shape) == 1:
        azimuthal_angles = azimuthal_angles[np.newaxis, :]
    if len(detuning_rotations.shape) == 1:
        detuning_rotations = detuning_rotations[np.newaxis, :]

    operations = np.vstack(
        (rabi_rotations, azimuthal_angles, detuning_rotations))
    offsets = dynamic_decoupling_sequence.offsets

    time_covered = 0
    circuit = cirq.Circuit()
    for operation_idx in range(operations.shape[1]):

        offset_distance = offsets[operation_idx] - time_covered

        if np.isclose(offset_distance, 0.0):
            offset_distance = 0.0

        if offset_distance < 0:
            raise ArgumentsValueError("Offsets cannot be placed properly",
                                      {'sequence_operations': operations})

        if offset_distance > 0:
            while (time_covered + gate_time) <= offsets[operation_idx]:
                gate_list = []
                for qubit in target_qubits:
                    gate_list.append(cirq.I(qubit))
                time_covered += gate_time
                circuit.append(gate_list)

        rabi_rotation = operations[0, operation_idx]
        azimuthal_angle = operations[1, operation_idx]
        x_rotation = rabi_rotation * np.cos(azimuthal_angle)
        y_rotation = rabi_rotation * np.sin(azimuthal_angle)
        z_rotation = operations[2, operation_idx]

        rotations = np.array([x_rotation, y_rotation, z_rotation])
        zero_pulses = np.isclose(rotations, 0.0).astype(int)  # np.int was removed in newer NumPy
        nonzero_pulse_counts = 3 - np.sum(zero_pulses)
        if nonzero_pulse_counts > 1:
            raise ArgumentsValueError(
                'Open Controls support a sequence with one '
                'valid pulse at any offset. Found sequence '
                'with multiple rotation operations at an offset.', {
                    'dynamic_decoupling_sequence':
                    str(dynamic_decoupling_sequence),
                    'offset':
                    dynamic_decoupling_sequence.offsets[operation_idx],
                    'rabi_rotation':
                    dynamic_decoupling_sequence.rabi_rotations[operation_idx],
                    'azimuthal_angle':
                    dynamic_decoupling_sequence.
                    azimuthal_angles[operation_idx],
                    'detuning_rotaion':
                    dynamic_decoupling_sequence.
                    detuning_rotations[operation_idx]
                })

        gate_list = []
        for qubit in target_qubits:
            if nonzero_pulse_counts == 0:
                gate_list.append(cirq.I(qubit))
            else:
                if not np.isclose(rotations[0], 0.0):
                    gate_list.append(cirq.Rx(rotations[0])(qubit))
                elif not np.isclose(rotations[1], 0.0):
                    gate_list.append(cirq.Ry(rotations[1])(qubit))
                elif not np.isclose(rotations[2], 0.):
                    gate_list.append(cirq.Rz(rotations[2])(qubit))
        circuit.append(gate_list)
        if np.isclose(np.sum(rotations), 0.0):
            time_covered = offsets[operation_idx]
        else:
            time_covered = offsets[operation_idx] + unitary_time
    if add_measurement:
        gate_list = []
        for idx, qubit in enumerate(target_qubits):
            gate_list.append(cirq.measure(qubit, key='qubit-{}'.format(idx)))
        circuit.append(gate_list)

    return circuit
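The Notes above describe how idle time between pulses is padded with identity gates; the short sketch below (not part of the Q-CTRL package) illustrates that gate-counting arithmetic for the 'instant unitary' case with made-up offsets, ignoring floating-point edge cases.

import numpy as np

gate_time = 0.1
offsets = [0.25, 0.5, 1.0]      # illustrative pulse instants in seconds

time_covered = 0.0
for offset in offsets:
    # number of identity gates that fit into the idle gap before this pulse
    n_identity = int(np.floor((offset - time_covered) / gate_time))
    print('pulse at {:.2f} s: {} identity gate(s) before it'.format(offset, n_identity))
    time_covered = offset       # instant unitary: the pulse itself adds no extra time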
Example #58
0
def is_same_pdf_norm(a: Union[tf.Tensor, float], b: float) -> bool:
    if isinstance(a, tf.Tensor):
        return np.isclose(a.numpy(), b)
    else:
        return np.isclose(a, b)
Example #59
0
def test_collector_with_dict_state():
    env = MyTestEnv(size=5, sleep=0, dict_state=True)
    policy = MyPolicy(dict_state=True)
    c0 = Collector(policy, env, ReplayBuffer(size=100),
                   Logger.single_preprocess_fn)
    c0.collect(n_step=3)
    c0.collect(n_episode=2)
    assert len(c0.buffer) == 10
    env_fns = [
        lambda x=i: MyTestEnv(size=x, sleep=0, dict_state=True)
        for i in [2, 3, 4, 5]
    ]
    envs = DummyVectorEnv(env_fns)
    envs.seed(666)
    obs = envs.reset()
    assert not np.isclose(obs[0]['rand'], obs[1]['rand'])
    c1 = Collector(policy, envs,
                   VectorReplayBuffer(total_size=100, buffer_num=4),
                   Logger.single_preprocess_fn)
    c1.collect(n_step=12)
    result = c1.collect(n_episode=8)
    assert result['n/ep'] == 8
    lens = np.bincount(result['lens'])
    assert result['n/st'] == 21 and np.all(lens == [0, 0, 2, 2, 2, 2]) or \
        result['n/st'] == 20 and np.all(lens == [0, 0, 3, 1, 2, 2])
    batch, _ = c1.buffer.sample(10)
    c0.buffer.update(c1.buffer)
    assert len(c0.buffer) in [42, 43]
    if len(c0.buffer) == 42:
        assert np.all(c0.buffer[:].obs.index[..., 0] == [
            0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
            0, 1, 0, 1, 0, 1, 0, 1,
            0, 1, 2, 0, 1, 2,
            0, 1, 2, 3, 0, 1, 2, 3,
            0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
        ]), c0.buffer[:].obs.index[..., 0]
    else:
        assert np.all(c0.buffer[:].obs.index[..., 0] == [
            0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
            0, 1, 0, 1, 0, 1,
            0, 1, 2, 0, 1, 2, 0, 1, 2,
            0, 1, 2, 3, 0, 1, 2, 3,
            0, 1, 2, 3, 4, 0, 1, 2, 3, 4,
        ]), c0.buffer[:].obs.index[..., 0]
    c2 = Collector(
        policy, envs,
        VectorReplayBuffer(total_size=100, buffer_num=4, stack_num=4),
        Logger.single_preprocess_fn)
    c2.collect(n_episode=10)
    batch, _ = c2.buffer.sample(10)
def arc(v_lin, v_ang, dt=1):
    if np.isclose(v_ang, 0):  # moving straight: the turn radius R is at infinity, handle this case separately
        return mktr(v_lin * dt, 0)  # note we translate along x
    R = v_lin / v_ang
    return mktr(0, R) @ mkrot(v_ang * dt) @ mktr(0, -R)
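mktr and mkrot are not defined in the snippet above; the sketch below is an assumed reconstruction in which they build 3x3 homogeneous SE(2) translation and rotation matrices, which is enough for arc to compose a turn about the instantaneous center of rotation at distance R = v_lin / v_ang.

import numpy as np

def mktr(x, y):
    # homogeneous 2-D translation by (x, y) -- assumed helper, not from the original source
    return np.array([[1., 0., x],
                     [0., 1., y],
                     [0., 0., 1.]])

def mkrot(theta):
    # homogeneous 2-D rotation by theta radians -- assumed helper, not from the original source
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.],
                     [s,  c, 0.],
                     [0., 0., 1.]])

# Usage check: 2 s at 1 m/s while turning 0.5 rad/s is a 1 rad arc, so the
# rotational part of the resulting pose equals mkrot(1.0).
pose = arc(1.0, 0.5, dt=2.0)
assert np.isclose(pose[0, 0], np.cos(1.0))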