Code example #1
def test_non_ndarray_inputs():
    # Regression check for gh-5604

    class MyArray(object):
        def __init__(self, data):
            self.data = data

        @property
        def __array_interface__(self):
            return self.data.__array_interface__

    class MyArray2(object):
        def __init__(self, data):
            self.data = data

        def __array__(self):
            return self.data

    for cls in [MyArray, MyArray2]:
        x = np.arange(5)

        assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
        assert_(not np.shares_memory(cls(x[::2]), x[1::2]))

        assert_(np.shares_memory(cls(x[1::3]), x[::2]))
        assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
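As an aside, here is a minimal standalone sketch (assuming nothing beyond NumPy itself) of the distinction the test above relies on: may_share_memory only checks whether the memory bounds of the two arrays overlap, while shares_memory solves the exact element-overlap problem.

import numpy as np

x = np.arange(5)
a, b = x[::2], x[1::2]            # interleaved views over the same buffer
print(np.may_share_memory(a, b))  # True:  the memory bounds of a and b overlap
print(np.shares_memory(a, b))     # False: no element is actually shared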
Code example #2
def test_shares_memory_api():
    x = np.zeros([4, 5, 6], dtype=np.int8)

    assert_equal(np.shares_memory(x, x), True)
    assert_equal(np.shares_memory(x, x.copy()), False)

    a = x[:,::2,::3]
    b = x[:,::3,::2]
    assert_equal(np.shares_memory(a, b), True)
    assert_equal(np.shares_memory(a, b, max_work=None), True)
    assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
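    # long below is the Python 2 integer type; on Python 3 a plain int plays the same role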
    assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1))
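A small illustrative sketch of how max_work is typically used (the helper name memory_overlap is made up for this example): ask shares_memory for the exact answer within a work budget, and fall back to the cheap bounds-based may_share_memory check if NumPy gives up with TooHardError (newer NumPy also exposes the exception as np.exceptions.TooHardError).

import numpy as np

def memory_overlap(a, b, budget=10_000):
    # Exact overlap check within a work budget; if the problem is too
    # expensive, fall back to the conservative bounds-based answer.
    try:
        return np.shares_memory(a, b, max_work=budget)
    except np.TooHardError:
        return np.may_share_memory(a, b)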
Code example #3
File: test_array.py Project: DusanMilunovic/pandas
def test_array_copy():
    a = np.array([1, 2])
    # default is to copy
    b = pd.array(a)
    assert np.shares_memory(a, b._ndarray) is False

    # copy=True
    b = pd.array(a, copy=True)
    assert np.shares_memory(a, b._ndarray) is False

    # copy=False
    b = pd.array(a, copy=False)
    assert np.shares_memory(a, b._ndarray) is True
Code example #4
File: test_base.py Project: wuhaochen/pandas
def test_to_numpy_copy(arr, as_series):
    obj = pd.Index(arr, copy=False)
    if as_series:
        obj = pd.Series(obj.values, copy=False)

    # no copy by default
    result = obj.to_numpy()
    assert np.shares_memory(arr, result) is True

    result = obj.to_numpy(copy=False)
    assert np.shares_memory(arr, result) is True

    # copy=True
    result = obj.to_numpy(copy=True)
    assert np.shares_memory(arr, result) is False
Code example #5
File: test_core.py Project: diegobersanetti/gwpy
    def test_prepend(self, instance):
        # test appending from empty (with and without copy)
        new = type(instance)()
        new.prepend(instance)
        for key in new:
            assert shares_memory(new[key].value, instance[key].value)
            utils.assert_quantity_sub_equal(new[key], instance[key])

        # create copy of dict that is contiguous
        new = type(instance)()
        for key in instance:
            a = instance[key]
            new[key] = type(a)([1, 2, 3, 4, 5], x0=a.xspan[1], dx=a.dx,
                               dtype=a.dtype)
        # append and test
        b = new.copy()
        b.prepend(instance)
        for key in b:
            utils.assert_array_equal(
                b[key].value,
                numpy.concatenate((instance[key].value, new[key].value)))

        # create copy of dict that is discontiguous
        new = type(instance)()
        for key in instance:
            a = instance[key]
            new[key] = type(a)([1, 2, 3, 4, 5], x0=a.xspan[1], dx=a.dx,
                               dtype=a.dtype)
        # check error
        with pytest.raises(ValueError):
            new.append(instance)
        # check padding works (don't validate too much, that is tested
        # elsewhere)
        b = new.copy()
        b.prepend(instance, pad=0)
Code example #6
File: test_methods.py Project: tnir/pandas
def test_reindex_columns(using_copy_on_write):
    # Case: reindexing the column returns a new dataframe
    # + afterwards modifying the result
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    df2 = df.reindex(columns=["a", "c"])

    if using_copy_on_write:
        # still shares memory (df2 is a shallow copy)
        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    # mutating df2 triggers a copy-on-write for that column
    df2.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
    tm.assert_frame_equal(df, df_orig)
Code example #7
def test_cannot_set_shape_on_preallocated_memory(ref_shape):
    ones_arr = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
    ones_arr = np.ascontiguousarray(ones_arr)
    ov_tensor = Tensor(ones_arr, shared_memory=True)
    assert np.shares_memory(ones_arr, ov_tensor.data)
    with pytest.raises(RuntimeError) as e:
        ov_tensor.shape = ref_shape
    assert "Cannot call setShape for Blobs created on top of preallocated memory" in str(
        e.value)
Code example #8
File: call_tf_test.py Project: zhaowilliam/jax
 def test_eval_devicearray_no_copy(self):
   if jtu.device_under_test() != "cpu":
     # TODO(necula): add tests for GPU and TPU
     raise unittest.SkipTest("no_copy test works only on CPU")
   # For DeviceArray zero-copy works even if not aligned
   x = jnp.ones((3, 3), dtype=np.float32)
   res = jax2tf.call_tf(lambda x: x)(x)
   self.assertAllClose(x, res)
   self.assertTrue(np.shares_memory(x, res))
Code example #9
File: test_copying.py Project: MDAnalysis/mdanalysis
def test_positions_share_memory(original_and_copy):
    # check that the memory in Timestep objects is unique
    original, copy = original_and_copy
    assert not np.shares_memory(original.ts.positions, copy.ts.positions)

    original.ts.positions *= 2

    with pytest.raises(AssertionError):
        assert_equal(original.ts.positions, copy.ts.positions)
Code example #10
def test_no_copy_when_single_float_dtype_dataframe(dtype):
    pd = pytest.importorskip('pandas')
    X = np.random.rand(10, 2).astype(dtype)
    df = pd.DataFrame(X)
    # feature names are required to not make a copy (rename makes a copy)
    feature_name = ['x1', 'x2']
    built_data = lgb.basic._data_from_pandas(df, feature_name, None, None)[0]
    assert built_data.dtype == dtype
    assert np.shares_memory(X, built_data)
Code example #11
File: test_setitem.py Project: stevenschaerer/pandas
def test_set_column_with_series(using_copy_on_write):
    # Case: setting a series as a new column (df[col] = s) copies that data
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    ser = Series([1, 2, 3])

    df["c"] = ser

    if using_copy_on_write:
        # with CoW we can delay the copy
        assert np.shares_memory(df["c"].values, ser.values)
    else:
        # the series data is copied
        assert not np.shares_memory(df["c"].values, ser.values)

    # and modifying the series does not modify the DataFrame
    ser.iloc[0] = 0
    assert ser.iloc[0] == 0
    tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
Code example #12
    def backward(self, grad, *, graph, _reduction=None, **kwargs):
        """ Back-propagates the gradient through all of the operation's inputs.
        Constant tensors do not propagate a gradient.

        Parameters
        ----------
        grad : numpy.ndarray
            The back-propagated total derivative with respect to the present
            operation (`f`): d(out)/df

        graph : Set[Operation]
            The set of all operations relevant to the terminal node of the computational graph,
            which triggered back-propagation.

        _reduction : Optional[Callable[[ndarray, Tuple[int, ...]], ndarray]]
            Developer option-only. A callable used to process the gradient
            prior to accumulation (e.g. broadcast-reduction)
        """
        for index, var in enumerate(self.variables):
            if not var.constant:
                if not var._ops:
                    raise InvalidBackprop(
                        "Part of the computational graph containing "
                        "this tensor was 'cleared' prior to backprop.")

                try:
                    backed_grad = self.backward_var(grad, index, **kwargs)
                except SkipGradient:
                    continue

                if is_invalid_gradient(backed_grad):
                    raise InvalidGradient(
                        f"An invalid gradient-value was passed to:"
                        f"\n\t`{type(self).__name__}.backward_var(<gradient>, index={index})`"
                        f"\nGradients are expected to be real-valued scalars or "
                        f"numpy arrays, got a gradient of type: {type(backed_grad)}"
                    )
                if var.grad is None:
                    tmp_grad = np.asarray(backed_grad)

                    if _reduction is not None:
                        tmp_grad = _reduction(tmp_grad, var.shape)

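                    # store a copy when the new gradient aliases the incoming `grad`; otherwise store it as-is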
                    var.grad = (np.copy(tmp_grad) if np.shares_memory(
                        tmp_grad, grad) else tmp_grad)
                else:
                    if _reduction is None:
                        var.grad += backed_grad
                    else:
                        var.grad += _reduction(backed_grad, var.shape)
        for var in {
                i
                for i in self.variables
                if not i.constant and i.creator is not None
        }:
            var._accum_ops.add(self)
            var._backward(graph=graph)
Code example #13
    def test_utils_alm_return_2d_2d_pol(self):

        npol = 2
        lmax = 5
        alm = np.ones((npol, hp.Alm.getsize(lmax)), dtype=np.complex128)

        alm_out = utils.alm_return_2d(alm, npol, lmax)

        self.assertEqual(alm_out.shape, (2, hp.Alm.getsize(lmax)))
        self.assertTrue(np.shares_memory(alm, alm_out))
Code example #14
def test_destroy():
    self = HashTable(10, float)
    self.add([.3, .5, .8])

    # Release self.keys so that it can be written to.
    keys = self.destroy()
    assert keys.flags.writeable
    assert np.shares_memory(keys, self.keys)

    # destroy() should be re-callable without complaint (although it's now
    # functionless).
    assert np.shares_memory(keys, self.destroy())

    # Now that self.keys has been made accessibly writeable, it is no longer
    # safe to use the table.
    with pytest.raises(exceptions.HashTableDestroyed, match=".*"):
        self.add(.8)
    with pytest.raises(exceptions.HashTableDestroyed):
        self.get(.5)
Code example #15
File: test_signals.py Project: uwmisl/poretitioner
 def converting_signal_to_picoamperes_creates_new_array_test(self):
     # It's important that we do not share memory between raw/pico/fractional signals, so modifying one doesn't change the others unexpectedly.
     channel_number = 2  # Arbitrary
     n = 4
     adc_signal = np.array([2, 4, 8, 20])
     raw = RawSignal(adc_signal, channel_number, CALIBRATION)
     pico = raw.to_picoamperes()
     assert not np.shares_memory(
         raw, pico,
         max_work=n), "RawSignal should create a new space in memory."
Code example #16
def test_final_state_vector_is_not_last_object():
    sim = cirq.Simulator()

    q = cirq.LineQubit(0)
    initial_state = np.array([1, 0], dtype=np.complex64)
    circuit = cirq.Circuit(cirq.wait(q))
    result = sim.simulate(circuit, initial_state=initial_state)
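    # the simulator should hand back its own copy of the state: equal in value, but not the caller's buffer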
    assert result.state_vector() is not initial_state
    assert not np.shares_memory(result.state_vector(), initial_state)
    np.testing.assert_equal(result.state_vector(), initial_state)
Code example #17
    def test_from_sequence_copy(self):
        cat = Categorical(np.arange(5).repeat(2))
        result = Categorical._from_sequence(cat, dtype=None, copy=False)

        # more generally, we'd be OK with a view
        assert result._codes is cat._codes

        result = Categorical._from_sequence(cat, dtype=None, copy=True)

        assert not np.shares_memory(result._codes, cat._codes)
Code example #18
def test_final_density_matrix_is_not_last_object():
    sim = cirq.DensityMatrixSimulator()

    q = cirq.LineQubit(0)
    initial_state = np.array([[1, 0], [0, 0]], dtype=np.complex64)
    circuit = cirq.Circuit(cirq.wait(q))
    result = sim.simulate(circuit, initial_state=initial_state)
    assert result.final_density_matrix is not initial_state
    assert not np.shares_memory(result.final_density_matrix, initial_state)
    np.testing.assert_equal(result.final_density_matrix, initial_state)
Code example #19
def test_data_is_the_same_object_when_not_proxy():
    dp = load_datapackage(fs_or_obj=ZipFS(str(dirpath / "test-fixture.zip")))
    fdp = dp.filter_by_attribute("matrix", "sa_matrix")

    arr1, _ = dp.get_resource("sa-data-array.data")
    arr2, _ = fdp.get_resource("sa-data-array.data")

    assert np.allclose(arr1, arr2)
    assert arr1 is arr2
    assert np.shares_memory(arr1, arr2)
Code example #20
def test_copies_are_not_associated():
    dataset = examples.load_structured()
    points = pyvista_ndarray(dataset.GetPoints().GetData(), dataset=dataset)
    points_2 = points.copy()

    # check that copies of pyvista_ndarray are dissociated from the original dataset
    assert points_2.VTKObject is None
    assert points_2.dataset is None
    assert points_2.association.name == 'NONE'
    assert not np.shares_memory(points, points_2)
Code example #21
def test_subset_column_selection_modify_parent(using_copy_on_write):
    # Case: taking a subset of the columns of a DataFrame
    # + afterwards modifying the parent
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})

    subset = df[["a", "c"]]
    if using_copy_on_write:
        # the subset shares memory ...
        assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
        # ... but a copy is triggered (copy-on-write) when the parent is modified
    df.iloc[0, 0] = 0

    assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
    if using_copy_on_write:
        # different column/block still shares memory
        assert np.shares_memory(get_array(subset, "c"), get_array(df, "c"))

    expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]})
    tm.assert_frame_equal(subset, expected)
Code example #22
File: test_iloc.py Project: stevenschaerer/pandas
    def test_iloc_setitem_fullcol_categorical(self, indexer, key, using_array_manager):
        frame = DataFrame({0: range(3)}, dtype=object)

        cat = Categorical(["alpha", "beta", "gamma"])

        if not using_array_manager:
            assert frame._mgr.blocks[0]._can_hold_element(cat)

        df = frame.copy()
        orig_vals = df.values

        overwrite = isinstance(key, slice) and key == slice(None)
        warn = None
        if overwrite:
            warn = FutureWarning
        msg = "will attempt to set the values inplace instead"
        with tm.assert_produces_warning(warn, match=msg):
            indexer(df)[key, 0] = cat

        if overwrite:
            # TODO: GH#39986 this probably shouldn't behave differently
            expected = DataFrame({0: cat})
            assert not np.shares_memory(df.values, orig_vals)
        else:
            expected = DataFrame({0: cat}).astype(object)
            if not using_array_manager:
                assert np.shares_memory(df[0].values, orig_vals)

        tm.assert_frame_equal(df, expected)

        # check we don't have a view on cat (may be undesired GH#39986)
        df.iloc[0, 0] = "gamma"
        assert cat[0] != "gamma"

        # TODO with mixed dataframe ("split" path), we always overwrite the column
        frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)})
        df = frame.copy()
        orig_vals = df.values
        with tm.assert_produces_warning(FutureWarning, match=msg):
            indexer(df)[key, 0] = cat
        expected = DataFrame({0: cat, 1: range(3)})
        tm.assert_frame_equal(df, expected)
Code example #23
    def test_iloc_setitem_fullcol_categorical(self, indexer, key,
                                              using_array_manager):
        frame = DataFrame({0: range(3)}, dtype=object)

        cat = Categorical(["alpha", "beta", "gamma"])

        if not using_array_manager:
            assert frame._mgr.blocks[0]._can_hold_element(cat)

        df = frame.copy()
        orig_vals = df.values
        indexer(df)[key, 0] = cat

        overwrite = isinstance(key, slice) and key == slice(None)

        if overwrite or using_array_manager:
            # TODO(ArrayManager) we always overwrite because ArrayManager takes
            #  the "split" path, which still overwrites
            # TODO: GH#39986 this probably shouldn't behave differently
            expected = DataFrame({0: cat})
            assert not np.shares_memory(df.values, orig_vals)
        else:
            expected = DataFrame({0: cat}).astype(object)
            if not using_array_manager:
                assert np.shares_memory(df[0].values, orig_vals)

        tm.assert_frame_equal(df, expected)

        # check we don't have a view on cat (may be undesired GH#39986)
        df.iloc[0, 0] = "gamma"
        assert cat[0] != "gamma"

        # TODO with mixed dataframe ("split" path), we always overwrite the column
        frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)})
        df = frame.copy()
        orig_vals = df.values
        indexer(df)[key, 0] = cat
        expected = DataFrame({0: cat, 1: range(3)})
        tm.assert_frame_equal(df, expected)
Code example #24
def test_fdp_can_load_proxy_first():
    dp = load_datapackage(fs_or_obj=ZipFS(str(dirpath / "test-fixture.zip")),
                          proxy=True)
    fdp = dp.filter_by_attribute("matrix", "sa_matrix")
    arr2, _ = fdp.get_resource("sa-data-array.data")
    arr1, _ = dp.get_resource("sa-data-array.data")

    assert np.allclose(arr1, arr2)
    assert arr1.base is not arr2
    assert arr2.base is not arr1
    assert not np.shares_memory(arr1, arr2)
Code example #25
def test_advanced_integer_index(shape: Tuple[int, ...], min_dims: int,
                                data: st.SearchStrategy):
    max_dims = data.draw(st.integers(min_dims, min_dims + 3), label="max_dims")
    index = data.draw(
        adv_integer_index(shape, min_dims=min_dims, max_dims=max_dims))
    x = np.zeros(shape)
    out = x[index]  # raises if the index is invalid
    note("x[index]: {}".format(out))
    assert min_dims <= out.ndim <= max_dims, "The input parameters were not respected"
    assert not np.shares_memory(
        x, out), "An advanced index should create a copy upon indexing"
Code example #26
    def test_transpose_get_view_dt64tzget_view(self):
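        # transposing a single-block datetime64tz frame should return a view of the same data, not a copy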
        dti = date_range("2016-01-01", periods=6, tz="US/Pacific")
        arr = dti._data.reshape(3, 2)
        df = DataFrame(arr)
        assert df._mgr.nblocks == 1

        result = df.T
        assert result._mgr.nblocks == 1

        rtrip = result._mgr.blocks[0].values
        assert np.shares_memory(arr._ndarray, rtrip._ndarray)
Code example #27
File: test_tensor.py Project: liubo-intel/openvino
def test_can_reset_shape_after_decreasing_on_preallocated_memory():
    ones_arr = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
    ones_arr = np.ascontiguousarray(ones_arr)
    ov_tensor = Tensor(ones_arr, shared_memory=True)
    ref_shape_1 = [1, 3, 24, 24]
    ref_shape_2 = [1, 3, 32, 32]
    assert np.shares_memory(ones_arr, ov_tensor.data)
    ov_tensor.shape = ref_shape_1
    assert list(ov_tensor.shape) == ref_shape_1
    ov_tensor.shape = ref_shape_2
    assert list(ov_tensor.shape) == ref_shape_2
Code example #28
File: test_rename.py Project: tnir/pandas
    def test_rename_nocopy(self, float_frame):
        renamed = float_frame.rename(columns={"C": "foo"}, copy=False)

        assert np.shares_memory(renamed["foo"]._values,
                                float_frame["C"]._values)

        with tm.assert_produces_warning(None):
            # This loc setitem already happens inplace, so no warning
            #  that this will change in the future
            renamed.loc[:, "foo"] = 1.0
        assert (float_frame["C"] == 1.0).all()
Code example #29
def test_data_is_readable_multiple_times_when_proxy_directory():
    dp = load_datapackage(fs_or_obj=OSFS(str(dirpath / "tfd")), proxy=True)
    fdp = dp.filter_by_attribute("matrix", "sa_matrix")

    arr1, _ = dp.get_resource("sa-data-array.data")
    arr2, _ = fdp.get_resource("sa-data-array.data")

    assert np.allclose(arr1, arr2)
    assert arr1.base is not arr2
    assert arr2.base is not arr1
    assert not np.shares_memory(arr1, arr2)
Code example #30
    def test_ifft_input_shape(self):
        # Tests ifft for various shapes and choices of axes.
        # 1D IFFT over last axis for 3d array.
        fsignal = np.ones((1, 2, 5), dtype=np.complex128)
        fsignal[0, 1, :] = 10.
        out_exp = np.zeros((1, 2, 5))
        out_exp[0, 0, 0] = 5
        out_exp[0, 1, 0] = 50
        out = fft.ifft(fsignal)
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])

        # 1D IFFT over middle axis for 3d array.
        fsignal = np.ones((1, 5, 2), dtype=np.complex128)
        fsignal[0, :, 1] = 10.
        out_exp = np.zeros((1, 5, 2))
        out_exp[0, 0, 0] = 5
        out_exp[0, 0, 1] = 50
        out = fft.ifft(fsignal, axes=[-2])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])

        # 2D IFFT over last 2 axes of 4d array.
        fsignal = np.ones((1, 2, 5, 10), dtype=np.complex128)
        fsignal[0, 1, :] = 10.
        out_exp = np.zeros((1, 2, 5, 10))
        out_exp[0, 0, 0, 0] = 50
        out_exp[0, 1, 0, 0] = 500
        out = fft.ifft(fsignal, axes=[-2, -1])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])

        # 2D IFFT over last 2 axes of 4d non-contiguous array.
        fsignal = np.ones((1, 2, 5, 10), dtype=np.complex128)
        fsignal[0, 1, :] = 10.
        tod = np.zeros((5, 10, 1, 2),
                       dtype=np.complex128).transpose(2, 3, 0, 1)
        out_exp = np.zeros_like(tod)
        out_exp[0, 0, 0, 0] = 50
        out_exp[0, 1, 0, 0] = 500
        out = fft.ifft(fsignal, tod=tod, axes=[-2, -1])
        self.assertTrue(np.shares_memory(tod, out))
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertFalse(out.flags['C_CONTIGUOUS'])

        # 2D IFFT over middle 2 axes of 4d array.
        fsignal = np.ones((1, 5, 10, 2), dtype=np.complex128)
        fsignal[0, :, :, 1] = 10.
        out_exp = np.zeros((1, 5, 10, 2))
        out_exp[0, 0, 0, 0] = 50
        out_exp[0, 0, 0, 1] = 500
        out = fft.ifft(fsignal, axes=[-3, -2])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
Code example #31
File: test_methods.py Project: tnir/pandas
def test_rename_columns(using_copy_on_write):
    # Case: renaming columns returns a new dataframe
    # + afterwards modifying the result
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()
    df2 = df.rename(columns=str.upper)

    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
    df2.iloc[0, 0] = 0
    assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
    if using_copy_on_write:
        assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
    expected = DataFrame({
        "A": [0, 2, 3],
        "B": [4, 5, 6],
        "C": [0.1, 0.2, 0.3]
    })
    tm.assert_frame_equal(df2, expected)
    tm.assert_frame_equal(df, df_orig)
Code example #32
def test_state_copy():
    sim = cirq.CliffordSimulator()

    q = cirq.LineQubit(0)
    circuit = cirq.Circuit(cirq.H(q), cirq.H(q))

    state_ch_forms = []
    for step in sim.simulate_moment_steps(circuit):
        state_ch_forms.append(step.state.ch_form)
    for x, y in itertools.combinations(state_ch_forms, 2):
        assert not np.shares_memory(x.v, y.v)
Code example #33
File: test_fillna.py Project: tnir/pandas
    def test_fillna_on_column_view(self):
        # GH#46149 avoid unnecessary copies
        arr = np.full((40, 50), np.nan)
        df = DataFrame(arr)

        df[0].fillna(-1, inplace=True)
        assert (arr[:, 0] == -1).all()

        # i.e. we didn't create a new 49-column block
        assert len(df._mgr.arrays) == 1
        assert np.shares_memory(df.values, arr)
Code example #34
def main():
    x = np.linspace(1, 21, 11)
    print(x)

    y = np.array([2, 4, 7])
    print(y)

    print(x[y])

    z = np.arange(25).reshape(5, 5)
    print(z)
    k = np.array([2, 4])
    print(k)

    print(z[k, :])
    print(z[:, k])

    m = z[:, k]
    m[0, 0] = 100
    print(m)
    print(z)
    # ! Fancy indexing
    # Indexing an ndarray with another array produces a copy, so changes made to it stay local (the original is untouched)
    print(np.shares_memory(z, m))

    # ! Boolean indexing
    f = np.arange(10)
    print(f)
    print(f[(f % 2 == 0)])
    p = f[(f % 2 == 0)]
    p[0] = 100
    print(p)
    print(f)

    s1 = np.random.randint(10, size=10)
    s2 = np.random.randint(10, size=10)
    print(s1)
    print(s2)
    print(s1 > s2)
    print(s1 < s2)
    print(type(s1 < s2))
    print((s1 > s2).dtype)

    print(np.all(s1 > s2))
    print(np.any(s1 > s2))

    t = np.linspace(1, 21, 11)
    print(t)

    mask = (t % 3 == 0)
    print(mask)
    print(type(mask))
    print(t[mask])
Code example #35
File: test_methods.py Project: tnir/pandas
def test_copy(using_copy_on_write):
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_copy = df.copy()

    # the deep copy doesn't share memory
    assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
    if using_copy_on_write:
        assert df_copy._mgr.refs is None

    # mutating copy doesn't mutate original
    df_copy.iloc[0, 0] = 0
    assert df.iloc[0, 0] == 1
Code example #36
File: test_setitem.py Project: stevenschaerer/pandas
def test_set_column_with_array():
    # Case: setting an array as a new column (df[col] = arr) copies that data
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    arr = np.array([1, 2, 3], dtype="int64")

    df["c"] = arr

    # the array data is copied
    assert not np.shares_memory(df["c"].values, arr)
    # and thus modifying the array does not modify the DataFrame
    arr[0] = 0
    tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
Code example #37
File: test_core.py Project: diegobersanetti/gwpy
    def test_from_nds2_buffer(self):
        # build fake buffer
        nds_buffer = mocks.nds2_buffer(
            'X1:TEST',
            self.data,
            1000000000,
            self.data.shape[0],
            'm',
            name='test',
            slope=2,
            offset=1,
        )

        # convert to TimeSeries
        a = self.TEST_CLASS.from_nds2_buffer(nds_buffer)

        # check everything works (including default dynamic scaling)
        assert isinstance(a, self.TEST_CLASS)
        assert not shares_memory(a.value, nds_buffer.data)
        utils.assert_array_equal(a.value, nds_buffer.data * 2 + 1)
        assert a.t0 == 1000000000 * units.s
        assert a.dt == units.s / nds_buffer.data.shape[0]
        assert a.name == 'test'
        assert a.channel == Channel(
            'X1:TEST',
            sample_rate=self.data.shape[0],
            unit='m',
            type='raw',
            dtype='float32',
        )

        # check that we can use keywords to override settings
        b = self.TEST_CLASS.from_nds2_buffer(nds_buffer, scaled=False,
                                             copy=False, sample_rate=128)
        assert b.dt == 1/128. * units.s
        assert shares_memory(nds_buffer.data, b.value)
Code example #38
File: test_core.py Project: diegobersanetti/gwpy
    def test_to_from_pycbc(self, array):
        from pycbc.types import TimeSeries as PyCBCTimeSeries

        # test default conversion
        pycbcts = array.to_pycbc()
        assert isinstance(pycbcts, PyCBCTimeSeries)
        utils.assert_array_equal(array.value, pycbcts.data)
        assert array.t0.value == pycbcts.start_time
        assert array.dt.value == pycbcts.delta_t

        # go back and check we get back what we put in in the first place
        a2 = type(array).from_pycbc(pycbcts)
        utils.assert_quantity_sub_equal(
            array, a2, exclude=['name', 'unit', 'channel'])

        # test copy=False
        a2 = type(array).from_pycbc(array.to_pycbc(copy=False), copy=False)
        assert shares_memory(array.value, a2.value)
Code example #39
    def test_unary_gufunc_fuzz(self):
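        # iter_random_view_pairs and assert_copy_equivalent are helper functions from the surrounding test module (not shown here)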
        shapes = [7, 13, 8, 21, 29, 32]
        gufunc = _umath_tests.euclidean_pdist

        rng = np.random.RandomState(1234)

        for ndim in range(2, 6):
            x = rng.rand(*shapes[:ndim])

            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)

            min_count = 500 // (ndim + 1)**2

            overlapping = 0
            while overlapping < min_count:
                a, b = next(it)

                if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
                    continue

                # Ensure the shapes are such that euclidean_pdist is happy
                if b.shape[-1] > b.shape[-2]:
                    b = b[...,0,:]
                else:
                    b = b[...,:,0]

                n = a.shape[-2]
                p = n * (n - 1) // 2
                if p <= b.shape[-1] and p > 0:
                    b = b[...,:p]
                else:
                    n = max(2, int(np.sqrt(b.shape[-1]))//2)
                    p = n * (n - 1) // 2
                    a = a[...,:n,:]
                    b = b[...,:p]

                # Call
                if np.shares_memory(a, b):
                    overlapping += 1

                with np.errstate(over='ignore', invalid='ignore'):
                    assert_copy_equivalent(gufunc, [a], out=b)
Code example #40
File: test_core.py Project: diegobersanetti/gwpy
    def test_to_from_lal(self, array):
        import lal

        # check that to + from returns the same array
        lalts = array.to_lal()
        a2 = type(array).from_lal(lalts)
        utils.assert_quantity_sub_equal(array, a2, exclude=['name', 'channel'])
        assert a2.name == ''

        # test copy=False
        a2 = type(array).from_lal(lalts, copy=False)
        assert shares_memory(a2.value, lalts.data.data)

        # test units
        array.override_unit('undef')
        with pytest.warns(UserWarning):
            lalts = array.to_lal()
        assert lalts.sampleUnits == lal.DimensionlessUnit
        a2 = self.TEST_CLASS.from_lal(lalts)
        assert a2.unit == units.dimensionless_unscaled
Code example #41
File: test_frequencyseries.py Project: stefco/gwpy
    def test_to_from_pycbc(self, array):
        from pycbc.types import FrequencySeries as PyCBCFrequencySeries

        array.epoch = 0

        # test default conversion
        pycbcfs = array.to_pycbc()
        assert isinstance(pycbcfs, PyCBCFrequencySeries)
        utils.assert_array_equal(array.value, pycbcfs.data)
        assert array.f0.value == 0 * units.Hz
        assert array.df.value == pycbcfs.delta_f
        assert array.epoch.gps == pycbcfs.epoch

        # go back and check we get back what we put in in the first place
        a2 = type(array).from_pycbc(pycbcfs)
        utils.assert_quantity_sub_equal(
            array, a2, exclude=['name', 'unit', 'channel'])

        # test copy=False
        a2 = type(array).from_pycbc(array.to_pycbc(copy=False), copy=False)
        assert shares_memory(array.value, a2.value)
Code example #42
File: test_numpy.py Project: DusanMilunovic/pandas
def test_constructor_copy():
    arr = np.array([0, 1])
    result = PandasArray(arr, copy=True)

    assert np.shares_memory(result._ndarray, arr) is False
Code example #43
    def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
                             count=5000):
        shapes = [7, 13, 8, 21, 29, 32]

        rng = np.random.RandomState(1234)

        for ndim in range(1, 6):
            x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)

            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)

            min_count = count // (ndim + 1)**2

            overlapping = 0
            while overlapping < min_count:
                a, b = next(it)

                a_orig = a.copy()
                b_orig = b.copy()

                if get_out_axis_size is None:
                    assert_copy_equivalent(operation, [a], out=b)

                    if np.shares_memory(a, b):
                        overlapping += 1
                else:
                    for axis in itertools.chain(range(ndim), [None]):
                        a[...] = a_orig
                        b[...] = b_orig

                        # Determine size for reduction axis (None if scalar)
                        outsize, scalarize = get_out_axis_size(a, b, axis)
                        if outsize == 'skip':
                            continue

                        # Slice b to get an output array of the correct size
                        sl = [slice(None)] * ndim
                        if axis is None:
                            if outsize is None:
                                sl = [slice(0, 1)] + [0]*(ndim - 1)
                            else:
                                sl = [slice(0, outsize)] + [0]*(ndim - 1)
                        else:
                            if outsize is None:
                                k = b.shape[axis]//2
                                if ndim == 1:
                                    sl[axis] = slice(k, k + 1)
                                else:
                                    sl[axis] = k
                            else:
                                assert b.shape[axis] >= outsize
                                sl[axis] = slice(0, outsize)
                        b_out = b[tuple(sl)]

                        if scalarize:
                            b_out = b_out.reshape([])

                        if np.shares_memory(a, b_out):
                            overlapping += 1

                        # Check result
                        assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
Code example #44
File: test_core.py Project: diegobersanetti/gwpy
 def test_copy(self, instance):
     a = instance.copy()
     assert type(a) is type(instance)
     for x, y in zip(instance, a):
         utils.assert_quantity_sub_equal(x, y)
         assert not shares_memory(x.value, y.value)
Code example #45
File: test_core.py Project: diegobersanetti/gwpy
 def test_copy(self, instance):
     copy = instance.copy()
     assert isinstance(copy, self.TEST_CLASS)
     for key in copy:
         assert not shares_memory(copy[key].value, instance[key].value)
         utils.assert_quantity_sub_equal(copy[key], instance[key])