def test_assumes_dims_like_own_name(self):
     input_state = {
         'air_temperature': DataArray(
             np.zeros([2, 2, 4]),
             dims=['x', 'y', 'z'],
             attrs={'units': 'degK'},
         )
     }
     input_properties = {
         'air_temperature': {
             'dims': ['x', 'y', 'z'],
             'units': 'degK',
         }
     }
     raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
     output_properties = {
         'air_temperature': {
             'units': 'degK/s',
         }
     }
     return_value = restore_data_arrays_with_properties(
         raw_arrays, output_properties, input_state, input_properties
     )
     assert isinstance(return_value, dict)
     assert len(return_value.keys()) == 1
     assert isinstance(return_value['air_temperature'], DataArray)
     assert return_value['air_temperature'].attrs['units'] == 'degK/s'
     assert np.byte_bounds(
         return_value['air_temperature'].values) == np.byte_bounds(
         input_state['air_temperature'].values)
     assert (return_value['air_temperature'].values.base is
             input_state['air_temperature'].values)
     assert return_value['air_temperature'].shape == (2, 2, 4)
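A minimal standalone sketch (not part of the test above) of why these tests compare np.byte_bounds: two arrays occupy the same buffer exactly when their byte bounds coincide, so the check proves a view was returned rather than a copy.

import numpy as np

base = np.zeros([2, 2, 4])
view = base.reshape(2, 8)      # a view: no data is copied
copy = base.copy()             # a fresh allocation

assert np.byte_bounds(view) == np.byte_bounds(base)
assert np.byte_bounds(copy) != np.byte_bounds(base)
assert view.base is base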
Example #2
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.
    """
    # offset that comes from the striding differences between a and m
    a_start = np.byte_bounds(a)[0]
    m_start = np.byte_bounds(m)[0]
    offset = a_start - m_start

    # offset from the backing memmap
    offset += m.offset

    if m.flags['F_CONTIGUOUS']:
        order = 'F'
    else:
        # The backing memmap buffer is necessarily contiguous hence C if not
        # Fortran
        order = 'C'

    # If array is a contiguous view, no need to pass the strides
    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
        strides = None
    else:
        strides = a.strides
    return (strided_from_memmap, (m.filename, a.dtype, m.mode, offset, order,
                                  a.shape, strides))
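A small self-contained sketch of the offset arithmetic used above, assuming a throwaway temporary file for the memmap: the byte offset of a strided view into its backing memmap is simply the difference of the two start pointers returned by np.byte_bounds.

import os
import tempfile
import numpy as np

fname = os.path.join(tempfile.mkdtemp(), 'buf.mmap')
m = np.memmap(fname, dtype=np.float64, mode='w+', shape=(4, 5))
a = m[1:, 2:]                                # strided view into the memmap

offset = np.byte_bounds(a)[0] - np.byte_bounds(m)[0]
assert offset == (1 * 5 + 2) * m.itemsize    # row 1, column 2 of a 4x5 buffer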
 def test_restores_new_dims_with_wildcard(self):
     input_state = {
         'air_pressure': DataArray(
             np.zeros([2, 2, 4]),
             dims=['x', 'y', 'z'],
             attrs={'units': 'degK'},
         ),
     }
     input_properties = {
         'air_pressure': {
             'dims': ['*'],
             'units': 'degK',
             'alias': 'p'
         },
     }
     raw_arrays = {
         'q': np.zeros([16, 2])
     }
     output_properties = {
         'q': {
             'dims': ['*', 'new_dim'],
             'units': 'm',
         },
     }
     data_arrays = restore_data_arrays_with_properties(
         raw_arrays, output_properties, input_state, input_properties
     )
     assert len(data_arrays.keys()) == 1
     assert 'q' in data_arrays.keys()
     assert np.all(data_arrays['q'].values.flatten() == raw_arrays['q'].flatten())
     assert np.byte_bounds(
         data_arrays['q'].values) == np.byte_bounds(
         raw_arrays['q'])
     assert data_arrays['q'].dims == ('x', 'y', 'z', 'new_dim')
     assert data_arrays['q'].shape == (2, 2, 4, 2)
Example #4
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.
    """
    # offset that comes from the striding differences between a and m
    a_start = np.byte_bounds(a)[0]
    m_start = np.byte_bounds(m)[0]
    offset = a_start - m_start

    # offset from the backing memmap
    offset += m.offset

    if m.flags["F_CONTIGUOUS"]:
        order = "F"
    else:
        # The backing memmap buffer is necessarily contiguous hence C if not
        # Fortran
        order = "C"

    # If array is a contiguous view, no need to pass the strides
    if a.flags["F_CONTIGUOUS"] or a.flags["C_CONTIGUOUS"]:
        strides = None
    else:
        strides = a.strides
    return (strided_from_memmap, (m.filename, a.dtype, m.mode, offset, order, a.shape, strides))
 def test_restores_aliased_name(self):
     input_state = {
         'air_temperature': DataArray(
             np.zeros([2, 2, 4]),
             dims=['x', 'y', 'z'],
             attrs={'units': 'degK'},
         )
     }
     input_properties = {
         'air_temperature': {
             'dims': ['x', 'y', 'z'],
             'units': 'degK',
         }
     }
     raw_arrays = {
         'p': np.zeros([2, 2, 4])
     }
     output_properties = {
         'air_pressure': {
             'dims': ['x', 'y', 'z'],
             'units': 'm',
             'alias': 'p',
         },
     }
     data_arrays = restore_data_arrays_with_properties(
         raw_arrays, output_properties, input_state, input_properties
     )
     assert len(data_arrays.keys()) == 1
     assert 'air_pressure' in data_arrays.keys()
     assert np.all(data_arrays['air_pressure'].values == raw_arrays['p'])
     assert np.byte_bounds(data_arrays['air_pressure'].values) == np.byte_bounds(raw_arrays['p'])
Example #6
def decode_lab(l, ab, T=None):
    # ab holds per-pixel class scores; ab_dict is a module-level lookup table
    # mapping each class to an (a, b) chroma pair.
    ab_flat = np.reshape(ab, (-1, ab.shape[-1]))
    if T is not None:
        o = ab_flat
        # temperature-scaled softmax over the class axis, computed in place
        ab_flat += 1e-15
        np.log(ab_flat, ab_flat)
        ab_flat /= T
        np.exp(ab_flat, ab_flat)
        S = np.sum(ab_flat, axis=1, keepdims=True)
        assert S.shape[0] == ab_flat.shape[0]
        ab_flat /= S
        # the in-place operations must not have reallocated the buffer
        assert np.byte_bounds(o) == np.byte_bounds(ab_flat)

    ab_small = np.reshape(np.dot(ab_flat, ab_dict), ab.shape[:3] + (2,))

    _, H, W, _ = l.shape
    lab_one = np.zeros((H, W, 3), dtype=np.float32)

    rgb = np.zeros(l.shape[:3] + (3,), dtype=np.float32)
    for i in range(l.shape[0]):
        lab_one[:, :, :1] = l[i]
        lab_one[:, :, 1:] = cv2.resize(ab_small[i], (W, H))
        rgb[i] = cv2.cvtColor(lab_one, cv2.COLOR_LAB2BGR)
    rgb *= 255
    return rgb
Example #7
 def test_match_dims_like_star_z_matching_lengths(self):
     set_direction_names(x=['lat'],
                         y=['lon'],
                         z=['mid_levels', 'interface_levels'])
     input_state = {
         'air_temperature':
         DataArray(
             np.zeros([2, 3, 4]),
             dims=['lat', 'lon', 'interface_levels'],
             attrs={'units': 'degK'},
         ),
         'air_pressure':
         DataArray(
             np.zeros([2, 3, 4]),
             dims=['lat', 'lon', 'interface_levels'],
             attrs={'units': 'Pa'},
         ),
     }
     input_properties = {
         'air_temperature': {
             'dims': ['*', 'z'],
             'units': 'degK',
             'match_dims_like': 'air_pressure',
         },
         'air_pressure': {
             'dims': ['*', 'z'],
             'units': 'Pa',
         },
     }
     raw_arrays = get_numpy_arrays_with_properties(input_state,
                                                   input_properties)
     assert np.byte_bounds(raw_arrays['air_temperature']) == np.byte_bounds(
         input_state['air_temperature'].values)
     assert np.byte_bounds(raw_arrays['air_pressure']) == np.byte_bounds(
         input_state['air_pressure'].values)
def get_base_slice(view):
    '''Returns the 2D slice of view's parent that yields view.

    Note that this function will return a 2D slice even if view's parent
    is three-dimensional.'''
    if view.dtype != np.uint8:
        raise ValueError("Don't know how to compute offset for types other "
                         "than uint8.")
    if not hasattr(view, 'base') or view.base is None:
        raise ValueError('`view` argument does not have `base` property.')
    if len(view.base.strides) not in (2, 3):
        raise ValueError('Base array must have two or three dimensions.')

    # Determine the strides
    base = view.base
    ystride, xstride = base.strides[:2]

    # Compute difference between base start and the view's end & start.
    base_start, _ = np.byte_bounds(base)
    view_start, view_end = np.byte_bounds(view)
    diff_start = view_start - base_start
    diff_end = view_end - base_start - 1

    # Compute slice offsets (integer division so the bounds are ints, as
    # required for slices under Python 3)
    ystart = diff_start // ystride
    xstart = (diff_start % ystride) // xstride
    yend = diff_end // ystride
    xend = (diff_end % ystride) // xstride

    return slice(ystart, yend + 1), slice(xstart, xend + 1)
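A hedged usage sketch for get_base_slice, assuming a plain uint8 array stands in for the image data:

import numpy as np

img = np.zeros((10, 12), dtype=np.uint8)
view = img[2:5, 3:9]
ys, xs = get_base_slice(view)
assert (ys, xs) == (slice(2, 5), slice(3, 9))
assert np.shares_memory(img[ys, xs], view)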
Example #9
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays.

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.
    """
    # offset that comes from the striding differences between a and m
    a_start, a_end = np.byte_bounds(a)
    m_start = np.byte_bounds(m)[0]
    offset = a_start - m_start

    # offset from the backing memmap
    offset += m.offset

    if m.flags['F_CONTIGUOUS']:
        order = 'F'
    else:
        # The backing memmap buffer is necessarily contiguous hence C if not
        # Fortran
        order = 'C'

    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
        # If the array is a contiguous view, no need to pass the strides
        strides = None
        total_buffer_len = None
    else:
        # Compute the total number of items to map from which the strided
        # view will be extracted.
        strides = a.strides
        total_buffer_len = (a_end - a_start) // a.itemsize
    return (_strided_from_memmap, (m.filename, a.dtype, m.mode, offset, order,
                                   a.shape, strides, total_buffer_len))
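A quick standalone check of the total_buffer_len arithmetic above, assuming an ordinary in-memory array (the same byte_bounds reasoning applies to a memmap):

import numpy as np

m = np.zeros((6, 8))           # float64, itemsize 8
a = m[::2, 1::3]               # non-contiguous strided view
a_start, a_end = np.byte_bounds(a)
total_buffer_len = (a_end - a_start) // a.itemsize
# the view spans elements 1..39 of the base buffer, i.e. 39 items
assert total_buffer_len == 39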
Example #10
def test_match_dims_like_partly_hardcoded_dimensions_matching_lengths():
    input_state = {
        'air_temperature': DataArray(
            np.zeros([2, 3, 4]),
            dims=['lat', 'lon', 'mid_levels'],
            attrs={'units': 'degK'},
        ),
        'air_pressure': DataArray(
            np.zeros([2, 3, 4]),
            dims=['lat', 'lon', 'interface_levels'],
            attrs={'units': 'Pa'},
        ),
    }
    input_properties = {
        'air_temperature': {
            'dims': ['*', 'mid_levels'],
            'units': 'degK',
            'match_dims_like': 'air_pressure',
        },
        'air_pressure': {
            'dims': ['*', 'interface_levels'],
            'units': 'Pa',
        },
    }
    raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
    assert np.byte_bounds(input_state['air_temperature'].values) == np.byte_bounds(raw_arrays['air_temperature'])
    assert np.byte_bounds(input_state['air_pressure'].values) == np.byte_bounds(raw_arrays['air_pressure'])
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays.

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.
    """
    # offset that comes from the striding differences between a and m
    a_start, a_end = np.byte_bounds(a)
    m_start = np.byte_bounds(m)[0]
    offset = a_start - m_start

    # offset from the backing memmap
    offset += m.offset

    if m.flags['F_CONTIGUOUS']:
        order = 'F'
    else:
        # The backing memmap buffer is necessarily contiguous hence C if not
        # Fortran
        order = 'C'

    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
        # If the array is a contiguous view, no need to pass the strides
        strides = None
        total_buffer_len = None
    else:
        # Compute the total number of items to map from which the strided
        # view will be extracted.
        strides = a.strides
        total_buffer_len = (a_end - a_start) // a.itemsize
    return (_strided_from_memmap,
            (m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
             total_buffer_len))
 def test_returns_simple_value(self):
     input_state = {
         'air_temperature': DataArray(
             np.zeros([2, 2, 4]),
             dims=['x', 'y', 'z'],
             attrs={'units': 'degK'},
         )
     }
     input_properties = {
         'air_temperature': {
             'dims': ['x', 'y', 'z'],
             'units': 'degK',
         }
     }
     raw_arrays = get_numpy_arrays_with_properties(input_state, input_properties)
     raw_arrays = {key + '_tendency': value for key, value in raw_arrays.items()}
     output_properties = {
         'air_temperature_tendency': {
             'dims': ['x', 'y', 'z'],
             'units': 'degK/s',
         }
     }
     return_value = restore_data_arrays_with_properties(
         raw_arrays, output_properties, input_state, input_properties
     )
     assert isinstance(return_value, dict)
     assert len(return_value.keys()) == 1
     assert isinstance(return_value['air_temperature_tendency'], DataArray)
     assert return_value['air_temperature_tendency'].attrs['units'] == 'degK/s'
     assert np.byte_bounds(
         return_value['air_temperature_tendency'].values) == np.byte_bounds(
         input_state['air_temperature'].values)
     assert (return_value['air_temperature_tendency'].values.base is
             input_state['air_temperature'].values)
     assert return_value['air_temperature_tendency'].shape == (2, 2, 4)
Example #13
def ramp(img, out=None):
    '''Remove change in illumination across an image
    
    This function removes a wedge-like profile from the image
    
    .. note::
        
        For more detail, see: http://www.wadsworth.org/spider_doc/spider/docs/man/ra.html
    
    :Parameters:

    img : numpy.ndarray
          Input image
    out : numpy.ndarray
          Output image
    
    :Returns:
    
    img : numpy.ndarray
          Ramped image
    '''
    
    if out is None:
        out = numpy.empty_like(img, dtype=numpy.float32)
    elif out.dtype != numpy.float32:
        raise ValueError("Output array must be float32")
    if numpy.byte_bounds(out) != numpy.byte_bounds(img):
        out[:] = img
    tout = out.T if not out.flags.f_contiguous else out
    if _spider_filter.ramp(tout) != 0:
        raise ValueError("Ramp filter failed")
    return out
def test_get_numpy_array_3d_no_change():
    array = DataArray(
        np.random.randn(2, 3, 4),
        dims=['x', 'y', 'z'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['x', 'y', 'z'])
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert np.all(numpy_array == array.values)
    assert numpy_array.base is array.values
Example #15
def find_index(base, view):
    """
    Given an array that is a `view` of a `base`, find an index such that
    `base[index] is view`
    """

    if not isinstance(view, np.ndarray):
        return "..."

    itemsize = view.itemsize

    # Find the start and end pointers of the arrays using np.byte_bounds
    offset_start = (np.byte_bounds(view)[0] - np.byte_bounds(base)[0]) // itemsize
    offset_stop = (np.byte_bounds(view)[-1] - np.byte_bounds(base)[-1] - 1) // itemsize

    # Calculate the start and stop indices from the offsets
    index_start = np.unravel_index(offset_start, base.shape)
    index_stop = np.unravel_index(base.size + offset_stop, base.shape)

    # Use the strides property to find the No. of bytes to go from one element to the other
    index_step = np.array(view.strides) // np.array(base.strides)

    index = ""
    for i in range(len(index_step)):
        start = index_start[i]
        stop = index_stop[i]
        step = index_step[i]

        if stop == start:
            stop, step = None, None
        else:
            if stop == base.shape[i] - 1:
                stop = None
            else:
                stop = stop
            if start == 0:
                start = None
        if step is not None and stop is not None:
            if step < 0:
                start, stop = stop, start - 1
            else:
                start, stop = start, stop + 1

        if start is not None:
            index += str(start)
        if stop is not None:
            index += ":" + str(stop)
        elif step is not None:
            index += ":"
        if step is not None:
            index += ":" + str(step)
        index += ','
    index = index[:-1]

    return index
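A small self-contained sketch of the core trick in find_index, assuming a contiguous base: the byte distance between the two start pointers divided by the itemsize gives the flat offset of the view's first element, which np.unravel_index converts back into a multi-dimensional index.

import numpy as np

base = np.arange(24).reshape(4, 6)
view = base[1:3, 2:5]
offset = (np.byte_bounds(view)[0] - np.byte_bounds(base)[0]) // base.itemsize
assert offset == 8                               # flat index of element (1, 2)
assert np.unravel_index(offset, base.shape) == (1, 2)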
Example #16
def find_index(base, view):
    """
    Given an array that is a `view` of a `base`, find an index such that
    `base[index] is view`

    Assumes both base and view have the same itemsize.
    """

    if not isinstance(view, np.ndarray):
        return "..."

    itemsize = view.itemsize
    offset_start = (np.byte_bounds(view)[0] -
                    np.byte_bounds(base)[0]) // itemsize
    # byte_bounds gives a past-the-end pointer; the -1 makes the offset point
    # at the last element of the view (inclusive) rather than one past it.
    offset_end = (np.byte_bounds(view)[-1] - np.byte_bounds(base)[-1] -
                  1) // itemsize

    index_start = np.unravel_index(offset_start, base.shape)
    index_stop = np.unravel_index((offset_end + base.size) % base.size,
                                  base.shape)
    index_step = np.array(view.strides) // np.array(base.strides)

    index = ""
    for i in range(len(index_step)):
        start = index_start[i]
        stop = index_stop[i]
        step = index_step[i]

        if start == stop:
            # Z[3:4] = '3' -> no need for stop or step
            stop, step = None, None
        else:
            if start == 0:
                start = None
            if stop == base.shape[i] - 1:
                stop = None
        if step is not None and stop is not None:
            if step > 0:
                start, stop = start, stop + 1
            else:
                start, stop = stop, start - 1

        # format the index properly
        if start is not None:
            index += str(start)
        if stop is not None:
            index += ":" + str(stop)
        elif step is not None:  # placeholder for "::2"
            index += ":"
        if step is not None:
            index += ":" + str(step)
        index += ","
    index = index[:-1]
    return index
Example #17
 def get_raw_index(self, i):
     """Get index into base array's raw data, given the index into this
     segment
     """
     if self.is_indexed:
         return int(self.order[i])
     if self.data.base is None:
         return int(i)
     data_start, data_end = np.byte_bounds(self.data)
     base_start, base_end = np.byte_bounds(self.data.base)
     return int(data_start - base_start + i)
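A minimal sketch of the same byte-bounds arithmetic outside the class, assuming one-byte items (uint8) so byte offsets and element indices coincide:

import numpy as np

raw = np.arange(20, dtype=np.uint8)
segment = raw[5:15]
start = np.byte_bounds(segment)[0] - np.byte_bounds(raw)[0]
assert start == 5
assert raw[start + 3] == segment[3]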
def test_get_numpy_array_invalid_dimension_collected_by_asterisk():
    array = DataArray(
        np.random.randn(2),
        dims=['sheep'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['*'])
    assert numpy_array.shape == (2,)
    assert np.all(numpy_array == array.values)
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
def test_get_numpy_array_asterisk_creates_new_dim():
    array = DataArray(
        np.random.randn(2),
        dims=['x'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['x', '*'])
    assert numpy_array.shape == (2, 1)
    assert np.all(numpy_array[:, 0] == array.values)
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
def test_get_numpy_array_asterisk_flattens():
    array = DataArray(
        np.random.randn(2, 3),
        dims=['y', 'z'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['*'])
    assert numpy_array.shape == (6,)
    assert np.all(numpy_array.reshape((2, 3)) == array.values)
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
def test_get_numpy_array_retrieves_explicit_dimensions():
    array = DataArray(
        np.random.randn(2, 3),
        dims=['alpha', 'zeta'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['zeta', 'alpha'])
    assert numpy_array.shape == (3, 2)
    assert np.all(np.transpose(numpy_array, (1, 0)) == array.values)
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
def test_get_numpy_array_1d():
    array = DataArray(
        np.random.randn(2),
        dims=['y'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['y'])
    assert numpy_array.shape == (2,)
    assert np.all(numpy_array == array.values)
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
Example #23
 def get_raw_index(self, i):
     """Get index into base array's raw data, given the index into this
     segment
     """
     if self.is_indexed:
         return int(self.order[i])
     if self.data.base is None:
         return int(i)
     data_start, data_end = np.byte_bounds(self.data)
     base_start, base_end = np.byte_bounds(self.data.base)
     return int(data_start - base_start + i)
def test_get_numpy_array_2d_reverse():
    array = DataArray(
        np.random.randn(2, 3),
        dims=['y', 'z'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['z', 'y'])
    assert numpy_array.shape == (3, 2)
    assert np.all(np.transpose(numpy_array, (1, 0)) == array.values)
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
def test_get_numpy_array_creates_new_dim_in_front():
    array = DataArray(
        np.random.randn(2),
        dims=['x'],
        attrs={'units': ''},
    )
    numpy_array = get_numpy_array(array, ['y', 'x'])
    assert numpy_array.shape == (1, 2)
    assert np.all(numpy_array[0, :] == array.values)
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
def test_restore_dimensions_starz_to_zyx_doesnt_copy():
    array = DataArray(
        np.random.randn(2, 3, 4),
        dims=['z', 'y', 'x'],
        attrs={'units': ''}
    )
    numpy_array = get_numpy_array(array, ['*', 'z'])
    restored_array = restore_dimensions(
        numpy_array, from_dims=['*', 'z'], result_like=array)
    assert np.byte_bounds(restored_array.values) == np.byte_bounds(
        array.values)
    assert restored_array.values.base is array.values
Example #27
 def test_factorize_converges(self):
     l = np.array([1e-12, 1e-3, 1e-12, 1e-3], np.dtype('float64'))
     max_iter = 1000
     max_diff = 0.1
     d_name = 'factorized'
     d = self.data[d_name]
     f = Factorization(d, self.n_features_split, self.n_factors)
     monitor = f.sfa(max_diff, max_iter, l, True)
     assert (monitor.n_iter < max_iter)
     assert (monitor.max_diff_factors[-1] < max_diff)
     assert (monitor.max_diff_coefficients[-1] < max_diff)
     self.assertEqual(np.byte_bounds(d), np.byte_bounds(f.data))
     self.check_factorization_type_and_shape(f)
def test_get_numpy_array_zyx_to_starz_doesnt_copy():
    array = DataArray(
        np.random.randn(2, 3, 4),
        dims=['z', 'y', 'x'],
        attrs={'units': ''}
    )
    original_array = array.values
    numpy_array = get_numpy_array(array, ['*', 'z'])
    for i in range(2):
        assert np.all(numpy_array[:, i] == array.values[i, :, :].flatten())
    assert original_array is array.values
    assert np.byte_bounds(numpy_array) == np.byte_bounds(array.values)
    assert numpy_array.base is array.values
Example #29
 def offset(self):
     """The offset of the memory map in the file.

     Returns
     -------
     offset : int
         Offset of the memory map in the file.
     """
     if self._array.base is not None:
         return np.byte_bounds(self._array)[0] - np.byte_bounds(
             self._array.base)[0]
     else:
         return 0
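A standalone sketch of the same offset computation, assuming a sliced ndarray whose .base is its parent (a sliced memmap behaves the same way):

import numpy as np

parent = np.zeros(100, dtype=np.uint8)
sub = parent[10:]
assert sub.base is parent
assert np.byte_bounds(sub)[0] - np.byte_bounds(sub.base)[0] == 10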
Example #30
def find_view(base, view):
    """
    Given an array that is a `view` of a `base`, find an index such that
    `base[index] is view`
    """

    if not isinstance(view, np.ndarray):
        return "..."

    itemsize = view.itemsize
    offset_start = (np.byte_bounds(view)[0] -
                    np.byte_bounds(base)[0]) // itemsize
    offset_stop = (np.byte_bounds(view)[-1] - np.byte_bounds(base)[-1] -
                   1) // itemsize
    index_start = np.unravel_index(offset_start, base.shape)
    index_stop = np.unravel_index(base.size + offset_stop, base.shape)
    index_step = np.array(view.strides) // np.array(base.strides)

    index = ""
    for i in range(len(index_step)):
        start = index_start[i]
        stop = index_stop[i]
        step = index_step[i]

        if stop == start:
            stop, step = None, None
        else:
            if stop == base.shape[i] - 1:
                stop = None
            else:
                stop = stop
            if start == 0:
                start = None
        if step is not None and stop is not None:
            if step < 0:
                start, stop = stop, start - 1
            else:
                start, stop = start, stop + 1

        if start is not None:
            index += str(start)
        if stop is not None:
            index += ":" + str(stop)
        elif step is not None:
            index += ":"
        if step is not None:
            index += ":" + str(step)
        index += ','
    index = index[:-1]

    return index
def test_restore_dimensions_removes_dummy_axes():
    array = DataArray(
        np.random.randn(2),
        dims=['z'],
        attrs={'units': ''}
    )
    numpy_array = get_numpy_array(array, ['x', 'y', 'z'])
    restored_array = restore_dimensions(
        numpy_array, from_dims=['x', 'y', 'z'], result_like=array)
    assert np.all(restored_array.values == array.values)
    assert len(restored_array.attrs) == 0
    assert np.byte_bounds(restored_array.values) == np.byte_bounds(
        array.values)
    assert restored_array.values.base is array.values
Example #32
 def test_factorize_gets_close(self):
     l2 = 0.1
     l = np.array([0.01, l2, 0.02, l2], np.dtype('float64'))
     max_iter = 1000
     max_diff = 1e-6
     d_name = 'factorized'
     d = self.data[d_name]
     f = Factorization(d, self.n_features_split, self.n_factors)
     f.sfa(max_diff, max_iter, l)
     self.assertEqual(np.byte_bounds(d), np.byte_bounds(f.data))
     self.check_factorization_type_and_shape(f)
     coef = f.coefficients * (1 + l2)
     data_rec = np.dot(coef, f.factors).T
     np.testing.assert_allclose(d, data_rec, atol=1)
Example #33
 def byte_bounds_offset(self):
     """Return start and end offsets of this segment's data into the
     base array's data.
     
     This ignores the byte order index. Arrays using the byte order index
     will have the entire base array's raw data.
     """
     if self.data.base is None:
         if self.is_indexed:
             basearray = self.data.np_data
         else:
             basearray = self.data
         return 0, len(basearray)
     data_start, data_end = np.byte_bounds(self.data)
     base_start, base_end = np.byte_bounds(self.data.base)
     return int(data_start - base_start), int(data_end - base_start)
Example #34
 def byte_bounds_offset(self):
     """Return start and end offsets of this segment's data into the
     base array's data.
     
     This ignores the byte order index. Arrays using the byte order index
     will have the entire base array's raw data.
     """
     if self.data.base is None:
         if self.is_indexed:
             basearray = self.data.np_data
         else:
             basearray = self.data
         return 0, len(basearray)
     data_start, data_end = np.byte_bounds(self.data)
     base_start, base_end = np.byte_bounds(self.data.base)
     return int(data_start - base_start), int(data_end - base_start)
Example #35
 def calc_lookups(self):
     if self.is_indexed:
         end = len(self.data.np_data)
         self.data_start, self.data_end = 0, end
         self.base_start, self.base_end = 0, end
         base_size = end
     elif self.data.base is None:
         end = len(self.data)
         self.data_start, self.data_end = 0, end
         self.base_start, self.base_end = 0, end
         base_size = end
     else:
         self.data_start, self.data_end = np.byte_bounds(self.data)
         self.base_start, self.base_end = np.byte_bounds(self.data.base)
         base_size = len(self.data.base)
     self.base_length = base_size
     self.data_length = len(self.data)
     # Force regeneration of reverse index mapping the next time it's needed
     self._reverse_index_mapping = None
Example #36
 def test_factorize(self):
     l = np.array([0.01, 1] * len(self.n_features_split),
                  np.dtype('float64'))
     max_iter = 10
     max_diff = 1e-6
     for d_name, d in self.data.items():
         with (self.subTest(data_name=d_name)):
             f = Factorization(d, self.n_features_split, self.n_factors)
             monitor = f.sfa(max_diff, max_iter, l, True)
             diff = max([
                 monitor.max_diff_factors[-1],
                 monitor.max_diff_coefficients[-1]
             ])
             assert (monitor.n_iter < max_iter + 1)
             assert (diff >= 0.0)
             if (diff > max_diff):
                 self.assertEqual(monitor.n_iter, max_iter)
             self.assertEqual(np.byte_bounds(d), np.byte_bounds(f.data))
             self.check_factorization_type_and_shape(f)
Example #37
def share_memory(a, b):
    """Returns the number of shared bytes between arrays `a` and `b`."""
    #http://stackoverflow.com/a/11287440
    def byte_offset(a):
        """Returns a 1-d array of the byte offset of every element in `a`.
            Note that these will not in general be in order."""
        stride_offset = np.ix_(*map(range,a.shape))
        element_offset = sum(i*s for i, s in zip(stride_offset,a.strides))
        element_offset = np.asarray(element_offset).ravel()
        return np.concatenate([element_offset + x for x in range(a.itemsize)])
    a_low, a_high = np.byte_bounds(a)
    b_low, b_high = np.byte_bounds(b)
    
    beg, end = max(a_low,b_low), min(a_high,b_high)
    
    if end - beg > 0:
        # memory overlaps
        amem = a_low + byte_offset(a)
        bmem = b_low + byte_offset(b)
        
        return np.intersect1d(amem,bmem).size
    else:
        return 0
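A brief usage sketch for share_memory, assuming two overlapping views of one int64 buffer:

import numpy as np

x = np.arange(10, dtype=np.int64)             # 80 bytes
a = x[:6]
b = x[4:]
assert share_memory(a, b) == 2 * x.itemsize   # elements 4 and 5 overlap
assert share_memory(x[:5], x[5:]) == 0        # adjacent but disjoint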
Example #38
def unlock(arr):
    arr.flags.writeable = True
    return byte_bounds(arr)
Example #39
def lock(arr):
    arr.flags.writeable = False
    return byte_bounds(arr)
Example #40
import numpy as np
from numpy import byte_bounds
from numpush.shmem_views import sview, SyncNumpy

x = np.arange(0,100)
y = sview(x[:50])
z = sview(x[50:])
a = sview(x[2:98])

xptr = byte_bounds(x)
yptr = byte_bounds(y)
zptr = byte_bounds(z)
aptr = byte_bounds(a)

# Left aligned in memory
assert byte_bounds(x)[0] == byte_bounds(y)[0]

# Right aligned in memory
assert byte_bounds(x)[1] == byte_bounds(z)[1]

# Subset
assert xptr[0] < aptr[0] < aptr[1] < xptr[1]

from numpy import add
s = SyncNumpy(x)