Example no. 1
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4, 5), dtype='f8'))

        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([3., 5.5, 9., 11.]))

        c = np.arange(20).reshape((4, 5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([(0, (1., 2), [3., 4.]), (5, (6., 7), [8., 9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                        dtype=[('a', '<i4'),
                               ('b', [('f0', '<f4'), ('f1', '<u2')]),
                               ('c', '<f4', (2, ))])
        assert_equal(out, want)

        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([8.0 / 3, 16.0 / 3, 26.0 / 3, 11.]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([3., 5.5, 9., 11.]))

        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)
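
For reference, a minimal stand-alone sketch of the behaviour these assertions exercise; the array and field names below are invented for illustration:

import numpy as np
from numpy.lib.recfunctions import (structured_to_unstructured,
                                    unstructured_to_structured)

# mixed field dtypes: the conversion produces a new float64 array (a copy)
mixed = np.array([(1, 2.0, 5.0), (4, 5.0, 7.0)],
                 dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
flat = structured_to_unstructured(mixed)
print(flat.dtype, flat.shape)      # float64 (2, 3)

# converting back casts each column into the corresponding field
back = unstructured_to_structured(flat, mixed.dtype)
print(back['x'], back['z'])        # [1 4] [5. 7.]

# uniform field dtypes: the conversion is a view, as the test asserts
uniform = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4')])
print(structured_to_unstructured(uniform).base is uniform)   # True
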
Example no. 2
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4, 5), dtype='f8'))

        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([3., 5.5, 9., 11.]))
        out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
        assert_equal(out, np.array([1., 4., 7., 10.]))

        c = np.arange(20).reshape((4, 5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([(0, (1., 2), [3., 4.]), (5, (6., 7), [8., 9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                        dtype=[('a', 'i4'), ('b', [('f0', 'f4'),
                                                   ('f1', 'u2')]),
                               ('c', 'f4', (2, ))])
        assert_equal(out, want)

        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([8.0 / 3, 16.0 / 3, 26.0 / 3, 11.]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([3., 5.5, 9., 11.]))

        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)

        # including uniform fields with subarrays unpacked
        d = np.array([(1, [2, 3], [[4, 5], [6, 7]]),
                      (8, [9, 10], [[11, 12], [13, 14]])],
                     dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
                            ('x2', ('i4', (2, 2)))])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)

        # test that nested fields with identical names don't break anything
        point = np.dtype([('x', int), ('y', int)])
        triangle = np.dtype([('a', point), ('b', point), ('c', point)])
        arr = np.zeros(10, triangle)
        res = structured_to_unstructured(arr, dtype=int)
        assert_equal(res, np.zeros((10, 6), dtype=int))
Example no. 3
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4,5), dtype='f8'))

        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([ 3. ,  5.5,  9. , 11. ]))
        out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
        assert_equal(out, np.array([ 1. ,  4. ,  7. , 10. ]))

        c = np.arange(20).reshape((4,5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([( 0, ( 1.,  2), [ 3.,  4.]),
                         ( 5, ( 6.,  7), [ 8.,  9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                     dtype=[('a', 'i4'),
                            ('b', [('f0', 'f4'), ('f1', 'u2')]),
                            ('c', 'f4', (2,))])
        assert_equal(out, want)

        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([ 8.0/3,  16.0/3,  26.0/3, 11. ]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([ 3. ,  5.5,  9. , 11. ]))

        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)

        # including uniform fields with subarrays unpacked
        d = np.array([(1, [2,  3], [[ 4,  5], [ 6,  7]]),
                      (8, [9, 10], [[11, 12], [13, 14]])],
                     dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)

        # test that nested fields with identical names don't break anything
        point = np.dtype([('x', int), ('y', int)])
        triangle = np.dtype([('a', point), ('b', point), ('c', point)])
        arr = np.zeros(10, triangle)
        res = structured_to_unstructured(arr, dtype=int)
        assert_equal(res, np.zeros((10, 6), dtype=int))
Example no. 4
def fillna(array: np.ndarray, type="mean", value=None) -> np.ndarray:
    """
    List of filling functions applied to unstructured (normal) arrays and converted
    back to structured arrays as output.

    :param array: np.ndarray, input array with NaNs. 
    :param type: {mean, value, ffill, bfill} str,
        'mean' returns the column mean.
        'value' returns the value parameter.
        'ffill' returns a forward filled array.
        'bfill' returns a backwards filled array.
    :param value: optional, value to be used in type='value'.
    :param array: np.ndarray, input array with NaNs. 
    :return: np.ndarray, return mean filled array.
    """
    numeric_cols, nonnumeric_cols = columntype(array)
    dtyped = array[numeric_cols].dtype
    numeric_unstructured = rfn.structured_to_unstructured(
        array[numeric_cols]).T

    if type == "mean":
        numeric_unstructured = fillmean(numeric_unstructured.T)

    if type == "value":
        if value is None:
            value = 0
            print("To fill with a value other than 0, supply value=<x>")

        numeric_unstructured = np.nan_to_num(numeric_unstructured, nan=value)

    if type == "ffill":
        numeric_unstructured = ffill(numeric_unstructured)

    if type == "bfill":  ## ffi
        numeric_unstructured = bfill(numeric_unstructured)

    if type == "mean":
        numeric_structured = rfn.unstructured_to_structured(
            numeric_unstructured, dtype=dtyped)
    else:
        numeric_structured = rfn.unstructured_to_structured(
            numeric_unstructured.T, dtype=dtyped)

    if len(array[nonnumeric_cols].dtype):
        full_return = numeric_structured
    else:
        full_return = concat(array[nonnumeric_cols],
                             numeric_structured,
                             type="columns")

    return full_return
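
A self-contained sketch of the 'mean' branch above, using only numpy and numpy.lib.recfunctions (the helpers columntype, fillmean, ffill, bfill and concat belong to the surrounding library and are not reproduced here):

import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1.0, np.nan), (np.nan, 4.0), (5.0, 6.0)],
             dtype=[('x', 'f8'), ('y', 'f8')])

# structured -> unstructured, fill each column with its mean, and back
flat = rfn.structured_to_unstructured(a)
col_means = np.nanmean(flat, axis=0)
flat = np.where(np.isnan(flat), col_means, flat)
filled = rfn.unstructured_to_structured(flat, dtype=a.dtype)
print(filled['x'])   # [1. 3. 5.]
print(filled['y'])   # [5. 4. 6.]
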
Example no. 5
def group(array: np.ndarray,
          groupby_cols: list,
          compute_functions: list,
          calcs_cols: list,
          display=True,
          length=None) -> np.ndarray:
    """
    Group the array according to a unique mapper of multiple columns (groupby_cols) by doing various calculations (compute_functions)
    over a select columns (calc_cols).

    :param array: np.ndarray, input array to be grouped.  
    :param groupby_cols: list, columns to be used to do the grouping.  
    :param compute_functions: list, columns to be used to specify the different calculations. 
    :param calcs_cols: list, columns over which the computations will be done. 
    :param display: bool, whether or not to display a printed HTML data frame. 
    :param length: int, how many rows of the displayed HTML table to print. 
    :return group_array: np.ndarray, grouped array. 
    """

    args_dict = {}
    for a in calcs_cols:
        for f in compute_functions:
            args_dict[a + "_" + f] = npg.aggregate(
                np.unique(array[groupby_cols], return_inverse=True)[1],
                array[a], f)

    struct_gb = rfn.unstructured_to_structured(np.c_[list(
        args_dict.values())].T,
                                               names=list(args_dict.keys()))
    grouped = np.unique(array[groupby_cols], return_inverse=True)[0]
    group_array = rfn.merge_arrays([grouped, struct_gb], flatten=True)
    if display:
        table(group_array, length)
    return group_array
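
For illustration, this is roughly how the names= form used above turns a dictionary of computed columns into a structured array; the column names and values here are hypothetical:

import numpy as np
from numpy.lib import recfunctions as rfn

stats = {'price_mean': np.array([10.0, 20.0]),    # one value per group
         'price_sum':  np.array([30.0, 80.0])}

struct = rfn.unstructured_to_structured(
    np.column_stack(list(stats.values())), names=list(stats.keys()))
print(struct.dtype.names)   # ('price_mean', 'price_sum')
print(struct['price_sum'])  # [30. 80.]
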
Example no. 6
    def clustering(self, min_number_of_clusters, max_number_of_clusters, SK):

        clinical = np.array(
            pd.read_csv(self.clinical,
                        usecols=['vital_status', 'days_to_death']))
        clinical_struct = rfn.unstructured_to_structured(
            clinical, np.dtype([('Status', '?'), ('Survival_in_days', '<f8')]))

        result = []
        for i in range(min_number_of_clusters, max_number_of_clusters + 1):
            clustering_i = SpectralClustering(n_clusters=i,
                                              affinity='precomputed',
                                              assign_labels='discretize',
                                              random_state=0).fit(SK)
            labels_i = clustering_i.labels_
            result.append(labels_i.tolist())

        result_array = np.array(result)
        result_array_t = result_array.transpose()

        # P-value
        n_features = result_array_t.shape[1]
        for j in range(n_features):
            Xj = result_array_t[:, j:j + 1]
            res = sksurv.compare.compare_survival(clinical_struct, Xj)
            print("[cluster: %d, P-value: %.8f]" % (j + 2, res[1]))
Example no. 7
def loadwedg6(mesh, fptr, ltag):
    """
    LOADWEDG6: load the WEDG6 data segment from file.

    """
    lnum = int(ltag[1])
    vnum = 7

    data = []
    for line in range(lnum):
        data.append(fptr.readline())

    data = " ".join(data).replace("\n", ";")

    cell = np.fromstring(data, dtype=np.int32, sep=";")

    cell = np.reshape(cell, (lnum, vnum), order="C")

    cell = rfn.unstructured_to_structured(cell,
                                          dtype=jigsaw_msh_t.WEDG6_t,
                                          align=True)

    mesh.wedg6 = cell

    return
Example no. 8
    def __call__(self, text_matrix, image, resample):
        """Applies formatting and colorization on the `text_matrix`
        and returns a single string.

        Args:
            text_matrix (numpy.ndarray): The subject text matrix,
                                        with `shape = (<height>, <width>)`,
                                        and `dtype = str`.
            image (PIL.Image.Image): The subject image.
            resample (int): The resampling filter.

        Returns:
            str: The formatted string of text with color (if specified).
        """

        text_size = text_matrix.shape

        # Apply any translations.
        text_matrix = self.translate(text_matrix)

        # Colorize if necessary
        if self.colorize:
            # Pool the colors from the original image by resizing it to the size of the text output.
            # Using the vectorized `color` method, color each element in the `text_matrix`.
            # The vectorized operation takes a `str` from `text_matrix`
            # and a `List[int, int, int]` from the pooled colors.
            text_matrix = self.vcolor(
                text_matrix,
                unstructured_to_structured(
                    np.array(image.resize(text_size[::-1],
                                          resample=resample)).astype(
                                              np.uint8)).astype("O"),
            )

        return self.unify(text_matrix)
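
A small sketch of the colour-pooling step, with a hypothetical 2x2 RGB array standing in for the resized PIL image: unstructured_to_structured turns the trailing (R, G, B) axis into three fields, and .astype("O") then yields one plain tuple per pixel for the vectorized colour call.

import numpy as np
from numpy.lib.recfunctions import unstructured_to_structured

rgb = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)   # stand-in image

pixels = unstructured_to_structured(rgb)   # last axis -> fields f0, f1, f2
print(pixels.shape)                        # (2, 2)
print(pixels[0, 0])                        # (0, 1, 2)

as_tuples = pixels.astype("O")             # object array of per-pixel tuples
print(as_tuples[1, 1])                     # e.g. (9, 10, 11)
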
Example no. 9
def propagate_results(t, date0, results, propagator, num=None, params=None):
    if params is None:
        params = {}
    if num is None:
        num = range(len(results.trace))
    else:
        num = np.random.randint(len(results.trace), size=num)

    pbar = tqdm(total=len(num), ncols=100)

    states = np.empty((len(num), ), dtype=results.trace.dtype)

    it = 0
    for i in num:
        state = structured_to_unstructured(
            results.trace[i][['x', 'y', 'z', 'vx', 'vy', 'vz']])

        prop_state = propagator.propagate(np.array([t]), state,
                                          times.npdt2mjd(date0), **params)
        states[it] = unstructured_to_structured(prop_state.T,
                                                results.trace.dtype)
        it += 1
        pbar.update(1)

    return states, num
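
The field-subset pattern above, reduced to a toy record; the trace array and its field names here are invented:

import numpy as np
from numpy.lib.recfunctions import (structured_to_unstructured,
                                    unstructured_to_structured)

trace = np.zeros(2, dtype=[('t', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8')])
trace['x'] = [1.0, 2.0]

# pull out only the position fields as a plain (2, 3) array
pos = structured_to_unstructured(trace[['x', 'y', 'z']])
print(pos.shape)     # (2, 3)
print(pos[:, 0])     # [1. 2.]

# pack a plain vector back into a single record with the same layout
state_dtype = np.dtype([('x', 'f8'), ('y', 'f8'), ('z', 'f8')])
rec = unstructured_to_structured(np.array([5.0, 6.0, 7.0]), state_dtype)
print(rec)           # (5., 6., 7.)
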
Example no. 10
def loadbound(mesh, fptr, ltag):
    """
    LOADBOUND: load the BOUND data segment from file.

    """
    lnum = int(ltag[1])
    vnum = 3

    data = []
    for line in range(lnum):
        data.append(fptr.readline())

    data = " ".join(data).replace("\n", ";")

    bnds = np.fromstring(data, dtype=np.int32, sep=";")

    bnds = np.reshape(bnds, (lnum, vnum), order="C")

    bnds = rfn.unstructured_to_structured(bnds,
                                          dtype=jigsaw_msh_t.BOUND_t,
                                          align=True)

    mesh.bound = bnds

    return
Example no. 11
def loadvert3(fptr, ltag):
    """
    LOADVERT3: load the 3-dim. vertex pos. from file.

    """
    lnum = int(ltag[1])
    vnum = 4

    data = []
    for line in range(lnum):
        data.append(fptr.readline())

    data = " ".join(data).replace("\n", ";")

    vert = np.fromstring(data, dtype=np.float64, sep=";")

    vert = np.reshape(vert, (
        lnum,
        vnum,
    ), order="C")

    vert = rfn.unstructured_to_structured(vert,
                                          dtype=jigsaw_msh_t.VERT3_t,
                                          align=True)

    return vert
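
The three loaders above share one pattern: parse the text rows into a plain numeric block, then hand it to unstructured_to_structured together with the target dtype. A sketch with a made-up stand-in for jigsaw_msh_t.VERT3_t (the real dtype lives in jigsawpy):

import numpy as np
from numpy.lib import recfunctions as rfn

# hypothetical aligned dtype: three coordinates plus an ID tag
VERT3_like = np.dtype([('coord', 'f8', (3,)), ('IDtag', 'i4')], align=True)

rows = np.array([[0.0, 0.0, 0.0, 0.0],
                 [1.0, 0.0, 0.0, 1.0]])      # lnum=2 rows of vnum=4 values

vert = rfn.unstructured_to_structured(rows, dtype=VERT3_like, align=True)
print(vert['coord'][1])   # [1. 0. 0.]
print(vert['IDtag'])      # [0 1]
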
Example no. 12
 def __init__(self, source: TimestampSeries,
              clean_reception_times: numpy.ndarray,
              clean_ts_vals: numpy.ndarray):
     unstructured_data = numpy.array([clean_ts_vals,
                                      clean_reception_times]).T
     structured_data = recfunctions.unstructured_to_structured(
         unstructured_data, dtype=self.DTYPE)
     super(NormTimestampSeries, self).__init__(source.key, structured_data)
Example no. 13
 def _image_deviation(params):
     """helper function evaluating the residuals"""
     # generate the droplet
     data_flat[free] = params
     droplet.data = unstructured_to_structured(data_flat, dtype=dtype)
     droplet.check_data()
     img = droplet._get_phase_field(phase_field.grid)[mask]
     return img - data_mask
Example no. 14
def test_Osiris_Dev_Hdf5_ParticleFile_is_valid_backend(
        make_prt_file: Callable[[str, np.ndarray, Optional[str]], Path]):
    data = unstructured_to_structured(np.random.random((10, 4)))
    data = rename_fields(data, {"f0": "q"})

    prt_path = make_prt_file("osiris_dev_particles_hdf5", data)

    assert Osiris_Dev_Hdf5_ParticleFile.is_valid_backend(prt_path)
Example no. 15
 def from_norm(cls, source: NormTimestampSeries,
               frequency) -> 'OffsetSeries':
     tsvals_secs = (source.ts_vals / frequency).round(decimals=6)
     offsets_raw = (tsvals_secs - source.reception_times) * 1000
     rounded_offsets = offsets_raw.round(decimals=6)
     data_unstructured = numpy.column_stack(
         (source.reception_times, rounded_offsets))
     data = recfunctions.unstructured_to_structured(data_unstructured,
                                                    dtype=cls.DTYPE)
     return cls(data)
Example no. 16
def concat(first: np.ndarray,
           second: np.ndarray,
           type: "{row, columns, array, melt}" = "row") -> np.ndarray:
    """
    Multiple methods of concatenation, some of them are experimental. The basic methods are 'columns' or 'row'. 
    The other methods do not necessarily provide unique outcomes to that of 'columns' and 'row'.

    Note, if you are concatenating a single column, always add double
    brackets so that the name can be easily retrieved i.e. array[[col]]

    :param array1: np.ndarray, the left/top concatenating array.
    :param array2: np.ndarray, the right/bottom concatenating array.
    :param type: str or in, the type of concatenation 'row', 'columns', 'array' or 'melt'
    :return concat : np.ndarray, newly concatenated array.
    """
    if type in ["row", "r", "rows", 0]:
        try:
            concat = np.concatenate([first, second])
        except:
            concat = np.concatenate([
                rfn.structured_to_unstructured(first),
                rfn.structured_to_unstructured(second)
            ])
            concat = rfn.unstructured_to_structured(concat,
                                                    names=first.dtype.names)
    elif type in ["columns", "column", "c", 1]:
        concat = concat_col(first, second)
        #concat = rfn.merge_arrays((first, second), asrecarray=False, flatten=True)  # tuples
    elif type == "array":
        concat = np.c_[[first, second]]
    elif type == "melt":  ## looks similar to columns but list instead of tuples
        try:
            concat = np.c_[(first, second)]
        except:
            concat = np.c_[(rfn.structured_to_unstructured(first),
                            rfn.structured_to_unstructured(second))]
            concat = rfn.unstructured_to_structured(concat,
                                                    names=first.dtype.names)
    else:
        raise ValueError(
            "type has to be set to either: row, columns, array or melt")
    return concat
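
For reference, a sketch of the row-concatenation fallback above: when np.concatenate cannot promote the structured dtypes directly, both sides are flattened, concatenated, and the field names reattached (the rebuilt fields then inherit the flattened dtype):

import numpy as np
from numpy.lib import recfunctions as rfn

top = np.array([(1, 2.0)], dtype=[('x', 'i4'), ('y', 'f8')])
bottom = np.array([(3, 4.0)], dtype=[('x', 'i4'), ('y', 'f8')])

# same-dtype structured arrays concatenate directly
rows = np.concatenate([top, bottom])
print(rows['x'])             # [1 3]

# the fallback path: flatten, concatenate, reattach the field names
flat = np.concatenate([rfn.structured_to_unstructured(top),
                       rfn.structured_to_unstructured(bottom)])
rebuilt = rfn.unstructured_to_structured(flat, names=list(top.dtype.names))
print(rebuilt.dtype.names)   # ('x', 'y')
print(rebuilt['x'])          # [1. 3.]  (now float64, from the flattened data)
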
Example no. 17
def vlctest(vlc: np.ndarray) -> int:
    """ Test the validity of an array of variable-length codes.

    Returns the total number of bits to code the vlc data. """
    from numpy.lib.recfunctions import (structured_to_unstructured,
                                        unstructured_to_structured)
    if not np.all(vlc[:, 1] >= 0):
        raise ValueError("Code words must be non-negative")
    bitwords = unstructured_to_structured(vlc, dtype=bitword.dtype)
    bitword.verify(bitwords)
    return bitwords['bits'].sum(dtype=np.intp)
Example no. 18
def to_struct(array: np.ndarray, name_list: list) -> np.ndarray:
    """
    Convert an unstructured (homogenous) array to a structured array. The data types
    are automatically picked up by looking at the data, using numpy's recfunctions.

    :param array: np.ndarray, unstructured array (i.e., normal numpy array)
    :param name_list: list or str, the names to be given to the columns to be
                                   given to the newly created structured array.
    :return : np.ndarray(structured), the newly converted structured array
    """
    return rfn.unstructured_to_structured(array, names=name_list)
Example no. 19
    def colorize(self):
        """Colors text obtained from processing
        """
        if self.args.color:
            hr = int(round(self.image.shape[0] / self.text.shape[0]))
            wr = int(round(self.image.shape[1] / self.text.shape[1]))

            # Get average color
            kernel = ones((hr, wr), dtype=float) / (hr * wr)
            colors = unstructured_to_structured(
                filter2D(self.image, -1, kernel)[::hr, ::wr, :]).astype("O")

            self.text = vectorize(color)(self.text, colors)
Example no. 20
def test_unstructured_to_structured():
    arr = da.array([1, 2, 3])
    structured_dtype = np.dtype([("f1", int), ("f2", float), ("f3", np.bool_)])

    # expected is based on doing numpy 'unstructured_to_structured'
    expected = recfunctions.unstructured_to_structured(arr.compute(),
                                                       structured_dtype)

    # do own 'unstructured_to_structured'
    converted_arr = unstructured_to_structured(arr, structured_dtype)

    assert isinstance(converted_arr, da.Array)
    np.testing.assert_array_equal(converted_arr, expected)
Example no. 21
 def __init__(self, offset_data: numpy.ndarray, internal_knots: List[float],
              x_values: numpy.ndarray):
     self.spline_obj = scipy_interpolate.LSQUnivariateSpline(
         x=offset_data[self.KEY_RECEPTION_TIME],
         y=offset_data[self.KEY_OFFSET],
         t=internal_knots,
         bbox=[None, None],  # bounding box
         k=self._SPLINE_DEGREE)
     unstructured_data = numpy.array([x_values,
                                      self.spline_obj(x_values)]).T
     structured_data = recfunctions.unstructured_to_structured(
         unstructured_data, dtype=self.DTYPE)
     super(OffsetSpline, self).__init__(structured_data)
     self.mean = numpy.mean(self.offsets)
Example no. 22
def test_Osiris_Dev_Hdf5_ParticleFile_properties(
        make_prt_file: Callable[[str, np.ndarray, Optional[str]], Path]):
    data = unstructured_to_structured(np.random.random((10, 4)))
    data = rename_fields(data, {"f0": "q"})

    prt_path = make_prt_file("osiris_dev_particles_hdf5",
                             data,
                             name="some particles")
    backend = Osiris_Dev_Hdf5_ParticleFile(prt_path)

    assert backend.name == "osiris_dev_particles_hdf5"
    assert backend.location == prt_path
    assert backend.dataset_name == "some_particles"
    assert backend.dataset_label == "some particles"

    assert backend.quantity_names == ["f1", "f2", "f3", "q"]
    assert backend.quantity_labels == [
        "f1 label",
        "f2 label",
        "f3 label",
        "q label",
    ]
    assert backend.quantity_units == [
        "f1 unit",
        "f2 unit",
        "f3 unit",
        "q unit",
    ]

    assert backend.shape == (10, )
    assert backend.dtype == np.dtype([
        ("f1", float),
        ("f2", float),
        ("f3", float),
        ("q", float),
    ])

    # taken function 'make_osiris_444_particles_hdf'
    assert backend.iteration == 12345
    assert np.isclose(backend.time_step, -321.9)
    assert backend.time_unit == "time unit"

    # check reading of data
    for indexing in (np.s_[0], np.s_[-1], np.s_[:], np.s_[3:7], np.s_[4:1]):
        expected_data = data[indexing]
        np.testing.assert_array_equal(backend.get_data((indexing, )),
                                      expected_data)
Example no. 23
def im2xlsx(file, resize=True, keep_aspect=False):
    path, fileName = os.path.split(file)
    extension = os.path.splitext(file)[1]
    if path == "":
        file = __default_path(fileName)
    if resize:
        file = __resize_picture(file, keep_aspect, extension)
    try:
        img = imread(file)
    except Exception as exc:
        raise Exception(f"Could not read image: {file}") from exc
    df = pd.DataFrame(nlr.unstructured_to_structured(img).astype('O'))
    df = df.applymap(lambda x: rgb2hex(*x))
    s = df.style.applymap(lambda x: "background-color:" + str(x))
    excelFile = file.replace(extension, ".xlsx")
    s.to_excel(excelFile, engine='xlsxwriter')
    __changeZoom(excelFile)
Example no. 24
    def get_data_table(self):
        ''' 
            Returns data w/ time tick column as a structured
            numpy array (different from a regular np.array)
        '''
        if self.data is None:
            self._read_data()

        # Create dtype w/ column names
        names = self.get_labels()
        dtype = self.header._get_dtype()
        dtype = [(name, t) for name, t in zip(names, dtype.split(','))]

        # Convert data table to records format
        table = rfn.unstructured_to_structured(self.data,
                                               dtype=np.dtype(dtype))

        return table
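
A sketch of the dtype construction used above, with invented column labels and a comma-separated type string standing in for get_labels() and _get_dtype():

import numpy as np
from numpy.lib import recfunctions as rfn

names = ['TIME', 'BX', 'BY']          # hypothetical column labels
type_str = '>f8,>f4,>f4'              # hypothetical dtype string
dtype = np.dtype([(n, t) for n, t in zip(names, type_str.split(','))])

raw = np.array([[0.0, 1.5, -2.0],
                [1.0, 1.6, -2.1]])
table = rfn.unstructured_to_structured(raw, dtype=dtype)
print(table['BX'])   # [1.5 1.6]
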
Example no. 25
    def compress(self,
                 n_buckets: int,
                 sort_by: str = 'den',
                 stat: str = 'mean',
                 weights_dist: str = 'uniform',
                 reweigh: bool = False,
                 inplace=False):
        """
        Compress sample into ntile-buckets

        Parameters
        ----------
        n_buckets : int
            Number of buckets / unique values in the new sample.
        sort_by : str, {'num', 'den', 'taylor', 'naive'}
        stat
        weights_dist
        reweigh
        inplace

        Returns
        -------

        """
        _validate_compress_stat(stat)

        fullobs = self.fullobs
        if sort_by == 'num':
            sort_ind = np.argsort(fullobs, order=('num', 'den'))
        elif sort_by == 'den':
            sort_ind = np.argsort(fullobs, order=('den', 'num'))
        else:
            obs_to_sort = self.linearize(strategy=sort_by).fullobs
            sort_ind = np.argsort(obs_to_sort)

        fullobs = rfn.structured_to_unstructured(fullobs[sort_ind])
        compressed, weights = compress(fullobs, n_buckets, stat, weights_dist,
                                       reweigh)

        obs = rfn.unstructured_to_structured(compressed, dtype=self._obs_dtype)
        return _return_or_inplace(self, obs, weights, inplace)
Example no. 26
    def to_csv(self, name=None, prec=7):
        ''' Writes out the flat file data to a comma-separated-value file
            
            Optional name argument specifies an alternate filename to
            give to the .csv file
            Optional prec argument specifies the precision for the values
        '''
        # Format filename
        name = f'{self.name}.csv' if name is None else f'{name}.csv'

        # Get data
        data = self.get_data(include_times=True)
        ncols = len(data[0])

        # Convert first column in data to ISO timestamps
        epoch = self.get_epoch()
        timestamps = ff_time.ticks_to_iso_ts(data[:, 0], epoch)

        # Restructure data array so first column is of string type
        dtype = np.dtype('U72' + ',>f8' * (ncols - 1))
        data = rfn.unstructured_to_structured(data, dtype=dtype)
        data['f0'] = timestamps

        # Format header
        col_names = self.get_labels()
        time_lbl = col_names[0]
        col_names[0] = 'TIME' if 'time' not in time_lbl.lower() else time_lbl
        header = ','.join(col_names)

        # Generate formatting string
        fmt_str = ['%s'] + [f'%.{prec}f'] * (ncols - 1)

        # Save to file
        np.savetxt(name,
                   data,
                   delimiter=',',
                   header=header,
                   fmt=fmt_str,
                   comments='')
Example no. 27
def launchTrimmer(pcd_path, in_camera_view, r_mask, g_mask):
    # load cloud.pcd
    cloud = pypcd.PointCloud.from_path(pcd_path)
    pprint.pprint(cloud.get_metadata())

    # convert the structured numpy array to a ndarray
    trimmed_cloud = structured_to_unstructured(cloud.pc_data)

    # print the shape of the new array
    print(trimmed_cloud.shape)
    tp_mask = np.logical_and(r_mask, g_mask)
    fn_mask = np.logical_and(g_mask, np.logical_not(r_mask))
    fp_mask = np.logical_and(r_mask, np.logical_not(g_mask))
    trimmed_cloud = trimmed_cloud[in_camera_view]
    trimmed_cloud[:, 3] = trimmed_cloud[:, 3].astype("int32")
    trimmed_cloud[:, 4] = trimmed_cloud[:, 4].astype("int32")

    fn_cloud = trimmed_cloud[fn_mask]
    tp_cloud = trimmed_cloud[tp_mask]
    fp_cloud = trimmed_cloud[fp_mask]

    # this is necessary to distinguish between TP / FN / FP in the pcl viewer
    # the pcl viewer chooses a different color depending on the values in the label column
    # 0 is the background (FP), 1 stands for the class person (FN and TP)
    # to distinguish between FN and TP we add +1 to the TPs
    # this way we end up with 0 for background (FP), 1 for FN and 2 for TP
    # values in the column object have no effect on visualization whatsoever
    tp_cloud[:, 3] = tp_cloud[:, 3] + 1

    fn_tp_fp_cloud = np.concatenate((fn_cloud, tp_cloud, fp_cloud), axis=0)

    structured_fn_tp_fp_cloud = unstructured_to_structured(fn_tp_fp_cloud)
    pcd_cloud = from_array(structured_fn_tp_fp_cloud)

    # this can be visualized with the pcl_viewer tool as follows:
    # pcl_viewer -multiview 1 fn_tp_fp.pcd
    pypcd.save_point_cloud(pcd_cloud, 'fn_tp_fp.pcd')
Example no. 28
def _transform_particle_data_array(data: np.ndarray):
    """Transform a array into a required particle data array.

    Data is assumed to fulfill the condition `data.ndim =< 3` for
    unstructured and `data.ndim =< 2` for structured array.

    Array specification:
        * axis == 0: time/iteration
        * axis == 1: particle index
        * dtype: [('q0', 'type'), ('q1', 'type')]
            -> 'q0/q1' represent quantity names
            -> 'type' represents the type of dtype (e.g. int, float, ...)
    """
    if data.ndim == 2 and data.dtype.fields:
        return data

    # data has fields
    if data.dtype.fields:
        if data.ndim == 0:
            data = data[np.newaxis, np.newaxis]
        else:  # data.ndim == 1
            data = data[np.newaxis]

    # data has no fields -> the last axis is the quantity index
    else:
        if data.ndim == 0:
            data = data[(np.newaxis,) * 3]
        elif data.ndim == 1:
            data = data[np.newaxis, ..., np.newaxis]
        elif data.ndim == 2:
            data = data[..., np.newaxis]

        field_names = [f"quant{i}" for i in range(data.shape[-1])]
        data = rfn.unstructured_to_structured(data, names=field_names)

    return data
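
A toy illustration of the final branch: a plain (time, particle, quantity) array gets generated field names when converted; the shape and values here are made up:

import numpy as np
from numpy.lib import recfunctions as rfn

plain = np.arange(12, dtype=float).reshape(2, 3, 2)   # 2 steps, 3 particles, 2 quantities

names = [f"quant{i}" for i in range(plain.shape[-1])]
structured = rfn.unstructured_to_structured(plain, names=names)
print(structured.shape)         # (2, 3)
print(structured.dtype.names)   # ('quant0', 'quant1')
print(structured['quant1'][0])  # [1. 3. 5.]
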
Example no. 29
def plot_voxel_grid(xs,
                    ys,
                    ts,
                    ps,
                    bins=5,
                    frames=[],
                    frame_ts=[],
                    sensor_size=None,
                    crop=None,
                    elev=0,
                    azim=45,
                    show_axes=False):
    if sensor_size is None:
        sensor_size = [np.max(ys) + 1, np.max(xs) +
                       1] if len(frames) == 0 else frames[0].shape
    if crop is not None:
        xs, ys, ts, ps = clip_events_to_bounds(xs, ys, ts, ps, crop)
        sensor_size = crop_to_size(crop)
        xs, ys = xs - crop[2], ys - crop[0]
    num = 10000
    xs, ys, ts, ps = xs[0:num], ys[0:num], ts[0:num], ps[0:num]
    if len(xs) == 0:
        return
    voxels = events_to_voxel(xs, ys, ts, ps, bins, sensor_size=sensor_size)
    voxels = block_reduce(voxels, block_size=(1, 10, 10), func=np.mean, cval=0)
    dimdiff = voxels.shape[1] - voxels.shape[0]
    filler = np.zeros((dimdiff, *voxels.shape[1:]))
    voxels = np.concatenate((filler, voxels), axis=0)
    voxels = voxels.transpose(0, 2, 1)

    pltvoxels = voxels != 0
    pvp, nvp = voxels > 0, voxels < 0
    pvox, nvox = voxels * np.where(voxels > 0, 1, 0), voxels * np.where(
        voxels < 0, 1, 0)
    pvox, nvox = (pvox / np.max(pvox)) * 0.5 + 0.5, (
        np.abs(nvox) / np.max(np.abs(nvox))) * 0.5 + 0.5
    zeros = np.zeros_like(voxels)

    colors = np.empty(voxels.shape, dtype=object)

    redvals = np.stack((pvox, zeros, pvox - 0.5), axis=3)
    redvals = nlr.unstructured_to_structured(redvals).astype('O')

    bluvals = np.stack((nvox - 0.5, zeros, nvox), axis=3)
    bluvals = nlr.unstructured_to_structured(bluvals).astype('O')

    colors[pvp] = redvals[pvp]
    colors[nvp] = bluvals[nvp]

    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.voxels(pltvoxels, facecolors=colors, edgecolor='k')
    ax.view_init(elev=elev, azim=azim)

    ax.grid(False)
    # Hide panes
    ax.xaxis.pane.fill = False
    ax.yaxis.pane.fill = False
    ax.zaxis.pane.fill = False
    if not show_axes:
        # Hide spines
        ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
        ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
        ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
        ax.set_frame_on(False)
    # Hide xy axes
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_zticks([])

    ax.xaxis.set_visible(False)
    ax.axes.get_yaxis().set_visible(False)

    plt.show()
Example no. 30
 def inspect(dt, dtype=None):
     arr = np.zeros((), dt)
     ret = structured_to_unstructured(arr, dtype=dtype)
     backarr = unstructured_to_structured(ret, dt)
     return ret.shape, ret.dtype, backarr.dtype
Example no. 31
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4,5), dtype='f8'))

        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([ 3. ,  5.5,  9. , 11. ]))
        out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
        assert_equal(out, np.array([ 1. ,  4. ,  7. , 10. ]))

        c = np.arange(20).reshape((4,5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([( 0, ( 1.,  2), [ 3.,  4.]),
                         ( 5, ( 6.,  7), [ 8.,  9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                     dtype=[('a', 'i4'),
                            ('b', [('f0', 'f4'), ('f1', 'u2')]),
                            ('c', 'f4', (2,))])
        assert_equal(out, want)

        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([ 8.0/3,  16.0/3,  26.0/3, 11. ]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([ 3. ,  5.5,  9. , 11. ]))

        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)

        # including uniform fields with subarrays unpacked
        d = np.array([(1, [2,  3], [[ 4,  5], [ 6,  7]]),
                      (8, [9, 10], [[11, 12], [13, 14]])],
                     dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
                            ('x2', ('i4', (2, 2)))])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)

        # test that nested fields with identical names don't break anything
        point = np.dtype([('x', int), ('y', int)])
        triangle = np.dtype([('a', point), ('b', point), ('c', point)])
        arr = np.zeros(10, triangle)
        res = structured_to_unstructured(arr, dtype=int)
        assert_equal(res, np.zeros((10, 6), dtype=int))


        # test nested combinations of subarrays and structured arrays, gh-13333
        def subarray(dt, shape):
            return np.dtype((dt, shape))

        def structured(*dts):
            return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])

        def inspect(dt, dtype=None):
            arr = np.zeros((), dt)
            ret = structured_to_unstructured(arr, dtype=dtype)
            backarr = unstructured_to_structured(ret, dt)
            return ret.shape, ret.dtype, backarr.dtype

        dt = structured(subarray(structured(np.int32, np.int32), 3))
        assert_equal(inspect(dt), ((6,), np.int32, dt))

        dt = structured(subarray(subarray(np.int32, 2), 2))
        assert_equal(inspect(dt), ((4,), np.int32, dt))

        dt = structured(np.int32)
        assert_equal(inspect(dt), ((1,), np.int32, dt))

        dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
        assert_equal(inspect(dt), ((5,), np.int32, dt))

        dt = structured()
        assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))

        # these currently don't work, but we may make it work in the future
        assert_raises(NotImplementedError, structured_to_unstructured,
                                           np.zeros(3, dt), dtype=np.int32)
        assert_raises(NotImplementedError, unstructured_to_structured,
                                           np.zeros((3,0), dtype=np.int32))