Example 1
def test_point_array(parray_cls):

    p = parray_cls(5)

    # Make sure length works
    assert len(p) == 5
    assert len(p["x"]) == 5
    assert len(p[["x", "y"]]) == 5

    # Check that single point getitem returns a Point class
    if parray_cls is PredictedPointArray:
        assert type(p[0]) is PredictedPoint
    else:
        assert type(p[0]) is Point

    # Check that slices preserve type as well
    assert type(p[0:4]) is type(p)

    # Check field access
    assert type(p.x) is np.ndarray

    # Check make_default
    d1 = parray_cls.make_default(3)
    d2 = parray_cls.make_default(3)

    # I have to convert from structured to unstructured to get this comparison
    # to work.
    from numpy.lib.recfunctions import structured_to_unstructured

    np.testing.assert_array_equal(structured_to_unstructured(d1),
                                  structured_to_unstructured(d2))
Example 2
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4, 5), dtype='f8'))

        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([3., 5.5, 9., 11.]))

        c = np.arange(20).reshape((4, 5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([(0, (1., 2), [3., 4.]), (5, (6., 7), [8., 9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                        dtype=[('a', '<i4'),
                               ('b', [('f0', '<f4'), ('f1', '<u2')]),
                               ('c', '<f4', (2, ))])
        assert_equal(out, want)

        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([8.0 / 3, 16.0 / 3, 26.0 / 3, 11.]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([3., 5.5, 9., 11.]))

        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)
Example 3
 def callback(self, msg: PointCloud2):
     # converting ROS message to dense numpy array
     # print("1"+"--- %s seconds ---" % (time.time() - start_time))
     data = ros_numpy.numpify(msg)
     arr = ros_numpy.point_cloud2.split_rgb_field(data)
     point_cloud = structured_to_unstructured(arr[['x', 'y', 'z']])
     self.point_cloud_img.append(point_cloud)
     color = structured_to_unstructured(arr[['r', 'g', 'b']])
     self.color_img.append(color)
Example 4
    def load_system(self, input, system=None, step=None):

        self.input = Path(input)
        self.cwd = self.cwd or self.input.parent

        if step is None:
            step = 0

        self._step = np.asarray(step)

        f = open(self.input, "rb")

        # Load system information
        n_qm_atoms, n_mm_atoms, qm_charge, qm_mult, _step = np.fromfile(
            f, dtype="i4", count=5)

        # Load QM information
        dtype = [('pos_x', "f8"), ('pos_y', "f8"), ('pos_z', "f8"),
                 ('charge', "f8"), ('element', "i4")]
        qm_atoms = np.fromfile(f, dtype=dtype, count=n_qm_atoms)

        # Load MM information
        if n_mm_atoms > 0:
            dtype = [('pos_x', "f8"), ('pos_y', "f8"), ('pos_z', "f8"),
                     ('charge', "f8")]
            mm_atoms = np.fromfile(f, dtype=dtype, count=n_mm_atoms)

        # Load unit cell information
        cell_basis = np.fromfile(f, dtype="f8", count=9).reshape(3, 3)
        cell_basis[np.isclose(cell_basis, 0.0)] = 0.0

        f.close()

        # Initialize System
        if system is None:
            n_atoms = n_qm_atoms + n_mm_atoms
            system = System(n_atoms,
                            n_qm_atoms,
                            qm_charge=qm_charge,
                            qm_mult=qm_mult)

        system.qm.atoms.positions[:] = structured_to_unstructured(
            qm_atoms[['pos_x', 'pos_y', 'pos_z']]).T
        system.qm.atoms.charges[:] = qm_atoms['charge']
        system.qm.atoms.elements[:] = qm_atoms['element']

        if n_mm_atoms > 0:
            system.mm.atoms.positions[:] = structured_to_unstructured(
                mm_atoms[['pos_x', 'pos_y', 'pos_z']]).T
            system.mm.atoms.charges[:] = mm_atoms['charge']

        if not np.all(cell_basis == 0.0):
            system.cell_basis[:] = cell_basis

        self._step[()] = _step

        return system
Example 5
    def lidar_callback(self, msg):
        if self.model.inference_ctx is None or self.model.inference_ctx.anchor_cache is None:
            return

        # Look for an intensity field; leave as None if the cloud has none.
        intensity_fname = None
        intensity_dtype = None
        for field in msg.fields:
            if field.name in ("i", "intensity"):
                intensity_fname = field.name
                intensity_dtype = field.datatype

        dtype_list = self._fields_to_dtype(msg.fields, msg.point_step)
        pc_arr = np.frombuffer(msg.data, dtype_list)

        if intensity_fname:
            pc_arr = structured_to_unstructured(
                pc_arr[["x", "y", "z", intensity_fname]]).copy()
            if intensity_dtype == 2:
                pc_arr[:, 3] = pc_arr[:, 3] / 255
        else:
            pc_arr = structured_to_unstructured(pc_arr[["x", "y", "z"]]).copy()
            pc_arr = np.hstack((pc_arr, np.zeros((pc_arr.shape[0], 1))))

        lidar_boxes = self.model.predcit(pc_arr)

        num_detects = len(lidar_boxes)
        arr_bbox = BoundingBoxArray()
        for i in range(num_detects):
            bbox = BoundingBox()

            bbox.header.frame_id = msg.header.frame_id
            bbox.header.stamp = rospy.Time.now()

            bbox.pose.position.x = float(lidar_boxes[i][0])
            bbox.pose.position.y = float(lidar_boxes[i][1])
            bbox.pose.position.z = float(
                lidar_boxes[i][2]) + float(lidar_boxes[i][5]) / 2
            bbox.dimensions.x = float(lidar_boxes[i][3])  # width
            bbox.dimensions.y = float(lidar_boxes[i][4])  # length
            bbox.dimensions.z = float(lidar_boxes[i][5])  # height

            q = Quaternion(axis=(0, 0, 1), radians=float(lidar_boxes[i][6]))
            bbox.pose.orientation.x = q.x
            bbox.pose.orientation.y = q.y
            bbox.pose.orientation.z = q.z
            bbox.pose.orientation.w = q.w

            arr_bbox.boxes.append(bbox)

        arr_bbox.header.frame_id = msg.header.frame_id
        arr_bbox.header.stamp = rospy.Time.now()
        print("Number of detections: {}".format(num_detects))

        self.pub_bbox.publish(arr_bbox)
Example 6
def iterate_batches(dataset):
    for super_batch, batches in dataset:
        x_files = [
            gzip.GzipFile(os.path.join(SESSION_DIR, s['sessionid'], 'data.gz'))
            for s in super_batch
        ]
        y_files = [
            gzip.GzipFile(
                os.path.join(SESSION_DIR, s['sessionid'], 'labels.gz'))
            for s in super_batch
        ]
        # Actions only take effect in the next frame, offset by 1 time step
        for y_file in y_files:
            y_file.read(LABEL_DTYPE.itemsize)
        for super_batch_index in range(batches):
            x = np.zeros((BATCH_SIZE, SEQUENCE_LENGTH, 50, 90), dtype=np.uint8)
            y_target = np.zeros((BATCH_SIZE, SEQUENCE_LENGTH, 2),
                                dtype=np.float32)
            y_binary = np.zeros((BATCH_SIZE, SEQUENCE_LENGTH, 5),
                                dtype=np.int8)
            y_weapon = np.zeros((BATCH_SIZE, SEQUENCE_LENGTH, 1),
                                dtype=np.int8)
            mask = np.zeros((BATCH_SIZE, SEQUENCE_LENGTH), dtype=np.float32)
            for i, x_file in enumerate(x_files):
                data = x_file.read(90 * 50 * SEQUENCE_LENGTH)
                data = np.reshape(np.frombuffer(data, dtype=np.uint8),
                                  (-1, 50, 90))
                x[i, :len(data)] = data
            for i, y_file in enumerate(y_files):
                data = y_file.read(LABEL_DTYPE.itemsize * SEQUENCE_LENGTH)
                data = np.frombuffer(data, dtype=LABEL_DTYPE)
                # Can't aim to center, normalize vector
                target = structured_to_unstructured(
                    data[['targetx', 'targety']])
                invalid_target_mask = np.all(target == [0, 0], axis=1)
                target[invalid_target_mask] = [0, -1]
                y_target[i, :len(data)] = target / np.linalg.norm(
                    target, axis=1, keepdims=True)
                # Encode direction as left/right buttons
                y_binary[i, :len(data)][data['direction'] == -1, 0] = 1
                y_binary[i, :len(data)][data['direction'] == 1, 1] = 1
                y_binary[i, :len(data), 2:] = structured_to_unstructured(
                    data[['jump', 'fire', 'hook']])
                # Clip weapon to possible values
                y_weapon[i, :len(data), 0] = np.clip(data['weapon'], 0,
                                                     WEAPON_COUNT - 1)
                # Set mask based on y instead of x because it's shorter by 1 time step
                mask[i, :len(data)] = 1.
            y = [y_target, y_binary, y_weapon]
            mask = [mask, mask, mask]
            yield super_batch_index, x, y, mask
Example 7
def test_from_and_to_array():
    p = PointArray(3)

    # Do a round trip conversion
    r = PredictedPointArray.to_array(PredictedPointArray.from_array(p))

    from numpy.lib.recfunctions import structured_to_unstructured

    np.testing.assert_array_equal(structured_to_unstructured(p),
                                  structured_to_unstructured(r))

    # Make sure conversion uses default score
    r = PredictedPointArray.from_array(p)
    assert r.score[0] == PredictedPointArray.make_default(1)[0].score
Example 8
def get_body_energies(body_infos, body_name):
    bi = body_infos[body_name]
    K = 0.5 * bi['m'] * (bi['vx']**2 + bi['vy']**2 + bi['vz']**2)

    W = 0
    r_mine = structured_to_unstructured(bi[['x', 'y', 'z']])
    for other_body_name in body_infos.keys():
        if other_body_name == body_name:
            continue
        body_other = body_infos[other_body_name]
        r_other = structured_to_unstructured(body_other[['x', 'y', 'z']])
        W += body_other['m'] / scipy.linalg.norm(r_other - r_mine, axis=1)
    W *= -G * bi['m']

    return K + W
Example 9
    def ustruct(
        self,
        fields: Optional[list[str]] = None,

        # type that all field values will be cast to
        # in the returned view.
        common_dtype: np.dtype = float,
    ) -> np.ndarray:

        array = self._array

        if fields:
            selection = array[fields]
            # fcount = len(fields)
        else:
            selection = array
            # fcount = len(array.dtype.fields)

        # XXX: manual ``.view()`` attempt that also doesn't work.
        # uview = selection.view(
        #     dtype='<f16',
        # ).reshape(-1, 4, order='A')

        # assert len(selection) == len(uview)

        u = rfn.structured_to_unstructured(
            selection,
            # dtype=float,
            copy=True,
        )

        # unstruct = np.ndarray(u.shape, dtype=a.dtype, buffer=shm.buf)
        # array[:] = a[:]
        return u
Example 10
    def Analyse(self, parameters):

        data = np.genfromtxt("particles.csv", delimiter=",", names=True)

        # pick last cycle (given current parameter file)
        final_data = data[data["ncycle"] == 184]
        final_data.sort(order="particles_id")

        # see examples/particle_leapfrog/particle_leapfrog.cpp for reference data
        ref_data = np.array([
            [-0.1, 0.2, 0.3, 1.0, 0.0, 0.0],
            [0.4, -0.1, 0.3, 0.0, 1.0, 0.0],
            [-0.1, 0.3, 0.2, 0.0, 0.0, 0.5],
            [0.0, 0.0, 0.0, -1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, -1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
            [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, -1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 1.0, -1.0, 1.0],
            [0.0, 0.0, 0.0, 1.0, 1.0, -1.0],
            [0.0, 0.0, 0.0, -1.0, -1.0, 1.0],
            [0.0, 0.0, 0.0, 1.0, -1.0, -1.0],
            [0.0, 0.0, 0.0, -1.0, 1.0, -1.0],
            [0.0, 0.0, 0.0, -1.0, -1.0, -1.0],
        ])
        final_data = structured_to_unstructured(
            final_data[["x", "y", "z", "vx", "vy", "vz"]])
        if ref_data.shape != final_data.shape:
            print(
                "TEST FAIL: Mismatch between actual and reference data shape.")
            return False
        return (final_data == ref_data).all()
Example 11
def propagate_results(t, date0, results, propagator, num=None, params=None):
    if params is None:
        params = {}
    if num is None:
        num = range(len(results.trace))
    else:
        num = np.random.randint(len(results.trace), size=num)

    pbar = tqdm(total=len(num), ncols=100)

    states = np.empty((len(num), ), dtype=results.trace.dtype)

    it = 0
    for i in num:
        state = structured_to_unstructured(
            results.trace[i][['x', 'y', 'z', 'vx', 'vy', 'vz']])

        prop_state = propagator.propagate(np.array([t]), state,
                                          times.npdt2mjd(date0), **params)
        states[it] = unstructured_to_structured(prop_state.T,
                                                results.trace.dtype)
        it += 1
        pbar.update(1)

    return states, num
Example 12
    def _read_data(self):
        ''' Reads in the data from the file and stores it at self.data '''
        try:
            fd = open(f'{self.name}.ffd', 'rb')
            data = fd.read()
            fd.close()
        except:
            raise Exception('Error: Could not open data file for reading')

        # Determine the shape of the file and the expected # of bytes in the data
        recl = int(self.header.get_value('RECL'))
        rows = int(len(data) / recl)
        cols = int(self.header.get_value('NCOLS'))
        num_bytes = rows * recl

        # Convert binary records to data
        dtype = self.header._get_dtype()
        if num_bytes == len(data):  # If no extra bytes detected
            # Read data from file w/ given dtype and convert to unstructured array
            data = np.fromfile(f'{self.name}.ffd', dtype, rows)
            data = rfn.structured_to_unstructured(data, dtype='f8')
        else:
            # If data length is off, split by recl and convert to non-binary
            records = [data[i * recl:(i + 1) * recl] for i in range(0, rows)]
            data = [np.frombuffer(record, dtype=dtype) for record in records]
            data = np.array(data)

        self.data = data

        return data
Example 13
def savepyra5(data, fptr, args):
    """
    SAVEPYRA5: save the PYRA5 data structure to *.msh file.

    """
    fptr.write("PYRA5=" + str(data.size) + "\n")

    rmax = 2**19
    next = 0

    while (next < data.size):

        nrow = min(rmax, data.size - next)
        nend = next + nrow

        sfmt = ";".join(["%d"] * 5) + ";%d\n"
        sfmt = sfmt * nrow

        fdat = sfmt % tuple(
            rfn.structured_to_unstructured(data[next:nend],
                                           dtype=np.int32).ravel())

        fptr.write(fdat)

        next = next + nrow

    return
Example 14
def savevert3(ftag, data, fptr, args):
    """
    SAVEVERT3: save the POINT data structure to *.msh file.

    """
    fptr.write(ftag + "=" + str(data.size) + "\n")

    rmax = 2**19
    next = 0

    while (next < data.size):

        nrow = min(rmax, data.size - next)
        nend = next + nrow

        sfmt = "%%.%ug" % args.prec

        sfmt = ";".join([sfmt] * 3) + ";%d\n"
        sfmt = sfmt * nrow

        fdat = sfmt % tuple(
            rfn.structured_to_unstructured(data[next:nend],
                                           dtype=np.float64).ravel())

        fptr.write(fdat)

        next = next + nrow

    return
Example 15
def ohlc_flatten(
    ohlc: np.ndarray,
    use_mxmn: bool = True,
) -> tuple[np.ndarray, np.ndarray]:
    '''
    Convert an OHLCV struct-array into a flat ready-for-line-plotting
    1-d array that is 4 times the size with x-domain values distributed
    evenly (by 0.5 steps) over each index.

    '''
    index = ohlc['index']

    if use_mxmn:
        # traces a line optimally over highs to lows
        # using numba. NOTE: pretty sure this is faster
        # and looks about the same as the below output.
        flat, x = hl2mxmn(ohlc)

    else:
        flat = rfn.structured_to_unstructured(
            ohlc[['open', 'high', 'low', 'close']]).flatten()

        x = np.linspace(
            start=index[0] - 0.5,
            stop=index[-1] + 0.5,
            num=len(flat),
        )
    return x, flat
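The else branch above is just a four-field selection flattened row by row; a minimal sketch of that idea on toy OHLC data (the struct layout below is illustrative, not the project's actual dtype):

# Illustrative sketch only; not part of the example above.
import numpy as np
from numpy.lib import recfunctions as rfn

ohlc = np.array([(0, 1.0, 2.0, 0.5, 1.5), (1, 1.5, 2.5, 1.0, 2.0)],
                dtype=[('index', 'i8'), ('open', 'f8'), ('high', 'f8'),
                       ('low', 'f8'), ('close', 'f8')])
# Four samples per bar, laid out open-high-low-close.
flat = rfn.structured_to_unstructured(
    ohlc[['open', 'high', 'low', 'close']]).flatten()
# Spread the x-domain evenly so each bar occupies one unit around its index.
x = np.linspace(ohlc['index'][0] - 0.5, ohlc['index'][-1] + 0.5, num=len(flat))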
Example 16
def objective(threshold, fwhm, sigma_radius, roundlo, roundhi, sharplo,
              sharphi):
    res_table = DAOStarFinder(threshold=median + std * threshold,
                              fwhm=fwhm,
                              sigma_radius=sigma_radius,
                              sharplo=sharplo,
                              sharphi=sharphi,
                              roundlo=roundlo,
                              roundhi=roundhi,
                              exclude_border=True)(img)
    if not res_table:
        return 3000

    xys = structured_to_unstructured(
        np.array(res_table['xcentroid', 'ycentroid']))
    seen_indices = set()
    offsets = []
    for xy in xys:
        dist, index = lookup_tree.query(xy)
        if dist > 2 or index in seen_indices:
            offsets.append(np.nan)
        else:
            offsets.append(dist)
        seen_indices.add(index)

    offsets += [np.nan] * len(seen_indices - set(lookup_tree.indices))
    offsets += [np.nan] * abs(len(ref_table) - len(res_table))
    offsets = np.array(offsets)
    offsets -= np.nanmean(offsets)
    offsets[np.isnan(offsets)] = 3.

    return np.sqrt(np.sum(np.array(offsets)**2))
Example 17
def draw_wfm_pillow(rect: QRect, wfm_arr: WFMArray) -> QImage:
    vmin, vmax = -20, 120
    vsize = vmax - vmin

    in_height, in_width = wfm_arr.shape
    in_rect = QRect(0, 0, in_width, in_height)

    rect_h, rect_w = rect.height(), rect.width()
    h_scale = rect_h - 1
    w_scale = rect_w - 1

    wfm_arr['ypos'] = (wfm_arr['ypos'] * 100 / vmax * vsize -
                       vmin) / vsize * h_scale
    wfm_arr['xpos'] *= w_scale

    img = Image.new('RGBA', (rect_w, rect_h), (0, 0, 0, 0))
    d = ImageDraw.Draw(img)

    xy_arr = rfn.structured_to_unstructured(wfm_arr[['xpos', 'ypos']])

    for y in range(in_height):
        d.line(xy_arr[y], fill=(255, 255, 255, 255), width=1)

    qimg = img.toqimage()
    if in_rect != rect:
        qimg = qimg.scaled(rect.size())
    return qimg.mirrored(False, True)
Example 18
def savebound(data, fptr, args):
    """
    SAVEBOUND: save the BOUND data structure to *.msh file.

    """
    fptr.write("BOUND=" + str(data.size) + "\n")

    rmax = 2**19
    next = 0

    while (next < data.size):

        nrow = min(rmax, data.size - next)
        nend = next + nrow

        sfmt = "%d;%d;%d\n" * nrow

        fdat = sfmt % tuple(
            rfn.structured_to_unstructured(data[next:nend],
                                           dtype=np.int32).ravel())

        fptr.write(fdat)

        next = next + nrow

    return
Example 19
def to_unstruct(array: np.ndarray) -> np.ndarray:
    """
    Convert a structured (non-homogeneous) array to an unstructured array.

    :param array: np.ndarray (structured), a structured array (i.e., a numpy array with named columns/fields)
    :return: np.ndarray, the newly converted unstructured (normal) array
    """
    return rfn.structured_to_unstructured(array)
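A minimal usage sketch of the wrapper above, assuming a toy two-field record layout (the field names 'x' and 'y' are illustrative):

# Illustrative sketch only; not part of the example above.
import numpy as np
from numpy.lib import recfunctions as rfn

rows = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=[('x', 'f8'), ('y', 'f8')])
plain = rfn.structured_to_unstructured(rows)  # shape (2, 2), dtype float64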
Example 20
def load_map(fname):
    mapdata = np.loadtxt(fname,
                         dtype={
                             'names': ('type', 'xmin', 'ymin', 'zmin', 'xmax',
                                       'ymax', 'zmax', 'r', 'g', 'b'),
                             'formats': ('S8', 'f', 'f', 'f', 'f', 'f', 'f',
                                         'f', 'f', 'f')
                         })
    blockIdx = mapdata['type'] == b'block'
    # works on numpy-1.16.3
    boundary = nprec.structured_to_unstructured(mapdata[~blockIdx][[
        'xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax', 'r', 'g', 'b'
    ]])
    blocks = nprec.structured_to_unstructured(mapdata[blockIdx][[
        'xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax', 'r', 'g', 'b'
    ]])
    return boundary, blocks
Example 21
 def loader(imageblock):
     if mmap is True:
         mrc = mrcfile.mmap(image_path)
     else:
         mrc = mrcfile.open(image_path)
     imageblock.data = mrc.data
     pixel_size = structured_to_unstructured(mrc.voxel_size)[::-1]
     imageblock.pixel_size = pixel_size
Example 22
def _rec_to_ndarr(rec_arr, data_type=float):
    """
    Function to transform a numpy record array to a nd array.
    dupe of SimPEG.electromagnetics.natural_source.utils.rec_to_ndarr to avoid circular import
    """
    # fix for numpy >= 1.16.0
    # https://numpy.org/devdocs/release/1.16.0-notes.html#multi-field-views-return-a-view-instead-of-a-copy
    return np.array(recFunc.structured_to_unstructured(recFunc.repack_fields(rec_arr[list(rec_arr.dtype.names)])),
                    dtype=data_type)
Example 23
def rec_to_ndarr(rec_arr, data_type=float):
    """
    Function to transform a numpy record array to a nd array.
    """
    # fix for numpy >= 1.16.0 with masked arrays
    # https://numpy.org/devdocs/release/1.16.0-notes.html#multi-field-views-return-a-view-instead-of-a-copy
    return np.array(recFunc.structured_to_unstructured(
        recFunc.repack_fields(rec_arr[list(rec_arr.dtype.names)])),
                    dtype=data_type)
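The two rec_to_ndarr helpers above pair repack_fields with structured_to_unstructured because, from numpy 1.16.0 onward, multi-field indexing such as rec_arr[['x', 'z']] returns a view that still carries the padding of the omitted fields. A minimal sketch of the same pattern on a toy record array (field names are illustrative):

# Illustrative sketch only; not part of the examples above.
import numpy as np
from numpy.lib import recfunctions as recFunc

rec = np.array([(1, 2.0, 3.0), (4, 5.0, 6.0)],
               dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'f8')])
sub = rec[['a', 'c']]                  # view that keeps padding where 'b' was
packed = recFunc.repack_fields(sub)    # contiguous copy without the padding
plain = np.array(recFunc.structured_to_unstructured(packed), dtype=float)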
Example 24
def load_structured_data(file): ## file for data (x,y)
    if Path(str(file)).is_file():
        structured_data = np.genfromtxt(file, delimiter=',', names=True, dtype=float)
        data = rf.structured_to_unstructured(rf.repack_fields(structured_data)) 
    else:
        raise FileNotFoundError(file) # raise error
    data = data.reshape(1, -1) if len(data.shape) == 1 else data
    names = structured_data.dtype.names
    return names, data 
Example 25
    def get_points_array(
        self, copy: bool = True, invisible_as_nan: bool = False, full: bool = False
    ) -> Union[np.ndarray, np.recarray]:
        """
        Return the instance's points in array form.

        Args:
            copy: If True, return a copy of the points array as an ndarray.
                If False, return a view of the underlying recarray.
            invisible_as_nan: Whether invisible points should be marked as NaN.
                If copy is False, then invisible_as_nan is ignored since we
                don't want to set invisible points to NaN in the original data.
            full: If True, return all data for points. Otherwise, return just
                the x and y coordinates.

        Returns:
            Either a recarray (if copy is False) or an ndarray (if copy True).

            The order of the rows corresponds to the ordering of the skeleton
            nodes. Any skeleton node not defined will have NaNs present.

            Columns in recarray are accessed by name, e.g., ["x"], ["y"].

            Columns in ndarray are accessed by number. The order matches
            the order in `Point.dtype` or `PredictedPoint.dtype`.
        """
        self._fix_array()

        if not copy:
            if full:
                return self._points
            else:
                return self._points[["x", "y"]]
        else:
            if full:
                parray = structured_to_unstructured(self._points)
            else:
                parray = structured_to_unstructured(self._points[["x", "y"]])

            # Note that invisible_as_nan assumes copy is True.
            if invisible_as_nan:
                parray[~self._points.visible] = math.nan

            return parray
Example 26
def structured_to_unstructured(
        structured_array: np.ndarray,
        **kwargs: Optional[np.dtype]) -> np.ndarray:  # pragma: no cover
    """
    Calls either local or numpy's structured_to_unstructured function.

    numpy 1.16.0 has introduced
    :func:`numpy.lib.recfunctions.structured_to_unstructured` function. To
    ensure backwards compatibility up to numpy 1.9.0 this package implements
    its own version of this function
    (:func:`fatf.utils.array.tools.fatf_structured_to_unstructured`).
    This function calls the latter if a numpy version below 1.16.0 is installed.
    However, if numpy 1.16.0 or above is detected, numpy's implementation is
    used instead.

    For the description of ``structured_to_unstructured`` functionality either
    refer to the corresponding numpy
    (:func:`numpy.lib.recfunctions.structured_to_unstructured`) or local
    (:func:`fatf.utils.array.tools.fatf_structured_to_unstructured`)
    documentation.

    .. warning:: Since this function either calls a local implementation or a
       builtin numpy function there may be some inconsistencies in its
       behaviour. One that we are aware of is conversion of arrays that contain
       ``'V'`` -- raw data (void), ``'O'`` -- (Python) objects, ``'M'`` --
       datetime or ``'m'`` -- timedelta dtypes. These types are not supported
       by the local implementation, however some of them are supported by the
       numpy built-in, e.g. the ``'V'`` type.

    Parameters
    ----------
    structured_array : numpy.ndarray
        A structured numpy array to be converted into a plain numpy array.
    **kwargs : Optional[numpy.dtype]
        Named parameters that are passed to the appropriate structured to
        unstructured array converter. These parameters are ignored when calling
        the local implementation
        (:func:`fatf.utils.array.tools.fatf_structured_to_unstructured`).

    Returns
    -------
    classic_array : numpy.ndarray
        A classic numpy array representation of the ``structured_array`` with
        the most generic type out of the input array's dtypes.
    """
    # pylint: disable=no-member
    if _LOCAL_STRUCTURED_TO_UNSTRUCTURED:
        classic_array = fatf_structured_to_unstructured(structured_array)
    else:
        classic_array = recfn.structured_to_unstructured(
            structured_array, **kwargs)
        if (fuav.is_2d_array(structured_array)
                and fuav.is_1d_array(classic_array)):
            classic_array = classic_array.reshape(
                (structured_array.shape[0], 1))
    return classic_array
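A minimal sketch, assuming a simple major/minor comparison against np.__version__, of how the _LOCAL_STRUCTURED_TO_UNSTRUCTURED flag used above could be derived (fatf's actual detection code may differ):

# Illustrative sketch only; fatf's real version check may look different.
import numpy as np

_major, _minor = (int(part) for part in np.__version__.split('.')[:2])
# Use the local fallback on numpy releases older than 1.16.0.
_LOCAL_STRUCTURED_TO_UNSTRUCTURED = (_major, _minor) < (1, 16)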
Example 27
 def __getitem__(self, index):
     events = loris.read_file(self.samples[index])["events"]
     events = np.array(structured_to_unstructured(events, dtype=float))
     events[:, 2] -= self.minimum_y_value
     target = self.targets[index]
     if self.transform is not None:
         events = self.transform(events, self.sensor_size, self.ordering)
     if self.target_transform is not None:
         target = self.target_transform(target)
     return events, target
Example 28
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4,5), dtype='f8'))

        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([ 3. ,  5.5,  9. , 11. ]))

        c = np.arange(20).reshape((4,5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([( 0, ( 1.,  2), [ 3.,  4.]),
                         ( 5, ( 6.,  7), [ 8.,  9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                     dtype=[('a', '<i4'),
                            ('b', [('f0', '<f4'), ('f1', '<u2')]),
                            ('c', '<f4', (2,))])
        assert_equal(out, want)

        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([ 8.0/3,  16.0/3,  26.0/3, 11. ]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([ 3. ,  5.5,  9. , 11. ]))

        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)

        # test that nested fields with identical names don't break anything
        point = np.dtype([('x', int), ('y', int)])
        triangle = np.dtype([('a', point), ('b', point), ('c', point)])
        arr = np.zeros(10, triangle)
        res = structured_to_unstructured(arr, dtype=int)
        assert_equal(res, np.zeros((10, 6), dtype=int))
Example 29
def concat(first: np.ndarray,
           second: np.ndarray,
           type: "{row, columns, array, melt}" = "row") -> np.ndarray:
    """
    Multiple methods of concatenation, some of which are experimental. The basic
    methods are 'columns' and 'row'; the other methods do not necessarily produce
    outcomes distinct from those of 'columns' and 'row'.

    Note: if you are concatenating a single column, always use double
    brackets so that the column name can be easily retrieved, i.e. array[[col]].

    :param first: np.ndarray, the left/top array to concatenate.
    :param second: np.ndarray, the right/bottom array to concatenate.
    :param type: str or int, the type of concatenation: 'row', 'columns', 'array' or 'melt'.
    :return concat: np.ndarray, the newly concatenated array.
    """
    if type in ["row", "r", "rows", 0]:
        try:
            concat = np.concatenate([first, second])
        except:
            concat = np.concatenate([
                rfn.structured_to_unstructured(first),
                rfn.structured_to_unstructured(second)
            ])
            concat = rfn.unstructured_to_structured(concat,
                                                    names=first.dtype.names)
    elif type in ["columns", "column", "c", 1]:
        concat = concat_col(first, second)
        #concat = rfn.merge_arrays((first, second), asrecarray=False, flatten=True)  # tuples
    elif type == "array":
        concat = np.c_[[first, second]]
    elif type == "melt":  ## looks similar to columns but list instead of tuples
        try:
            concat = np.c_[(first, second)]
        except:
            concat = np.c_[(rfn.structured_to_unstructured(first),
                            rfn.structured_to_unstructured(second))]
            concat = rfn.unstructured_to_structured(concat,
                                                    names=first.dtype.names)
    else:
        raise ValueError(
            "type has to be set to either: row, columns, array or melt")
    return concat
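The structured-array fallback in the 'row' branch above amounts to a round trip through unstructured form; a minimal sketch of that idea on two toy arrays (field names are illustrative; note the round trip promotes every field to the common unstructured dtype):

# Illustrative sketch only; not part of the example above.
import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1, 2.0)], dtype=[('x', 'i4'), ('y', 'f8')])
b = np.array([(3, 4.0)], dtype=[('x', 'i4'), ('y', 'f8')])

stacked = np.concatenate([rfn.structured_to_unstructured(a),
                          rfn.structured_to_unstructured(b)])
# Rebuild the structured view; both fields come back as float64 here.
rows = rfn.unstructured_to_structured(stacked, names=list(a.dtype.names))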
Example 30
def cumulative_flux(img, oversampling=1):
    extent = np.min(img.shape)/2
    rs = np.linspace(1, extent, int(extent*oversampling))
    xcenter, ycenter = centroid_quadratic(img)

    apertures = [CircularAperture((xcenter, ycenter), r=r) for r in rs]
    tab = aperture_photometry(img, apertures, method='exact')
    # every aperture has its own column; exclude the first three (id, x, y)
    cumulative_flux = rf.structured_to_unstructured(tab.as_array()).ravel()[3:]

    return rs, cumulative_flux
Example 31
def test_emulsion_processing():
    """test identifying emulsions in phase fields"""
    grid = UnitGrid([32, 32], periodic=True)

    e1 = Emulsion(
        [
            DiffuseDroplet(position=[5, 6], radius=9, interface_width=1),
            DiffuseDroplet(position=[20, 19], radius=8, interface_width=1),
        ],
        grid=grid,
    )
    field = e1.get_phasefield()

    e2 = image_analysis.locate_droplets(field, refine=True)

    np.testing.assert_allclose(
        structured_to_unstructured(e1.data),
        structured_to_unstructured(e2.data),
        rtol=0.02,
    )