Example 1: line data with per-vertex RGBA values and x coordinates
 def add_data(self, lines):
     assert lines.shape[1] == 4
     x_coord = np.mgrid[0.0:1.0:lines.shape[0] * 1j].astype("f4")
     x_coord = x_coord.reshape((-1, 1))
     self.n_vertices = lines.shape[0]
     self.vertex_array.attributes.append(
         VertexAttribute(name="rgba_values", data=lines))
     self.vertex_array.attributes.append(
         VertexAttribute(name="x_coord", data=x_coord))
Example 2: mesh vertices, per-vertex data, and element indices
 def add_data(self, field):
     v, d, i = self.get_mesh_data(self.data_source, field)
     v.shape = (v.size // 3, 3)
     v = np.concatenate([v, np.ones((v.shape[0], 1))], axis=-1).astype("f4")
     d.shape = (d.size, 1)
     i.shape = (i.size, 1)
     i = i.astype("uint32")
     # d[:] = np.mgrid[0.0:1.0:1j*d.size].astype("f4")[:,None]
     self.vertex_array.attributes.append(
         VertexAttribute(name="model_vertex", data=v)
     )
     self.vertex_array.attributes.append(
         VertexAttribute(name="vertex_data", data=d.astype("f4"))
     )
     self.vertex_array.indices = i
     self.size = i.size
Example 3: box-outline vertex array with left-edge, right-edge, and dx attributes
 def _default_vertex_array(self):
     va = VertexArray(name="box_outline", each=36)
     data = compute_box_geometry(self.left_edge, self.right_edge).copy()
     va.attributes.append(
         VertexAttribute(name="model_vertex", data=data.astype("f4")))
     N = data.size // 4
     le = np.concatenate([[self.left_edge.copy()] for _ in range(N)])
     re = np.concatenate([[self.right_edge.copy()] for _ in range(N)])
     dds = self.right_edge - self.left_edge
     dds = np.concatenate([[dds.copy()] for _ in range(N)])
     va.attributes.append(
         VertexAttribute(name="in_left_edge", data=le.astype("f4")))
     va.attributes.append(
         VertexAttribute(name="in_right_edge", data=re.astype("f4")))
     va.attributes.append(
         VertexAttribute(name="in_dx", data=dds.astype("f4")))
     return va
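
As an aside, the repeated edge arrays built above with list comprehensions can be produced more directly with np.tile; a standalone check (with a made-up edge value):

    import numpy as np

    left_edge = np.array([0.0, 0.0, 0.0])
    N = 36                                       # one copy per box-outline vertex
    le = np.tile(left_edge, (N, 1)).astype("f4")
    assert le.shape == (N, 3)
    assert np.array_equal(
        le, np.concatenate([[left_edge.copy()] for _ in range(N)]).astype("f4"))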
Example 4: a fullscreen-quad SceneData object
 def _default_base_quad(self):
     bq = SceneData(
         name="fullscreen_quad",
         vertex_array=VertexArray(name="tri", each=6),
     )
     fq = FULLSCREEN_QUAD.reshape((6, 3), order="C")
     bq.vertex_array.attributes.append(
         VertexAttribute(name="vertexPosition_modelspace", data=fq))
     return bq
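
FULLSCREEN_QUAD itself is not shown in this excerpt; the reshape((6, 3)) implies 18 floats, i.e. two triangles that cover clip space. A plausible, purely illustrative definition would be:

    import numpy as np

    # Hypothetical stand-in for the module-level constant used above:
    # two counter-clockwise triangles spanning normalized device coordinates.
    FULLSCREEN_QUAD = np.array(
        [-1.0, -1.0, 0.0,
          1.0, -1.0, 0.0,
         -1.0,  1.0, 0.0,
         -1.0,  1.0, 0.0,
          1.0, -1.0, 0.0,
          1.0,  1.0, 0.0],
        dtype="f4",
    )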
Example 5: vertex data with sequential indices
    def add_data(self):

        self.n_vertices = self.data.shape[0]

        self.vertex_array.attributes.append(
            VertexAttribute(name="model_vertex", data=self.data.astype("f4")))

        self.vertex_array.indices = np.arange(0,
                                              self.n_vertices).astype("uint32")
        self.size = self.n_vertices
Example 6: grid-bounds attributes built from a grid list
 def _default_vertex_array(self):
     va = VertexArray(name="grid_bounds")
     dx = []
     le = []
     re = []
     for g in self.grid_list:
         dx.append(g.dds.tolist())
         le.append(g.LeftEdge.tolist())
         re.append(g.RightEdge.tolist())
     positions = np.ones((len(self.grid_list), 4), dtype="f4")
     dx = np.array(dx).astype("f4")
     le = np.array(le).astype("f4")
     re = np.array(re).astype("f4")
     va.attributes.append(
         VertexAttribute(name="model_vertex", data=positions))
     va.attributes.append(VertexAttribute(name="in_left_edge", data=le))
     va.attributes.append(VertexAttribute(name="in_dx", data=dx))
     va.attributes.append(VertexAttribute(name="in_right_edge", data=re))
     return va
Example 7: instanced particle attributes with vertex divisors
 def _default_vertex_array(self):
     model_vertex = np.array([[-1, 1], [-1, -1], [1, 1], [1, -1]],
                             order="F",
                             dtype="f4")
     va = VertexArray(name="particle_positions")
     va.attributes.append(
         VertexAttribute(name="model_vertex", data=model_vertex, divisor=0))
     for attr in ("position_field", "radius_field", "color_field"):
         if getattr(self, attr) is None:
             continue
         field = self.data_source[self.particle_type, getattr(self, attr)]
         if field.units.dimensions is length:
             field.convert_to_units("unitary")
         field = field.astype("f4").d
         if field.ndim == 1:
             field.shape = (field.size, 1)
         else:
             self.size = field.shape[0]  # for positions
         print(f"Setting {attr} to a field of shape {field.shape}")
         va.attributes.append(
             VertexAttribute(name=attr, data=field, divisor=1))
     print(f"Size is now: {self.size}")
     return va
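
The divisor keyword follows the usual OpenGL instancing convention: divisor=0 attributes advance once per vertex of the quad, while divisor=1 attributes advance once per instance (here, once per particle). Purely as an illustration, a per-instance attribute typically maps to raw PyOpenGL calls like these (the attribute index and component count are made up):

    from OpenGL import GL

    def bind_per_instance_attribute(index=3, components=1):
        # Assumes a current OpenGL context and a bound VBO holding the data.
        GL.glEnableVertexAttribArray(index)
        GL.glVertexAttribPointer(index, components, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
        GL.glVertexAttribDivisor(index, 1)   # step once per instance, not per vertex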
Example 8: curve points padded to homogeneous coordinates
    def add_data(self, curve):

        # curve is an (N, 3) ndarray of points
        assert curve.shape[0] > 1  # a curve needs at least 2 points
        assert curve.shape[1] == 3  # each point must have exactly 3 coordinates

        # add the singleton 4th dim
        data = np.ones((curve.shape[0], 4))
        data[:, 0:3] = curve

        self.n_vertices = curve.shape[0]
        self.data = data

        self.vertex_array.attributes.append(
            VertexAttribute(name="model_vertex", data=data.astype("f4")))

        self.vertex_array.indices = np.arange(0,
                                              self.n_vertices).astype("uint32")
        self.size = self.n_vertices
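
A plain-NumPy aside: the allocate-ones-then-assign pattern above pads the (N, 3) curve to homogeneous (x, y, z, 1) coordinates; stacking a column of ones gives the same result:

    import numpy as np

    curve = np.random.random((10, 3))
    padded = np.column_stack([curve, np.ones(curve.shape[0])]).astype("f4")
    assert padded.shape == (10, 4)
    assert np.allclose(padded[:, 3], 1.0)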
Example 9: glyph textures and quad vertices for text rendering
 def build_textures(self):
     # This doesn't check if the textures have already been built
     self.font.set_size(self.font_size, 200)
     chars = [ord(_) for _ in string.printable]
     tex_ids = GL.glGenTextures(len(chars))
     vert = []
     for i, (tex_id, char_code) in enumerate(zip(tex_ids, chars)):
         self.font.clear()
         self.font.set_text(chr(char_code), flags=LOAD_FORCE_AUTOHINT)
         self.font.draw_glyphs_to_bitmap(antialiased=True)
         glyph = self.font.load_char(char_code)
         x0, y0, x1, y1 = glyph.bbox
         bitmap = self.font.get_image().astype(">f4") / 255.0
         dx = 1.0 / bitmap.shape[0]
         dy = 1.0 / bitmap.shape[1]
         triangles = np.array(
             [
                 [x0, y1, 0.0 + dx / 2.0, 0.0 + dy / 2.0],
                 [x0, y0, 0.0 + dx / 2.0, 1.0 - dy / 2.0],
                 [x1, y0, 1.0 - dx / 2.0, 1.0 - dy / 2.0],
                 [x0, y1, 0.0 + dx / 2.0, 0.0 + dy / 2.0],
                 [x1, y0, 1.0 - dx / 2.0, 1.0 - dy / 2.0],
                 [x1, y1, 1.0 - dx / 2.0, 0.0 + dy / 2.0],
             ],
             dtype="<f4",
         )
         vert.append(triangles)
         texture = Texture2D(texture_name=tex_id,
                             data=bitmap,
                             boundary_x="clamp",
                             boundary_y="clamp")
         # I can't find information as to why horiAdvance is a
         # factor of 8 larger than the other factors.  I assume it
         # is referenced somewhere, but I cannot find it.
         self.characters[chr(char_code)] = Character(
             texture, i, glyph.horiAdvance / 8.0, glyph.vertAdvance)
     vert = np.concatenate(vert)
     self.vertex_array.attributes.append(
         VertexAttribute(name="quad_vertex", data=vert.astype("<f4")))
Example 10: a fullscreen-quad vertex array
 def _default_vertex_array(self):
     va = VertexArray(name="tri", each=6)
     fq = FULLSCREEN_QUAD.reshape((6, 3), order="C")
     va.attributes.append(
         VertexAttribute(name="vertexPosition_modelspace", data=fq))
     return va
Example 11: populating a block collection with blocks and textures
    def add_data(self, field, no_ghost=False):
        r"""Adds a source of data for the block collection.

        Given a `data_source` and a `field` to populate from, adds the data
        to the block collection so that is able to be rendered.

        Parameters
        ----------
        data_source : YTRegion
            A YTRegion object to use as a data source.
        field : string
            A field to populate from.
        no_ghost : bool (False)
            Should we speed things up by skipping ghost zone generation?
        """
        self.data_source.tiles.set_fields([field], [False], no_ghost=no_ghost)
        # Every time we change our data source, we wipe all existing ones.
        # We now set up our vertices into our current data source.
        vert, dx, le, re = [], [], [], []
        self.min_val = +np.inf
        self.max_val = -np.inf
        if self.scale:
            left_min = np.ones(3, "f8") * np.inf
            right_max = np.ones(3, "f8") * -np.inf
            for block in self.data_source.tiles.traverse():
                np.minimum(left_min, block.LeftEdge, left_min)
                np.maximum(right_max, block.RightEdge, right_max)
            scale = right_max.max() - left_min.min()
            for block in self.data_source.tiles.traverse():
                block.LeftEdge -= left_min
                block.LeftEdge /= scale
                block.RightEdge -= left_min
                block.RightEdge /= scale
        for i, block in enumerate(self.data_source.tiles.traverse()):
            self.min_val = min(self.min_val, np.nanmin(np.abs(block.my_data[0])).min())
            self.max_val = max(self.max_val, np.nanmax(np.abs(block.my_data[0])).max())
            self.blocks[id(block)] = (i, block)
            vert.append([1.0, 1.0, 1.0, 1.0])
            dds = (block.RightEdge - block.LeftEdge) / block.source_mask.shape
            dx.append(dds.tolist())
            le.append(block.LeftEdge.tolist())
            re.append(block.RightEdge.tolist())
        for (g, node, (sl, _dims, _gi)) in self.data_source.tiles.slice_traverse():
            block = node.data
            self.blocks_by_grid[g.id - g._id_offset].append((id(block), i))
            self.grids_by_block[id(node.data)] = (g.id - g._id_offset, sl)

        if hasattr(self.min_val, "in_units"):
            self.min_val = self.min_val.d
        if hasattr(self.max_val, "in_units"):
            self.max_val = self.max_val.d

        LE = np.array([b.LeftEdge for i, b in self.blocks.values()]).min(axis=0)
        RE = np.array([b.RightEdge for i, b in self.blocks.values()]).max(axis=0)
        self.diagonal = np.sqrt(((RE - LE) ** 2).sum())
        # Now we set up our buffer
        vert = np.array(vert, dtype="f4")
        dx = np.array(dx, dtype="f4")
        le = np.array(le, dtype="f4")
        re = np.array(re, dtype="f4")

        self.vertex_array.attributes.append(
            VertexAttribute(name="model_vertex", data=vert)
        )
        self.vertex_array.attributes.append(VertexAttribute(name="in_dx", data=dx))
        self.vertex_array.attributes.append(
            VertexAttribute(name="in_left_edge", data=le)
        )
        self.vertex_array.attributes.append(
            VertexAttribute(name="in_right_edge", data=re)
        )

        # Now we set up our textures
        self._load_textures()
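
For context, a call to the method above might look like the sketch below. The dataset is the standard yt sample used throughout the yt documentation; "BlockCollection" is a stand-in name for the class this method belongs to, and its constructor arguments are assumed.

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # standard yt sample dataset
    region = ds.all_data()
    # Stand-in class name and assumed constructor signature:
    collection = BlockCollection(data_source=region)
    collection.add_data(("gas", "density"), no_ghost=True)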
Example 12: octree block data with instanced attributes and 3D textures
    def add_data(self, field):
        r"""Adds a source of data for the block collection.

        Given a `data_source` and a `field` to populate from, adds the data
        to the block collection so that is able to be rendered.

        Parameters
        ----------
        data_source : YTRegion
            A YTRegion object to use as a data source.
        field : string
            A field to populate from.
        no_ghost : bool (False)
            Should we speed things up by skipping ghost zone generation?
        """
        ds = self.data_source.ds
        ds.index._identify_base_chunk(self.data_source)
        left_edges = []
        right_edges = []
        dx = []
        data = []

        for obj in self.data_source._current_chunk.objs:
            bs = OctreeSubsetBlockSlice(obj, ds)
            LE = bs._fcoords[0, 0, 0, :, :].d - bs._fwidth[0, 0, 0, :, :].d * 0.5
            RE = bs._fcoords[-1, -1, -1, :, :].d + bs._fwidth[-1, -1, -1, :, :].d * 0.5
            dx.append(bs._fwidth[-1, -1, -1, :, :].d)
            left_edges.append(LE)
            right_edges.append(RE)
            d = bs.get_vertex_centered_data([field])[field]
            data.append(
                np.concatenate([d[:, :, :, i] for i in range(d.shape[-1])],
                               axis=2).copy(order="C"))

        # Let's reshape ...
        left_edges = np.concatenate(left_edges, axis=0).astype("f4")
        right_edges = np.concatenate(right_edges, axis=0).astype("f4")
        dx = np.concatenate(dx, axis=0).astype("f4")
        data = np.concatenate(data, axis=-1).astype("f4")
        data = data.reshape((3, 3, -1))

        self.min_val = np.nanmin(data)
        self.max_val = np.nanmax(data)

        if hasattr(self.min_val, "in_units"):
            self.min_val = self.min_val.d
        if hasattr(self.max_val, "in_units"):
            self.max_val = self.max_val.d

        if self.max_val != self.min_val:
            data = (data - self.min_val) / (
                (self.max_val - self.min_val))  # * self.diagonal)

        self.vertex_array.attributes.append(
            VertexAttribute(name="model_vertex",
                            data=aabb_triangle_strip,
                            divisor=0))
        self.vertex_array.attributes.append(
            VertexAttribute(name="in_dx", data=dx, divisor=1))
        self.vertex_array.attributes.append(
            VertexAttribute(name="in_left_edge", data=left_edges, divisor=1))
        self.vertex_array.attributes.append(
            VertexAttribute(name="in_right_edge", data=right_edges, divisor=1))

        # Now we set up our textures; we need to break our texture up into
        # groups of MAX_3D_TEXTURE_SIZE
        tex_size = GL.glGetInteger(GL.GL_MAX_3D_TEXTURE_SIZE)
        # for now, use one texture for all the bitmaps
        bt = Texture3D(data=np.ones((3, 3, tex_size), dtype="u1") * 255)
        for start_index in np.mgrid[0:data.shape[-1]:tex_size]:
            d = data[:, :, start_index:start_index + tex_size]
            self.data_textures.append(Texture3D(data=d))
            self.bitmap_textures.append(bt)
            self.shapes.append(d.shape[-1])
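
A closing plain-NumPy aside on the chunking loop: with a real (integer) step, np.mgrid behaves like np.arange, so start_index walks the last axis of data in texture-sized strides:

    import numpy as np

    n_slices, tex_size = 10, 4
    starts = np.mgrid[0:n_slices:tex_size]
    assert np.array_equal(starts, np.arange(0, n_slices, tex_size))  # [0, 4, 8]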