Example #1
def hex8_render(engine, field):
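    # select the ray-tracing engine through yt's runtime configuration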
    ytcfg["yt", "ray_tracing_engine"] = engine
    ds = data_dir_load(hex8, kwargs={'step': -1})
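    # build a volume-rendering Scene with a source for the requested field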
    sc = create_scene(ds, field)
    im = sc.render()
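    # compare() is an image-comparison helper defined elsewhere in the test module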
    return compare(
        ds, im, "%s_render_answers_hex8_%s_%s" % (engine, field[0], field[1]))
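For context, a parameterized helper like hex8_render above is typically driven by a small wrapper that iterates over the available ray-tracing engines and the fields to render. The sketch below is an assumed driver, not taken from this page; the "embree"/"yt" engine names and the module-level hex8_fields list are placeholders for illustration.

def test_hex8_render():
    # hypothetical driver: hex8_fields and the engine names are assumptions
    for engine in ("embree", "yt"):
        for field in hex8_fields:
            yield hex8_render(engine, field)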
Example #2
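# os, tempfile, shutil, numpy (as np), create_scene, and fake_vr_orientation_test_ds
# are assumed to be imported at module level in the original test file.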
def test_annotations():
    from matplotlib.image import imread

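    # run in a temporary directory so the saved image does not pollute the working tree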
    curdir = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    os.chdir(tmpdir)
    ds = fake_vr_orientation_test_ds(N=16)
    sc = create_scene(ds)
    sc.annotate_axes()
    sc.annotate_domain(ds)
    sc.render()
    # ensure that there are actually red, green, blue, and white pixels
    # in the image. see Issue #1595
    im = sc._last_render
    for c in ([1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 1, 1, 1]):
        assert np.where((im == c).all(axis=-1))[0].shape[0] > 0
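    # layer the transfer function, then save the render with a text annotation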
    sc[0].tfh.tf.add_layers(10, colormap="cubehelix")
    sc.save_annotated(
        "test_scene_annotated.png",
        text_annotate=[[(0.1, 1.05), "test_string"]],
    )
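    # the saved image should have RGBA channels at the camera resolution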
    image = imread("test_scene_annotated.png")
    assert image.shape == sc.camera.resolution + (4, )
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example #3
def hex8_render(engine, field):
    ytcfg["yt", "ray_tracing_engine"] = engine
    ds = data_dir_load(hex8, kwargs={"step": -1})
    sc = create_scene(ds, field)
    im = sc.render()
    return compare(ds, im,
                   f"{engine}_render_answers_hex8_{field[0]}_{field[1]}")
Example #4
def wedge6_render(engine, field):
    ytcfg["yt", "ray_tracing_engine"] = engine
    ds = data_dir_load(wedge6, kwargs={"step": -1})
    sc = create_scene(ds, field)
    im = sc.render()
    return compare(
        ds, im, "%s_render_answers_wedge6_%s_%s" % (engine, field[0], field[1])
    )
Example #5
def tet10_render(engine, field):
    ytcfg["yt", "ray_tracing_engine"] = engine
    ds = data_dir_load(tet10, kwargs={'step': -1})
    sc = create_scene(ds, field)
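    # fetch the mesh render source and narrow its color-mapping bounds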
    ms = sc.get_source(0)
    ms.color_bounds = (-.01, .2)
    im = sc.render()
    return compare(
        ds, im, "%s_render_answers_tet10_%s_%s" % (engine, field[0], field[1]))
Example #6
def tet10_render(engine, field):
    ytcfg["yt", "ray_tracing_engine"] = engine
    ds = data_dir_load(tet10, kwargs={"step": -1})
    sc = create_scene(ds, field)
    ms = sc.get_source(0)
    ms.color_bounds = (-0.01, 0.2)
    im = sc.render()
    return compare(ds, im,
                   f"{engine}_render_answers_tet10_{field[0]}_{field[1]}")
Example #7
def test_fake_hexahedral_ds_render():
    ds = fake_hexahedral_ds()
    field_list = [("connect1", "elem"), ("connect1", "test")]
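    # render each field and yield an image comparison for it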
    for field in field_list:
        sc = create_scene(ds, field)
        im = sc.render()
        test_prefix = "yt_render_fake_hexahedral_%s_%s" % (field[0], field[1])
        yield compare(
            ds, im, test_prefix=test_prefix, test_name="fake_hexahedral_ds_render"
        )
Example #8
def test_annotations():
    ds = fake_vr_orientation_test_ds(N=16)
    sc = create_scene(ds)
    sc.annotate_axes()
    sc.annotate_domain(ds)
    sc.render()
    # ensure that there are actually red, green, blue, and white pixels
    # in the image. see Issue #1595
    im = sc._last_render
    for c in ([1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 1, 1, 1]):
        assert np.where((im == c).all(axis=-1))[0].shape[0] > 0
Example #9
def perspective_mesh_render(engine):
    ytcfg["yt", "ray_tracing_engine"] = engine
    ds = data_dir_load(hex8)
    sc = create_scene(ds, ("connect2", "diffused"))
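    # add a perspective-lens camera aimed at the origin from an off-axis position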
    cam = sc.add_camera(ds, lens_type='perspective')
    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
    cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
    cam.set_position(cam_pos, north_vector)
    cam.resolution = (800, 800)
    im = sc.render()
    return compare(ds, im, "%s_perspective_mesh_render" % engine)
Example #10
File: test_scene.py  Project: hanjuezhu/yt
def test_annotations():
    ds = fake_vr_orientation_test_ds(N=16)
    sc = create_scene(ds)
    sc.annotate_axes()
    sc.annotate_domain(ds)
    sc.render()
    # ensure that there are actually red, green, blue, and white pixels
    # in the image. see Issue #1595
    im = sc._last_render
    for c in ([1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 1, 1, 1]):
        assert np.where((im == c).all(axis=-1))[0].shape[0] > 0
    sc[0].tfh.tf.add_layers(10, colormap='cubehelix')
    sc.save_annotated('test_scene_annotated.png',
                      text_annotate=[[(.1, 1.05), "test_string"]])