Пример #1
0
    def test_basic(self):
        """ test verifies that we can create, save, and load a store
        and search and exercise it getting consistent results"""

        print "*"*80
        print "testing filestore persistance and I/O functioning"

        thetas = [0, 10, 20, 30, 40]
        phis = [0, 10, 20]

        fname = "./test.json"
        cs = file_store.FileStore(fname)
        cs.filename_pattern = "data_{theta}_{phi}.txt"
        cs.add_parameter("theta", store.make_parameter('theta', thetas))
        cs.add_parameter("phi", store.make_parameter('phi', phis))

        s = set()

        for t in thetas:
            for p in phis:
                doc = store.Document({'theta': t, 'phi': p})
                doc.data = str(doc.descriptor)
                s.add((t, p))
                cs.insert(doc)

        try:
            cs.save()

            s2 = set()

            cs2 = file_store.FileStore(fname)
            # Test load
            cs2.load()
            for doc in cs2.find():
                s2.add(tuple(doc.descriptor.values()))

            self.assertEqual(s, s2)

            # Test search
            docs = cs2.find({'theta': 0})
            import ast
            for doc in docs:
                vals1 = [int(x) for x in doc.descriptor.values()]
                vals2 = ast.literal_eval(doc.data).values()
                self.assertEqual(vals1, vals2)
        except:
            self.clean_up(cs, fname)
            raise
        else:
            self.clean_up(cs, fname)
Пример #2
0
def pose_to_vtk(fname):
    """
    converts the view transformation matrices into a
    vtk polydata to visualize it
    """

    if fname[-5:] == ".json":
        cs = file_store.FileStore(fname)
        cs.load()
        poses = cs.get_parameter('pose')['values']
    else:
        poses = []
        f = open(fname, 'r')
        for line in f:
            asarray = eval(line)
            poses.append(asarray)

    coords = []
    coords.append([0, 0, 0])
    cells = []

    for pose_id in range(0, len(poses)):
        up = poses[pose_id][0]
        rt = [x * 2 for x in poses[pose_id][1]]
        dn = [x * 3 for x in poses[pose_id][2]]
        coords.append(up)
        coords.append(rt)
        coords.append(dn)
        cells.append([
            0,
            1 + (pose_id * 3 + 0),  # line from origin to up and back
            0,
            1 + (pose_id * 3 + 1),  # origin to rt and back
            0,
            1 + (pose_id * 3 + 2)
        ])  # origin to dn and back

    print "# vtk DataFile Version 2.0"
    print "Poses from ", fname
    print "ASCII"
    print "DATASET POLYDATA"
    print "POINTS", len(coords), "double"
    for c in coords:
        print c[0], c[1], c[2]
    print "LINES", len(cells), len(cells) * 7
    for c in cells:
        print 6, c[0], c[1], c[2], c[3], c[4], c[5]
    print "CELL_DATA", len(cells)
    print "SCALARS poseid int 1"
    print "LOOKUP_TABLE default"
    for c in range(0, len(cells)):
        print c
Пример #3
0
def demonstrate_analyze(fname):
    cs = file_store.FileStore(fname)
    cs.load()

    print "PARAMETERS ARE"
    for parameter in cs.parameter_list:
        print parameter
        print cs.get_parameter(parameter)['values']

    print "ONE PARAMETER'S FIRST VALUE IS"
    param = cs.parameter_list.keys()[0]
    val = cs.get_parameter(param)['values'][0]
    print val

    print "HISTOGRAMS OF MATCHING RECORDS FOR", param, "=", val, "ARE"
    for doc in cs.find({param: val}):
        print doc.descriptor
        image = PIL.Image.fromarray(doc.data)
        print image.histogram()
Пример #4
0
def load(filename):
    """
    Open the cinema FileStore at *filename* and wrap it in a
    FileStoreSpecB.

    Raises RuntimeError unless the store is a 'composite-image-stack'
    store with an 'azimuth-elevation-roll' camera model.  Prints a
    once-per-file warning when the store encodes values as RGB arrays
    (value_mode != 2).
    """
    global __warning_count
    fs = file_store.FileStore(filename)
    fs.load()

    # check if we support this cinema database.
    # Build each message once so the printed text and the exception
    # text can never drift apart.
    if fs.metadata.get("type") != "composite-image-stack":
        msg = "Only 'composite-image-stack' file stores are supported."
        print(msg)
        raise RuntimeError(msg)
    if fs.metadata.get("camera_model") != "azimuth-elevation-roll":
        msg = "Only 'azimuth-elevation-roll' cameras are supported."
        print(msg)
        raise RuntimeError(msg)
    if fs.metadata.get("value_mode") != 2:
        # warn only the first time we see each file
        if filename not in __warning_count:
            __warning_count[filename] = True
            print("Warning: the cinema store '" + filename.strip() +
                  "', encodes data values as RGB arrays which is known to " +
                  "have issues in current implementation. Scalar" +
                  "coloring may produce unexpected results.")

    return FileStoreSpecB(fs)
Пример #5
0
    def test_contour(self):
        """End-to-end test of a spec-A ('parametric-image-stack') store:
        explore every phi/theta/contour/color combination with ParaView,
        then manually reproduce one combination and compare the rendered
        image against the stored one."""
        pv.Connect()  # using a dedicated server state for each test
        print "\nTEST_CONTOUR"

        # set up some processing task
        view_proxy = pv.CreateRenderView()
        view_proxy.OrientationAxesVisibility = 0
        view_proxy.ViewSize = [1024, 768]
        s = pv.Wavelet()
        contour = pv.Contour(Input=s, ContourBy='RTData', ComputeScalars=1)
        sliceRep = pv.Show(contour)

        # make or open a cinema data store to put results in
        fname = "/tmp/test_pv_contour/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.0'})
        cs.filename_pattern = "{phi}_{theta}_{contour}_{color}_contour.png"
        cs.add_parameter(
            "phi", store.make_parameter('phi', [90, 120, 140]))
        cs.add_parameter(
            "theta", store.make_parameter('theta', [-90, -30, 30, 90]))
        cs.add_parameter(
            "contour",
            store.make_parameter('contour', [50, 100, 150, 200]))
        cs.add_parameter(
            "color",
            store.make_parameter(
                'color', ['white', 'RTData_1'], typechoice='list'))

        # associate control points with parameters of the data store
        cam = pv_explorers.Camera(
            [0, 0, 0], [0, 1, 0], 75.0, view_proxy)
        filt = pv_explorers.Contour("contour", contour)

        colorChoice = pv_explorers.ColorList()
        colorChoice.AddSolidColor('white', [1, 1, 1])
        colorChoice.AddLUT('POINTS', 'RTData_1', 'X')
        col = pv_explorers.Color("color", colorChoice, sliceRep)

        params = ["phi", "theta", "contour", "color"]
        e = pv_explorers.ImageExplorer(
            cs, params, [cam, filt, col], view_proxy)

        # run through all parameter combinations and put data into the store
        e.explore()

        # Reproduce an entry and compare vs. loaded

        # First set the parameters to reproduce
        cam.execute(store.Document({'theta': 30, 'phi': 140}))
        filt.execute(store.Document({'contour': 100}))
        col.execute(store.Document({'color': 'RTData_1'}))

        imageslice = ch.pvRenderToArray(view_proxy)

        # Now load the corresponding entry back from disk
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find(
                {'theta': 30, 'phi': 140,
                 'contour': 100, 'color': 'RTData_1'}):
            docs.append(doc.data)

        # compare the two: l2 error catches gross pixel differences,
        # normalized cross-correlation catches structural ones
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        success = (l2error < 1.0) and (ncc > 0.99)
        if not success:
            print "\n l2-error = ", l2error, " ; ncc = ", ncc, "\n"
        self.assertTrue(success)
        pv.Disconnect()  # using a dedicated server state for each test
Пример #6
0
    def test_composite(self):
        """End-to-end test of a spec-B ('composite-image-stack') store:
        explore a layered contour pipeline with ParaView — including
        layer visibility, a dependent isoval control, and multiple color
        fields — then manually reproduce one entry and compare it with
        the stored raster."""
        pv.Connect()  # get a new context like a normal script would
        print "\nTEST_COMPOSITE"

        # set up some processing task
        view_proxy = pv.CreateRenderView()
        view_proxy.OrientationAxesVisibility = 0
        s = pv.Wavelet()
        contour = pv.Contour(Input=s, ContourBy='RTData', ComputeScalars=1)
        sliceRep = pv.Show(contour)

        # make or open a cinema data store to put results in
        fname = "/tmp/test_pv_composite/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'composite-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.1'})

        cs.filename_pattern = "results.png"
        cs.add_parameter(
            "phi", store.make_parameter('phi', [90, 120, 140]))
        cs.add_parameter(
            "theta", store.make_parameter('theta', [-90, -30, 30, 90]))
        cs.add_layer(
            "vis", store.make_parameter("vis", ['contour']))
        contours = [50, 100, 150, 200]
        # 'isoval' only applies when the 'contour' layer is visible
        cs.add_control("isoval",
                       store.make_parameter('isoval', contours))
        cs.assign_parameter_dependence("isoval", "vis", ['contour'])
        cs.add_field("color",
                     store.make_field('color',
                                      {'white': 'rgb',
                                       'depth': 'depth',
                                       'lum': 'luminance',
                                       'RTData_1': 'lut'},),
                     "isoval", contours)

        # associate control points with parameters of the data store
        cam = pv_explorers.Camera([0, 0, 0], [0, 1, 0], 75.0, view_proxy)
        showcontour = pv_explorers.SourceProxyInLayer("contour",
                                                      sliceRep, contour)
        layertrack = explorers.Layer("vis", [showcontour])
        filt = pv_explorers.Contour("isoval", contour)

        # additional specification necessary for the color field
        colorChoice = pv_explorers.ColorList()
        colorChoice.AddSolidColor('white', [1, 1, 1])
        colorChoice.AddLUT('POINTS', 'RTData_1', 'X')
        colorChoice.AddDepth('depth')
        colorChoice.AddLuminance('lum')

        col = pv_explorers.Color("color", colorChoice, sliceRep)

        paramNames = ["phi", "theta", "vis", "isoval", "color"]
        trackList = [cam, layertrack, filt, col]
        e = pv_explorers.ImageExplorer(cs,
                                       paramNames, trackList, view_proxy)

        # run through all parameter combinations and put data into the store
        e.explore()

        # Reproduce an entry and compare vs. loaded
        # First set the parameters to reproduce
        cam.execute(store.Document({'theta': 30, 'phi': 140}))
        filt.execute(store.Document({'isoval': 100}))
        col.execute(store.Document({'color': 'RTData_1'}))
        imageslice = ch.pvRenderToArray(view_proxy)

        # Now load the corresponding entry back from disk
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find({'theta': 30, 'phi': 140,
                             'isoval': 100, 'color': 'RTData_1'}):
            docs.append(doc.data)

        # compare the two: l2 error catches gross pixel differences,
        # normalized cross-correlation catches structural ones
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        success = (l2error < 1.0) and (ncc > 0.99)

        if not success:
            print "\n l2-error = ", l2error, " ; ncc = ", ncc, "\n"

        self.assertTrue(success)
        pv.Disconnect()  # using a dedicated server state for each test
Пример #7
0
    def test_slice(self):
        """End-to-end test of a spec-A store built from a sphere slice:
        explore phi/theta/offset/color with ParaView, then manually
        reproduce one entry and compare it against the stored image."""
        pv.Connect()  # using a dedicated server state for each test
        print "\nTEST_SLICE"

        # set up some processing task
        view_proxy = pv.CreateRenderView()
        view_proxy.OrientationAxesVisibility = 0
        s = pv.Sphere()
        sliceFilt = pv.Slice(
            SliceType="Plane", Input=s, SliceOffsetValues=[0.0])
        sliceFilt.SliceType.Normal = [0, 1, 0]
        sliceRep = pv.Show(sliceFilt)

        # make or open a cinema data store to put results in
        fname = "/tmp/test_pv_slice/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.0'})
        cs.filename_pattern = "{phi}_{theta}_{offset}_{color}_slice.png"
        cs.add_parameter(
            "phi", store.make_parameter('phi', [90, 120, 140]))
        cs.add_parameter(
            "theta", store.make_parameter('theta', [-90, -30, 30, 90]))
        cs.add_parameter(
            "offset",
            store.make_parameter('offset', [-.4, -.2, 0, .2, .4]))
        cs.add_parameter(
            "color",
            store.make_parameter(
                'color', ['yellow', 'cyan', "purple"], typechoice='list'))

        colorChoice = pv_explorers.ColorList()
        colorChoice.AddSolidColor('yellow', [1, 1, 0])
        colorChoice.AddSolidColor('cyan', [0, 1, 1])
        colorChoice.AddSolidColor('purple', [1, 0, 1])

        # associate control points with parameters of the data store
        cam = pv_explorers.Camera([0, 0, 0], [0, 1, 0], 10.0, view_proxy)
        filt = pv_explorers.Slice("offset", sliceFilt)
        col = pv_explorers.Color("color", colorChoice, sliceRep)

        params = ["phi", "theta", "offset", "color"]
        e = pv_explorers.ImageExplorer(
            cs, params, [cam, filt, col], view_proxy)
        # run through all parameter combinations and put data into the store
        e.explore()

        # Reproduce an entry and compare vs. loaded

        # First set the parameters to reproduce
        cam.execute(store.Document({'theta': -30, 'phi': 120}))
        filt.execute(store.Document({'offset': -.4}))
        col.execute(store.Document({'color': 'cyan'}))
        imageslice = ch.pvRenderToArray(view_proxy)

        # Now load the corresponding entry
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find(
                {'theta': -30, 'phi': 120, 'offset': -.4, 'color': 'cyan'}):
            docs.append(doc.data)

        # compare the two: l2 error catches gross pixel differences,
        # normalized cross-correlation catches structural ones
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        self.assertTrue((l2error < 1.0) and (ncc > 0.99))
        pv.Disconnect()  # using a dedicated server state for each test
Пример #8
0
import cinema_python.images.querymaker_specb as qmsb
import json
import numpy
import PIL
import StringIO
import sys

# parse "-fn <path>" from the command line; the last occurrence wins
fname = None
for idx in range(len(sys.argv) - 1):
    if sys.argv[idx] == "-fn":
        fname = sys.argv[idx + 1]
if fname is None:
    print("Usage:", sys.argv[0], "-fn cinemastore/info.json")
    sys.exit(0)

# open the cinema store that the web endpoints below will serve from
cs = file_store.FileStore(fname)
cs.load()


@route('/speclevel')
def speclevel():
    """
    Entry point for web page to see what type of store we serve.
    """
    # major version 1 stores are served as spec "C", anything else as "A"
    return "C" if cs.get_version_major() == 1 else "A"


@route('/cameramodel')
def cameramodel():
Пример #9
0
# finish configuring the contour filter and hook it to the renderer
# (cf, r and rw are created earlier in this script)
cf.SetInputArrayToProcess(0, 0, 0, "vtkDataObject::FIELD_ASSOCIATION_POINTS",
                          "RTData")
cf.SetNumberOfContours(1)
cf.SetValue(0, 200)
cf.ComputeScalarsOn()
m = vtk.vtkPolyDataMapper()
m.SetInputConnection(cf.GetOutputPort())
a = vtk.vtkActor()
a.SetMapper(m)
r.AddActor(a)

rw.Render()
r.ResetCamera()

# Create a new Cinema store
cs = file_store.FileStore("./contour.json")
cs.filename_pattern = "{phi}/{theta}/{contour}.png"

# These are the parameters that we will have in the store
cs.add_parameter("phi", store.make_parameter('phi', range(0, 200, 40)))
cs.add_parameter("theta", store.make_parameter('theta', range(-180, 200, 40)))
cs.add_parameter("contour", store.make_parameter('contour', [160, 200]))

# These objects are responsible of change VTK parameters during exploration
con = vtk_explorers.Contour('contour', cf, 'SetValue')
cam = vtk_explorers.Camera([0, 0, 0], [0, 1, 0], 300.0, r.GetActiveCamera())

# Let's create the store: render every contour/phi/theta combination
# and save each image into the store
e = vtk_explorers.ImageExplorer(cs, ['contour', 'phi', 'theta'], [cam, con],
                                rw)
e.explore()
Пример #10
0
def show_something(fname):
    """Load a spec-B cinema store, build a query from the default value
    of every parameter relevant to the default object, composite the
    resulting rasters, and show the final image on screen with PIL."""
    cs = file_store.FileStore(fname)
    cs.load()

    defobject = cs.get_parameter('vis')['default']
    print "LOOKING AT OBJECT ", defobject

    # now find parameters that are needed for this object and choose values
    indep, field, dep = cs.parameters_for_object(defobject)

    # the object itself
    request = {}
    request['vis'] = set([defobject])

    # independent parameters (time, camera etc)
    for x in indep:
        defval = cs.get_parameter(x)['default']
        try:
            request[x] = set([defval])
        except TypeError:
            # happens with pose's which are unhashable lists
            request[x] = defval

    # dependent parameters, filter settings etc
    for x in dep:
        defval = cs.get_parameter(x)['default']
        request[x] = set([defval])

    # an array to color by
    defval = cs.get_parameter(field)['default']
    request[field] = set([defval])

    print "DEFAULT SETTINGS ARE: "
    print "{"
    for k in list(request):
        print " '{}': {},".format(k, request[k])
    print "}"

    # now hand that over to the maker so it can make up a series
    # of queries that return the required rasters that go into
    # the object we've selected
    qm = qmsb.QueryMaker_SpecB()
    qm.setStore(cs)
    res = qm.translateQuery(request)

    print "RASTERS ARE KEPT IN", res

    # now setup the deferred renderer
    compo = compositor.Compositor_SpecB()
    compo.enableLighting(True)
    compo.set_background_color([0, 0, 0])

    # make up a color transfer function (a rainbow lookup table)
    cmaps = []
    luts.add_rainbow(cmaps)
    lut = cmaps[0]
    glut = luts.LookupTable()
    glut.name = "Rainbow"
    glut.colorSpace = "RGB"
    glut.ingest(lut['RGBPoints'])
    lstruct = {defobject: {'colorLut': glut, 'geometryColor': [255, 255, 255]}}
    compo.setColorDefinitions(lstruct)
    print "COLOR TRANSFER FUNCTION TO USE", lstruct

    # ask it to render
    image = compo.render(res)

    # now we should have an RGB image in numpy, show it on screen
    im = PIL.Image.fromarray(image)
    im.show()
    print "If you see an empty window, most likely default values are off"
Пример #11
0
    # try to open up a store
    with open(sys.argv[1], mode="rb") as file:
        try:
            info_json = json.load(file)
        except IOError as e:
            # NOTE(review): json.load raises ValueError on malformed
            # JSON, which this handler does not catch — confirm whether
            # parse errors should also exit gracefully here.
            print e
            sys.exit(1)

    # a 'SFS' store_type selects the VTI-backed store; anything else
    # (including missing metadata) falls back to the generic FileStore
    try:
        if info_json["metadata"]["store_type"] == "SFS":
            cs = vti_store.VTIFileStore(sys.argv[1])
        else:
            raise TypeError

    except(TypeError, KeyError):
        cs = file_store.FileStore(sys.argv[1])

    cs.load()

    class validateStore(explorers.Explorer):
        """Explorer that visits store entries and records whether each
        referenced image file exists and is a readable image."""

        def __init__(self, *args):
            super(validateStore, self).__init__(*args)
            # flips to False on the first missing/unreadable image
            self.isValid = True
            self.raster_wrangler = raster_wrangler.RasterWrangler()

        def execute(self, desc):
            # check the image file backing this descriptor; an IOError
            # from the wrangler means the file is missing or unreadable
            try:
                self.raster_wrangler.assertvalidimage(cs._get_filename(desc))
            except IOError as e:
                print e
                self.isValid = False
Пример #12
0
def make_cinema_store(proxies,
                      ocsfname,
                      view,
                      forcetime=False,
                      userDefined=None,
                      specLevel="A",
                      camType='phi-theta',
                      arrayRanges=None,
                      extension=".png",
                      disableValues=False):
    """
    Takes in the pipeline, structured as a tree, and makes a cinema store
    definition containing all the parameters we will vary.

    proxies -- list of pipeline proxy records with 'name', 'id', 'parent'
    ocsfname -- filename of the cinema store to create or extend
    view -- the render view (kept for interface compatibility)
    forcetime -- if truthy, use this as an explicit time value
    userDefined -- optional overrides for 'phi', 'theta', 'roll' samples
    specLevel -- "A" (parametric-image-stack) or "B"
        (composite-image-stack)
    camType -- 'static', 'phi-theta', or a pose-matrix camera model
    arrayRanges -- per-array value ranges forwarded to color setup
    extension -- file extension for the stored rasters
    disableValues -- forwarded to add_control_and_colors()
    """
    # None sentinels instead of mutable default arguments ({} would be
    # shared across calls); behavior is unchanged for all callers.
    if userDefined is None:
        userDefined = {}
    if arrayRanges is None:
        arrayRanges = {}

    phis = userDefined.get('phi', [0, 180, 360])
    thetas = userDefined.get('theta', [0, 90, 180])
    rolls = userDefined.get('roll', [0, 45, 90, 135, 180, 225, 270, 315])

    if camType == 'static' or camType == 'phi-theta':
        rolls = [0]

    tvalues = []
    cs = file_store.FileStore(ocsfname)

    try:
        # if the store already exists, carry its time values forward
        cs.load()
        tprop = cs.get_parameter('time')
        tvalues = tprop['values']

        # start with clean slate, other than time
        cs = file_store.FileStore(ocsfname)
    except (IOError, KeyError):
        pass

    eye_values = cs.metadata.get('camera_eye', [])
    at_values = cs.metadata.get('camera_at', [])
    up_values = cs.metadata.get('camera_up', [])
    nearfar_values = cs.metadata.get('camera_nearfar', [])
    viewangle_values = cs.metadata.get('camera_angle', [])

    cs.add_metadata({'store_type': 'FS'})
    if specLevel == "A":
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'version': '0.0'})
    if specLevel == "B":
        cs.add_metadata({'type': 'composite-image-stack'})
        cs.add_metadata({'version': '0.2'})
    pipeline = get_pipeline()
    cs.add_metadata({'pipeline': pipeline})
    cs.add_metadata({'camera_model': camType})
    cs.add_metadata({'camera_eye': eye_values})
    cs.add_metadata({'camera_at': at_values})
    cs.add_metadata({'camera_up': up_values})
    cs.add_metadata({'camera_nearfar': nearfar_values})
    cs.add_metadata({'camera_angle': viewangle_values})

    vis = [proxy['name'] for proxy in proxies]
    if specLevel != "A":
        cs.add_layer("vis", store.make_parameter('vis', vis))

    pnames = []
    for proxy in proxies:
        proxy_name = proxy['name']
        ret = add_filter_value(proxy_name, cs, userDefined)
        if specLevel == "A" and ret:
            pnames.append(proxy_name)
        # gather the transitive closure of this proxy's descendants so
        # spec-B layer visibility can be tied to the whole subtree
        dependency_set = set([proxy['id']])
        repeat = True
        while repeat:
            repeat = False
            deps = set(proxy['id'] for proxy in proxies
                       if proxy['parent'] in dependency_set)
            if deps - dependency_set:
                dependency_set = dependency_set.union(deps)
                repeat = True
        dependency_list = [
            proxy['name'] for proxy in proxies if proxy['id'] in dependency_set
        ]
        if specLevel != "A":
            cs.assign_parameter_dependence(proxy_name, 'vis', dependency_list)
            add_control_and_colors(proxy_name, cs, userDefined, arrayRanges,
                                   disableValues)
            cs.assign_parameter_dependence("color" + proxy_name, 'vis',
                                           [proxy_name])

    fnp = ""
    if forcetime:
        # time specified, use it, being careful to append if already a list
        tvalues.append(forcetime)
        tprop = store.make_parameter('time', tvalues)
        cs.add_parameter('time', tprop)
        fnp = "{time}"
    else:
        # time not specified, try and make them automatically
        times = paraview.simple.GetAnimationScene().TimeKeeper.TimestepValues
        if times:
            prettytimes = [float_limiter(t) for t in times]
            cs.add_parameter("time", store.make_parameter('time', prettytimes))
            fnp = "{time}"

    if camType == "phi-theta":
        # '//' keeps the midpoint index an int on both python 2 and 3
        # ('/' yields a float index, a TypeError, on python 3)
        bestp = phis[len(phis) // 2]
        bestt = thetas[len(thetas) // 2]
        cs.add_parameter("phi", store.make_parameter('phi',
                                                     phis,
                                                     default=bestp))
        cs.add_parameter("theta",
                         store.make_parameter('theta', thetas, default=bestt))
        if fnp == "":
            fnp = "{phi}/{theta}"
        else:
            fnp = fnp + "/{phi}/{theta}"

    elif camType != "static":
        # for AER and YPR, make up a set of view matrices corresponding
        # to the requested number of samples in each axis
        def MatrixMul(mtx_a, mtx_b):
            # list() so the transpose can be re-iterated for every row
            # of mtx_a (zip() is a one-shot iterator on python 3)
            tpos_b = list(zip(*mtx_b))
            rtn = [[sum(ea * eb for ea, eb in zip(a, b)) for b in tpos_b]
                   for a in mtx_a]
            return rtn

        poses = []  # holds phi, theta and roll angle tuples
        matrices = []  # holds corresponding transform matrices

        # here the first entry of each axis list is reinterpreted as a
        # sample count rather than an angle
        v = rolls[0]
        rolls = []
        if v < 2:
            rolls.append(0)
        else:
            j = -180
            while j < 180:
                rolls.append(j)
                j = j + 360 / v

        v = thetas[0]
        thetas = []
        if v < 2:
            thetas.append(0)
        else:
            j = -90
            while j <= 90:
                thetas.append(j)
                j = j + 180 / v

        for r in rolls:
            for t in thetas:
                v = phis[0]
                if v < 2:
                    poses.append((0, t, r))
                else:
                    # sample longitude less frequently toward the pole
                    increment_Scale = math.cos(math.pi * t / 180.0)
                    if increment_Scale == 0:
                        increment_Scale = 1
                    p = -180
                    while p < 180:
                        poses.append((p, t, r))
                        p = p + 360 / (v * increment_Scale)

        # the default is the pose closest to (0, 0, 0).  Start from an
        # infinite distance so the very first pose is a real candidate;
        # the original seeded the search with poses[0]'s own distance
        # and 'default_mat = 0', which leaked that bogus integer default
        # whenever poses[0] happened to be the closest pose.
        dist = float('inf')
        default_mat = None
        for i in poses:
            p, t, r = i
            cP = math.cos(-math.pi * (p / 180.0))  # phi is right to left
            sP = math.sin(-math.pi * (p / 180.0))
            cT = math.cos(-math.pi * (t / 180.0))  # theta is up down
            sT = math.sin(-math.pi * (t / 180.0))
            cR = math.cos(-math.pi *
                          (r / 180.0))  # roll is around gaze direction
            sR = math.sin(-math.pi * (r / 180.0))
            rY = [[cP, 0, sP], [0, 1, 0], [-sP, 0, cP]]  # x,z interchange
            rX = [[1, 0, 0], [0, cT, -sT], [0, sT, cT]]  # y,z interchange
            rZ = [[cR, -sR, 0], [sR, cR, 0], [0, 0, 1]]  # x,y interchange
            m1 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
            m2 = MatrixMul(m1, rY)
            m3 = MatrixMul(m2, rX)
            m4 = MatrixMul(m3, rZ)
            matrices.append(m4)
            newdist = math.sqrt(p * p + t * t + r * r)
            if newdist < dist:
                default_mat = m4
                dist = newdist

        cs.add_parameter(
            "pose", store.make_parameter('pose', matrices,
                                         default=default_mat))
        fnp = fnp + "{pose}"

    if specLevel == "A":
        for pname in pnames:
            if fnp == "":
                fnp = "{" + pname + "}"
            else:
                fnp = fnp + "/{" + pname + "}"

    if fnp == "":
        fnp = "image"

    cs.filename_pattern = fnp + extension
    return cs
Пример #13
0
    def test_basic(self):
        """End-to-end test of a spec-A store driven by plain VTK:
        explore every contour/phi/theta combination, then manually
        reproduce the first entry and compare it with what was stored."""
        print "\nTEST BASIC"

        # a VTK program
        rw = vtk.vtkRenderWindow()
        rw.SetSize(1024, 768)
        r = vtk.vtkRenderer()
        rw.AddRenderer(r)

        s = vtk.vtkRTAnalyticSource()
        s.SetWholeExtent(-25, 25, -25, 25, -25, 25)

        cf = vtk.vtkContourFilter()
        cf.SetInputConnection(s.GetOutputPort())
        cf.SetInputArrayToProcess(0, 0, 0,
                                  "vtkDataObject::FIELD_ASSOCIATION_POINTS",
                                  "RTData")
        cf.SetNumberOfContours(1)
        cf.SetValue(0, 200)
        cf.ComputeScalarsOn()
        m = vtk.vtkPolyDataMapper()
        m.SetInputConnection(cf.GetOutputPort())
        a = vtk.vtkActor()
        a.SetMapper(m)
        r.AddActor(a)

        rw.Render()
        r.ResetCamera()

        # Create a Cinema store
        fname = "/tmp/test_vtk_basic/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.0'})
        cs.filename_pattern = "{phi}_{theta}_{contour}.png"

        # These are the parameters that will vary in the store
        cs.add_parameter("phi", store.make_parameter('phi', range(0, 200, 80)))
        cs.add_parameter("theta",
                         store.make_parameter('theta', range(-180, 200, 80)))
        cs.add_parameter("contour",
                         store.make_parameter('contour', [160, 200]))

        # These objects respond to changes in parameters during exploration
        con = vtk_explorers.Contour('contour', cf, 'SetValue')
        cam = vtk_explorers.Camera([0, 0, 0], [0, 1, 0], 150.0,
                                   r.GetActiveCamera())  # phi,theta implied

        # Runs through all the combinations and saves each result
        e = vtk_explorers.ImageExplorer(cs, ['contour', 'phi', 'theta'],
                                        [cam, con], rw)
        # Go.
        e.explore()

        # Manually reproduce the first entry in the store
        # First set the camera to {'theta': -180, 'phi': 0}
        doc = store.Document({'theta': -180, 'phi': 0, 'contour': 160})
        con.execute(doc)
        cam.execute(doc)
        imageslice = ch.vtkRenderToArray(rw)

        # Load the first entry from the store
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find({'theta': -180, 'phi': 0, 'contour': 160}):
            docs.append(doc.data)

        # compare the two: l2 error catches gross pixel differences,
        # normalized cross-correlation catches structural ones
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        success = (l2error < 1.0) and (ncc > 0.99)

        if not success:
            print "\n l2error: ", l2error, " ; ncc = ", ncc, "\n"

        self.assertTrue(success)
Пример #14
0
    def test_composite(self):
        print "\nTEST VTK LAYERS"

        # set up some processing task
        s = vtk.vtkRTAnalyticSource()
        s.SetWholeExtent(-50, 50, -50, 50, -50, 50)

        rw = vtk.vtkRenderWindow()
        r = vtk.vtkRenderer()
        rw.AddRenderer(r)

        ac1 = vtk.vtkArrayCalculator()
        ac1.SetInputConnection(s.GetOutputPort())
        ac1.SetAttributeModeToUsePointData()
        ac1.AddCoordinateVectorVariable("coords", 0, 1, 2)
        ac1.SetResultArrayName("Coords")
        ac1.SetFunction("coords")

        ac2 = vtk.vtkArrayCalculator()
        ac2.SetInputConnection(ac1.GetOutputPort())
        ac2.SetAttributeModeToUsePointData()
        ac2.AddCoordinateVectorVariable("coords", 0, 1, 2)
        ac2.SetResultArrayName("radii")
        ac2.SetFunction("mag(coords)")

        cf = vtk.vtkContourFilter()
        cf.SetInputConnection(ac2.GetOutputPort())
        cf.SetInputArrayToProcess(0, 0, 0,
                                  "vtkDataObject::FIELD_ASSOCIATION_POINTS",
                                  "radii")
        cf.SetNumberOfContours(1)
        cf.ComputeScalarsOff()
        cf.SetValue(0, 40)

        m = vtk.vtkPolyDataMapper()
        m.SetInputConnection(cf.GetOutputPort())
        a = vtk.vtkActor()
        a.SetMapper(m)
        r.AddActor(a)

        rw.Render()
        r.ResetCamera()

        # make a cinema data store by defining the things that vary
        fname = "/tmp/test_vtk_composite/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'composite-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.1'})
        cs.filename_pattern = "{phi}/{theta}/{vis}.png"
        cs.add_parameter("phi", store.make_parameter('phi', range(0, 200, 50)))
        cs.add_parameter("theta",
                         store.make_parameter('theta', range(-180, 200, 45)))
        cs.add_layer("vis", store.make_parameter("vis", ['contour']))
        contours = [15, 30, 55, 70, 85]
        cs.add_control("isoval", store.make_parameter('isoval', contours))
        cs.assign_parameter_dependence("isoval", "vis", ['contour'])
        cs.add_field(
            "color",
            store.make_field(
                'color', {
                    'white': 'rgb',
                    'red': 'rgb',
                    'depth': 'depth',
                    'lum': 'luminance',
                    'RTData': 'value',
                    'point_X': 'value',
                    'point_Y': 'value',
                    'point_Z': 'value'
                }), "isoval", contours)

        # associate control points with parameters of the data store
        cam = vtk_explorers.Camera([0, 0, 0], [0, 1, 0], 300.0,
                                   r.GetActiveCamera())
        showcontour = vtk_explorers.ActorInLayer('contour', a)
        layertrack = explorers.Layer('vis', [showcontour])
        controltrack = vtk_explorers.Contour('isoval', cf, 'SetValue')
        # additional specification necessary for the color field
        colorChoice = vtk_explorers.ColorList()
        colorChoice.AddSolidColor('white', [1, 1, 1])
        colorChoice.AddSolidColor('red', [1, 0, 0])
        colorChoice.AddDepth('depth')
        colorChoice.AddLuminance('lum')
        colorChoice.AddValueRender('RTData',
                                   vtk.VTK_SCALAR_MODE_USE_POINT_FIELD_DATA,
                                   'RTData', 0, [0, 250])
        colorChoice.AddValueRender('point_X',
                                   vtk.VTK_SCALAR_MODE_USE_POINT_FIELD_DATA,
                                   'Coords', 0, [-50, 50])
        colorChoice.AddValueRender('point_Y',
                                   vtk.VTK_SCALAR_MODE_USE_POINT_FIELD_DATA,
                                   'Coords', 1, [-50, 50])
        colorChoice.AddValueRender('point_Z',
                                   vtk.VTK_SCALAR_MODE_USE_POINT_FIELD_DATA,
                                   'Coords', 2, [-50, 50])
        colortrack = vtk_explorers.Color('color', colorChoice, a)

        paramNames = ['phi', 'theta', 'vis', 'isoval', 'color']
        trackList = [cam, layertrack, controltrack, colortrack]
        e = vtk_explorers.ImageExplorer(cs, paramNames, trackList, rw)
        colortrack.imageExplorer = e
        e.explore()

        cs.save()
Пример #15
0
    def test_contour(self):
        print "\nTEST CONTOUR"
        # set up some processing task
        s = vtk.vtkRTAnalyticSource()
        s.SetWholeExtent(-50, 50, -50, 50, -50, 50)
        cf = vtk.vtkContourFilter()
        cf.SetInputConnection(s.GetOutputPort())
        cf.SetInputArrayToProcess(0, 0, 0,
                                  "vtkDataObject::FIELD_ASSOCIATION_POINTS",
                                  "RTData")
        cf.SetNumberOfContours(1)
        cf.SetValue(0, 100)

        m = vtk.vtkPolyDataMapper()
        m.SetInputConnection(cf.GetOutputPort())

        rw = vtk.vtkRenderWindow()
        r = vtk.vtkRenderer()
        rw.AddRenderer(r)

        a = vtk.vtkActor()
        a.SetMapper(m)
        r.AddActor(a)

        rw.Render()
        r.ResetCamera()

        # make or open a cinema data store to put results in

        fname = "/tmp/test_vtk_contour/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.0'})
        cs.filename_pattern = "{contour}_{color}.png"
        cs.add_parameter(
            "contour",
            store.make_parameter(
                'contour', [0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250]))
        cs.add_parameter("color",
                         store.make_parameter('color', ['white', 'red']))

        colorChoice = vtk_explorers.ColorList()
        colorChoice.AddSolidColor('white', [1, 1, 1])
        colorChoice.AddSolidColor('red', [1, 0, 0])

        # associate control points with parameters of the data store
        g = vtk_explorers.Contour('contour', cf, 'SetValue')
        c = vtk_explorers.Color('color', colorChoice, a)
        e = vtk_explorers.ImageExplorer(cs, ['contour', 'color'], [g, c], rw)

        # run through all parameter combinations and put data into the store
        e.explore()

        # Now let's reproduce an entry in the store

        # First set the parameters to {'contour': 75} and {'color': 'white'}
        g.execute(store.Document({'contour': 75}))
        c.execute(store.Document({'color': 'white'}))
        imageslice = ch.vtkRenderToArray(rw)

        # Now load the same entry from the store
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find({'contour': 75, 'color': 'white'}):
            docs.append(doc.data)

        # compare the two
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        self.assertTrue((l2error < 1.0) and (ncc > 0.99))
Пример #16
0
    def test_clip(self):
        print "\nTEST CLIP"
        # set up some processing task
        s = vtk.vtkSphereSource()
        plane = vtk.vtkPlane()
        plane.SetOrigin(0, 0, 0)
        plane.SetNormal(-1, -1, 0)
        clip = vtk.vtkClipPolyData()
        clip.SetInputConnection(s.GetOutputPort())
        clip.SetClipFunction(plane)
        clip.GenerateClipScalarsOn()
        clip.GenerateClippedOutputOn()
        clip.SetValue(0)
        m = vtk.vtkPolyDataMapper()
        m.SetInputConnection(clip.GetOutputPort())
        rw = vtk.vtkRenderWindow()
        rw.SetSize(300, 200)
        r = vtk.vtkRenderer()
        rw.AddRenderer(r)
        a = vtk.vtkActor()
        a.SetMapper(m)
        r.AddActor(a)

        # make or open a cinema data store to put results in
        fname = "/tmp/test_vtk_clip/info.json"
        cs = file_store.FileStore(fname)
        cs.add_metadata({'type': 'parametric-image-stack'})
        cs.add_metadata({'store_type': 'FS'})
        cs.add_metadata({'version': '0.0'})
        cs.filename_pattern = "{phi}_{theta}_{offset}_slice.png"
        cs.add_parameter("phi", store.make_parameter('phi', range(0, 200, 80)))
        cs.add_parameter("theta",
                         store.make_parameter('theta', range(-180, 200, 80)))
        cs.add_parameter("offset", store.make_parameter('offset', [0, .2, .4]))

        # associate control points with parameters of the data store
        cam = vtk_explorers.Camera([0, 0, 0], [0, 1, 0], 3.0,
                                   r.GetActiveCamera())  # phi,theta implied
        g = vtk_explorers.Clip('offset', clip)
        e = vtk_explorers.ImageExplorer(cs, ['offset', 'phi', 'theta'],
                                        [cam, g], rw)

        # run through all parameter combinations and put data into the store
        rw.Render()
        e.explore()

        # Now let's reproduce an entry in the store
        doc = store.Document({'theta': -100, 'phi': 80, 'offset': .2})
        g.execute(doc)
        cam.execute(doc)
        imageslice = ch.vtkRenderToArray(rw)

        # Now load the same entry from the store
        cs2 = file_store.FileStore(fname)
        cs2.load()
        docs = []
        for doc in cs2.find({'theta': -100, 'phi': 80, 'offset': .2}):
            docs.append(doc.data)

        # compare the two
        l2error = ch.compare_l2(imageslice, docs[0])
        ncc = ch.compare_ncc(imageslice, docs[0])
        self.assertTrue((l2error < 1.0) and (ncc > 0.99))