def traits_view(self):
    """Build the TraitsUI view for the replication-slave control panel.

    Layout: an 'Authentication' group (host/user/password), a row of
    action buttons plus a skip counter, a tabular slave-status display,
    and a read-only error area.
    """
    auth_grp = VGroup(Item('host'),
                      Item('user'),
                      Item('password'),
                      show_border=True,
                      label='Authentication')
    v = View(VGroup(auth_grp,
                    HGroup(icon_button_editor('check_status_button',
                                              'database_go_fatcow',
                                              tooltip='Check slave status. Equivalent to "Show Slave Status"'),
                           icon_button_editor('start_button', 'start',
                                              tooltip='Start replication. Equivalent to "Start Slave"',
                                              enabled_when='not running'),
                           icon_button_editor('stop_button', 'stop',
                                              tooltip='Stop replication. Equivalent to "Stop Slave"',
                                              enabled_when='running'),
                           icon_button_editor('skip_button', 'skip_occurrence',
                                              tooltip='Set global skip counter.\n'
                                                      'Equivalent to "Stop Slave;'
                                                      'Set GLOBAL SQL_SKIP_COUNTER=N;Start Slave;"\n'
                                                      'where N is the number of SQL statements to skip'),
                           UItem('skip_count',
                                 tooltip='Number of SQL statements to skip')),
                    UItem('status_items',
                          editor=TabularEditor(adapter=StatusItemsAdapter(),
                                               editable=False)),
                    VGroup(UReadonly('last_error',
                                     height=200,
                                     style_sheet='color: red; font-weight: bold'),
                           label='Error',
                           show_border=True),
                    label='Slave',
                    show_border=True))
    # width is applied after construction rather than passed to View()
    v.width = 500
    return v
def traits_view(self):
    """Build a view containing a single custom-styled image item.

    The window title and persistence id are applied only when the
    corresponding attributes are set.
    """
    imgrp = Item('_image', show_label=False,
                 editor=ImageEditor(),
                 width=self.width,
                 height=self.height,
                 style='custom')
    v = View(
        # Item('thresholdv'),
        imgrp,
        handler=ImageHandler,
        x=0.55, y=35,
        width=self.width,
        # +22 presumably compensates for window chrome -- TODO confirm
        height=self.height + 22,
        resizable=True)
    if self.title is not None:
        v.title = self.title
    if self.view_identifier is not None:
        v.id = self.view_identifier
    return v
def edit_view(self):
    """Return a View for editing the magnet-scan parameters."""
    scan_group = Group(
        Item('reference_detector', editor=EnumEditor(name='detectors')),
        Item('integration_time', label='Integration (s)'),
        label='Magnet Scan',
        show_border=True)
    view = View(scan_group)
    view.title = self.title
    view.buttons = ['OK', 'Cancel']
    return view
def _view_factory(self, grps):
    """Assemble a View around *grps*, honoring the configured geometry."""
    view = View(grps,
                resizable=self.resizable,
                x=self.window_x,
                y=self.window_y,
                title=self.title,
                handler=self.handler_klass)
    # only apply explicit sizes; falsy means "let the toolkit decide"
    for attr, value in (('width', self.window_width),
                        ('height', self.window_height)):
        if value:
            setattr(view, attr, value)
    return view
def edit_view(self):
    """Return a View for editing the magnet-scan parameters."""
    items = [
        Item("reference_detector", editor=EnumEditor(name="detectors")),
        Item("integration_time", label="Integration (s)"),
    ]
    view = View(Group(*items, label="Magnet Scan", show_border=True))
    view.title = self.title
    view.buttons = ["OK", "Cancel"]
    return view
def _view_factory(self, grps):
    """Build a View for *grps* using this pane's window geometry settings."""
    kw = dict(resizable=self.resizable,
              x=self.window_x,
              y=self.window_y,
              title=self.title,
              handler=self.handler_klass)
    view = View(grps, **kw)
    # zero/unset dimensions are left to the toolkit defaults
    if self.window_width:
        view.width = self.window_width
    if self.window_height:
        view.height = self.window_height
    return view
def _view_factory(self):
    """Build the record-selector view: a db label, the records table,
    the query editors (shown only for 'normal'/'panel' styles) and the
    action buttons.

    In 'single' style the view gets OK/Cancel buttons and the table
    multi-select is disabled.
    """
    editor = myTabularEditor(adapter=self.tabular_adapter(),
                             dclicked='object.dclicked',
                             selected='object.selected',
                             selected_row='object.selected_row',
                             update='update',
                             # auto_update=True,
                             column_clicked='object.column_clicked',
                             editable=False,
                             multi_select=not self.style == 'single',
                             )
    button_grp = self._get_button_grp()
    qgrp = Item('queries', show_label=False,
                style='custom',
                height=0.25,
                editor=ListEditor(mutable=False,
                                  style='custom',
                                  editor=InstanceEditor()),
                defined_when='style in ["normal","panel"]')
    v = View(
        # HGroup(Item('multi_select_graph',
        #             defined_when='multi_graphable'
        #             ),
        #        spring, Item('limit')),
        VGroup(
            CustomLabel('dbstring', color='red'),
            Item('records', style='custom',
                 editor=editor,
                 show_label=False,
                 height=0.75,
                 width=600,
                 ),
            qgrp,
            button_grp,
        ),
        resizable=True,
        handler=SelectorHandler
    )
    if self.style == 'single':
        v.buttons = ['OK', 'Cancel']
    return v
def _view_factory(self):
    """Build the record-selector view: an id label, then the records
    table above the query list in a vertical splitter, then the action
    buttons.  'single' style adds OK/Cancel and disables multi-select.
    """
    editor = myTabularEditor(
        adapter=self.tabular_adapter,
        dclicked="object.dclicked",
        selected="object.selected",
        selected_row="object.selected_row",
        update="update",
        scroll_to_row="scroll_to_row",
        # auto_update=True,
        column_clicked="object.column_clicked",
        editable=False,
        multi_select=not self.style == "single",
    )
    button_grp = self._get_button_grp()
    v = View(
        VGroup(
            CustomLabel("id_string", color="red"),
            VSplit(
                Item(
                    "records",
                    style="custom",
                    editor=editor,
                    show_label=False,
                    height=0.75,
                    # width=600,
                ),
                Item(
                    "queries",
                    show_label=False,
                    style="custom",
                    height=0.25,
                    editor=ListEditor(mutable=False, style="custom",
                                      editor=InstanceEditor()),
                    # the query list is only shown for these styles
                    defined_when='style in ["normal","panel"]',
                ),
            ),
            button_grp,
        ),
        resizable=True,
        handler=SelectorHandler,
    )
    if self.style == "single":
        v.buttons = ["OK", "Cancel"]
    return v
def configure(self):
    """Pops up the GUI control widget."""
    if self.ui is not None:
        # A UI already exists -- just try to bring it to the front.
        # Raise() is wx-specific, hence the AttributeError guard.
        try:
            self.ui.control.Raise()
        except AttributeError:
            pass
        return
    self._show_glyphs()
    view = View(Group(Item(name='light_mode'),
                      Item(name='number_of_lights'),
                      label='LightManager'),
                Group(Item(name='lights', style='custom'),
                      label='Lights',
                      selected=True,
                      show_labels=False),
                resizable=True,
                buttons=['OK'],
                handler=CloseHandler())
    self.ui = view.ui(self)
def traits_view(self):
    """Return the window view wrapping the plot container."""
    plot_item = Item('plotcontainer',
                     style='custom',
                     show_label=False,
                     editor=ComponentEditor(size=(self.width, self.height)))
    view = View(plot_item,
                resizable=self.resizable,
                title=self.window_title,
                width=self.window_width,
                height=self.window_height,
                x=self.window_x,
                y=self.window_y,
                handler=self.handler_klass)
    # a persistence id is only set when one was configured
    if self.view_identifier:
        view.id = self.view_identifier
    return view
def _view_factory(self):
    """Build the record-selector view: an id label above the records
    table.  The query-list group and the button group are currently
    commented out of the layout."""
    editor = myTabularEditor(adapter=self.tabular_adapter,
                             dclicked='object.dclicked',
                             selected='object.selected',
                             selected_row='object.selected_row',
                             update='update',
                             scroll_to_row='scroll_to_row',
                             # auto_update=True,
                             column_clicked='object.column_clicked',
                             editable=False,
                             multi_select=not self.style == 'single')
    # NOTE(review): button_grp is built but unused since the button group
    # was commented out of the layout below -- confirm intentional.
    button_grp = self._get_button_grp()
    v = View(
        VGroup(
            CustomLabel('id_string', color='red'),
            VSplit(
                Item('records', style='custom',
                     editor=editor,
                     show_label=False,
                     height=0.75,
                     # width=600,
                     )),
            # Item('queries', show_label=False,
            #      style='custom',
            #      height=0.25,
            #      editor=ListEditor(mutable=False,
            #                        style='custom',
            #                        editor=InstanceEditor()),
            #      defined_when='style in ["normal","panel"]')),
            # button_grp
        ),
        # buttons are set unconditionally here and again (redundantly)
        # for the 'single' style below
        buttons=['OK', 'Cancel'],
        resizable=True,
        handler=SelectorHandler)
    if self.style == 'single':
        v.buttons = ['OK', 'Cancel']
    return v
def default_traits_view(self):
    """Build the main visualization window: a Mayavi scene above a row
    of date/selection and playback controls."""
    view = View(
        Item('scene', editor=SceneEditor(scene_class=MayaviScene),
             height=600, width=600, show_label=False),
        HGroup(
            Item("current_time", label="Date"),
            Item(" "),  # spacer
            Item("num_of_shown_days", label="Show"),
            Item("_selected_source_name", label="Selection"),
            Item("_selected_event_name",
                 editor=CheckListEditor(name='_selected_events_list'),
                 show_label=False),
            Item("_back1", show_label=False),
            Item("Relative_Start_Day", show_label=False,
                 editor=RangeEditor(mode="slider",
                                    low_name="_low_start_day_number",
                                    high_name="_high_start_day_number"),
                 tooltip="Shows total number of days in data set and the currently selected day",
                 springy=True, full_size=True),
            Item("_forward1", show_label=False),
            Item("move_step", show_label=False),
            Item("play_button", label='Play')
        ),
        title="Visualization of Events",
        resizable=True
    )
    # redundant with resizable=True passed above; kept as in original
    view.resizable = True
    return view
def __init__(self, renwin=None, **traits):
    """Initializes the object.

    Parameters
    ----------
    - renwin: `Scene` instance.  Defaults to None.

      This may be passed in addition to the renwins attribute
      which can be a list of scenes.
    """
    super(PipelineBrowser, self).__init__(**traits)
    self.ui = None
    self.view = None
    if renwin:
        self.renwins.append(renwin)
    # build the tree for the current root object before creating the editor
    self._root_object_changed(self.root_object)
    menu = Menu(Action(name='Refresh', action='editor.update_editor'),
                Action(name='Expand all', action='editor.expand_all'))
    self.menu = menu
    nodes = self.tree_generator.get_nodes(menu)
    self.tree_editor = TreeEditor(nodes=nodes,
                                  editable=False,
                                  orientation='vertical',
                                  hide_root=True,
                                  on_dclick=self._on_dclick)
    self.view = View(Group(Item(name='_root',
                                editor=self.tree_editor,
                                resizable=True),
                           show_labels=False,
                           show_border=False,
                           orientation='vertical'),
                     title='Pipeline browser',
                     help=False,
                     resizable=True,
                     undo=False,
                     revert=False,
                     width=.3,
                     height=.3)
class BCSlice(HasStrictTraits):
    '''
    Implements the IBC functionality for a constrained dof.
    '''
    implements(IBCond)

    name = Str('<unnamed>')

    # variable to constrain: displacement, force, strain or stress
    var = Enum('u', 'f', 'eps', 'sig')

    # slice = Instance(FEGridIdxSlice)
    slice = Trait()

    # optional slice whose DOFs the constrained DOFs are linked to
    link_slice = Instance(FEGridIdxSlice)

    # expanded list of single-DOF conditions generated by setup()
    bcdof_list = List(BCDof)

    # List of dofs that determine the value of the current dof
    #
    # If this list is empty, then the current dof is
    # prescribed. Otherwise, the dof value is given by the
    # linear combination of DOFs in the list (see the example below)
    #
    # link_dofs = List( Int )

    # Coefficients of the linear combination of DOFs specified in the
    # above list.
    link_coeffs = List(Float)

    # spatial dimensions (DOF directions) the condition applies to
    dims = List(Int)

    _link_dims = List(Int)

    # dimensions used on the linked slice; falls back to `dims` when unset
    link_dims = Property(List(Int))

    def _get_link_dims(self):
        # BUG FIX: the original tested `self._link_dims == None`, which is
        # always False for a List trait (its default is []), so the
        # documented fallback to `dims` was dead code.  Test emptiness.
        if not self._link_dims:
            return self.dims
        else:
            return self._link_dims

    def _set_link_dims(self, link_dims):
        self._link_dims = link_dims

    value = Float

    # integrate natural BCs projected onto global axes or over the local
    # (parametric) surface coordinates
    integ_domain = Enum(['global', 'local'])

    # TODO - adapt the definition
    time_function = Callable
    space_function = Callable

    def _space_function_default(self):
        return lambda x: 1.0

    def _time_function_default(self):
        return lambda t: t

    def is_essential(self):
        return self.var == 'u'

    def is_linked(self):
        return self.link_dofs != []

    def is_constrained(self):
        '''
        Return true if a DOF is either explicitly prescribed or it depends
        on other DOFS.
        '''
        return self.is_essential() or self.is_linked()

    def is_natural(self):
        return self.var == 'f' or self.var == 'eps' or self.var == 'sig'

    def setup(self, sctx):
        '''
        Locate the spatial context.

        Expands this slice condition into individual BCDof objects, one
        per constrained DOF, optionally linked to DOFs of `link_slice`.
        '''
        if self.link_slice is None:
            for el, el_dofs, el_dofs_X in zip(self.slice.elems,
                                              self.slice.dofs,
                                              self.slice.dof_X):
                for node_dofs, dof_X in zip(el_dofs, el_dofs_X):
                    for dof in node_dofs[self.dims]:
                        self.bcdof_list.append(
                            BCDof(var=self.var,
                                  dof=dof,
                                  value=self.value,
                                  link_coeffs=self.link_coeffs,
                                  time_function=self.time_function))
        else:
            # apply the linked slice
            n_link_nodes = len(self.link_slice.dof_nodes.flatten())
            link_dofs = self.link_dofs[0, 0, self.link_dims]
            if n_link_nodes == 1:
                # all constrained DOFs link to the single link node
                link_coeffs = self.link_coeffs
                for el, el_dofs, el_dofs_X in \
                        zip(self.slice.elems, self.slice.dofs,
                            self.slice.dof_X):
                    for node_dofs, dof_X in zip(el_dofs, el_dofs_X):
                        for dof, link_dof, link_coeff in \
                                zip(node_dofs[self.dims], link_dofs,
                                    link_coeffs):
                            self.bcdof_list.append(
                                BCDof(var=self.var,
                                      dof=dof,
                                      link_dofs=[link_dof],
                                      value=self.value,
                                      link_coeffs=[link_coeff],
                                      time_function=self.time_function))
            else:
                # node-by-node linking; the link slice is compatible with
                # the bc slice
                for el, el_dofs, el_dofs_X, el_link, el_link_dofs, el_link_dofs_X in \
                        zip(self.slice.elems, self.slice.dofs,
                            self.slice.dof_X,
                            self.link_slice.elems, self.link_slice.dofs,
                            self.link_slice.dof_X):
                    for node_dofs, dof_X, node_link_dofs, link_dof_X in \
                            zip(el_dofs, el_dofs_X, el_link_dofs,
                                el_link_dofs_X):
                        for dof, link_dof, link_coeff in \
                                zip(node_dofs[self.dims],
                                    node_link_dofs[self.link_dims],
                                    self.link_coeffs):
                            self.bcdof_list.append(
                                BCDof(var=self.var,
                                      dof=dof,
                                      link_dofs=[link_dof],
                                      value=self.value,
                                      link_coeffs=[link_coeff],
                                      time_function=self.time_function))

    def apply_essential(self, K):
        for bcond in self.bcdof_list:
            bcond.apply_essential(K)

    def apply(self, step_flag, sctx, K, R, t_n, t_n1):
        if self.is_essential():
            for bcond in self.bcdof_list:
                bcond.apply(step_flag, sctx, K, R, t_n, t_n1)
        else:
            self.apply_natural(step_flag, sctx, K, R, t_n, t_n1)

    def apply_natural(self, step_flag, sctx, K, R, t_n, t_n1):
        '''Integrate the natural BC (load) over the slice and add the
        resulting element force vectors to the global RHS vector R.'''
        fets_eval = self.slice.fe_grid.fets_eval
        e_idx, n_idx = self.slice.dof_grid_slice.idx_tuple
        r_arr, w_arr, ix = fets_eval.get_sliced_ip_scheme(n_idx)
        slice_geo_X = self.slice.fe_grid.elem_X_map[self.slice.elems]
        slice_dofs = self.slice.fe_grid.elem_dof_map[self.slice.elems]
        p_value = self.value * float(self.time_function(t_n1))
        p_vct = zeros((fets_eval.n_nodal_dofs,), dtype='float_')
        for d in self.dims:
            p_vct[d] = p_value
        for el, el_dofs, el_geo_X in zip(self.slice.elems, slice_dofs,
                                         slice_geo_X):
            f_vct = zeros((fets_eval.n_e_dofs,), dtype='float_')
            for r_pnt, w in zip(r_arr, w_arr):
                if len(ix) > 0:
                    J_mtx = fets_eval.get_J_mtx(r_pnt, el_geo_X)
                    if self.integ_domain == 'global':
                        # Integration projected onto the global coordinate
                        # axes - use submatrix of the J_mtx
                        J_det = det(J_mtx[ix_(ix, ix)])
                    elif self.integ_domain == 'local':
                        # Integration over the parametric coordinates on
                        # the surface
                        J_det = 1.0
                        for i in ix:
                            J_det *= norm(J_mtx[:, i])
                else:
                    J_det = 1.0
                sctx.r_pnt = r_pnt
                sctx.X = el_geo_X
                X_pnt = fets_eval.get_X_pnt(sctx)
                space_factor = self.space_function(X_pnt)
                # add to the element vector
                N_mtx = fets_eval.get_N_mtx(r_pnt)
                f_vct += dot(N_mtx.T, p_vct * space_factor) * w * J_det
            R[el_dofs] += f_vct

    #-------------------------------------------------------------------------
    # Constrained DOFs
    #-------------------------------------------------------------------------
    dofs = Property

    def _get_dofs(self):
        return self.slice.dofs

    dof_X = Property

    def _get_dof_X(self):
        return self.slice.dof_X

    n_dof_nodes = Property

    def _get_n_dof_nodes(self):
        sliceshape = self.dofs.shape
        return sliceshape[0] * sliceshape[1]

    # register the pipelines for plotting labels and geometry
    mvp_dofs = Trait(MVPointLabels)

    def _mvp_dofs_default(self):
        return MVPointLabels(name='Boundary condition',
                             points=self._get_mvpoints,
                             vectors=self._get_labels,
                             color=(0.0, 0.0, 0.882353))

    def _get_mvpoints(self):
        # blow up the 1D/2D point coordinates to 3D for plotting
        if self.dof_X.shape[2] == 2:
            dof_X = np.hstack([self.dof_X.reshape(self.n_dof_nodes, 2),
                               np.zeros((self.n_dof_nodes, 1), dtype='f')])
        elif self.dof_X.shape[2] == 1:
            dof_X = np.hstack([self.dof_X.reshape(self.n_dof_nodes, 1),
                               np.zeros((self.n_dof_nodes, 2), dtype='f')])
        else:
            dof_X = self.dof_X.reshape(self.n_dof_nodes, 3)
        return dof_X

    def _get_labels(self):
        # blow up the DOF numbers to one 3-vector per node (-1 = unused)
        n_points = self.n_dof_nodes
        dofs = repeat(-1., n_points * 3).reshape(n_points, 3)
        dofs[:, tuple(self.dims)] = self.dofs
        return dofs

    #-------------------------------------------------------------------------
    # Link DOFs
    #-------------------------------------------------------------------------
    link_dofs = Property(List)

    def _get_link_dofs(self):
        if self.link_slice is not None:
            return self.link_slice.dofs
        else:
            return []

    link_dof_X = Property

    def _get_link_dof_X(self):
        return self.link_slice.dof_X

    n_link_dof_nodes = Property

    def _get_n_link_dof_nodes(self):
        sliceshape = self.link_dofs.shape
        return sliceshape[0] * sliceshape[1]

    # register the pipelines for plotting labels and geometry
    mvp_link_dofs = Trait(MVPointLabels)

    def _mvp_link_dofs_default(self):
        return MVPointLabels(name='Link boundary condition',
                             points=self._get_link_mvpoints,
                             vectors=self._get_link_labels,
                             color=(0.0, 0.882353, 0.0))

    def _get_link_mvpoints(self):
        # blow up the link point coordinates to 3D for plotting.
        # NOTE(review): this uses n_dof_nodes (of the constrained slice)
        # rather than n_link_dof_nodes -- confirm both slices always have
        # the same node count.
        if self.link_slice is None:
            return np.zeros((0, 3), dtype='f')
        if self.dof_X.shape[2] == 2:
            dof_X = np.hstack([self.link_dof_X.reshape(self.n_dof_nodes, 2),
                               np.zeros((self.n_dof_nodes, 1), dtype='f')])
        elif self.dof_X.shape[2] == 1:
            dof_X = np.hstack([self.link_dof_X.reshape(self.n_dof_nodes, 1),
                               np.zeros((self.n_dof_nodes, 2), dtype='f')])
        else:
            dof_X = self.link_dof_X.reshape(self.n_dof_nodes, 3)
        return dof_X

    def _get_link_labels(self):
        # blow up the link DOF numbers to one 3-vector per node
        if self.link_slice is None:
            return np.zeros((0, 3), dtype='f')
        n_points = self.n_link_dof_nodes
        dofs = repeat(-1., n_points * 3).reshape(n_points, 3)
        dofs[:, tuple(self.dims)] = self.link_dofs
        return dofs

    redraw_button = Button('Redraw')

    def _redraw_button_fired(self):
        self.mvp_dofs.redraw(label_mode='label_vectors')
        self.mvp_link_dofs.redraw(label_mode='label_vectors')

    traits_view = View(HSplit(Group('var', 'dims', 'value', 'redraw_button'),
                              Item('bcdof_list',
                                   style='custom',
                                   editor=bcond_list_editor,
                                   show_label=False)),
                       resizable=True,
                       )
class Visualization(HasTraits):
    """Mayavi-based 3D visualization of segmented volume data."""

    # path of the currently loaded data set
    path = Str
    data = None

    # NOTE(review): the statements below run once at class-definition time
    # (module import), not per instance -- the model is loaded and timed
    # when this module is imported.  Confirm intentional.
    op = Operater()
    start = time.time()
    op.load_model()
    print('load model cost: %f' % (time.time() - start))
    r_dset = ReadyData()
    x = None
    y = None
    z = None
    opacity = Range(0, 100, 100)

    # the layout of the dialog created
    view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
                     height=512, width=512, show_label=False),
                Group('_', 'opacity'),
                resizable=True  # we need this to resize with the parent widget
                )
    scene = Instance(MlabSceneModel, ())

    @on_trait_change('scene.activated')
    def update_plot(self):
        # This function is called when the view is opened.  The scene is
        # not populated while the view is not yet open, because some VTK
        # features require a GLContext.  Normal mlab calls can be made on
        # the embedded scene.
        print('*_*' * 10)
        if self.data is None:
            return
        print('-*-' * 10)
        self.plot = self.scene.mlab.contour3d(self.x, self.y, self.z,
                                              self.data,
                                              color=(0.53, 0, 0.098),
                                              transparent=True,
                                              opacity=1.0)

    @on_trait_change('opacity')
    def update_opacity(self):
        # Walk the mayavi pipeline down to the surface actor and apply
        # the slider value as an opacity in [0, 1].
        s = self.scene.mlab.gcf()
        source = s.children[0]
        manager = source.children[0]
        surface = manager.children[0]
        surface.actor.property.opacity = self.opacity * 1.0 / 100

    def update_data(self, path):
        """Load volume data from *path* and set up the plotting grids.

        A '.nii' file is read with SimpleITK and each slice is run
        through the segmentation model; otherwise *path* is taken to be
        a directory of numbered image files which are stacked directly.
        """
        self.path = path
        self.data = []
        if path == '' or path == None:
            return
        elif path[-4:] == '.nii':
            ds = sitk.ReadImage(path)
            img = sitk.GetArrayFromImage(ds)
            img = img.astype(np.uint8)
            # NOTE(review): the first 46 slices are dropped -- confirm this
            # offset against the data set.
            img = img[46:]
            lz = img.shape[0]
            lx = img.shape[1]
            ly = img.shape[2]
            spacing = ds.GetSpacing()  # x, y, z
            # physical-coordinate grids centred on the volume
            self.x, self.y, self.z = np.mgrid[
                -lz * spacing[2] / 2:lz * spacing[2] / 2:spacing[2],
                -lx * spacing[0] / 2:lx * spacing[0] / 2:spacing[0],
                -ly * spacing[1] / 2:ly * spacing[1] / 2:spacing[1]]
            start = time.time()
            for i in range(img.shape[0]):
                self.r_dset.update_data(img[i])
                self.data.append(
                    self.op.predict_pic(self.r_dset.getitem().float()))
            self.data = np.array(self.data)
            print(lz)
            print(self.x.shape)
            print(self.y.shape)
            print(self.z.shape)
            print(self.data.shape)
            print('predict cost: %f' % (time.time() - start))
        else:
            self.data = []
            filelist = os.listdir(path)
            # infer the file extension from the first directory entry
            types = '.' + filelist[0].split('.')[1]
            for i in range(len(filelist)):
                image = Image.open(path + '/' + str(i) + types)
                image = np.array(image)
                self.data.append(image)
            self.data = np.array(self.data)
            # fixed 0.72 in-plane spacing for 512x512 image stacks
            self.x, self.y, self.z = np.mgrid[
                -len(filelist) * 1 / 2:len(filelist) * 1 / 2: 1,
                -512 * 0.72 / 2:512 * 0.72 / 2: 0.72,
                -512 * 0.72 / 2:512 * 0.72 / 2: 0.72]
class Ellipsoid(BaseMirror):
    """An ellipsoidal mirror defined by its two foci and the total
    focus-to-edge-to-focus distance (*size*), clipped to X/Y/Z bounds."""
    name = "Ellipsoid"
    abstract = False
    focus1 = Tuple(-50., 0., 0.)
    focus2 = Tuple(0., 50., 0.)
    size = Float(100.0, desc="twice the major axis length, or the distance from one focus to the ellipsoid edge to the other focus")

    X_bounds = MinMax(-25., 25.)
    Y_bounds = MinMax(-25., 25.)
    Z_bounds = MinMax(0., 50.)

    show_foci = Bool(True)
    foci_Actors = Instance(tvtk.ActorCollection, ())

    # derived (major, minor) semi-axis lengths
    axes = Property(Tuple, depends_on="focus1, focus2, size",
                    desc="(major, minor) axis lengths")

    ellipse_trans = Instance(tvtk.Transform, (), transient=True)
    combined_trans = Instance(tvtk.Transform, transient=True)

    f1_glyph = Instance(tvtk.SphereSource, (), transient=True)
    f2_glyph = Instance(tvtk.SphereSource, (), transient=True)

    f1_act = Instance(tvtk.Follower, (), transient=True)
    f2_act = Instance(tvtk.Follower, (), transient=True)

    vtk_grid = Instance(EmptyGridSource, (), transient=True)
    vtk_quadric = Instance(tvtk.Quadric, (), transient=True)

    # NOTE(review): class-level tvtk.Property instance is shared by all
    # Ellipsoid instances -- confirm intentional.
    vtkproperty = tvtk.Property(opacity=0.7, color=(0.8, 0.8, 0))

    traits_view = View(VGroup(Traceable.uigroup,
                              Item("focus1", editor=VectorEditor),
                              Item("focus2", editor=VectorEditor),
                              Item("show_foci"),
                              Item("size", editor=NumEditor),
                              Item("X_bounds"),
                              Item("Y_bounds"),
                              Item("Z_bounds")),
                       )

    def _faces_default(self):
        # single ellipsoidal face bounded by the configured box
        facelist = FaceList(owner=self)
        facelist.faces = [EllipsoidalFace(owner=self,
                                          x1=self.X_bounds[0],
                                          x2=self.X_bounds[1],
                                          y1=self.Y_bounds[0],
                                          y2=self.Y_bounds[1],
                                          z1=self.Z_bounds[0],
                                          z2=self.Z_bounds[1],
                                          major=self.axes[0],
                                          minor=self.axes[1])]
        return facelist

    @on_trait_change("focus1, focus2, size")
    def config_trans(self, *args):
        # Align the ellipse-local transform with the line through the two
        # foci: major axis along X, origin at the centre between foci.
        f1 = numpy.asarray(self.focus1)
        f2 = numpy.asarray(self.focus2)
        size = self.size
        centre = (f2 + f1) / 2.
        ellipse_t = self.ellipse_trans
        ellipse_t.identity()
        # ellipse major axis along the X axis
        delta = f2 - f1
        ax = normaliseVector(delta)
        axy = numpy.sqrt(ax[0]**2 + ax[1]**2)
        ellipse_t.rotate_y(numpy.arctan2(ax[2], axy) * 180 / numpy.pi)
        ellipse_t.rotate_z(-numpy.arctan2(ax[1], ax[0]) * 180 / numpy.pi)
        ellipse_t.translate(*-centre)

    @cached_property
    def _get_axes(self):
        # major semi-axis a = size/2; minor b from b = sqrt(size^2 - h^2)/2
        # where h is the inter-focal distance
        f1 = numpy.asarray(self.focus1)
        f2 = numpy.asarray(self.focus2)
        delta = f2 - f1
        size = self.size
        h = numpy.sqrt((delta**2).sum())
        a = size / 2.
        b = 0.5 * numpy.sqrt(size**2 - h**2)
        return (a, b)

    def _combined_trans_default(self):
        t = tvtk.Transform()
        t.concatenate(self.transform)
        t.concatenate(self.ellipse_trans.linear_inverse)
        return t

    def _show_foci_changed(self, val):
        for act in self.foci_Actors:
            act.visibility = val
        self.render = True

    def update_grid(self):
        # re-sample the bounding box on a fixed 20^3 structured grid
        xmin, xmax = self.X_bounds
        ymin, ymax = self.Y_bounds
        zmin, zmax = self.Z_bounds
        source = self.vtk_grid
        size = 20
        dx = (xmax - xmin) / (size - 1)
        dy = (ymax - ymin) / (size - 1)
        dz = (zmax - zmin) / (size - 1)
        source.dimensions = (size, size, size)
        source.origin = (xmin, ymin, zmin)
        source.spacing = (dx, dy, dz)

    @on_trait_change("X_bounds, Y_bounds, Z_bounds")
    def change_bounds(self):
        self.update_grid()
        self.vtk_grid.modified()
        self.update = True

    @on_trait_change("focus1, focus2, size")
    def config_pipeline(self, *args):
        # Move the focus glyphs/labels and rebuild the quadric
        # coefficients.  NOTE(review): the transform setup below repeats
        # config_trans -- both handlers fire on the same trait changes.
        tp = self.transform.transform_point
        self.f1_act.position = tp(self.focus1)
        self.f2_act.position = tp(self.focus2)
        f1 = numpy.asarray(self.focus1)
        f2 = numpy.asarray(self.focus2)
        self.f1_glyph.center = f1
        self.f2_glyph.center = f2
        centre = (f2 + f1) / 2.
        ellipse_t = self.ellipse_trans
        ellipse_t.identity()
        # ellipse major axis along the X axis
        delta = f2 - f1
        ax = normaliseVector(delta)
        axy = numpy.sqrt(ax[0]**2 + ax[1]**2)
        ellipse_t.rotate_y(numpy.arctan2(ax[2], axy) * 180 / numpy.pi)
        ellipse_t.rotate_z(-numpy.arctan2(ax[1], ax[0]) * 180 / numpy.pi)
        ellipse_t.translate(*-centre)
        a, b = self.axes
        #b = 0.5 * numpy.sqrt(size**2 - h**2) ##not required
        # quadric x^2/a^2 + (y^2 + z^2)/b^2 - 1 = 0 in ellipse-local frame
        q = self.vtk_quadric
        A1 = A2 = 1 / (b**2)
        A0 = 1 / (a**2)
        #A8 = 1
        A9 = -1
        q.coefficients = (A0, A1, A2, 0, 0, 0, 0, 0, 0, A9)
        self.update = True

    def _pipeline_default(self):
        # grid -> quadric clip -> polydata -> normals -> world transform
        grid = self.vtk_grid
        #grid.set_execute_method(self.create_grid)
        grid.modified()
        quad = self.vtk_quadric
        quad.transform = self.ellipse_trans
        clip = tvtk.ClipVolume(input_connection=grid.output_port,
                               clip_function=quad,
                               inside_out=0)
        topoly = tvtk.GeometryFilter(input_connection=clip.output_port)
        norm = tvtk.PolyDataNormals(input_connection=topoly.output_port)
        transF = tvtk.TransformFilter(input_connection=norm.output_port,
                                      transform=self.transform)
        self.config_pipeline()
        grid.modified()
        return transF

    def get_actors(self, scene):
        """Create and return the actors showing the foci, the major axis
        line and the F1/F2 labels; visibility follows *show_foci*."""
        actors = []
        sList = [self.f1_glyph, self.f2_glyph]
        cList = [(0, 1, 0), (1, 0, 0)]
        for s, c in zip(sList, cList):
            s.radius = 1.0
            map = tvtk.PolyDataMapper(input_connection=s.output_port)
            act = tvtk.Actor(mapper=map, user_transform=self.transform)
            act.property.color = c
            actors.append(act)
        # the major-axis line, mapped back out of the ellipse-local frame
        line = tvtk.LineSource(point1=(-100, 0, 0),
                               point2=(100, 0, 0))
        t_line = tvtk.TransformFilter(
            input_connection=line.output_port,
            transform=self.ellipse_trans.linear_inverse)
        map = tvtk.PolyDataMapper(input_connection=t_line.output_port)
        act = tvtk.Actor(mapper=map, user_transform=self.transform)
        act.property.color = (0, 0, 0)
        actors.append(act)
        l1 = tvtk.VectorText(text="F1")
        l2 = tvtk.VectorText(text="F2")
        m1 = tvtk.PolyDataMapper(input_connection=l1.output_port)
        m2 = tvtk.PolyDataMapper(input_connection=l2.output_port)
        act1 = self.f1_act
        act2 = self.f2_act
        act1.mapper = m1
        act2.mapper = m2
        scale = (5, 5, 5)
        act1.scale = scale
        act2.scale = scale
        act1.property.color = (0, 0, 0)
        act2.property.color = (0, 0, 0)
        act1.position = self.focus1
        act2.position = self.focus2

        def on_editor(new_ed):
            # Followers need the scene camera so the labels face the viewer
            if new_ed is not None:
                act1.camera = new_ed._camera
                act2.camera = new_ed._camera
        scene.on_trait_change(on_editor, "scene_editor")
        actors.append(act1)
        actors.append(act2)
        for actor in actors:
            self.actors.append(actor)
            self.foci_Actors.append(actor)
            actor.visibility = self.show_foci
        return self.actors

    def make_step_shape(self):
        # build a STEP representation of this mirror for CAD export
        from raytrace.step_export import make_ellipsoid_mirror
        ell = make_ellipsoid_mirror(self.focus1,
                                    self.focus2,
                                    self.size / 2.,
                                    self.X_bounds,
                                    self.Y_bounds,
                                    self.Z_bounds,
                                    self.centre,
                                    self.direction,
                                    self.x_axis)
        return ell, "yellow"
class Model(HasTraits):
    """Samples a user-supplied 2D function string on a grid.

    ``xs``/``ys`` hold the cell-edge coordinates for image plotting,
    while ``zs`` holds the function sampled at cell centres.  Any change
    to the function or grid traits recomputes the model and fires
    ``model_changed``.
    """
    # Traits view definitions:
    traits_view = View(Group(
        Item('function'),
        HGroup(Item('npts_x', label="Number X Points"),
               Item('npts_y', label="Number Y Points")),
        HGroup(Item('min_x', label="Min X value"),
               Item('max_x', label="Max X value")),
        HGroup(Item('min_y', label="Min Y value"),
               Item('max_y', label="Max Y value"))),
        buttons=["OK", "Cancel"])

    function = Str("tanh(x**2+y)*cos(y)*jn(0,x+y*2)")
    npts_x = CInt(400)
    npts_y = CInt(200)
    min_x = CFloat(-2 * pi)
    max_x = CFloat(2 * pi)
    min_y = CFloat(-1.5 * pi)
    max_y = CFloat(1.5 * pi)
    xs = Array
    ys = Array
    zs = Array
    minz = Float
    maxz = Float
    model_changed = Event

    def __init__(self, *args, **kwargs):
        super(Model, self).__init__(*args, **kwargs)
        # Remember the last successfully evaluated function so we can roll
        # back on a failed edit (previously undefined if the very first
        # evaluation failed).
        self._function = self.function
        self.compute_model()

    def compute_model(self):
        """Recompute ``zs`` (and the ``xs``/``ys`` edge arrays)."""
        # The xs and ys used for the image plot range need to be the
        # edges of the cells.
        self.xs = linspace(self.min_x, self.max_x, self.npts_x + 1)
        self.ys = linspace(self.min_y, self.max_y, self.npts_y + 1)
        # The grid of points at which we will evaluate the 2D function
        # is located at cell centers, so use halfsteps from the
        # min/max values (which are edges)
        xstep = (self.max_x - self.min_x) / self.npts_x
        ystep = (self.max_y - self.min_y) / self.npts_y
        gridx = linspace(self.min_x + xstep / 2, self.max_x - xstep / 2,
                         self.npts_x)
        # BUG FIX: the y grid previously used xstep for its half-step
        # offsets; use ystep so the centres line up with the y edges.
        gridy = linspace(self.min_y + ystep / 2, self.max_y - ystep / 2,
                         self.npts_y)
        x, y = meshgrid(gridx, gridy)
        try:
            # SECURITY: eval/exec of a user-typed expression -- only
            # acceptable for trusted interactive use, never on untrusted
            # input.
            d = dict(x=x, y=y)
            exec("from scipy import *", d)
            exec("from scipy.special import *", d)
            self.zs = eval(self.function, d)
            self.minz = nanmin(self.zs)
            self.maxz = nanmax(self.zs)
            self.model_changed = True
            self._function = self.function
        except Exception:
            # roll back to the last function that evaluated cleanly,
            # without retriggering a recompute
            self.set(function=self._function, trait_change_notify=False)

    def _anytrait_changed(self, name, value):
        # recompute whenever a grid- or function-defining trait changes
        if name in ['function', 'npts_x', 'npts_y', 'min_x', 'max_x',
                    'min_y', 'max_y']:
            self.compute_model()
class VolumeSlicer(HasTraits):
    """Four-pane volume viewer: a 3D scene with three image-plane
    widgets plus one synchronized 2D side view per axis."""
    # The data to plot
    data = Array()

    # The 4 views displayed
    scene3d = Instance(MlabSceneModel, ())
    scene_x = Instance(MlabSceneModel, ())
    scene_y = Instance(MlabSceneModel, ())
    scene_z = Instance(MlabSceneModel, ())

    # The data source
    data_src3d = Instance(Source)

    # The image plane widgets of the 3D scene
    ipw_3d_x = Instance(PipelineBase)
    ipw_3d_y = Instance(PipelineBase)
    ipw_3d_z = Instance(PipelineBase)

    _axis_names = dict(x=0, y=1, z=2)

    #---------------------------------------------------------------------------
    def __init__(self, **traits):
        super(VolumeSlicer, self).__init__(**traits)
        # Force the creation of the image_plane_widgets:
        self.ipw_3d_x
        self.ipw_3d_y
        self.ipw_3d_z

    #---------------------------------------------------------------------------
    # Default values
    #---------------------------------------------------------------------------
    def _data_src3d_default(self):
        return mlab.pipeline.scalar_field(self.data,
                                          figure=self.scene3d.mayavi_scene)

    def make_ipw_3d(self, axis_name):
        """Create an image-plane widget on the 3D scene for *axis_name*."""
        ipw = mlab.pipeline.image_plane_widget(
            self.data_src3d,
            figure=self.scene3d.mayavi_scene,
            plane_orientation='%s_axes' % axis_name)
        mlab.colorbar(object=ipw)
        return ipw

    def _ipw_3d_x_default(self):
        return self.make_ipw_3d('x')

    def _ipw_3d_y_default(self):
        return self.make_ipw_3d('y')

    def _ipw_3d_z_default(self):
        return self.make_ipw_3d('z')

    #---------------------------------------------------------------------------
    # Scene activation callbaks
    #---------------------------------------------------------------------------
    @on_trait_change('scene3d.activated')
    def display_scene3d(self):
        outline = mlab.pipeline.outline(
            self.data_src3d,
            figure=self.scene3d.mayavi_scene,
        )
        self.scene3d.mlab.view(40, 50)
        # Interaction properties can only be changed after the scene
        # has been created, and thus the interactor exists
        for ipw in (self.ipw_3d_x, self.ipw_3d_y, self.ipw_3d_z):
            # Turn the interaction off
            ipw.ipw.interaction = 0
        self.scene3d.scene.background = (0, 0, 0)
        # Keep the view always pointing up
        self.scene3d.scene.interactor.interactor_style = \
            tvtk.InteractorStyleTerrain()

    def make_side_view(self, axis_name):
        """Build the 2D view for *axis_name* and wire it to the 3D widgets."""
        scene = getattr(self, 'scene_%s' % axis_name)
        # To avoid copying the data, we take a reference to the
        # raw VTK dataset, and pass it on to mlab. Mlab will create
        # a Mayavi source from the VTK without copying it.
        # We have to specify the figure so that the data gets
        # added on the figure we are interested in.
        outline = mlab.pipeline.outline(
            self.data_src3d.mlab_source.dataset,
            figure=scene.mayavi_scene,
        )
        ipw = mlab.pipeline.image_plane_widget(
            outline,
            plane_orientation='%s_axes' % axis_name)
        setattr(self, 'ipw_%s' % axis_name, ipw)

        # Synchronize positions between the corresponding image plane
        # widgets on different views.
        ipw.ipw.sync_trait('slice_position',
                           getattr(self, 'ipw_3d_%s' % axis_name).ipw)

        # Make left-clicking create a crosshair
        ipw.ipw.left_button_action = 0

        # Add a callback on the image plane widget interaction to
        # move the others
        def move_view(obj, evt):
            position = obj.GetCurrentCursorPosition()
            # BUG FIX: dict.iteritems() is Python-2-only; items() behaves
            # identically here and works on both Python 2 and 3.
            for other_axis, axis_number in self._axis_names.items():
                if other_axis == axis_name:
                    continue
                ipw3d = getattr(self, 'ipw_3d_%s' % other_axis)
                ipw3d.ipw.slice_position = position[axis_number]

        ipw.ipw.add_observer('InteractionEvent', move_view)
        ipw.ipw.add_observer('StartInteractionEvent', move_view)

        # Center the image plane widget
        ipw.ipw.slice_position = 0.5 * self.data.shape[
            self._axis_names[axis_name]]

        # Position the view for the scene
        views = dict(
            x=(0, 90),
            y=(90, 90),
            z=(0, 0),
        )
        scene.mlab.view(*views[axis_name])

        # 2D interaction: only pan and zoom
        scene.scene.interactor.interactor_style = \
            tvtk.InteractorStyleImage()
        scene.scene.background = (0, 0, 0)

    @on_trait_change('scene_x.activated')
    def display_scene_x(self):
        return self.make_side_view('x')

    @on_trait_change('scene_y.activated')
    def display_scene_y(self):
        return self.make_side_view('y')

    @on_trait_change('scene_z.activated')
    def display_scene_z(self):
        return self.make_side_view('z')

    #---------------------------------------------------------------------------
    # The layout of the dialog created
    #---------------------------------------------------------------------------
    view = View(
        HGroup(
            Group(
                Item('scene_y',
                     editor=SceneEditor(scene_class=Scene),
                     height=250, width=300),
                Item('scene_z',
                     editor=SceneEditor(scene_class=Scene),
                     height=250, width=300),
                show_labels=False,
            ),
            Group(
                Item('scene_x',
                     editor=SceneEditor(scene_class=Scene),
                     height=250, width=300),
                Item('scene3d',
                     editor=SceneEditor(scene_class=MayaviScene),
                     height=250, width=300),
                show_labels=False,
            ),
        ),
        resizable=True,
        title='Volume Slicer',
    )
class SystemMonitorView(HasTraits):
    """System-monitor tab of the console.

    Shows per-thread CPU/stack usage, per-link (UART A/B, USB FTDI) error
    counters and throughput, and observation-latency figures. All values
    are driven by the SBP callbacks registered in ``__init__``.
    """

    python_console_cmds = Dict()
    # Rows shown in the threads table: (name, cpu, stack_free) tuples.
    _threads_table_list = List()
    # Raw (name, ThreadState) pairs accumulated between heartbeats.
    threads = List()

    # UART A statistics: buffers as percentages, throughput in KBytes/s.
    uart_a_crc_error_count = Int(0)
    uart_a_io_error_count = Int(0)
    uart_a_rx_buffer = Float(0)
    uart_a_tx_buffer = Float(0)
    uart_a_tx_KBps = Float(0)
    uart_a_rx_KBps = Float(0)

    # UART B statistics.
    uart_b_crc_error_count = Int(0)
    uart_b_io_error_count = Int(0)
    uart_b_rx_buffer = Float(0)
    uart_b_tx_buffer = Float(0)
    uart_b_tx_KBps = Float(0)
    uart_b_rx_KBps = Float(0)

    # USB (FTDI) link statistics.
    ftdi_crc_error_count = Int(0)
    ftdi_io_error_count = Int(0)
    ftdi_rx_buffer = Float(0)
    ftdi_tx_buffer = Float(0)
    ftdi_tx_KBps = Float(0)
    ftdi_rx_KBps = Float(0)

    # Observation-message latency statistics (milliseconds).
    msg_obs_avg_latency_ms = Int(0)
    msg_obs_min_latency_ms = Int(0)
    msg_obs_max_latency_ms = Int(0)
    msg_obs_window_latency_ms = Int(0)

    traits_view = View(
        VGroup(
            Item(
                '_threads_table_list',
                style='readonly',
                editor=TabularEditor(adapter=SimpleAdapter()),
                show_label=False,
                width=0.85,
            ),
            HGroup(
                VGroup(
                    Item('msg_obs_window_latency_ms', label='Obs Latency',
                         style='readonly', format_str='%dms'),
                    Item('msg_obs_avg_latency_ms', label='Obs Latency (Avg ms)',
                         style='readonly', format_str='%dms'),
                    Item('msg_obs_min_latency_ms', label='Obs Latency (Min ms)',
                         style='readonly', format_str='%dms'),
                    Item('msg_obs_max_latency_ms', label='Obs Latency (Max ms)',
                         style='readonly', format_str='%dms'),
                    label='Connection Monitor',
                    show_border=True,
                ),
                VGroup(
                    Item('uart_a_crc_error_count', label='CRC Errors',
                         style='readonly'),
                    Item('uart_a_io_error_count', label='IO Errors',
                         style='readonly'),
                    Item('uart_a_tx_buffer', label='TX Buffer %',
                         style='readonly', format_str='%.1f'),
                    Item('uart_a_rx_buffer', label='RX Buffer %',
                         style='readonly', format_str='%.1f'),
                    Item('uart_a_tx_KBps', label='TX KBytes/s',
                         style='readonly', format_str='%.2f'),
                    Item('uart_a_rx_KBps', label='RX KBytes/s',
                         style='readonly', format_str='%.2f'),
                    label='UART A',
                    show_border=True,
                ),
                VGroup(
                    Item('uart_b_crc_error_count', label='CRC Errors',
                         style='readonly'),
                    Item('uart_b_io_error_count', label='IO Errors',
                         style='readonly'),
                    Item('uart_b_tx_buffer', label='TX Buffer %',
                         style='readonly', format_str='%.1f'),
                    Item('uart_b_rx_buffer', label='RX Buffer %',
                         style='readonly', format_str='%.1f'),
                    Item('uart_b_tx_KBps', label='TX KBytes/s',
                         style='readonly', format_str='%.2f'),
                    Item('uart_b_rx_KBps', label='RX KBytes/s',
                         style='readonly', format_str='%.2f'),
                    label='UART B',
                    show_border=True,
                ),
                VGroup(
                    Item('ftdi_crc_error_count', label='CRC Errors',
                         style='readonly'),
                    Item('ftdi_io_error_count', label='IO Errors',
                         style='readonly'),
                    Item('ftdi_tx_buffer', label='TX Buffer %',
                         style='readonly', format_str='%.1f'),
                    Item('ftdi_rx_buffer', label='RX Buffer %',
                         style='readonly', format_str='%.1f'),
                    Item('ftdi_tx_KBps', label='TX KBytes/s',
                         style='readonly', format_str='%.2f'),
                    Item('ftdi_rx_KBps', label='RX KBytes/s',
                         style='readonly', format_str='%.2f'),
                    label='USB UART',
                    show_border=True,
                ),
            ),
        ))

    def update_threads(self):
        """Rebuild the table rows from ``self.threads``, sorted by CPU
        usage, highest first."""
        self._threads_table_list = [
            (thread_name, state.cpu, state.stack_free)
            for thread_name, state in sorted(
                self.threads, key=lambda x: x[1].cpu, reverse=True)
        ]

    def heartbeat_callback(self, data):
        """On each heartbeat: publish the thread states accumulated since
        the previous heartbeat, then start a fresh collection interval."""
        self.update_threads()
        self.threads = []

    def thread_state_callback(self, data):
        """Decode one THREAD_STATE message and buffer it until the next
        heartbeat flushes the list."""
        th = ThreadState()
        th.from_binary(data)
        self.threads.append((th.name, th))

    def uart_state_callback(self, data):
        """Decode a UART_STATE payload and update all link statistics.

        Layout (little-endian): for each of UART A, UART B, FTDI —
        tx_KBps(f), rx_KBps(f), crc_errors(H), io_errors(H),
        tx_buffer(B), rx_buffer(B) — followed by four int32 latency
        fields (avg, min, max, window).
        """
        state = struct.unpack('<ffHHBBffHHBBffHHBBiiii', data)
        uarta = state[0:6]
        uartb = state[6:12]
        ftdi = state[12:18]
        self.uart_a_tx_KBps, self.uart_a_rx_KBps = uarta[0:2]
        self.uart_a_crc_error_count = uarta[2]
        self.uart_a_io_error_count = uarta[3]
        # Buffer levels arrive as 0-255 bytes; convert to percent.
        self.uart_a_tx_buffer, self.uart_a_rx_buffer = map(
            lambda x: 100.0 * x / 255.0, uarta[4:6])
        self.uart_b_tx_KBps, self.uart_b_rx_KBps = uartb[0:2]
        self.uart_b_crc_error_count = uartb[2]
        self.uart_b_io_error_count = uartb[3]
        self.uart_b_tx_buffer, self.uart_b_rx_buffer = map(
            lambda x: 100.0 * x / 255.0, uartb[4:6])
        self.ftdi_tx_KBps, self.ftdi_rx_KBps = ftdi[0:2]
        self.ftdi_crc_error_count = ftdi[2]
        self.ftdi_io_error_count = ftdi[3]
        self.ftdi_tx_buffer, self.ftdi_rx_buffer = map(
            lambda x: 100.0 * x / 255.0, ftdi[4:6])
        self.msg_obs_avg_latency_ms = state[-4]
        self.msg_obs_min_latency_ms = state[-3]
        self.msg_obs_max_latency_ms = state[-2]
        self.msg_obs_window_latency_ms = state[-1]

    def __init__(self, link):
        """Register the SBP callbacks on ``link`` and expose this view to
        the embedded python console as 'mon'."""
        super(SystemMonitorView, self).__init__()
        self.link = link
        self.link.add_callback(sbp_messages.SBP_HEARTBEAT,
                               self.heartbeat_callback)
        self.link.add_callback(sbp_messages.THREAD_STATE,
                               self.thread_state_callback)
        self.link.add_callback(sbp_messages.UART_STATE,
                               self.uart_state_callback)
        self.python_console_cmds = {'mon': self}
class TemplateDataNames(HasPrivateTraits):
    """Manages the bindings between a set of TemplateDataName objects and
    an ITemplateDataContext, exposing the resolved/unresolved/optional
    subsets and the objects needed by the bindings table editor."""

    #-- Public Traits ----------------------------------------------------------

    # The data context to which bindings are made:
    context = Instance(ITemplateDataContext)

    # The current set of data names to be bound to the context:
    data_names = List(TemplateDataName)

    # The list of unresolved, required bindings:
    unresolved_data_names = Property(depends_on='data_names.resolved')

    # The list of optional bindings:
    optional_data_names = Property(depends_on='data_names.optional')

    # The list of unresolved optional bindings:
    unresolved_optional_data_names = Property(
        depends_on='data_names.[resolved,optional]')

    #-- Private Traits ---------------------------------------------------------

    # List of 'virtual' data names for use by table editor:
    virtual_data_names = List

    # The list of table editor columns:
    table_columns = Property(depends_on='data_names')  # List( ObjectColumn )

    #-- Traits View Definitions ------------------------------------------------

    view = View(
        Item('virtual_data_names',
             show_label=False,
             style='custom',
             editor=table_editor))

    #-- Property Implementations -----------------------------------------------

    @cached_property
    def _get_unresolved_data_names(self):
        """Required bindings that are not yet resolved."""
        return [
            dn for dn in self.data_names
            if (not dn.resolved) and (not dn.optional)
        ]

    @cached_property
    def _get_optional_data_names(self):
        """All optional bindings."""
        return [dn for dn in self.data_names if dn.optional]

    @cached_property
    def _get_unresolved_optional_data_names(self):
        """Optional bindings that are not yet resolved."""
        return [
            dn for dn in self.data_names if (not dn.resolved) and dn.optional
        ]

    @cached_property
    def _get_table_columns(self):
        """Build one BindingsColumn per value slot.

        The available width (0.43) is split evenly among the slots; a
        single slot keeps the plain 'Name' label.
        """
        n = max(len(dn.items) for dn in self.data_names)
        if n == 1:
            return std_columns + [
                BindingsColumn(name='value0', label='Name', width=0.43)
            ]

        width = 0.43 / n
        return (std_columns + [
            BindingsColumn(name='value%d' % i,
                           index=i,
                           label='Name %d' % (i + 1),
                           width=width) for i in range(n)
        ])

    #-- Trait Event Handlers ---------------------------------------------------

    def _context_changed(self, context):
        """Push a newly assigned context down to every binding."""
        for data_name in self.data_names:
            data_name.context = context

    def _data_names_changed(self, old, new):
        """ Handles the list of 'data_names' being changed.
        """
        # Make sure that all of the names are unique:
        new = set(new)

        # Update the old and new context links:
        self._update_contexts(old, new)

        # Update the list of virtual names based on the new set, ordered
        # by description. (key= sort replaces the old Py2-only cmp-based
        # sort; the resulting order is identical and it also runs on Py3.)
        dns = [VirtualDataName(data_name=dn) for dn in new]
        dns.sort(key=lambda dn: dn.description)
        self.virtual_data_names = dns

    def _data_names_items_changed(self, event):
        """Handles in-place edits of the 'data_names' list."""
        # Update the old and new context links:
        old, new = event.old, event.new
        self._update_contexts(old, new)

        # Splice the corresponding virtual names in place:
        i = event.index
        self.virtual_data_names[i:i + len(old)] = [
            VirtualDataName(data_name=dn) for dn in new
        ]

    #-- Private Methods --------------------------------------------------------

    def _update_contexts(self, old, new):
        """ Updates the data context for an old and new set of data names.
        """
        for data_name in old:
            data_name.context = None

        context = self.context
        for data_name in new:
            data_name.context = context
class calibration_gui(HasTraits): camera = List status_text = Str("") ori_img_name = [] ori_img = [] pass_init = Bool(False) pass_init_disabled = Bool(False) # ------------------------------------------------------------- button_edit_cal_parameters = Button() button_showimg = Button() button_detection = Button() button_manual = Button() button_file_orient = Button() button_init_guess = Button() button_sort_grid = Button() button_sort_grid_init = Button() button_orient = Button() button_orient_part = Button() button_orient_shaking = Button() button_orient_dumbbell = Button() button_restore_orient = Button() button_checkpoint = Button() button_ap_figures = Button() button_edit_ori_files = Button() button_test = Button() # Defines GUI view -------------------------- view = View(HGroup(VGroup( VGroup( Item(name='button_showimg', label='Load/Show Images', show_label=False), Item(name='button_detection', label='Detection', show_label=False, enabled_when='pass_init'), Item(name='button_manual', label='Manual orient.', show_label=False, enabled_when='pass_init'), Item(name='button_file_orient', label='Orient. 
with file', show_label=False, enabled_when='pass_init'), Item(name='button_init_guess', label='Show initial guess', show_label=False, enabled_when='pass_init'), Item(name='button_sort_grid', label='Sortgrid', show_label=False, enabled_when='pass_init'), Item(name='button_sort_grid_init', label='Sortgrid = initial guess', show_label=False, enabled_when='pass_init'), Item(name='button_orient', label='Orientation', show_label=False, enabled_when='pass_init'), Item(name='button_orient_part', label='Orientation with particles', show_label=False, enabled_when='pass_init'), Item(name='button_orient_dumbbell', label='Orientation from dumbbell', show_label=False, enabled_when='pass_init'), Item(name='button_restore_orient', label='Restore ori files', show_label=False, enabled_when='pass_init'), Item(name='button_checkpoint', label='Checkpoints', show_label=False, enabled_when='pass_init_disabled'), Item(name='button_ap_figures', label='Ap figures', show_label=False, enabled_when='pass_init_disabled'), show_left=False, ), VGroup( Item(name='button_edit_cal_parameters', label='Edit calibration parameters', show_label=False), Item( name='button_edit_ori_files', label='Edit ori files', show_label=False, ), show_left=False, ), ), Item('camera', style='custom', editor=ListEditor( use_notebook=True, deletable=False, dock_style='tab', page_name='.name', ), show_label=False), orientation='horizontal'), title='Calibration', id='view1', width=1., height=1., resizable=True, statusbar='status_text') #-------------------------------------------------- def _button_edit_cal_parameters_fired(self): cp = exp.Calib_Params(par_path=self.par_path) cp.edit_traits(kind='modal') par.copy_params_dir(self.par_path, par.temp_path) def _button_showimg_fired(self): if os.path.isfile( os.path.join(self.exp1.active_params.par_path, 'man_ori.dat')): shutil.copyfile( os.path.join(self.exp1.active_params.par_path, 'man_ori.dat'), os.path.join(os.getcwd(), 'man_ori.dat')) print("Load Image fired") 
self.load_init_v1( ) # < - this should be united with the Calib_Params in experiment_01a.py print(len(self.ori_img)) self.ptv.py_calibration(1) self.pass_init = True self.status_text = "Initialization finished." def _button_detection_fired(self): if self.need_reset: self.reset_show_images() self.need_reset = 0 print("Detection procedure") self.ptv.py_calibration(2) x = [] y = [] self.ptv.py_get_pix(x, y) self.drawcross("x", "y", x, y, "blue", 4) for i in range(len(self.camera)): self.camera[i]._right_click_avail = 1 def _button_manual_fired(self): points_set = True for i in range(len(self.camera)): if len(self.camera[i]._x) < 4: print "inside manual click" print self.camera[i]._x points_set = False if points_set: man_ori_path = os.path.join(os.getcwd(), 'man_ori.dat') f = open(man_ori_path, 'w') if f is None: self.status_text = "Error saving man_ori.dat." else: for i in range(len(self.camera)): for j in range(4): f.write("%f %f\n" % (self.camera[i]._x[j], self.camera[i]._y[j])) self.status_text = "man_ori.dat saved." f.close() else: self.status_text = "Set 4 points on each calibration image for manual orientation" def _button_file_orient_fired(self): if self.need_reset: self.reset_show_images() self.need_reset = 0 man_ori_path = os.path.join(os.getcwd(), 'man_ori.dat') try: f = open(man_ori_path, 'r') except: self.status_text = "Error loading man_ori.dat." else: for i in range(len(self.camera)): self.camera[i]._x = [] self.camera[i]._y = [] for j in range(4): line = f.readline().split() self.camera[i]._x.append(float(line[0])) self.camera[i]._y.append(float(line[1])) self.status_text = "man_ori.dat loaded." f.close() shutil.copyfile( man_ori_path, os.path.join(self.exp1.active_params.par_path, 'man_ori.dat')) # TODO: rewrite using Parameters subclass man_ori_par_path = os.path.join(os.getcwd(), 'parameters', 'man_ori.par') f = open(man_ori_par_path, 'r') if f is None: self.status_text = "Error loading man_ori.par." 
else: for i in range(len(self.camera)): for j in range(4): self.camera[i].man_ori[j] = int(f.readline().split()[0]) self.status_text = "man_ori.par loded." self.camera[i].left_clicked_event() f.close() self.ptv.py_calibration(4) self.status_text = "Loading orientation data from file finished." def _button_init_guess_fired(self): if self.need_reset: self.reset_show_images() self.need_reset = 0 self.ptv.py_calibration(9) x = [] y = [] self.ptv.py_get_from_calib(x, y) self.drawcross("init_x", "init_y", x, y, "yellow", 3) self.status_text = "Initial guess finished." def _button_sort_grid_fired(self): if self.need_reset: self.reset_show_images() self.need_reset = 0 self.ptv.py_calibration(5) x = [] y = [] x1_cyan = [] y1_cyan = [] pnr = [] self.ptv.py_get_from_sortgrid(x, y, pnr) # filter out -999 which is returned for the missing points: for i in range(len(self.camera)): while -999 in x[i]: id = x[i].index(-999) del x[i][id] del y[i][id] del pnr[i][id] self.drawcross("sort_x", "sort_y", x, y, "white", 4) self.ptv.py_get_from_calib(x1_cyan, y1_cyan) self.drawcross("init_x", "init_y", x1_cyan, y1_cyan, "cyan", 4) for i in range(len(self.camera)): self.camera[i]._plot.overlays = [] self.camera[i].plot_num_overlay(x[i], y[i], pnr[i]) self.status_text = "Sort grid finished." def _button_sort_grid_init_fired(self): if self.need_reset: self.reset_show_images() self.need_reset = 0 self.ptv.py_calibration(14) x = [] y = [] x1_cyan = [] y1_cyan = [] pnr = [] self.ptv.py_get_from_sortgrid(x, y, pnr) self.drawcross("sort_x_init", "sort_y_init", x, y, "white", 4) self.ptv.py_get_from_calib(x1_cyan, y1_cyan) self.drawcross("init_x", "init_y", x1_cyan, y1_cyan, "cyan", 4) for i in range(len(self.camera)): self.camera[i]._plot.overlays = [] self.camera[i].plot_num_overlay(x[i], y[i], pnr[i]) self.status_text = "Sort grid initial guess finished." 
def _button_orient_fired(self): # backup the ORI/ADDPAR files first self.backup_ori_files() self.ptv.py_calibration(6) self.protect_ori_files() self.need_reset = 1 x1 = [] y1 = [] x2 = [] y2 = [] self.ptv.py_get_from_orient(x1, y1, x2, y2) self.reset_plots() for i in range(len(self.camera)): self.camera[i]._plot_data.set_data( 'imagedata', self.ori_img[i].astype(np.float)) self.camera[i]._img_plot = self.camera[i]._plot.img_plot( 'imagedata', colormap=gray)[0] self.camera[i].drawquiver(x1[i], y1[i], x2[i], y2[i], "red", scale=10.0) self.camera[i]._plot.index_mapper.range.set_bounds(0, self.h_pixel) self.camera[i]._plot.value_mapper.range.set_bounds(0, self.v_pixel) self.drawcross("orient_x", "orient_y", x1, y1, "orange", 4) self.status_text = "Orientation finished." def _button_orient_part_fired(self): self.backup_ori_files() self.ptv.py_calibration(10) x1, y1, x2, y2 = [], [], [], [] self.ptv.py_get_from_orient(x1, y1, x2, y2) self.reset_plots() for i in range(len(self.camera)): self.camera[i]._plot_data.set_data( 'imagedata', self.ori_img[i].astype(np.float)) self.camera[i]._img_plot = self.camera[i]._plot.img_plot( 'imagedata', colormap=gray)[0] self.camera[i].drawquiver(x1[i], y1[i], x2[i], y2[i], "red") self.camera[i]._plot.index_mapper.range.set_bounds(0, self.h_pixel) self.camera[i]._plot.value_mapper.range.set_bounds(0, self.v_pixel) self.drawcross("orient_x", "orient_y", x1, y1, "orange", 4) self.status_text = "Orientation with particles finished." 
def _button_orient_dumbbell_fired(self): print "Starting orientation from dumbbell" self.backup_ori_files() self.ptv.py_ptv_set_dumbbell(1) n_camera = len(self.camera) print("Starting sequence action") seq_first = self.exp1.active_params.m_params.Seq_First seq_last = self.exp1.active_params.m_params.Seq_Last print seq_first, seq_last base_name = [] for i in range(n_camera): exec( "base_name.append(self.exp1.active_params.m_params.Basename_%d_Seq)" % (i + 1)) print base_name[i] self.ptv.py_sequence_init(1) stepshake = self.ptv.py_get_from_sequence_init() if not stepshake: stepshake = 1 temp_img = np.array([], dtype=np.ubyte) for i in range(seq_first, seq_last + 1, stepshake): seq_ch = "%04d" % i print seq_ch for j in range(n_camera): print("j %d" % j) img_name = base_name[j] + seq_ch print("Setting image: ", img_name) try: temp_img = imread(img_name).astype(np.ubyte) except: print "Error reading file" self.ptv.py_set_img(temp_img, j) self.ptv.py_sequence_loop(1, i) print "Orientation from dumbbell - sequence finished" self.ptv.py_calibration(12) self.ptv.py_ptv_set_dumbbell(1) print "Orientation from dumbbell finished" def _button_restore_orient_fired(self): self.restore_ori_files() def load_init_v1(self): calOriParams = par.CalOriParams(len(self.camera), path=self.par_path) calOriParams.read() (fixp_name, img_cal_name, img_ori, tiff_flag, pair_flag, chfield) = \ (calOriParams.fixp_name, calOriParams.img_cal_name, calOriParams.img_ori, calOriParams.tiff_flag, calOriParams.pair_flag, calOriParams.chfield) self.ori_img_name = img_cal_name ptvParams = par.PtvParams(path=self.par_path) ptvParams.read() (n_img, img_name, img_cal, hp_flag, allCam_flag, tiff_flag, imx, imy, pix_x, pix_y, chfield, mmp_n1, mmp_n2, mmp_n3, mmp_d) = \ (ptvParams.n_img, ptvParams.img_name, ptvParams.img_cal, ptvParams.hp_flag, ptvParams.allCam_flag, ptvParams.tiff_flag, ptvParams.imx, ptvParams.imy, ptvParams.pix_x, ptvParams.pix_y, ptvParams.chfield, ptvParams.mmp_n1, ptvParams.mmp_n2, 
ptvParams.mmp_n3, ptvParams.mmp_d) self.h_pixel = imx self.v_pixel = imy self.ori_img = [] print("len(self.camera)") print(len(self.camera)) for i in range(len(self.camera)): print("reading " + self.ori_img_name[i]) try: img1 = imread(self.ori_img_name[i], flatten=1).astype(np.ubyte) print img1.shape except: print("Error reading image " + self.ori_img_name[i]) break self.ori_img.append(img1) self.ptv.py_set_img(self.ori_img[i], i) self.reset_show_images() # Loading manual parameters here # TODO: rewrite using Parameters subclass man_ori_path = os.path.join(os.getcwd(), 'parameters', 'man_ori.par') f = open(man_ori_path, 'r') if f is None: print('\n Error loading man_ori.par') else: for i in range(len(self.camera)): for j in range(4): self.camera[i].man_ori[j] = int(f.readline().strip()) f.close() def reset_plots(self): for i in range(len(self.camera)): self.camera[i]._plot.delplot( *self.camera[i]._plot.plots.keys()[0:]) self.camera[i]._plot.overlays = [] for j in range(len(self.camera[i]._quiverplots)): self.camera[i]._plot.remove(self.camera[i]._quiverplots[j]) self.camera[i]._quiverplots = [] def reset_show_images(self): for i in range(len(self.camera)): self.camera[i]._plot.delplot( *self.camera[i]._plot.plots.keys()[0:]) self.camera[i]._plot.overlays = [] # self.camera[i]._plot_data.set_data('imagedata',self.ori_img[i].astype(np.byte)) self.camera[i]._plot_data.set_data( 'imagedata', self.ori_img[i].astype(np.ubyte)) self.camera[i]._img_plot = self.camera[i]._plot.img_plot( 'imagedata', colormap=gray)[0] self.camera[i]._x = [] self.camera[i]._y = [] self.camera[i]._img_plot.tools = [] self.camera[i].attach_tools() self.camera[i]._plot.request_redraw() for j in range(len(self.camera[i]._quiverplots)): self.camera[i]._plot.remove(self.camera[i]._quiverplots[j]) self.camera[i]._quiverplots = [] def _button_edit_ori_files_fired(self): editor = codeEditor(path=self.par_path) editor.edit_traits(kind='livemodal') def drawcross(self, str_x, str_y, x, y, color1, 
size1): for i in range(len(self.camera)): self.camera[i].drawcross(str_x, str_y, x[i], y[i], color1, size1) def backup_ori_files(self): # backup ORI/ADDPAR files to the backup_cal directory calOriParams = par.CalOriParams(len(self.camera), path=self.par_path) calOriParams.read() for f in calOriParams.img_ori: shutil.copyfile(f, f + '.bck') g = f.replace('ori', 'addpar') shutil.copyfile(g, g + '.bck') def restore_ori_files(self): # backup ORI/ADDPAR files to the backup_cal directory calOriParams = par.CalOriParams(len(self.camera), path=self.par_path) calOriParams.read() for f in calOriParams.img_ori: print "restored %s " % f shutil.copyfile(f + '.bck', f) g = f.replace('ori', 'addpar') shutil.copyfile(g + '.bck', g) def protect_ori_files(self): # backup ORI/ADDPAR files to the backup_cal directory calOriParams = par.CalOriParams(len(self.camera), path=self.par_path) calOriParams.read() for f in calOriParams.img_ori: d = file(f, 'r').read().split() if not np.all(np.isfinite(np.asarray(d).astype('f'))): print "protected ORI file %s " % f shutil.copyfile(f + '.bck', f) def load_init(self): calOriParams = par.CalOriParams(len(self.camera), path=self.par_path) calOriParams.read() (fixp_name, img_cal_name, img_ori, tiff_flag, pair_flag, chfield) = \ (calOriParams.fixp_name, calOriParams.img_cal_name, calOriParams.img_ori, calOriParams.tiff_flag, calOriParams.pair_flag, calOriParams.chfield) self.ori_img_name = img_cal_name for i in range(len(self.camera)): print("reading " + self.ori_img_name[i]) try: img1 = imread(self.ori_img_name[i]).astype(np.ubyte) except: print("Error reading image " + self.ori_img_name[i]) break self.ori_img.append(img1) if self.camera[i]._plot is not None: self.camera[i]._plot.delplot( *self.camera[i]._plot.plots.keys()[0:]) self.camera[i]._plot_data.set_data('imagedata', self.ori_img[i].astype(np.byte)) self.camera[i]._img_plot = self.camera[i]._plot.img_plot( 'imagedata', colormap=gray)[0] self.camera[i]._x = [] self.camera[i]._y = [] 
self.camera[i]._plot.overlays = [] self.camera[i]._img_plot.tools = [] self.camera[i].attach_tools() self.camera[i]._plot.request_redraw() self.ptv.py_set_img(self.ori_img[i], i) f.close() # Loading manual parameters here # TODO: rewrite using Parameters subclass man_ori_path = os.path.join(os.getcwd(), 'parameters', 'man_ori.par') f = open(man_ori_path, 'r') if f == None: printf('\nError loading man_ori.par') else: for i in range(len(self.camera)): for j in range(4): self.camera[i].man_ori[j] = int(f.readline().strip()) # # def drawcross(self,str_x,str_y,x,y,color1,size1): # for i in range(len(self.camera)): # self.camera[i].drawcross(str_x,str_y,x[i],y[i],color1,size1) def update_plots(self, images, is_float=0): for i in range(len(images)): self.camera[i].update_image(images[i], is_float) #--------------------------------------------------- # Constructor #--------------------------------------------------- def __init__(self, par_path): # this is needed according to chaco documentation super(calibration_gui, self).__init__() self.need_reset = 0 self.par_path = par_path self.ptv = ptv
class SolutionView(HasTraits): python_console_cmds = Dict() # we need to doubleup on Lists to store the psuedo absolutes separately # without rewriting everything """ logging_v : toggle logging for velocity files directory_name_v : location and name of velocity files logging_p : toggle logging for position files directory_name_p : location and name of velocity files """ plot_history_max = Int(1000) last_plot_update_time = Float() last_stale_update_time = Float() logging_v = Bool(False) display_units = Enum(["degrees", "meters"]) directory_name_v = File logging_p = Bool(False) directory_name_p = File lats_psuedo_abs = List() lngs_psuedo_abs = List() alts_psuedo_abs = List() table = List() dops_table = List() pos_table = List() vel_table = List() rtk_pos_note = Str( "It is necessary to enter the \"Surveyed Position\" settings for the base station in order to view the RTK Positions in this tab." ) plot = Instance(Plot) plot_data = Instance(ArrayPlotData) # Store plots we care about for legend running = Bool(True) zoomall = Bool(False) position_centered = Bool(False) clear_button = SVGButton( label='', tooltip='Clear', filename=resource_filename('console/images/iconic/x.svg'), width=16, height=16) zoomall_button = SVGButton( label='', tooltip='Zoom All', toggle=True, filename=resource_filename('console/images/iconic/fullscreen.svg'), width=16, height=16) center_button = SVGButton( label='', tooltip='Center on Solution', toggle=True, filename=resource_filename('console/images/iconic/target.svg'), width=16, height=16) paused_button = SVGButton( label='', tooltip='Pause', toggle_tooltip='Run', toggle=True, filename=resource_filename('console/images/iconic/pause.svg'), toggle_filename=resource_filename('console/images/iconic/play.svg'), width=16, height=16) traits_view = View( HSplit( VGroup( Item('table', style='readonly', editor=TabularEditor(adapter=SimpleAdapter()), show_label=False, width=0.3), Item('rtk_pos_note', show_label=False, resizable=True, 
editor=MultilineTextEditor(TextEditor(multi_line=True)), style='readonly', width=0.3, height=-40), ), VGroup( HGroup( Item('paused_button', show_label=False), Item('clear_button', show_label=False), Item('zoomall_button', show_label=False), Item('center_button', show_label=False), Item('display_units', label="Display Units"), ), Item('plot', show_label=False, editor=ComponentEditor(bgcolor=(0.8, 0.8, 0.8))), ))) def _zoomall_button_fired(self): self.zoomall = not self.zoomall def _center_button_fired(self): self.position_centered = not self.position_centered def _paused_button_fired(self): self.running = not self.running def _reset_remove_current(self): self.plot_data.update_data(self._get_update_current()) def _get_update_current(self, current_dict={}): out_dict = { 'cur_lat_spp': [], 'cur_lng_spp': [], 'cur_lat_dgnss': [], 'cur_lng_dgnss': [], 'cur_lat_float': [], 'cur_lng_float': [], 'cur_lat_fixed': [], 'cur_lng_fixed': [], 'cur_lat_sbas': [], 'cur_lng_sbas': [], 'cur_lat_dr': [], 'cur_lng_dr': [] } out_dict.update(current_dict) return out_dict def _synchronize_plot_data_by_mode(self, mode_string, update_current=False): # do all required plot_data updates for a single # new solution with mode defined by mode_string pending_update = { 'lat_' + mode_string: [x for x in self.slns['lat_' + mode_string] if not np.isnan(x)], 'lng_' + mode_string: [y for y in self.slns['lng_' + mode_string] if not np.isnan(y)] } if update_current: current = {} if len(pending_update['lat_' + mode_string]) != 0: current = { 'cur_lat_' + mode_string: [pending_update['lat_' + mode_string][-1]], 'cur_lng_' + mode_string: [pending_update['lng_' + mode_string][-1]] } else: current = { 'cur_lat_' + mode_string: [], 'cur_lng_' + mode_string: [] } pending_update.update(self._get_update_current(current)) self.plot_data.update_data(pending_update) def _append_empty_sln_data(self, exclude_mode=None): for each_mode in mode_string_dict.values(): if exclude_mode is None or each_mode != exclude_mode: 
self.slns['lat_' + each_mode].append(np.nan) self.slns['lng_' + each_mode].append(np.nan) def _update_sln_data_by_mode(self, soln, mode_string): # do backend deque updates for a new solution of type # mode string self.scaling_lock.acquire() lat = (soln.lat - self.offset[0]) * self.sf[0] lng = (soln.lon - self.offset[1]) * self.sf[1] self.scaling_lock.release() self.slns['lat_' + mode_string].append(lat) self.slns['lng_' + mode_string].append(lng) # Rotate old data out by appending to deque self._append_empty_sln_data(exclude_mode=mode_string) def _clr_sln_data(self): for each in self.slns: self.slns[each].clear() def _clear_history(self): for each in self.slns: self.slns[each].clear() pending_update = { 'lat_spp': [], 'lng_spp': [], 'alt_spp': [], 'lat_dgnss': [], 'lng_dgnss': [], 'alt_dgnss': [], 'lat_float': [], 'lng_float': [], 'alt_float': [], 'lat_fixed': [], 'lng_fixed': [], 'alt_fixed': [], 'lat_sbas': [], 'lng_sbas': [], 'alt_sbas': [], 'lat_dr': [], 'lng_dr': [], 'alt_dr': [] } pending_update.update(self._get_update_current()) self.plot_data.update(pending_update) def _clear_button_fired(self): self._clear_history() def age_corrections_callback(self, sbp_msg, **metadata): age_msg = MsgAgeCorrections(sbp_msg) if age_msg.age != 0xFFFF: self.age_corrections = age_msg.age / 10.0 else: self.age_corrections = None def update_table(self): self.table = self.pos_table + self.vel_table + self.dops_table def auto_survey(self): if len(self.lats) != 0: self.latitude = sum(self.lats) / len(self.lats) self.altitude = sum(self.alts) / len(self.alts) self.longitude = sum(self.lngs) / len(self.lngs) def pos_llh_callback(self, sbp_msg, **metadata): if sbp_msg.msg_type == SBP_MSG_POS_LLH_DEP_A: soln = MsgPosLLHDepA(sbp_msg) else: soln = MsgPosLLH(sbp_msg) self.last_pos_mode = get_mode(soln) if self.last_pos_mode != 0: self.last_soln = soln mode_string = mode_string_dict[self.last_pos_mode] if mode_string not in self.pending_draw_modes: # this list allows us to tell GUI thread 
            # which solutions to update
            # (if we decide not to update at full data rate)
            # we use short strings to identify each solution mode
            self.pending_draw_modes.append(mode_string)
            self.list_lock.acquire()
            self._update_sln_data_by_mode(soln, mode_string)
            self.list_lock.release()
        else:
            # no usable fix this epoch: pad the deques so all modes stay aligned
            self.list_lock.acquire()
            self._append_empty_sln_data()
            self.list_lock.release()
        # bit 3 of the position flags signals that the INS was used for this fix
        self.ins_used = ((soln.flags & 0x8) >> 3) == 1
        pos_table = []
        # accuracies arrive in mm; convert to meters
        soln.h_accuracy *= 1e-3
        soln.v_accuracy *= 1e-3

        # TOW arrives in ms; convert to seconds and add the ns residual if known
        tow = soln.tow * 1e-3
        if self.nsec is not None:
            tow += self.nsec * 1e-9

        # Return the best estimate of my local and receiver time in convenient
        # format that allows changing precision of the seconds
        ((tloc, secloc), (tgps, secgps)) = log_time_strings(self.week, tow)
        if self.utc_time:
            ((tutc, secutc)) = datetime_2_str(self.utc_time)

        # choose the position-log path (cwd if no directory was configured)
        if (self.directory_name_p == ''):
            filepath_p = time.strftime("position_log_%Y%m%d-%H%M%S.csv")
        else:
            filepath_p = os.path.join(
                self.directory_name_p,
                time.strftime("position_log_%Y%m%d-%H%M%S.csv"))

        # closing the log when logging is switched off lets a new file (with a
        # fresh timestamped name) be opened on the next enable
        if not self.logging_p:
            self.log_file = None

        if self.logging_p:
            if self.log_file is None:
                # first write after enabling: open the file and emit the CSV header
                self.log_file = sopen(filepath_p, 'w')
                self.log_file.write(
                    "pc_time,gps_time,tow(sec),latitude(degrees),longitude(degrees),altitude(meters),"
                    "h_accuracy(meters),v_accuracy(meters),n_sats,flags\n")
            log_str_gps = ""
            if tgps != "" and secgps != 0:
                log_str_gps = "{0}:{1:06.6f}".format(tgps, float(secgps))

            self.log_file.write(
                '%s,%s,%.3f,%.10f,%.10f,%.4f,%.4f,%.4f,%d,%d\n' %
                ("{0}:{1:06.6f}".format(tloc, float(secloc)), log_str_gps,
                 tow, soln.lat, soln.lon, soln.height, soln.h_accuracy,
                 soln.v_accuracy, soln.n_sats, soln.flags))
            self.log_file.flush()

        if self.last_pos_mode == 0:
            # no fix: show placeholder rows in the table
            pos_table.append(('GPS Week', EMPTY_STR))
            pos_table.append(('GPS TOW', EMPTY_STR))
            pos_table.append(('GPS Time', EMPTY_STR))
            pos_table.append(('Num. Signals', EMPTY_STR))
            pos_table.append(('Lat', EMPTY_STR))
            pos_table.append(('Lng', EMPTY_STR))
            pos_table.append(('Height', EMPTY_STR))
            pos_table.append(('Horiz Acc', EMPTY_STR))
            pos_table.append(('Vert Acc', EMPTY_STR))
        else:
            self.last_stime_update = monotonic()

            if self.week is not None:
                pos_table.append(('GPS Week', str(self.week)))
            pos_table.append(('GPS TOW', "{:.3f}".format(tow)))

            if self.week is not None:
                pos_table.append(
                    ('GPS Time', "{0}:{1:06.3f}".format(tgps, float(secgps))))
            if self.utc_time is not None:
                pos_table.append(
                    ('UTC Time', "{0}:{1:06.3f}".format(tutc, float(secutc))))
                pos_table.append(('UTC Src', self.utc_source))
            if self.utc_time is None:
                pos_table.append(('UTC Time', EMPTY_STR))
                pos_table.append(('UTC Src', EMPTY_STR))

            pos_table.append(('Sats Used', soln.n_sats))
            pos_table.append(('Lat', "{:.12g}".format(soln.lat)))
            pos_table.append(('Lng', "{:.12g}".format(soln.lon)))
            pos_table.append(('Height', "{0:.3f}".format(soln.height)))
            pos_table.append(('Horiz Acc', "{:.12g}".format(soln.h_accuracy)))
            pos_table.append(('Vert Acc', "{:.12g}".format(soln.v_accuracy)))

        pos_table.append(('Pos Flags', '0x%03x' % soln.flags))
        pos_table.append(('INS Used', '{}'.format(self.ins_used)))
        pos_table.append(('Pos Fix Mode', mode_dict[self.last_pos_mode]))
        if self.age_corrections is not None:
            pos_table.append(('Corr. Age [s]', self.age_corrections))

        # only store valid solutions for auto survey and degrees to meter transformation
        if self.last_pos_mode != 0:
            self.lats.append(soln.lat)
            self.lngs.append(soln.lon)
            self.alts.append(soln.height)
            self.tows.append(soln.tow)
            self.modes.append(self.last_pos_mode)
        self.auto_survey()

        # set-up table variables
        self.pos_table = pos_table
        self.update_table()
        # setup_plot variables
        # Updating array plot data is not thread safe, so we have to fire an event
        # and have the GUI thread do it
        if monotonic() - self.last_plot_update_time > GUI_UPDATE_PERIOD:
            self.update_scheduler.schedule_update('_solution_draw',
                                                  self._solution_draw)

    def _display_units_changed(self):
        """Traits change handler: convert stored plot data between degrees
        and meters when the user flips the display-units setting.

        Saves the previous extents/offsets/scale-factors so the deferred
        redraw (rescale_for_units_change) can map the current zoom window
        into the new unit system.
        """
        # we store current extents of plot and current scalefactlrs
        self.scaling_lock.acquire()
        self.recenter = True  # recenter flag tells _solution_draw to update view extents
        self.prev_extents = (self.plot.index_range.low_setting,
                             self.plot.index_range.high_setting,
                             self.plot.value_range.low_setting,
                             self.plot.value_range.high_setting)
        self.prev_offsets = (self.offset[0], self.offset[1])
        self.prev_sfs = (self.sf[0], self.sf[1])
        if self.display_units == "meters":
            # offset = mean position over all epochs with a valid (non-zero) mode;
            # meters are plotted relative to this point
            self.offset = (
                np.mean(
                    np.array(self.lats)[~(np.equal(np.array(self.modes), 0))]),
                np.mean(
                    np.array(self.lngs)[~(np.equal(np.array(self.modes), 0))]),
                np.mean(
                    np.array(self.alts)[~(np.equal(np.array(self.modes), 0))]))
            (self.meters_per_lat, self.meters_per_lon) = meters_per_deg(
                np.mean(
                    np.array(self.lats)[~(np.equal(np.array(self.modes), 0))]))
            self.sf = (self.meters_per_lat, self.meters_per_lon)
            self.plot.value_axis.title = 'Latitude (meters)'
            self.plot.index_axis.title = 'Longitude (meters)'
        else:
            # degrees: identity transform
            self.offset = (0, 0, 0)
            self.sf = (1, 1)
            self.plot.value_axis.title = 'Latitude (degrees)'
            self.plot.index_axis.title = 'Longitude (degrees)'
        self.scaling_lock.release()
        self.list_lock.acquire()
        # now we update the existing sln deques to go from meters back to degrees or vice versa
        for each_array in self.slns:
            # index 0 = lat scaling, 1 = lng scaling (keys contain 'lat'/'lng')
            index = 0 if 'lat' in str(each_array) else 1
            # going from degrees to meters; do scaling with new offset and sf
            if self.display_units == "meters":
                self.slns[each_array] = deque(
                    (np.array(self.slns[each_array]) - self.offset[index]) *
                    self.sf[index],
                    maxlen=PLOT_HISTORY_MAX)
            # going from degrees to meters; do inverse scaling with former offset and sf
            if self.display_units == "degrees":
                self.slns[each_array] = deque(
                    np.array(self.slns[each_array]) / self.prev_sfs[index] +
                    self.prev_offsets[index],
                    maxlen=PLOT_HISTORY_MAX)
        # force every mode to be redrawn with the new units
        self.pending_draw_modes = list(mode_string_dict.values())
        self.list_lock.release()

    def rescale_for_units_change(self):
        """Map the previously saved zoom window into the new unit system
        (called from _solution_draw after a display-units change)."""
        # Chaco scales view automatically when 'auto' is stored
        if self.prev_extents[0] != 'auto':
            # Otherwise use has used mousewheel zoom and we need to transform
            if self.display_units == 'meters':
                new_scaling = (
                    (self.prev_extents[0] - self.offset[1]) * self.sf[1],
                    (self.prev_extents[1] - self.offset[1]) * self.sf[1],
                    (self.prev_extents[2] - self.offset[0]) * self.sf[0],
                    (self.prev_extents[3] - self.offset[0]) * self.sf[0])
            else:
                new_scaling = (
                    self.prev_extents[0] / self.prev_sfs[1] +
                    self.prev_offsets[1],
                    self.prev_extents[1] / self.prev_sfs[1] +
                    self.prev_offsets[1],
                    self.prev_extents[2] / self.prev_sfs[0] +
                    self.prev_offsets[0],
                    self.prev_extents[3] / self.prev_sfs[0] +
                    self.prev_offsets[0])

            # set plot scaling accordingly
            self.plot.index_range.low_setting = new_scaling[0]
            self.plot.index_range.high_setting = new_scaling[1]
            self.plot.value_range.low_setting = new_scaling[2]
            self.plot.value_range.high_setting = new_scaling[3]

    def _solution_draw(self):
        """GUI-thread redraw: push pending solution data into the plot and
        apply centering/zoom behavior. Scheduled by the SBP callbacks via
        update_scheduler (plot data updates are not thread safe)."""
        self.list_lock.acquire()
        current_time = monotonic()
        self.last_plot_update_time = current_time
        pending_draw_modes = self.pending_draw_modes
        current_mode = pending_draw_modes[-1] if len(
            pending_draw_modes) > 0 else None
        # Periodically, we make sure to redraw older data to expire old plot data
        if current_time - self.last_stale_update_time > STALE_DATA_PERIOD:
            # we don't update old solution modes every timestep to try and save CPU
            pending_draw_modes = list(mode_string_dict.values())
            self.last_stale_update_time = current_time
        for mode_string in pending_draw_modes:
            if self.running:
                update_current = mode_string == current_mode if current_mode else True
                self._synchronize_plot_data_by_mode(
                    mode_string, update_current=update_current)
                if mode_string in self.pending_draw_modes:
                    self.pending_draw_modes.remove(mode_string)
        self.list_lock.release()
        if not self.zoomall and self.position_centered and self.running:
            # keep the current span but recenter the window on the last solution
            d = (self.plot.index_range.high - self.plot.index_range.low) / 2.
            self.plot.index_range.set_bounds(
                (self.last_soln.lon - self.offset[1]) * self.sf[1] - d,
                (self.last_soln.lon - self.offset[1]) * self.sf[1] + d)
            d = (self.plot.value_range.high - self.plot.value_range.low) / 2.
            self.plot.value_range.set_bounds(
                (self.last_soln.lat - self.offset[0]) * self.sf[0] - d,
                (self.last_soln.lat - self.offset[0]) * self.sf[0] + d)
        if self.zoomall:
            self.recenter = False
            plot_square_axes(self.plot,
                             ('lng_spp', 'lng_dgnss', 'lng_float',
                              'lng_fixed', 'lng_sbas', 'lng_dr'),
                             ('lat_spp', 'lat_dgnss', 'lat_float',
                              'lat_fixed', 'lat_sbas', 'lat_dr'))
        if self.recenter:
            try:
                self.rescale_for_units_change()
                self.recenter = False
            except AttributeError:
                # prev_extents may not exist before the first units change
                pass

    def dops_callback(self, sbp_msg, **metadata):
        """SBP callback for DOPS messages: refresh the DOP table.

        Raw DOP fields are scaled by 0.01; deprecated DEP_A messages carry
        no flags field, so flags is forced to 1 (treated as valid).
        """
        flags = 0
        if sbp_msg.msg_type == SBP_MSG_DOPS_DEP_A:
            dops = MsgDopsDepA(sbp_msg)
            flags = 1
        else:
            dops = MsgDops(sbp_msg)
            flags = dops.flags
        if flags != 0:
            self.dops_table = [('PDOP', '%.1f' % (dops.pdop * 0.01)),
                               ('GDOP', '%.1f' % (dops.gdop * 0.01)),
                               ('TDOP', '%.1f' % (dops.tdop * 0.01)),
                               ('HDOP', '%.1f' % (dops.hdop * 0.01)),
                               ('VDOP', '%.1f' % (dops.vdop * 0.01))]
        else:
            # invalid DOP solution: show placeholders
            self.dops_table = [('PDOP', EMPTY_STR),
                               ('GDOP', EMPTY_STR),
                               ('TDOP', EMPTY_STR),
                               ('HDOP', EMPTY_STR),
                               ('VDOP', EMPTY_STR)]
        self.dops_table.append(('DOPS Flags', '0x%03x' % flags))

    def vel_ned_callback(self, sbp_msg, **metadata):
        """SBP callback for NED velocity messages: update the velocity table
        and (optionally) append a row to the velocity CSV log.

        Velocities arrive in mm/s and are converted to m/s (1e-3).
        """
        flags = 0
        if sbp_msg.msg_type == SBP_MSG_VEL_NED_DEP_A:
            vel_ned = MsgVelNEDDepA(sbp_msg)
            flags = 1
        else:
            vel_ned = MsgVelNED(sbp_msg)
            flags = vel_ned.flags
        tow = vel_ned.tow * 1e-3
        if self.nsec is not None:
            tow += self.nsec * 1e-9

        ((tloc, secloc), (tgps, secgps)) = log_time_strings(self.week, tow)

        if self.directory_name_v == '':
            filepath_v = time.strftime("velocity_log_%Y%m%d-%H%M%S.csv")
        else:
            filepath_v = os.path.join(
                self.directory_name_v,
                time.strftime("velocity_log_%Y%m%d-%H%M%S.csv"))

        # closing the log when logging is switched off lets a fresh file be
        # opened on the next enable
        if not self.logging_v:
            self.vel_log_file = None

        if self.logging_v:
            if self.vel_log_file is None:
                self.vel_log_file = sopen(filepath_v, 'w')
                self.vel_log_file.write(
                    'pc_time,gps_time,tow(sec),north(m/s),east(m/s),down(m/s),speed(m/s),flags,num_signals\n'
                )
            log_str_gps = ''
            if tgps != "" and secgps != 0:
                log_str_gps = "{0}:{1:06.6f}".format(tgps, float(secgps))
            self.vel_log_file.write(
                '%s,%s,%.3f,%.6f,%.6f,%.6f,%.6f,%d,%d\n' %
                ("{0}:{1:06.6f}".format(tloc, float(secloc)), log_str_gps,
                 tow, vel_ned.n * 1e-3, vel_ned.e * 1e-3, vel_ned.d * 1e-3,
                 math.sqrt(vel_ned.n * vel_ned.n +
                           vel_ned.e * vel_ned.e) * 1e-3, flags,
                 vel_ned.n_sats))
            self.vel_log_file.flush()
        # low 3 bits of the flags encode the velocity mode; non-zero == valid
        if (flags & 0x7) != 0:
            self.vel_table = [
                ('Vel. N', '% 8.4f' % (vel_ned.n * 1e-3)),
                ('Vel. E', '% 8.4f' % (vel_ned.e * 1e-3)),
                ('Vel. D', '% 8.4f' % (vel_ned.d * 1e-3)),
            ]
        else:
            self.vel_table = [
                ('Vel. N', EMPTY_STR),
                ('Vel. E', EMPTY_STR),
                ('Vel. D', EMPTY_STR),
            ]
        self.vel_table.append(('Vel Flags', '0x%03x' % flags))
        self.update_table()

    def gps_time_callback(self, sbp_msg, **metadata):
        """SBP callback for GPS time messages: store week number and the
        nanosecond residual used to refine TOW elsewhere.

        NOTE(review): `flags` (and `time_msg`) are only bound inside the two
        branches; this callback is registered solely for the two GPS-time
        message types, so the fall-through is unreachable in practice —
        confirm before reusing for other types.
        """
        if sbp_msg.msg_type == SBP_MSG_GPS_TIME_DEP_A:
            time_msg = MsgGPSTimeDepA(sbp_msg)
            flags = 1
        elif sbp_msg.msg_type == SBP_MSG_GPS_TIME:
            time_msg = MsgGPSTime(sbp_msg)
            flags = time_msg.flags
        if flags != 0:
            self.week = time_msg.wn
            self.nsec = time_msg.ns_residual

    def utc_time_callback(self, sbp_msg, **metadata):
        """SBP callback for UTC time messages: store a datetime plus a
        human-readable description of the UTC offset source.

        Low 3 flag bits non-zero == valid time; bits 3-4 encode the source.
        """
        tmsg = MsgUtcTime(sbp_msg)
        microseconds = int(tmsg.ns / 1000.00)
        if tmsg.flags & 0x7 != 0:
            dt = datetime.datetime(tmsg.year, tmsg.month, tmsg.day,
                                   tmsg.hours, tmsg.minutes, tmsg.seconds,
                                   microseconds)
            self.utc_time = dt
            self.utc_time_flags = tmsg.flags
            if (tmsg.flags >> 3) & 0x3 == 0:
                self.utc_source = "Factory Default"
            elif (tmsg.flags >> 3) & 0x3 == 1:
                self.utc_source = "Non Volatile Memory"
            elif (tmsg.flags >> 3) & 0x3 == 2:
                self.utc_source = "Decoded this Session"
            else:
                self.utc_source = "Unknown"
        else:
            self.utc_time = None
            self.utc_source = None

    def __init__(self, link, dirname=''):
        """Build the solution view: per-mode history deques, the Chaco plot
        (one line+scatter pair per fix mode plus 'current position' markers),
        and the SBP callback registrations.

        :param link: SBP link to register message callbacks on.
        :param dirname: directory for position/velocity CSV logs ('' = cwd).
        """
        super(SolutionView, self).__init__()

        self.pending_draw_modes = []
        self.recenter = False
        self.offset = (0, 0, 0)
        self.sf = (1, 1)
        self.list_lock = threading.Lock()
        self.scaling_lock = threading.Lock()
        # one lat/lng/alt history deque per fix mode
        self.slns = {
            'lat_spp': deque(maxlen=PLOT_HISTORY_MAX),
            'lng_spp': deque(maxlen=PLOT_HISTORY_MAX),
            'alt_spp': deque(maxlen=PLOT_HISTORY_MAX),
            'lat_dgnss': deque(maxlen=PLOT_HISTORY_MAX),
            'lng_dgnss': deque(maxlen=PLOT_HISTORY_MAX),
            'alt_dgnss': deque(maxlen=PLOT_HISTORY_MAX),
            'lat_float': deque(maxlen=PLOT_HISTORY_MAX),
            'lng_float': deque(maxlen=PLOT_HISTORY_MAX),
            'alt_float': deque(maxlen=PLOT_HISTORY_MAX),
            'lat_fixed': deque(maxlen=PLOT_HISTORY_MAX),
            'lng_fixed': deque(maxlen=PLOT_HISTORY_MAX),
            'alt_fixed': deque(maxlen=PLOT_HISTORY_MAX),
            'lat_sbas': deque(maxlen=PLOT_HISTORY_MAX),
            'lng_sbas': deque(maxlen=PLOT_HISTORY_MAX),
            'alt_sbas': deque(maxlen=PLOT_HISTORY_MAX),
            'lat_dr': deque(maxlen=PLOT_HISTORY_MAX),
            'lng_dr': deque(maxlen=PLOT_HISTORY_MAX),
            'alt_dr': deque(maxlen=PLOT_HISTORY_MAX)
        }
        self.lats = deque(maxlen=PLOT_HISTORY_MAX)
        self.lngs = deque(maxlen=PLOT_HISTORY_MAX)
        self.alts = deque(maxlen=PLOT_HISTORY_MAX)
        self.tows = deque(maxlen=PLOT_HISTORY_MAX)
        self.modes = deque(maxlen=PLOT_HISTORY_MAX)
        self.log_file = None
        self.directory_name_v = dirname
        self.directory_name_p = dirname
        self.vel_log_file = None
        self.last_stime_update = 0
        self.last_soln = None

        self.altitude = 0
        self.longitude = 0
        self.latitude = 0
        self.last_pos_mode = 0
        self.ins_used = False
        self.last_plot_update_time = 0
        self.last_stale_update_time = 0

        # NOTE(review): unlike self.slns, this ArrayPlotData declares no
        # 'alt_sbas'/'alt_dr' series — presumably altitude is never plotted
        # for those modes; confirm before relying on it.
        self.plot_data = ArrayPlotData(
            lat_spp=[], lng_spp=[], alt_spp=[],
            cur_lat_spp=[], cur_lng_spp=[],
            lat_dgnss=[], lng_dgnss=[], alt_dgnss=[],
            cur_lat_dgnss=[], cur_lng_dgnss=[],
            lat_float=[], lng_float=[], alt_float=[],
            cur_lat_float=[], cur_lng_float=[],
            lat_fixed=[], lng_fixed=[], alt_fixed=[],
            cur_lat_fixed=[], cur_lng_fixed=[],
            lat_sbas=[], lng_sbas=[],
            cur_lat_sbas=[], cur_lng_sbas=[],
            lng_dr=[], lat_dr=[],
            cur_lat_dr=[], cur_lng_dr=[])
        self.plot = Plot(self.plot_data)

        # 1000 point buffer
        # one thin line + dot-scatter pair per fix mode (history trace)
        self.plot.plot(('lng_spp', 'lat_spp'),
                       type='line',
                       line_width=0.1,
                       name='',
                       color=color_dict[SPP_MODE])
        self.plot.plot(('lng_spp', 'lat_spp'),
                       type='scatter',
                       name='',
                       color=color_dict[SPP_MODE],
                       marker='dot',
                       line_width=0.0,
                       marker_size=1.0)
        self.plot.plot(('lng_dgnss', 'lat_dgnss'),
                       type='line',
                       line_width=0.1,
                       name='',
                       color=color_dict[DGNSS_MODE])
        self.plot.plot(('lng_dgnss', 'lat_dgnss'),
                       type='scatter',
                       name='',
                       color=color_dict[DGNSS_MODE],
                       marker='dot',
                       line_width=0.0,
                       marker_size=1.0)
        self.plot.plot(('lng_float', 'lat_float'),
                       type='line',
                       line_width=0.1,
                       name='',
                       color=color_dict[FLOAT_MODE])
        self.plot.plot(('lng_float', 'lat_float'),
                       type='scatter',
                       name='',
                       color=color_dict[FLOAT_MODE],
                       marker='dot',
                       line_width=0.0,
                       marker_size=1.0)
        self.plot.plot(('lng_fixed', 'lat_fixed'),
                       type='line',
                       line_width=0.1,
                       name='',
                       color=color_dict[FIXED_MODE])
        self.plot.plot(('lng_fixed', 'lat_fixed'),
                       type='scatter',
                       name='',
                       color=color_dict[FIXED_MODE],
                       marker='dot',
                       line_width=0.0,
                       marker_size=1.0)
        self.plot.plot(('lng_sbas', 'lat_sbas'),
                       type='line',
                       line_width=0.1,
                       name='',
                       color=color_dict[SBAS_MODE])
        self.plot.plot(('lng_sbas', 'lat_sbas'),
                       type='scatter',
                       name='',
                       color=color_dict[SBAS_MODE],
                       marker='dot',
                       line_width=0.0,
                       marker_size=1.0)
        self.plot.plot(('lng_dr', 'lat_dr'),
                       type='line',
                       line_width=0.1,
                       name='',
                       color=color_dict[DR_MODE])
        self.plot.plot(('lng_dr', 'lat_dr'),
                       type='scatter',
                       color=color_dict[DR_MODE],
                       marker='dot',
                       line_width=0.0,
                       marker_size=1.0)
        # current values
        # larger 'plus' markers for the most recent position of each mode;
        # these named plots are what the legend displays
        spp = self.plot.plot(('cur_lng_spp', 'cur_lat_spp'),
                             type='scatter',
                             name=mode_dict[SPP_MODE],
                             color=color_dict[SPP_MODE],
                             marker='plus',
                             line_width=1.5,
                             marker_size=5.0)
        dgnss = self.plot.plot(('cur_lng_dgnss', 'cur_lat_dgnss'),
                               type='scatter',
                               name=mode_dict[DGNSS_MODE],
                               color=color_dict[DGNSS_MODE],
                               marker='plus',
                               line_width=1.5,
                               marker_size=5.0)
        rtkfloat = self.plot.plot(('cur_lng_float', 'cur_lat_float'),
                                  type='scatter',
                                  name=mode_dict[FLOAT_MODE],
                                  color=color_dict[FLOAT_MODE],
                                  marker='plus',
                                  line_width=1.5,
                                  marker_size=5.0)
        rtkfix = self.plot.plot(('cur_lng_fixed', 'cur_lat_fixed'),
                                type='scatter',
                                name=mode_dict[FIXED_MODE],
                                color=color_dict[FIXED_MODE],
                                marker='plus',
                                line_width=1.5,
                                marker_size=5.0)
        sbas = self.plot.plot(('cur_lng_sbas', 'cur_lat_sbas'),
                              type='scatter',
                              name=mode_dict[SBAS_MODE],
                              color=color_dict[SBAS_MODE],
                              marker='plus',
                              line_width=1.5,
                              marker_size=5.0)
        dr = self.plot.plot(('cur_lng_dr', 'cur_lat_dr'),
                            type='scatter',
                            name=mode_dict[DR_MODE],
                            color=color_dict[DR_MODE],
                            marker='plus',
                            line_width=1.5,
                            marker_size=5.0)
        plot_labels = ['SPP', 'SBAS', 'DGPS', 'RTK float', 'RTK fixed', 'DR']
        plots_legend = dict(
            zip(plot_labels, [spp, sbas, dgnss, rtkfloat, rtkfix, dr]))
        self.plot.legend.plots = plots_legend
        self.plot.legend.labels = plot_labels  # sets order
        self.plot.legend.visible = True

        self.plot.index_axis.tick_label_position = 'inside'
        self.plot.index_axis.tick_label_color = 'gray'
        self.plot.index_axis.tick_color = 'gray'
        self.plot.index_axis.title = 'Longitude (degrees)'
        self.plot.index_axis.title_spacing = 5
        self.plot.value_axis.tick_label_position = 'inside'
        self.plot.value_axis.tick_label_color = 'gray'
        self.plot.value_axis.tick_color = 'gray'
        self.plot.value_axis.title = 'Latitude (degrees)'
        self.plot.value_axis.title_spacing = 5
        self.plot.padding = (25, 25, 25, 25)

        self.plot.tools.append(PanTool(self.plot))
        zt = ZoomTool(self.plot,
                      zoom_factor=1.1,
                      tool_mode="box",
                      always_on=False)
        self.plot.overlays.append(zt)

        self.link = link
        self.link.add_callback(self.pos_llh_callback,
                               [SBP_MSG_POS_LLH_DEP_A, SBP_MSG_POS_LLH])
        self.link.add_callback(self.vel_ned_callback,
                               [SBP_MSG_VEL_NED_DEP_A, SBP_MSG_VEL_NED])
        self.link.add_callback(self.dops_callback,
                               [SBP_MSG_DOPS_DEP_A, SBP_MSG_DOPS])
        self.link.add_callback(self.gps_time_callback,
                               [SBP_MSG_GPS_TIME_DEP_A, SBP_MSG_GPS_TIME])
        self.link.add_callback(self.utc_time_callback, [SBP_MSG_UTC_TIME])
        self.link.add_callback(self.age_corrections_callback,
                               SBP_MSG_AGE_CORRECTIONS)

        self.week = None
        self.utc_time = None
        self.age_corrections = None
        self.nsec = 0
        self.meters_per_lat = None
        self.meters_per_lon = None
        self.python_console_cmds = {'solution': self}
        self.update_scheduler = UpdateScheduler()
class ULSRxyz(LS):
    '''Ultimate limit state for the support reactions (Rx, Ry, Rz) and
    moments (Mx, My, Mz).

    Defect fixed: `_get_ls_values` returns 'eta_Rx', 'eta_Ry', 'eta_Rz' and
    'eta_R_tot', and `traits_view` displays 'Rx_Rd' / 'Ry_Rd' / 'Rz_Rd', but
    the corresponding definitions had been commented out — evaluating the
    property raised NameError and the view referenced undefined traits.
    The resistance traits and the eta computations are restored below with
    the values documented in the original comments.
    '''

    #--------------------------------------------------------
    # ULS: material parameters (Inputs)
    #--------------------------------------------------------

    # shear Resistance
    #
    Rx_Rd = Float(4.8, input=True)

    # pull-out Resistance
    #
    Rz_Rd = Float(4.7, input=True)

    # resistance in y-direction; Ry = 0. for all cases, but the trait must
    # exist because 'eta_Ry' and the view item below refer to it
    #
    Ry_Rd = Float(1., input=True)

    Mx_Rd = Float(1., input=True)
    My_Rd = Float(1., input=True)
    Mz_Rd = Float(1., input=True)

    # ------------------------------------------------------------
    # ULS: outputs
    # ------------------------------------------------------------

    ls_values = Property(depends_on='+input')

    @cached_property
    def _get_ls_values(self):
        '''get the outputs for ULS

        Returns a dict of resulting forces/moments and utilization ratios
        ('eta' values) for each direction plus the linear interaction total.
        '''
        #---------------------------------------------------------
        # conditions for case distinction
        # (-- tension / compression reactions --)
        #---------------------------------------------------------
        # @todo: use this in order to sort out the cases with compression
        # up to now for cases with compression only the positive part is taken into account
        # leading to eta=0 for this direction and a possible non-zero value in the other direction
        #
        # reaction force tangential to the shell
        #
        # cond_Rx_ge_0 = self.Rx >= 0.  # positive value corresponds to shear in the screw
        # cond_Rx_le_0 = self.Rx <= 0.  # negative value corresponds to compression
        #
        # reaction force radial to the shell
        #
        # cond_Rz_ge_0 = self.Rz >= 0.  # positive value corresponds to compression
        # cond_Rz_le_0 = self.Rz <= 0.  # negative value corresponds to pull-out force in the screw

        #---------------------------------------------------------
        # resulting reaction forces and 'eta'
        #---------------------------------------------------------

        # evaluate resulting forces and moments
        #
        Rres = sqrt(self.Rx * self.Rx + self.Ry * self.Ry + self.Rz * self.Rz)
        Mres = sqrt(self.Mx * self.Mx + self.My * self.My + self.Mz * self.Mz)

        # note: positive values of 'Rx' correspond to shear forces for the support screw
        # negative values are taken by the compression cushion at the support directly
        # Therefore take only the positive part of support force 'Rx' into account
        # for the evaluation of 'eta_Rx'
        Rx_pos = (abs(self.Rx) + self.Rx) / 2.

        # eta shear forces
        #
        eta_Rx = Rx_pos / self.Rx_Rd

        # note: negative values of 'Rz' correspond to pull-out forces for the support screw
        # positive values are taken by the compression cushion at the support directly
        # Therefore take only the negative values of the support force 'Rz' into account
        # for the evaluation of 'eta_Rz'
        Rz_neg = (abs(self.Rz) - self.Rz) / 2.

        # eta pull-out
        #
        eta_Rz = Rz_neg / self.Rz_Rd

        # eta shear forces in y (always 0. in practice since Ry = 0.)
        #
        eta_Ry = abs(self.Ry) / self.Ry_Rd

        # total eta for linear interaction:
        #
        eta_R_tot = eta_Rx + eta_Rz

        eta_Mx = self.Mx / self.Mx_Rd
        eta_My = self.My / self.My_Rd
        eta_Mz = self.Mz / self.Mz_Rd

        #------------------------------------------------------------
        # construct a dictionary containing the return values
        #------------------------------------------------------------
        return {'Rres': Rres,
                'Mres': Mres,
                'eta_Rx': eta_Rx,
                'eta_Ry': eta_Ry,
                'eta_Rz': eta_Rz,
                'eta_R_tot': eta_R_tot,
                'eta_Mx': eta_Mx,
                'eta_My': eta_My,
                'eta_Mz': eta_Mz}

    #-----------------------------------------------
    # LS_COLUMNS: specify the properties that are displayed in the view
    #-----------------------------------------------

    # NOTE: the definition of ls_table.assess_name is given in constructor of 'LCCTable'
    #
    # assess_name = 'max_Rx'  # @todo: compare with shear resistance of the screw
    # assess_name = 'min_Rx'
    # assess_name = 'max_Ry'
    # assess_name = 'min_Ry'
    # assess_name = 'max_Rz'
    # assess_name = 'min_Rz'  # @todo: compare with pull-out resistance of the screw
    # assess_name = 'max_Rres'
    assess_name = 'max_eta_R_tot'

    ls_columns = List(['Rx', 'Ry', 'Rz', 'Rres',
                       'Mx', 'My', 'Mz', 'Mres',
                       'eta_Rx', 'eta_Ry', 'eta_Rz', 'eta_R_tot',
                       'eta_Mx', 'eta_My', 'eta_Mz'])

    # accessors exposing entries of 'ls_values' as array-valued properties
    #
    Rres = Property(Array)

    def _get_Rres(self):
        return self.ls_values['Rres']

    Mres = Property(Array)

    def _get_Mres(self):
        return self.ls_values['Mres']

    eta_Rx = Property(Array)

    def _get_eta_Rx(self):
        return self.ls_values['eta_Rx']

    eta_Ry = Property(Array)

    def _get_eta_Ry(self):
        return self.ls_values['eta_Ry']

    eta_Rz = Property(Array)

    def _get_eta_Rz(self):
        return self.ls_values['eta_Rz']

    eta_R_tot = Property(Array)

    def _get_eta_R_tot(self):
        return self.ls_values['eta_R_tot']

    eta_Mx = Property(Array)

    def _get_eta_Mx(self):
        return self.ls_values['eta_Mx']

    eta_My = Property(Array)

    def _get_eta_My(self):
        return self.ls_values['eta_My']

    eta_Mz = Property(Array)

    def _get_eta_Mz(self):
        return self.ls_values['eta_Mz']

    #-------------------------------------------------
    # choose the assess parameter used for sorting
    # defined by the property name 'assess_name'
    #-------------------------------------------------

    max_Rx = Property(depends_on='+input')

    @cached_property
    def _get_max_Rx(self):
        return np.max(self.Rx)

    min_Rx = Property(depends_on='+input')

    @cached_property
    def _get_min_Rx(self):
        return np.min(self.Rx)

    max_Ry = Property(depends_on='+input')

    @cached_property
    def _get_max_Ry(self):
        return np.max(self.Ry)

    min_Ry = Property(depends_on='+input')

    @cached_property
    def _get_min_Ry(self):
        return np.min(self.Ry)

    max_Rz = Property(depends_on='+input')

    @cached_property
    def _get_max_Rz(self):
        return np.max(self.Rz)

    min_Rz = Property(depends_on='+input')

    @cached_property
    def _get_min_Rz(self):
        return np.min(self.Rz)

    max_Rres = Property(depends_on='+input')

    @cached_property
    def _get_max_Rres(self):
        return ndmax(self.Rres)

    max_eta_R_tot = Property(depends_on='+input')

    @cached_property
    def _get_max_eta_R_tot(self):
        return ndmax(self.eta_R_tot)

    #-------------------------------
    # ls view
    #-------------------------------

    # @todo: the dynamic selection of the columns to be displayed
    # does not work in connection with the LSArrayAdapter
    traits_view = View(
        VGroup(
            HGroup(
                VGroup(
                    Item(name='Rx_Rd', label='resistance R_xd [kN]',
                         style='readonly', format_str="%.1f"),
                    Item(name='Ry_Rd', label='resistance R_yd [kN]',
                         style='readonly', format_str="%.1f"),
                    Item(name='Rz_Rd', label='resistance R_zd [kN]',
                         style='readonly', format_str="%.1f"),
                    label='material properties (longitudinal)'
                ),
                VGroup(
                    Item(name='assess_name', label='assess_name',
                         style='readonly', format_str="%s"),
                    label='sort rows according to'
                )
            ),
            VGroup(
                Include('ls_group'),
                Item('ls_array', show_label=False,
                     editor=TabularEditor(adapter=LSArrayAdapter()))
            ),
        ),
        resizable=True,
        scrollable=True,
        height=1000,
        width=1100
    )
class LSTableRxyz(HasTraits):
    '''Assessment tool
    '''

    is_id = Int(0)

    # geo data: coordinates and element thickness
    #
    geo_data = Dict

    node_no = Property(Array)

    def _get_node_no(self):
        # node numbers come from the geometry record, not the state record
        # return self.state_data['node_no']
        return self.geo_data['node_no']

    # state data: stress resultants
    #
    state_data = Dict

    # reaction forces taken from the state record
    #
    Rx = Property(Array)

    def _get_Rx(self):
        forces = self.state_data
        return forces['Rx']

    Ry = Property(Array)

    def _get_Ry(self):
        forces = self.state_data
        return forces['Ry']

    Rz = Property(Array)

    def _get_Rz(self):
        forces = self.state_data
        return forces['Rz']

    # reaction moments taken from the state record
    #
    Mx = Property(Array)

    def _get_Mx(self):
        moments = self.state_data
        return moments['Mx']

    My = Property(Array)

    def _get_My(self):
        moments = self.state_data
        return moments['My']

    Mz = Property(Array)

    def _get_Mz(self):
        moments = self.state_data
        return moments['Mz']

    #------------------------------------------
    # combinations of limit states, stress resultants and directions
    #------------------------------------------

    # mapping from limit-state name to its evaluation class
    #
    ls = Trait('ULS', {'ULS': ULSRxyz,
                       'SLS': SLSRxyz})

    ls_class = Instance(LS)

    def _ls_class_default(self):
        '''ls instances, e.g. ULS()
        '''
        klass = self.ls_
        instance = klass(ls_table=self)
        return instance

    assess_name = Str

    assess_value = Property

    def _get_assess_value(self):
        # delegate to the attribute named by 'assess_name' on the active
        # limit-state instance
        limit_state = self.ls_class
        return getattr(limit_state, self.assess_name)

    traits_view = View(Tabbed(
        Item('ls_class@', label="ls", show_label=False),
        scrollable=False,
    ),
        resizable=True,
        scrollable=True,
        height=1000,
        width=1100
    )
class TraitsTest(HasTraits):
    """Demo class exercising one trait of each major editor type; the view
    below shows every trait in its Simple/Custom/Text/Readonly styles."""

    #-------------------------------------------------------------------------
    # Trait definitions:
    #-------------------------------------------------------------------------

    # simple scalar / enumerated traits
    integer_text = Int(1)
    enumeration = Enum('one', 'two', 'three', 'four', 'five', 'six', cols=3)
    float_range = Range(0.0, 10.0, 10.0)
    int_range = Range(1, 6)
    int_range2 = Range(1, 50)
    # compound trait: accepts either an int in 1..6 or one of the strings
    compound = Trait(1, Range(1, 6), 'one', 'two', 'three', 'four', 'five',
                     'six')
    boolean = Bool(True)
    instance = Trait(Instance())
    color = Color
    font = Font
    check_list = List(editor=CheckListEditor(
        values=['one', 'two', 'three', 'four'], cols=4))
    list = List(Str,
                ['East of Eden', 'The Grapes of Wrath', 'Of Mice and Men'])
    button = Event(0, editor=ButtonEditor(label='Click'))
    file = File
    directory = Directory
    # enumeration rendered as images; image names derive from origin_values
    # plus the '_origin' suffix
    image_enum = Trait(editor=ImageEnumEditor(values=origin_values,
                                              suffix='_origin',
                                              cols=4,
                                              klass=Instance),
                       *origin_values)

    #-------------------------------------------------------------------------
    # View definitions:
    #-------------------------------------------------------------------------

    # legacy string-based view DSL: '|' = VGroup, '{...}' = tab label,
    # '[...]' = group label, trailing @/*/~ select the editor style,
    # '_' inserts a separator
    view = View(
        ('|{Enum}',
         ('|<[Enumeration]',
          'enumeration[Simple]', '_', 'enumeration[Custom]@', '_',
          'enumeration[Text]*', '_', 'enumeration[Readonly]~'),
         ('|<[Check List]',
          'check_list[Simple]', '_', 'check_list[Custom]@', '_',
          'check_list[Text]*', '_', 'check_list[Readonly]~')
         ),
        ('|{Range}',
         ('|<[Float Range]',
          'float_range[Simple]', '_', 'float_range[Custom]@', '_',
          'float_range[Text]*', '_', 'float_range[Readonly]~'),
         ('|<[Int Range]',
          'int_range[Simple]', '_', 'int_range[Custom]@', '_',
          'int_range[Text]*', '_', 'int_range[Readonly]~'),
         ('|<[Int Range 2]',
          'int_range2[Simple]', '_', 'int_range2[Custom]@', '_',
          'int_range2[Text]*', '_', 'int_range2[Readonly]~')
         ),
        ('|{Misc}',
         ('|<[Integer Text]',
          'integer_text[Simple]', '_', 'integer_text[Custom]@', '_',
          'integer_text[Text]*', '_', 'integer_text[Readonly]~'),
         ('|<[Compound]',
          'compound[Simple]', '_', 'compound[Custom]@', '_',
          'compound[Text]*', '_', 'compound[Readonly]~'),
         ('|<[Boolean]',
          'boolean[Simple]', '_', 'boolean[Custom]@', '_',
          'boolean[Text]*', '_', 'boolean[Readonly]~')
         ),
        ('|{Color/Font}',
         ('|<[Color]',
          'color[Simple]', '_', 'color[Custom]@', '_',
          'color[Text]*', '_', 'color[Readonly]~'),
         ('|<[Font]',
          'font[Simple]', '_', 'font[Custom]@', '_',
          'font[Text]*', '_', 'font[Readonly]~')
         ),
        ('|{List}',
         ('|<[List]',
          'list[Simple]', '_', 'list[Custom]@', '_',
          'list[Text]*', '_', 'list[Readonly]~')
         ),
        ('|{Button}',
         ('|<[Button]',
          'button[Simple]', '_', 'button[Custom]@'),
         # 'button[Text]*',
         # 'button[Readonly]~' ),
         ('|<[Image Enum]',
          'image_enum[Simple]', '_', 'image_enum[Custom]@', '_',
          'image_enum[Text]*', '_', 'image_enum[Readonly]~'),
         ('|<[Instance]',
          'instance[Simple]', '_', 'instance[Custom]@', '_',
          'instance[Text]*', '_', 'instance[Readonly]~'),
         ),
        ('|{File}',
         ('|<[File]',
          'file[Simple]', '_', 'file[Custom]@', '_',
          'file[Text]*', '_', 'file[Readonly]~', ),
         ('|<[Directory]',
          'directory[Simple]', '_', 'directory[Custom]@', '_',
          'directory[Text]*', '_', 'directory[Readonly]~')
         ),
        buttons=['Apply', 'Revert', 'Undo', 'OK']
    )
class IMUView(HasTraits):
    """Chaco view of raw IMU samples (accelerometer + gyro) received over
    an SBP link, with RMS acceleration and IMU temperature/config readouts."""

    python_console_cmds = Dict()
    plot = Instance(Plot)
    plot_data = Instance(ArrayPlotData)
    # latest IMU temperature (deg C) and configuration word from MSG_IMU_AUX
    imu_temp = Float(0)
    imu_conf = Int(0)
    # RMS acceleration per axis, in g (scaled using the configured range)
    rms_acc_x = Float(0)
    rms_acc_y = Float(0)
    rms_acc_z = Float(0)

    traits_view = View(
        VGroup(
            Item(
                'plot',
                editor=ComponentEditor(bgcolor=(0.8, 0.8, 0.8)),
                show_label=False,
            ),
            HGroup(
                Item('imu_temp', format_str='%.2f C'),
                Item('imu_conf', format_str='0x%02X'),
                Item('rms_acc_x', format_str='%.2f g'),
                Item('rms_acc_y', format_str='%.2f g'),
                Item('rms_acc_z', format_str='%.2f g'),
            ),
        ))

    def imu_set_data(self):
        """Copy the rolling acc/gyro buffers into the plot data (called
        periodically; see call_repeatedly in __init__)."""
        self.plot_data.set_data('acc_x', self.acc[:, 0])
        self.plot_data.set_data('acc_y', self.acc[:, 1])
        self.plot_data.set_data('acc_z', self.acc[:, 2])
        self.plot_data.set_data('gyr_x', self.gyro[:, 0])
        self.plot_data.set_data('gyr_y', self.gyro[:, 1])
        self.plot_data.set_data('gyr_z', self.gyro[:, 2])

    def imu_aux_callback(self, sbp_msg, **metadata):
        """SBP callback for MSG_IMU_AUX: update temperature and config.

        Temperature formula (23 + raw/2^9) matches imu_type 0 only.
        """
        if sbp_msg.imu_type == 0:
            self.imu_temp = 23 + sbp_msg.temp / 2.**9
            self.imu_conf = sbp_msg.imu_conf
        else:
            print("IMU type %d not known" % sbp_msg.imu_type)

    def imu_raw_callback(self, sbp_msg, **metadata):
        """SBP callback for MSG_IMU_RAW: shift the rolling buffers, append
        the new sample, and recompute per-axis RMS acceleration in g."""
        self.acc[:-1, :] = self.acc[1:, :]
        self.gyro[:-1, :] = self.gyro[1:, :]
        self.acc[-1] = (sbp_msg.acc_x, sbp_msg.acc_y, sbp_msg.acc_z)
        self.gyro[-1] = (sbp_msg.gyr_x, sbp_msg.gyr_y, sbp_msg.gyr_z)
        if self.imu_conf is not None:
            # low nibble of imu_conf encodes the accelerometer range;
            # sf converts raw LSB counts to g
            acc_range = self.imu_conf & 0xF
            sf = 2.**(acc_range + 1) / 2.**15
            self.rms_acc_x = sf * np.sqrt(np.mean(np.square(self.acc[:, 0])))
            self.rms_acc_y = sf * np.sqrt(np.mean(np.square(self.acc[:, 1])))
            self.rms_acc_z = sf * np.sqrt(np.mean(np.square(self.acc[:, 2])))

    def __init__(self, link):
        """Build the rolling buffers and the six-series Chaco plot, then
        register the IMU callbacks on the SBP link."""
        super(IMUView, self).__init__()
        self.acc = np.zeros((NUM_POINTS, 3))
        self.gyro = np.zeros((NUM_POINTS, 3))
        self.plot_data = ArrayPlotData(t=np.arange(NUM_POINTS),
                                       acc_x=[0.0],
                                       acc_y=[0.0],
                                       acc_z=[0.0],
                                       gyr_x=[0.0],
                                       gyr_y=[0.0],
                                       gyr_z=[0.0])

        self.plot = Plot(self.plot_data,
                         auto_colors=colours_list,
                         emphasized=True)
        self.plot.title = 'Raw IMU Data'
        self.plot.title_color = [0, 0, 0.43]
        # y-range fixed to the full signed 16-bit LSB range
        self.ylim = self.plot.value_mapper.range
        self.ylim.low = -32768
        self.ylim.high = 32767
        # self.plot.value_range.bounds_func = lambda l, h, m, tb: (0, h * (1 + m))
        self.plot.value_axis.orientation = 'right'
        self.plot.value_axis.axis_line_visible = False
        self.plot.value_axis.title = 'LSB count'
        # refresh plot data every 200 ms rather than per message
        call_repeatedly(0.2, self.imu_set_data)

        self.legend_visible = True
        self.plot.legend.visible = True
        self.plot.legend.align = 'll'
        self.plot.legend.line_spacing = 1
        self.plot.legend.font = 'modern 8'
        self.plot.legend.draw_layer = 'overlay'
        self.plot.legend.tools.append(
            LegendTool(self.plot.legend, drag_button="right"))

        # NOTE(review): 'acc_x' is rebound for every series; the returned
        # renderers are unused, so this is harmless but misleading naming.
        acc_x = self.plot.plot(('t', 'acc_x'),
                               type='line',
                               color='auto',
                               name='Accn. X')
        acc_x = self.plot.plot(('t', 'acc_y'),
                               type='line',
                               color='auto',
                               name='Accn. Y')
        acc_x = self.plot.plot(('t', 'acc_z'),
                               type='line',
                               color='auto',
                               name='Accn. Z')
        acc_x = self.plot.plot(('t', 'gyr_x'),
                               type='line',
                               color='auto',
                               name='Gyro X')
        acc_x = self.plot.plot(('t', 'gyr_y'),
                               type='line',
                               color='auto',
                               name='Gyro Y')
        acc_x = self.plot.plot(('t', 'gyr_z'),
                               type='line',
                               color='auto',
                               name='Gyro Z')

        self.link = link
        self.link.add_callback(self.imu_raw_callback, SBP_MSG_IMU_RAW)
        self.link.add_callback(self.imu_aux_callback, SBP_MSG_IMU_AUX)

        # NOTE(review): registered under 'track', the same key TrackingView
        # uses — presumably a copy/paste leftover; confirm intended key.
        self.python_console_cmds = {'track': self}
class TrackingView(CodeFiltered):
    """Chaco view of per-channel C/N0 tracking history received over SBP.

    Maintains a deque of C/N0 samples per (code, sat) key, guarded by
    CN0_lock; plot updates run on the GUI thread via update_scheduler.
    """

    python_console_cmds = Dict()
    legend_visible = Bool()
    plot = Instance(Plot)
    plots = List()
    plot_data = Instance(ArrayPlotData)

    traits_view = View(
        VGroup(
            Item(
                'plot',
                editor=ComponentEditor(bgcolor=(0.8, 0.8, 0.8)),
                show_label=False,
            ),
            HGroup(
                Spring(width=8, springy=False),
                Item('legend_visible', label="Show Legend:"),
                CodeFiltered.get_filter_group(),
            )))

    def clean_cn0(self, t):
        """Drop channels whose last update predates the oldest retained
        timestamp. Caller must hold CN0_lock."""
        assert self.CN0_lock.locked()
        for k in list(self.CN0_dict.keys()):
            if self.CN0_age[k] < self.time[0]:
                del self.CN0_dict[k]
                del self.CN0_age[k]

    def measurement_state_callback(self, sbp_msg, **metadata):
        """SBP callback for MSG_MEASUREMENT_STATE: record one C/N0 sample
        per reported channel (keyed by mesid), padding absent channels
        with 0 so the histories stay time-aligned."""
        with self.CN0_lock:
            codes_that_came = []
            t = monotonic() - self.t_init
            self.time.append(t)
            # first we loop over all the SIDs / channel keys we have stored and set 0 in for CN0
            for i, s in enumerate(sbp_msg.states):
                if code_is_glo(s.mesid.code):
                    # for Glonass satellites, store in two dictionaries FCN and SLOT
                    # so that they can both be retrieved when displaying the channel
                    if (s.mesid.sat > 90):
                        self.glo_fcn_dict[i] = s.mesid.sat - 100
                    sat = self.glo_fcn_dict.get(i, 0)
                    if (s.mesid.sat <= 90):
                        self.glo_slot_dict[sat] = s.mesid.sat
                else:
                    sat = s.mesid.sat
                key = (s.mesid.code, sat)
                codes_that_came.append(key)
                if s.cn0 != 0:
                    # raw cn0 is in quarter-dB-Hz units
                    self.CN0_dict[key].append(s.cn0 / 4.0)
                    self.CN0_age[key] = t
                received_code_list = getattr(self, "received_codes", [])
                if s.mesid.code not in received_code_list:
                    received_code_list.append(s.mesid.code)
                    self.received_codes = received_code_list
            # channels that did not report this epoch get a 0 sample
            for key, cno_array in list(self.CN0_dict.items()):
                if key not in codes_that_came:
                    cno_array.append(0)
            self.clean_cn0(t)
        self.update_scheduler.schedule_update('update_plot', self.update_plot)

    def tracking_state_callback(self, sbp_msg, **metadata):
        """SBP callback for MSG_TRACKING_STATE: same bookkeeping as
        measurement_state_callback but keyed by sid (with FCN offset
        handling for Glonass)."""
        with self.CN0_lock:
            codes_that_came = []
            t = monotonic() - self.t_init
            self.time.append(t)
            # first we loop over all the SIDs / channel keys we have stored and set 0 in for CN0
            # for each SID, an array of size MAX PLOT with the history of CN0's stored
            # If there is no CN0 or not tracking for an epoch, 0 will be used
            # each array can be plotted against host_time, t
            for i, s in enumerate(sbp_msg.states):
                if code_is_glo(s.sid.code):
                    if (s.sid.sat > 90):
                        sat = s.sid.sat - 100
                    else:
                        sat = s.fcn - GLO_FCN_OFFSET
                    self.glo_slot_dict[sat] = s.sid.sat
                else:
                    sat = s.sid.sat
                key = (s.sid.code, sat)
                codes_that_came.append(key)
                if s.cn0 != 0:
                    # raw cn0 is in quarter-dB-Hz units
                    self.CN0_dict[key].append(s.cn0 / 4.0)
                    self.CN0_age[key] = t
                received_code_list = getattr(self, "received_codes", [])
                if s.sid.code not in received_code_list:
                    received_code_list.append(s.sid.code)
                    self.received_codes = received_code_list
            # channels that did not report this epoch get a 0 sample
            for key, cno_array in list(self.CN0_dict.items()):
                if key not in codes_that_came:
                    cno_array.append(0)
            self.clean_cn0(t)
        self.update_scheduler.schedule_update('update_plot', self.update_plot)

    def update_plot(self):
        """GUI-thread refresh: sync plot data with CN0_dict, create/remove
        per-channel renderers, and rebuild the legend."""
        with self.CN0_lock:
            plot_labels = []
            plots = []
            # Update the underlying plot data from the CN0_dict for selected items
            new_plot_data = {'t': self.time}
            for k, cno_array in self.CN0_dict.items():
                key = str(k)
                # set plot data
                # 'show_<code>' filter traits come from CodeFiltered
                if (getattr(self, 'show_{}'.format(int(k[0])), True)):
                    new_plot_data[key] = cno_array
            self.plot_data.update_data(new_plot_data)
            # Remove any stale plots that got removed from the dictionary
            for each in list(self.plot.plots.keys()):
                if each not in [str(a) for a in self.CN0_dict.keys()
                                ] and each != 't':
                    try:
                        self.plot.delplot(each)
                    except KeyError:
                        pass
                    try:
                        self.plot_data.del_data(each)
                    except KeyError:
                        pass
            # add/remove plot as neccesary and build legend
            for k, cno_array in self.CN0_dict.items():
                key = str(k)
                if (getattr(self, 'show_{}'.format(int(k[0])), True) and
                        not cno_array.count(0) == NUM_POINTS):
                    if key not in self.plot.plots.keys():
                        pl = self.plot.plot(('t', key),
                                            type='line',
                                            color=get_color(k),
                                            name=key)
                    else:
                        pl = self.plot.plots[key]
                    plots.append(pl)
                    plot_labels.append(get_label(k, self.glo_slot_dict))
                # if not selected or all 0, remove
                else:
                    if key in list(self.plot.plots.keys()):
                        self.plot.delplot(key)
            plots = dict(list(zip(plot_labels, plots)))
            self.plot.legend.plots = plots

    def _legend_visible_changed(self):
        """Traits change handler: toggle the legend; re-attach the drag
        tool when the legend is re-shown."""
        if self.plot:
            if not self.legend_visible:
                self.plot.legend.visible = False
            else:
                self.plot.legend.visible = True
                self.plot.legend.tools.append(
                    LegendTool(self.plot.legend, drag_button="right"))

    def __init__(self, link):
        """Set up history buffers, the C/N0 plot, and SBP callbacks."""
        super(TrackingView, self).__init__()
        self.t_init = monotonic()
        # pre-fill the time axis so early samples plot at negative times
        self.time = deque([x * 1 / TRK_RATE for x in range(-NUM_POINTS, 0, 1)],
                          maxlen=NUM_POINTS)
        self.CN0_lock = threading.Lock()
        # per-channel history, zero-filled so new channels are time-aligned
        self.CN0_dict = defaultdict(lambda: deque([0] * NUM_POINTS,
                                                  maxlen=NUM_POINTS))
        self.CN0_age = defaultdict(lambda: -1)
        self.glo_fcn_dict = {}
        self.glo_slot_dict = {}
        self.n_channels = None
        self.plot_data = ArrayPlotData(t=[0.0])
        self.plot = Plot(self.plot_data, emphasized=True)
        self.plot.title = 'Tracking C/N0'
        self.plot.title_color = [0, 0, 0.43]
        self.ylim = self.plot.value_mapper.range
        self.ylim.low = SNR_THRESHOLD
        self.ylim.high = 60
        self.plot.value_range.bounds_func = lambda l, h, m, tb: (0, h * (1 + m))
        self.plot.value_axis.orientation = 'right'
        self.plot.value_axis.axis_line_visible = False
        self.plot.value_axis.title = 'dB-Hz'
        self.plot_data.set_data('t', self.time)
        self.plot.index_axis.title = 'seconds'
        # show only the most recent 100 seconds
        self.plot.index_range.bounds_func = lambda l, h, m, tb: (h - 100, h)
        self.legend_visible = True
        self.plot.legend.visible = True
        self.plot.legend.align = 'll'
        self.plot.legend.line_spacing = 1
        self.plot.legend.font = 'monospace 8'
        self.plot.legend.draw_layer = 'overlay'
        self.plot.legend.tools.append(
            LegendTool(self.plot.legend, drag_button="right"))

        self.link = link
        self.link.add_callback(self.measurement_state_callback,
                               SBP_MSG_MEASUREMENT_STATE)
        self.link.add_callback(self.tracking_state_callback,
                               SBP_MSG_TRACKING_STATE)

        self.python_console_cmds = {'track': self}
        self.update_scheduler = UpdateScheduler()
class MATS3DMicroplaneDamageWu(MATSXDMicroplaneDamageFatigueWu, MATS3DEval):
    """3D microplane damage-fatigue material model (Wu formulation).

    Specialises the dimension-independent microplane damage formulation
    to 3D by supplying the fixed 28-microplane integration scheme
    (normal vectors and weights) used for numerical integration over the
    unit hemisphere.
    """

    implements(IMATSEval)

    # number of spatial dimensions
    # n_dim = Constant(3)

    # number of components of engineering tensor representation
    # n_eng = Constant(6)

    #-------------------------------------------------------------------------
    # PolarDiscr related data
    #-------------------------------------------------------------------------
    #
    # number of microplanes - currently fixed for 3D
    #
    n_mp = Constant(28)

    # normal vectors of the microplanes (cached; recomputed only if n_mp
    # were ever to change)
    #
    _MPN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPN(self):
        # microplane normals: 28 unit vectors of the hemisphere
        # integration scheme, one row per microplane
        return array([[.577350259, .577350259, .577350259],
                      [.577350259, .577350259, -.577350259],
                      [.577350259, -.577350259, .577350259],
                      [.577350259, -.577350259, -.577350259],
                      [.935113132, .250562787, .250562787],
                      [.935113132, .250562787, -.250562787],
                      [.935113132, -.250562787, .250562787],
                      [.935113132, -.250562787, -.250562787],
                      [.250562787, .935113132, .250562787],
                      [.250562787, .935113132, -.250562787],
                      [.250562787, -.935113132, .250562787],
                      [.250562787, -.935113132, -.250562787],
                      [.250562787, .250562787, .935113132],
                      [.250562787, .250562787, -.935113132],
                      [.250562787, -.250562787, .935113132],
                      [.250562787, -.250562787, -.935113132],
                      [.186156720, .694746614, .694746614],
                      [.186156720, .694746614, -.694746614],
                      [.186156720, -.694746614, .694746614],
                      [.186156720, -.694746614, -.694746614],
                      [.694746614, .186156720, .694746614],
                      [.694746614, .186156720, -.694746614],
                      [.694746614, -.186156720, .694746614],
                      [.694746614, -.186156720, -.694746614],
                      [.694746614, .694746614, .186156720],
                      [.694746614, .694746614, -.186156720],
                      [.694746614, -.694746614, .186156720],
                      [.694746614, -.694746614, -.186156720]])

    # integration weights of the microplanes (cached, one weight per normal)
    #
    _MPW = Property(depends_on='n_mp')

    @cached_property
    def _get__MPW(self):
        # Note that the values in the array must be multiplied by 6 (cf. [Baz05])!
        # The sum of the raw array equals 0.5 (cf. [BazLuz04]).
        # The values are given for a Gaussian integration over the unit
        # hemisphere.
        return array([.0160714276, .0160714276, .0160714276, .0160714276,
                      .0204744730, .0204744730, .0204744730, .0204744730,
                      .0204744730, .0204744730, .0204744730, .0204744730,
                      .0204744730, .0204744730, .0204744730, .0204744730,
                      .0158350505, .0158350505, .0158350505, .0158350505,
                      .0158350505, .0158350505, .0158350505, .0158350505,
                      .0158350505, .0158350505, .0158350505, .0158350505
                      ]) * 6.0

    #-------------------------------------------------------------------------
    # Cached elasticity tensors
    #-------------------------------------------------------------------------

    #-------------------------------------------------------------------------
    # Dock-based view with its own id
    #-------------------------------------------------------------------------

    traits_view = View(Include('polar_fn_group'),
                       dock='tab',
                       id='ibvpy.mats.mats3D.mats_3D_cmdm.MATS3D_cmdm',
                       kind='modal',
                       resizable=True,
                       scrollable=True,
                       width=0.6, height=0.8,
                       buttons=['OK', 'Cancel'])
class ParticleScanner(HasTraits):
    """Find particles in a camera image and move the stage to each one.

    Thresholds the camera image to locate particles, then visits each
    particle in turn (optionally in a background thread) executing a
    "payload" callable at every position - e.g. taking a Z-stack of
    spectra.  Scans over several tiled fields of view are also supported.
    Progress is exposed through the ``scan_status`` / ``scan_progress`` /
    ``scanning`` traits and a scan can be aborted via ``abort_scan()``.
    """

    # --- image-processing parameters ------------------------------------
    median_filter_width = Range(1, 31)
    threshold_block_size = Int(21)
    threshold_level = Range(-255, 255, 50, mode="slider")
    live_filter = Enum(["None", "Denoised", "Thresholded"])

    # --- scan control / status ------------------------------------------
    scan_current_view = Button()
    abort_scan_button = Button(label="abort_scan")
    scan_status = String("Not Scanning")
    scan_progress = Range(0., 100., 0.)
    scanning = Bool(False)
    # np.int is a deprecated alias of the builtin; plain int is identical
    tiled_scan_size = Array(shape=(2,), dtype=int)
    start_tiled_scan = Button()

    # Pixels cropped from the image edge when searching for particles.
    border_pixels = 15

    traits_view = View(
        Tabbed(
            VGroup(
                Item(name="median_filter_width"),
                Item(name="threshold_block_size"),
                Item(name="threshold_level"),
                Item(name="live_filter"),
                label="Image Processing",
            ),
            VGroup(
                Item(name="scan_status", style="readonly"),
                Item(name="scan_progress", style="readonly"),
                Item(name="scanning", style="readonly"),
                Item(name="scan_current_view"),
                Item(name="abort_scan_button"),
                Item(name="tiled_scan_size"),
                Item(name="start_tiled_scan"),
                label="Scan Control",
            ),
        ),
        title="Particle Scanner"
    )

    def __init__(self, camera_stage_mapper, spectrometer, spectrometer_aligner,
                 datafile):
        """Wire up the scanner to the hardware and the HDF5 output file.

        Parameters
        ----------
        camera_stage_mapper : object mapping camera pixels <-> stage positions
        spectrometer : spectrometer device object
        spectrometer_aligner : helper that aligns a particle to the fibre
        datafile : open h5py-like file in which scans are stored
        """
        super(ParticleScanner, self).__init__()
        self.csm = camera_stage_mapper
        self.spectrometer = spectrometer
        self.datafile = datafile
        self.aligner = spectrometer_aligner
        self._live_filter_changed()  # enable video filter if required
        self._scan_lock = threading.Lock()
        self._abort_scan_event = threading.Event()
        # Created here so abort_scan() cannot hit an AttributeError when it
        # fires before any scan has ever been started.
        self._scan_thread = None

    def SendCompleteMessage(self, number):
        """Email a 'scan finished' notification with the particle count.

        Best-effort: failures are printed, never raised.
        """
        # SECURITY: credentials are hard-coded in source - they should be
        # moved to a config file / environment variable and rotated.
        gmail_user = "******"
        gmail_pwd = "NQ3dPv6SXZUEdfTE"
        FROM = '*****@*****.**'
        TO = ['*****@*****.**']  # must be a list
        SUBJECT = "Scan finished"
        TEXT = "%d particles scanned" % number

        # Prepare actual message.  NOTE: the original literal began with a
        # stray backslash ("\From:"), which corrupted the first SMTP header;
        # fixed here.
        message = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
            FROM, ", ".join(TO), SUBJECT, TEXT)
        try:
            server = smtplib.SMTP("smtp.gmail.com", 587)  # port 465 doesn't seem to work!
            server.ehlo()
            server.starttls()
            server.login(gmail_user, gmail_pwd)
            server.sendmail(FROM, TO, message)
            server.close()
            print('successfully sent the mail')
        except Exception:  # was a bare except; keep best-effort behaviour but
            # no longer swallow SystemExit/KeyboardInterrupt
            print("failed to send mail")

    def denoise_image(self, img):
        """Apply the current box-blur denoising filter to ``img``."""
        if self.median_filter_width > 0:
            if self.median_filter_width % 2 == 0:
                self.median_filter_width += 1  # guard against even integers!
            return cv2.blur(img, (self.median_filter_width,
                                  self.median_filter_width))
        else:
            return img

    def threshold_image(self, img):
        """Adaptive-threshold ``img`` with the current settings, then open it."""
        # adaptiveThreshold requires an odd block size; // keeps the
        # arithmetic integral on Python 3 as well as Python 2
        block_size = (int(self.threshold_block_size) // 2) * 2 + 1
        img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, block_size,
                                    self.threshold_level)
        kernel = np.ones((self.median_filter_width, self.median_filter_width),
                         np.uint8)
        # after thresholding, erode then dilate to kill small blobs/noise
        return cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=1)

    def camera_filter_function(self, frame):
        """Live-preview filter applied to each RGB camera frame."""
        img = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        if self.live_filter == "Denoised":
            img = self.denoise_image(img)
        elif self.live_filter == "Thresholded":
            img = self.threshold_image(self.denoise_image(img))
        return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    def _live_filter_changed(self):
        # Install or remove the live preview filter on the camera.
        if self.live_filter == "None":
            self.csm.camera.filter_function = None
        else:
            self.csm.camera.filter_function = self.camera_filter_function

    @on_trait_change("find_particles")
    def find_particles_in_new_image(self):
        # necessary to stop extra arguments from Traits messing things up
        self.find_particles()

    def find_particles(self, img=None):
        """Find particles in the supplied image, or in the camera image.

        Returns a list of (row, col) centre-of-mass pixel positions in the
        full (uncropped) image frame.
        """
        if img is None:
            ret, frame = self.csm.camera.raw_snapshot()
            img = self.threshold_image(self.denoise_image(
                cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            ))[self.border_pixels:-self.border_pixels,
               self.border_pixels:-self.border_pixels]  # ignore the edges
        labels, nlabels = ndimage.measurements.label(img)
        # compensate for the cropped border (was a hard-coded 15, which would
        # silently break if border_pixels were changed)
        return [np.array(p) + self.border_pixels
                for p in ndimage.measurements.center_of_mass(
                    img, labels, range(1, nlabels + 1))]

    def go_to_particles(self, payload_function=lambda: time.sleep(2),
                        background=True, max_n_particles=None):
        """Find particles, then visit each one in turn and execute a payload.

        This function returns immediately as it spawns a background thread.
        The scan can be monitored through traits scan_status, scan_progress,
        scanning.  It can be aborted with the abort_scan() method.
        By default it simply waits for 2 seconds at each position.
        """
        if self.scanning:
            return

        def worker_function():
            if not self._scan_lock.acquire(False):
                raise Exception("Tried to start a scan, but one was in progress!")
            aborted = False
            try:
                self.scanning = True
                self.scan_progress = 0
                self.scan_status = "Setting up scan..."
                here = self.csm.camera_centre_position()
                pixel_positions = self.find_particles()
                positions = [self.csm.camera_pixel_to_sample(p)
                             for p in pixel_positions]
                image = self.csm.camera.color_image()
                # extract feature images (used to re-centre on each particle)
                feature_images = [
                    image[p[0] - self.border_pixels:p[0] + self.border_pixels,
                          p[1] - self.border_pixels:p[1] + self.border_pixels]
                    for p in pixel_positions]
                for index, p in enumerate(positions):
                    if max_n_particles is not None and index >= max_n_particles:
                        print("Terminating scan as we've now scanned enough particles")
                        break
                    self.scan_status = "Scanning particle %d of %d" % (
                        index, len(positions))
                    self.csm.move_to_sample_position(p)
                    time.sleep(0.3)
                    self.csm.centre_on_feature(feature_images[index])
                    payload_function()
                    self.scan_progress = float(index) / float(len(positions)) * 100
                    if self._abort_scan_event.is_set():  # lets us abort a scan
                        self.scan_status = "Scan Aborted."
                        self._abort_scan_event.clear()
                        aborted = True
                        break
                self.csm.move_to_sample_position(here)
                if not aborted:
                    # previously this ran unconditionally, clobbering the
                    # "Scan Aborted." status immediately after an abort
                    self.scan_status = "Scan Finished"
                    self.scan_progress = 100.0
                    print("Scan Finished :)")
            finally:
                # always reset state and release the lock, even if the scan
                # raised - otherwise no further scan could ever start
                self.scanning = False
                self._scan_lock.release()
            return not aborted

        # execute the above function in the background
        if background:
            self._scan_thread = threading.Thread(target=worker_function)
            self._scan_thread.start()
        else:
            # if we elected not to use a thread, just do it!
            return worker_function()

    @on_trait_change("abort_scan_button")
    def abort_scan(self):
        """Abort a currently-running scan in a background thread."""
        if self._scan_thread is not None and self._scan_thread.is_alive():
            self._abort_scan_event.set()

    def tile_scans(self, size, background=True, tile_start_function=None,
                   ts_args=None, ts_kwargs=None, *args, **kwargs):
        """Run ``go_to_particles`` over a snake-raster grid of views.

        Parameters
        ----------
        size : 2-sequence, the grid shape (columns x rows) of fields of view
        background : run the whole tiled scan in a background thread
        tile_start_function : optional callable invoked at the start of each
            tile with ``*ts_args, **ts_kwargs``
        Remaining ``*args, **kwargs`` are forwarded to ``go_to_particles``.
        """
        # None sentinels instead of mutable defaults (shared-state bug)
        ts_args = [] if ts_args is None else ts_args
        ts_kwargs = {} if ts_kwargs is None else ts_kwargs

        def worker_function():
            grid_size = np.array(size)
            here = self.csm.camera_centre_position()
            scan_centres = [
                self.csm.camera_point_to_sample(np.array([i, j]) - grid_size / 2)
                for i in range(grid_size[0])
                for j in (range(grid_size[1]) if i % 2 == 0
                          else reversed(range(grid_size[1])))
                # snake-style raster scanning
            ]
            for centre in scan_centres:
                print("Taking a scan with centre %.1f, %.1f um" % tuple(centre))
                self.csm.move_to_sample_position(centre)
                if tile_start_function is not None:
                    tile_start_function(*ts_args, **ts_kwargs)
                # BUG FIX: the original read "background=False *args", which
                # parses as background=(False * args) - an empty tuple - and
                # silently discarded the extra positional arguments.
                ret = self.go_to_particles(background=False, *args, **kwargs)
                if not ret:
                    print("Scan aborted!")
                    break
            self.csm.move_to_sample_position(here)
            print("Scan Finished!")
            latest_group = sorted(
                [v for k, v in self.datafile['particleScans'].items()
                 if 'scan' in k],
                key=lambda g: int(re.search(r"(\d+)$", g.name).groups()[0]))[-1]
            number_of_particles = len(
                [k for k in latest_group.keys() if 'z_scan_' in k])
            self.SendCompleteMessage(number_of_particles)

        # execute the above function in the background
        if background:
            self._scan_thread = threading.Thread(target=worker_function)
            self._scan_thread.start()
        else:
            # if we elected not to use a thread, just do it!
            worker_function()

    def _scan_current_view_fired(self):
        self.take_zstacks_of_particles()

    def take_zstacks_of_particles(self, dz=np.arange(-2.5, 2.5, 0.2),
                                  datafile_group=None, *args, **kwargs):
        """Visit each particle in the current view and scan it spectrally."""
        self.spectrometer.live_view = False
        g = (self.new_data_group("particleScans/scan%d", self.datafile)
             if datafile_group is None else datafile_group)
        # NOTE: 'raman' is a module-level global defined elsewhere in the file
        g.create_dataset("Raman_wavelengths", data=raman.GetWavelength())
        self.save_overview_images(g)
        self.go_to_particles(self.pf_align_and_take_z_scan(dz, g),
                             *args, **kwargs)

    def _start_tiled_scan_fired(self):
        self.take_zstacks_of_particles_tiled(self.tiled_scan_size)

    def take_zstacks_of_particles_tiled(self, shape, **kwargs):
        """Take z-stacked spectra of all the particles in several fields-of-view.

        We essentially run take_zstacks_of_particles for several fields of
        view, tiling them together in to the "shape" specified (2-element
        tuple).  The centre of the tiled image is the current position.
        """
        self.spectrometer.live_view = False
        g = self.new_data_group("particleScans/scan%d", self.datafile)
        g.create_dataset("Raman_wavelengths", data=raman.GetWavelength())
        self.tile_scans(shape,
                        tile_start_function=self.save_overview_images,
                        ts_args=[g],
                        payload_function=self.pf_align_and_take_z_scan(
                            datafile_group=g, **kwargs))

    def new_data_group(self, name="particleScans/scan%d", parent=None):
        """Create and return the next free numbered HDF5 group ``name % n``."""
        if parent is None:
            parent = self.datafile
        n = 0
        while name % n in parent:
            n += 1
        return parent.create_group(name % n)

    def new_dataset_name(self, g, name):
        """Return the first ``name % n`` not already present in group ``g``."""
        n = 0
        while name % n in g:
            n += 1
        return name % n

    def save_overview_images(self, datafile_group):
        """Save an unmodified and a thresholded image, as a reference for scans."""
        self.csm.autofocus_iterate(np.arange(-5, 5, 0.5))
        time.sleep(1)
        self.csm.camera.update_latest_frame()
        img1 = datafile_group.create_dataset(
            self.new_dataset_name(datafile_group, "overview_image_%d"),
            data=self.csm.camera.color_image())
        img1.attrs.create("stage_position", self.csm.stage.position())
        img1.attrs.create("camera_centre_position",
                          self.csm.camera_centre_position())
        img1.attrs.create("mapping_matrix_camera_to_sample",
                          self.csm.camera_to_sample)
        img1.attrs.create("timestamp", datetime.datetime.now().isoformat())
        img2 = datafile_group.create_dataset(
            self.new_dataset_name(datafile_group,
                                  "overview_image_%d_thresholded"),
            data=self.threshold_image(self.denoise_image(
                self.csm.camera.gray_image())))
        img2.attrs.create("stage_position", self.csm.stage.position())
        img2.attrs.create("camera_centre_position",
                          self.csm.camera_centre_position())
        # record the processing parameters used for the thresholded image
        for key, val in self.get(['median_filter_width',
                                  'threshold_level']).items():
            img2.attrs.create(key, val)
        img2.attrs.create("camera_to_sample_matrix", self.csm.camera_to_sample)
        img2.attrs.create("timestamp", datetime.datetime.now().isoformat())

    def pf_align_and_take_z_scan(self, dz=np.arange(-4, 4, 0.4),
                                 datafile_group=None):
        """Set up for a scan of all particles, then return a payload function.

        The "payload function" is suitable for the eponymous argument of
        go_to_particles, and will autofocus, align particle to fibre, and
        take a Z stack.  NB the payload function "wraps up" the arguments
        neatly so we don't need to store things like the depth of the Z stack.
        """
        if datafile_group is None:
            datafile_group = self.new_data_group("particleScans/scan%d",
                                                 self.datafile)

        def _optimise_alignment():
            # Autofocus, then fibre-align with a short integration time and
            # restore the long measurement integration time.  (This sequence
            # appeared twice verbatim in the original payload.)
            self.csm.autofocus_iterate(np.arange(-2.5, 2.5, 0.5))
            self.aligner.spectrometer.integration_time = 300.  # short, for alignment
            self.aligner.optimise_2D(tolerance=0.03, stepsize=0.2)
            self.aligner.spectrometer.integration_time = 1000.  # long, for measurement

        def _save_z_scan(g, dataset_name):
            # Take a z-scan and save it with full metadata; returns the dataset.
            dset = g.create_dataset(dataset_name, data=self.aligner.z_scan(dz))
            for key, val in self.aligner.spectrometer.get_metadata().items():
                dset.attrs.create(key, val)
            dset.attrs.create("stage_position", self.csm.stage.position())
            dset.attrs.create("camera_centre_position",
                              self.csm.camera_centre_position())
            dset.attrs.create("timestamp",
                              datetime.datetime.now().isoformat())
            dset.attrs.create("dz", dz)
            return dset

        def _save_camera_image(g, dataset_name):
            # Wait for the focus to return, take a fresh frame, save the
            # central 100x100 crop.
            time.sleep(0.3)
            self.csm.camera.update_latest_frame()  # take a frame and ignore (for freshness)
            image = self.csm.camera.color_image()
            img = g.create_dataset(
                dataset_name,
                data=image[image.shape[0] // 2 - 50:image.shape[0] // 2 + 50,
                           image.shape[1] // 2 - 50:image.shape[1] // 2 + 50])
            img.attrs.create("stage_position", self.csm.stage.position())
            img.attrs.create("timestamp", datetime.datetime.now().isoformat())

        def align_and_take_z_scan():
            _optimise_alignment()
            g = self.new_data_group("z_scan_%d", datafile_group)
            dset = _save_z_scan(g, "z_scan")
            _save_camera_image(g, "camera_image")

            # first bring the laser into focus
            # NOTE: stage, raman and light_shutter are module-level globals
            here = stage.position()
            laser_focus = raman.AlignHeNe(dset)
            print("Moving to HeNe Focus (%g)" % (laser_focus))
            stage.move_rel([0, 0, laser_focus])
            time.sleep(1)
            light_shutter.close_shutter()

            # record fast kinetic raman scan
            time.sleep(0.3)
            raman.take_fast_kinetic()

            # save the data to the HDF5 output file
            Raman_spc = g.create_dataset("kinetic_raman",
                                         data=raman.kinetic_scan_data)
            Raman_spc.attrs.create("laser power", raman.laserpower)
            Raman_spc.attrs.create("integration time", raman.exptime)
            Raman_spc.attrs.create("focus height", laser_focus)
            Raman_spc.attrs.create("times", raman.times)
            Raman_spc.attrs.create("wavelengths", raman.Raman_wavelengths)
            light_shutter.open_shutter()

            # repeat alignment, z-scan and image after the Raman measurement
            _optimise_alignment()
            _save_z_scan(g, "z_scan2")
            _save_camera_image(g, "camera_image2")

            time.sleep(0.3)
            stage.move(here)
            datafile_group.file.flush()
        return align_and_take_z_scan

    def plot_latest_scan(self):
        """Plot the spectra from the most recent scan."""
        # NOTE(review): latest_scan_group is set elsewhere - confirm it exists
        # before calling this method.
        g = self.latest_scan_group
        for name, scangroup in g.items():
            if re.match(r"z_scan_\d+", name):
                scan = scangroup['z_scan']
                spectrum = np.sum(scan, 0)
                plt.plot(scan.attrs['wavelengths'],
                         spectrum / scan.shape[0] - scan.attrs['background'])
        plt.show(block=False)
class plot_window(HasTraits):
    """Chaco plot window for a camera image with click/zoom tools.

    Collects up to four manually clicked calibration points, draws
    crosses / lines / quiver overlays on top of the image, and numbers
    the clicked points with text overlays.
    """

    # Plot infrastructure (populated in __init__ / attach_tools).
    _plot_data = Instance(ArrayPlotData)
    _plot = Instance(Plot)
    _click_tool = Instance(clicker_tool)
    _img_plot = Instance(ImagePlot)
    # When truthy, a right-click with no stored points deletes a detected
    # point via py_rclick_delete instead of popping a clicked one.
    _right_click_avail = 0
    name = Str

    view = View(
        Item(name='_plot', editor=ComponentEditor(), show_label=False),
    )

    def __init__(self):
        # -------------- Initialization of plot system ----------------
        padd = 25
        self._plot_data = ArrayPlotData()
        # Manually clicked point coordinates (up to four).
        self._x = []
        self._y = []
        # Labels shown next to the clicked points.
        self.man_ori = [1, 2, 3, 4]
        self._plot = Plot(self._plot_data, default_origin="top left")
        self._plot.padding_left = padd
        self._plot.padding_right = padd
        self._plot.padding_top = padd
        self._plot.padding_bottom = padd
        # Keeps references to quiver plots added by drawquiver.
        self._quiverplots = []
        # -------------------------------------------------------------

    def left_clicked_event(self):
        """Record the clicked point (max four) and redraw the overlays."""
        print("left clicked")
        if len(self._x) < 4:
            self._x.append(self._click_tool.x)
            self._y.append(self._click_tool.y)
        print self._x
        print self._y
        self.drawcross("coord_x", "coord_y", self._x, self._y, "red", 5)
        # rebuild the numbered overlays from scratch
        self._plot.overlays = []
        self.plot_num_overlay(self._x, self._y, self.man_ori)

    def right_clicked_event(self):
        """Remove the last clicked point, or delete a detected point."""
        print("right clicked")
        if len(self._x) > 0:
            self._x.pop()
            self._y.pop()
            print self._x
            print self._y
            self.drawcross("coord_x", "coord_y", self._x, self._y, "red", 5)
            self._plot.overlays = []
            self.plot_num_overlay(self._x, self._y, self.man_ori)
        else:
            if (self._right_click_avail):
                print "deleting point"
                # NOTE(review): py_rclick_delete, py_get_pix_N and cameraN
                # are provided elsewhere (subclass or external wiring) -
                # confirm before calling this path.
                self.py_rclick_delete(self._click_tool.x,
                                      self._click_tool.y, self.cameraN)
                x = []
                y = []
                self.py_get_pix_N(x, y, self.cameraN)
                self.drawcross("x", "y", x[0], y[0], "blue", 4)

    def attach_tools(self):
        """Attach click and zoom tools to the image plot and hook mappers."""
        self._click_tool = clicker_tool(self._img_plot)
        self._click_tool.on_trait_change(self.left_clicked_event,
                                         'left_changed')
        self._click_tool.on_trait_change(self.right_clicked_event,
                                         'right_changed')
        self._img_plot.tools.append(self._click_tool)
        self._zoom_tool = SimpleZoom(component=self._plot, tool_mode="box",
                                     always_on=False)
        self._zoom_tool.max_zoom_out_factor = 1.0
        self._img_plot.tools.append(self._zoom_tool)
        # keep the numbered overlays in sync with pans/zooms
        if self._plot.index_mapper is not None:
            self._plot.index_mapper.on_trait_change(self.handle_mapper,
                                                    'updated', remove=False)
        if self._plot.value_mapper is not None:
            self._plot.value_mapper.on_trait_change(self.handle_mapper,
                                                    'updated', remove=False)

    def drawcross(self, str_x, str_y, x, y, color1, mrk_size):
        """Scatter-plot crosses at (x, y) under data keys str_x/str_y."""
        self._plot_data.set_data(str_x, x)
        self._plot_data.set_data(str_y, y)
        self._plot.plot((str_x, str_y), type="scatter", color=color1,
                        marker="plus", marker_size=mrk_size)
        self._plot.request_redraw()

    def drawline(self, str_x, str_y, x1, y1, x2, y2, color1):
        """Draw a single line from (x1, y1) to (x2, y2)."""
        self._plot_data.set_data(str_x, [x1, x2])
        self._plot_data.set_data(str_y, [y1, y2])
        self._plot.plot((str_x, str_y), type="line", color=color1)
        self._plot.request_redraw()

    def drawquiver(self, x1c, y1c, x2c, y2c, color, linewidth=1.0, scale=1.0):
        """ drawquiver draws multiple lines at once on the screen x1,y1->x2,y2
        in the current camera window
        parameters:
            x1c - array of x1 coordinates
            y1c - array of y1 coordinates
            x2c - array of x2 coordinates
            y2c - array of y2 coordinates
            color - color of the line
            linewidth - linewidth of the line
        example usage:
            drawquiver ([100,200],[100,100],[400,400],[300,200],'red',linewidth=2.0)
            draws 2 red lines with thickness = 2 :  100,100->400,300 and 200,100->400,200
        """
        x1, y1, x2, y2 = self.remove_short_lines(x1c, y1c, x2c, y2c,
                                                 min_length=0)
        if len(x1) > 0:
            xs = ArrayDataSource(x1)
            ys = ArrayDataSource(y1)

            quiverplot = QuiverPlot(
                index=xs, value=ys,
                index_mapper=LinearMapper(range=self._plot.index_mapper.range),
                value_mapper=LinearMapper(range=self._plot.value_mapper.range),
                origin=self._plot.origin, arrow_size=0,
                line_color=color, line_width=linewidth,
                ep_index=np.array(x2) * scale,
                ep_value=np.array(y2) * scale)
            self._plot.add(quiverplot)
            # we need this to track how many quiverplots are in the current
            # plot
            self._quiverplots.append(quiverplot)

    def remove_short_lines(self, x1, y1, x2, y2, min_length=2):
        """ removes short lines from the array of lines
        parameters:
            x1,y1,x2,y2 - start and end coordinates of the lines
        returns:
            x1f,y1f,x2f,y2f - start and end coordinates of the lines, with
            short lines removed
        example usage:
            x1,y1,x2,y2=remove_short_lines([100,200,300],[100,200,300],[100,200,300],[102,210,320])
            3 input lines, 1 short line will be removed (100,100->100,102)
            returned coordinates: x1=[200,300]; y1=[200,300]; x2=[200,300]; y2=[210,320]
        """
        # keep a line if either axis displacement exceeds min_length
        x1f, y1f, x2f, y2f = [], [], [], []
        for i in range(len(x1)):
            if abs(x1[i] - x2[i]) > min_length or abs(y1[i] - y2[i]) > min_length:
                x1f.append(x1[i])
                y1f.append(y1[i])
                x2f.append(x2[i])
                y2f.append(y2[i])
        return x1f, y1f, x2f, y2f

    def handle_mapper(self):
        """Re-anchor text overlays in screen space after a pan/zoom."""
        for i in range(0, len(self._plot.overlays)):
            if hasattr(self._plot.overlays[i], 'real_position'):
                coord_x1, coord_y1 = self._plot.map_screen(
                    [self._plot.overlays[i].real_position])[0]
                self._plot.overlays[i].alternate_position = (coord_x1,
                                                             coord_y1)

    def plot_num_overlay(self, x, y, txt):
        """Add a numbered text box overlay at each (x[i], y[i])."""
        for i in range(0, len(x)):
            coord_x, coord_y = self._plot.map_screen([(x[i], y[i])])[0]
            ovlay = TextBoxOverlay(component=self._plot,
                                   text=str(txt[i]),
                                   alternate_position=(coord_x, coord_y),
                                   real_position=(x[i], y[i]),
                                   text_color="white",
                                   border_color="red")
            self._plot.overlays.append(ovlay)

    def update_image(self, image, is_float):
        """Replace the displayed image; is_float selects the stored dtype."""
        if is_float:
            self._plot_data.set_data('imagedata', image.astype(np.float))
        else:
            self._plot_data.set_data('imagedata', image.astype(np.byte))
        self._plot.request_redraw()
def traits_view(self):
    """Build the default view: a single field for the output directory."""
    return View(Item('save_directory', label='Output Directory'))
editable = False, horizontal_alignment = 'center' ) ], sortable = False, auto_size = False, show_toolbar = False, show_column_labels = False ) #-- The table editor used for the main view ------------------------------------ factors_view = View( Item( 'factors', id = 'factors', show_label = False, editor = factor_table_editor ), id = 'traits.examples.demo.Advanced.factors_view', kind = 'info', height = 0.30, ) factors_table_editor = TableEditor( columns = [ ObjectColumn( name = 'n', width = 0.5, editable = False, horizontal_alignment = 'center' ), ObjectColumn( name = 'factors', width = 0.5, editable = False, horizontal_alignment = 'center',
class Polygon(Component):
    """ A filled polygon component. """

    # -------------------------------------------------------------------------
    # Trait definitions.
    # -------------------------------------------------------------------------

    # The background color of this polygon.
    background_color = ColorTrait("white")

    # The color of the border of this polygon.
    border_color = ColorTrait("black")

    # The dash pattern to use for this polygon.
    border_dash = Any

    # The thickness of the border of this polygon.
    border_size = border_size_trait(1)

    # Event fired when the polygon is "complete".
    complete = Event

    # The rule to use to determine the inside of the polygon.
    # Maps the rule name onto the kiva draw mode used in draw_path().
    inside_rule = Map(
        {"winding": FILL_STROKE, "oddeven": EOF_FILL_STROKE},
        default_value="winding",
    )

    # The points that make up this polygon.
    model = Instance(PolygonModel, ())

    # Convenience property to access the model's points.
    points = Property

    # The color of each vertex.
    vertex_color = ColorTrait("black")

    # The size of each vertex.
    vertex_size = Float(3.0)

    traits_view = View(
        Group("<component>", id="component"),
        Group("<links>", id="links"),
        Group(
            "background_color",
            "_",
            "border_color",
            "_",
            "border_size",
            id="Box",
            style="custom",
        ),
    )

    colorchip_map = {"color": "color", "alt_color": "border_color"}

    # -------------------------------------------------------------------------
    # Traits property accessors
    # -------------------------------------------------------------------------

    def _get_points(self):
        # Property getter: delegates to the model.
        return self.model.points

    # -------------------------------------------------------------------------
    # 'Polygon' interface
    # -------------------------------------------------------------------------

    def reset(self):
        "Reset the polygon to the initial state"
        self.model.reset()
        self.event_state = "normal"

    # -------------------------------------------------------------------------
    # 'Component' interface
    # -------------------------------------------------------------------------

    def _draw_mainlayer(self, gc, view_bounds=None, mode="normal"):
        "Draw the component in the specified graphics context"
        self._draw_closed(gc)

    # -------------------------------------------------------------------------
    # Protected interface
    # -------------------------------------------------------------------------

    def _is_in(self, point):
        """ Test if the point (an x, y tuple) is within this polygonal region.

        To perform the test, we use the winding number inclusion algorithm,
        referenced in the comp.graphics.algorithms FAQ
        (http://www.faqs.org/faqs/graphics/algorithms-faq/) and described in
        detail here:

        http://softsurfer.com/Archive/algorithm_0103/algorithm_0103.htm
        """
        point_array = array((point, ))
        vertices = array(self.model.points)
        # winding=True selects the winding-number test; False -> even-odd
        winding = self.inside_rule == "winding"
        result = points_in_polygon(point_array, vertices, winding)
        return result[0]

    # -------------------------------------------------------------------------
    # Private interface
    # -------------------------------------------------------------------------

    def _draw_closed(self, gc):
        "Draw this polygon as a closed polygon"

        if len(self.model.points) > 2:
            # Set the drawing parameters.
            gc.set_fill_color(self.background_color_)
            gc.set_stroke_color(self.border_color_)
            gc.set_line_width(self.border_size)
            gc.set_line_dash(self.border_dash)

            # Draw the path.
            gc.begin_path()
            gc.move_to(
                self.model.points[0][0] - self.x,
                self.model.points[0][1] + self.y,
            )
            offset_points = [
                (x - self.x, y + self.y) for x, y in self.model.points
            ]
            gc.lines(offset_points)

            gc.close_path()
            gc.draw_path(self.inside_rule_)

            # Draw the vertices.
            self._draw_vertices(gc)

    def _draw_open(self, gc):
        "Draw this polygon as an open polygon"

        if len(self.model.points) > 2:
            # Set the drawing parameters.
            gc.set_fill_color(self.background_color_)
            gc.set_stroke_color(self.border_color_)
            gc.set_line_width(self.border_size)
            gc.set_line_dash(self.border_dash)

            # Draw the path (same as _draw_closed but without close_path).
            gc.begin_path()
            gc.move_to(
                self.model.points[0][0] - self.x,
                self.model.points[0][1] + self.y,
            )
            offset_points = [
                (x - self.x, y + self.y) for x, y in self.model.points
            ]
            gc.lines(offset_points)
            gc.draw_path(self.inside_rule_)

            # Draw the vertices.
            self._draw_vertices(gc)

    def _draw_vertices(self, gc):
        "Draw the vertices of the polygon."

        gc.set_fill_color(self.vertex_color_)
        gc.set_line_dash(None)

        offset = self.vertex_size / 2.0
        # NOTE(review): the path-drawing methods above offset points with
        # (x - self.x), while this uses (x + self.x) - the signs look
        # inconsistent; confirm against the enable coordinate conventions.
        offset_points = [(x + self.x, y + self.y) for x, y in self.model.points]

        if hasattr(gc, "draw_path_at_points"):
            # Fast path: stamp one small rect path at every vertex.
            path = gc.get_empty_path()
            path.rect(-offset, -offset, self.vertex_size, self.vertex_size)
            gc.draw_path_at_points(offset_points, path, FILL_STROKE)
        else:
            for x, y in offset_points:
                gc.draw_rect(
                    (
                        x - offset,
                        y - offset,
                        self.vertex_size,
                        self.vertex_size,
                    ),
                    FILL,
                )
class PipelineBrowser(HasTraits):
    """Tree-based browser for a TVTK rendering pipeline.

    Builds a TreeEditor over the pipeline of one or more render windows
    and shows it in a TraitsUI view; double-clicking a node opens that
    object's trait editor.
    """

    # The tree generator to use.
    tree_generator = Trait(FullTreeGenerator(), Instance(TreeGenerator))

    # The TVTK render window(s) associated with this browser.
    renwins = List

    # The root object to view in the pipeline.  If None (default), the
    # root object is the render_window of the Scene instance passed at
    # object instantiation time.
    root_object = List(TVTKBase)

    # Private traits.
    # The root of the tree to display.
    _root = Any

    ###########################################################################
    # `object` interface.
    ###########################################################################
    def __init__(self, renwin=None, **traits):
        """Initializes the object.

        Parameters
        ----------

        - renwin: `Scene` instance.  Defaults to None.

          This may be passed in addition to the renwins attribute
          which can be a list of scenes.

        """
        super(PipelineBrowser, self).__init__(**traits)
        self.ui = None
        self.view = None
        if renwin:
            self.renwins.append(renwin)

        # Build the initial tree root from root_object (or the render windows).
        self._root_object_changed(self.root_object)
        menu = Menu(Action(name='Refresh', action='editor.update_editor'),
                    Action(name='Expand all', action='editor.expand_all'))
        self.menu = menu

        nodes = self.tree_generator.get_nodes(menu)

        self.tree_editor = TreeEditor(nodes=nodes,
                                      editable=False,
                                      orientation='vertical',
                                      hide_root=True,
                                      on_dclick=self._on_dclick)
        self.view = View(Group(Item(name='_root',
                                    editor=self.tree_editor,
                                    resizable=True),
                               show_labels=False,
                               show_border=False,
                               orientation='vertical'),
                         title='Pipeline browser',
                         help=False,
                         resizable=True, undo=False, revert=False,
                         width=.3, height=.3)

    ###########################################################################
    # `PipelineBrowser` interface.
    ###########################################################################
    def show(self, parent=None):
        """Show the tree view if not already show.  If optional `parent`
        widget is passed, the tree is displayed inside the passed parent
        widget."""
        # If UI already exists, raise it and return.
        if self.ui and self.ui.control:
            try:
                self.ui.control.Raise()
            except AttributeError:
                # Toolkit control without Raise() (non-wx backend); ignore.
                pass
            else:
                return
        else:
            # No active ui, create one.
            if parent:
                self.ui = self.view.ui(self, parent=parent, kind='subpanel')
            else:
                self.ui = self.view.ui(self, parent=parent)

    def update(self):
        """Update the tree view."""
        # This is a hack: reaches into the UI's private editor list to force
        # a refresh of the tree editor.
        if self.ui and self.ui.control:
            try:
                ed = self.ui._editors[0]
                ed.update_editor()
                self.ui.control.Refresh()
            except (AttributeError, IndexError):
                pass

    # Another name for update.
    refresh = update

    def render(self):
        """Calls render on all render windows associated with this
        browser."""
        for rw in self.renwins:
            rw.render()

    ###########################################################################
    # Non-public interface.
    ###########################################################################
    def _make_default_root(self):
        # Default tree root: a collection node over all render windows.
        tree_gen = self.tree_generator
        objs = [x.render_window for x in self.renwins]
        node = TVTKCollectionNode(object=objs, name="Root",
                                  tree_generator=tree_gen)
        return node

    def _tree_generator_changed(self, tree_gen):
        """Traits event handler."""
        # Rebuild the tree root using the new generator, preserving the
        # current root object if one exists.
        if self._root:
            root_obj = self._root.object
        else:
            root_obj = self.root_object
        if root_obj:
            ro = root_obj
            # Wrap a single object into a list for the collection node.
            if not hasattr(root_obj, '__len__'):
                ro = [root_obj]

            self._root = TVTKCollectionNode(object=ro,
                                            name="Root",
                                            tree_generator=tree_gen)
        else:
            self._root = self._make_default_root()

        self.tree_editor.nodes = tree_gen.get_nodes(self.menu)
        self.update()

    def _root_object_changed(self, root_obj):
        """Trait handler called when the root object is assigned to."""
        tg = self.tree_generator
        if root_obj:
            self._root = TVTKCollectionNode(object=root_obj, name="Root",
                                            tree_generator=tg)
        else:
            self._root = self._make_default_root()
            # Keep root_object in sync with the default root's contents.
            self.root_object = self._root.object
        self.update()

    def _root_object_items_changed(self, list_event):
        """Trait handler called when the items of the list change."""
        self._root_object_changed(self.root_object)

    def _on_dclick(self, obj):
        """Callback that is called when nodes are double-clicked."""
        if hasattr(obj, 'object') and hasattr(obj.object, 'edit_traits'):
            # NOTE: 'object' shadows the builtin here (kept as-is).
            object = obj.object
            view = object.trait_view()
            view.handler = UICloseHandler(browser=self)
            # Re-render whenever the edited object's traits change.
            object.on_trait_change(self.render)
            ui = object.edit_traits(view=view)
class PythonShellView(HasTraits): ns = Dict() view = View(Item('ns', editor=ShellEditor(), show_label=False))
class MayaviViewer(HasTraits):
    """
    This class represents a Mayavi based viewer for the particles.  They
    are queried from a running solver.
    """

    # Helpers wrapping each particle array shown in the scene.
    particle_arrays = List(Instance(ParticleArrayHelper), [])
    # Names of the particle arrays, parallel to ``particle_arrays``.
    pa_names = List(Str, [])
    interpolator = Instance(InterpolatorView)

    # The default scalar to load up when running the viewer.
    scalar = Str("rho")

    scene = Instance(MlabSceneModel, ())

    ########################################
    # Traits to pull data from a live solver.
    live_mode = Bool(False, desc='if data is obtained from a running solver '
                     'or from saved files')

    shell = Button('Launch Python Shell')
    host = Str('localhost', desc='machine to connect to')
    port = Int(8800, desc='port to use to connect to solver')
    authkey = Password('pysph', desc='authorization key')
    host_changed = Bool(True)
    client = Instance(MultiprocessingClient)
    controller = Property(depends_on='live_mode, host_changed')

    ########################################
    # Traits to view saved solver output.
    files = List(Str, [])
    directory = Directory()
    current_file = Str('', desc='the file being viewed currently')
    update_files = Button('Refresh')
    file_count = Range(low='_low', high='_n_files', value=0,
                       desc='the file counter')
    play = Bool(False, desc='if all files are played automatically')
    play_delay = Float(0.2, desc='the delay between loading files')
    loop = Bool(False, desc='if the animation is looped')
    # This is len(files) - 1.
    _n_files = Int(0)
    _low = Int(0)

    ########################################
    # Timer traits.
    timer = Instance(Timer)
    interval = Range(0.5, 20.0, 2.0,
                     desc='frequency in seconds with which plot is updated')

    ########################################
    # Solver info/control.
    current_time = Float(0.0, desc='the current time in the simulation')
    time_step = Float(0.0, desc='the time-step of the solver')
    iteration = Int(0, desc='the current iteration number')
    pause_solver = Bool(False, desc='if the solver should be paused')

    ########################################
    # Movie.
    record = Bool(False, desc='if PNG files are to be saved for animation')
    frame_interval = Range(1, 100, 5, desc='the interval between screenshots')
    movie_directory = Str

    # internal counters.
    _count = Int(0)
    _frame_count = Int(0)
    _last_time = Float
    _solver_data = Any
    _file_name = Str
    _particle_array_updated = Bool

    ########################################
    # The layout of the dialog created
    view = View(HSplit(
        Group(
            Group(
                Group(
                    Item(name='directory'),
                    Item(name='current_file'),
                    Item(name='file_count'),
                    HGroup(Item(name='play'),
                           Item(name='play_delay',
                                label='Delay', resizable=True),
                           Item(name='loop'),
                           Item(name='update_files', show_label=False),
                           padding=0),
                    padding=0,
                    label='Saved Data',
                    selected=True,
                    enabled_when='not live_mode',
                ),
                Group(
                    Item(name='live_mode'),
                    Group(
                        Item(name='host'),
                        Item(name='port'),
                        Item(name='authkey'),
                        enabled_when='live_mode',
                    ),
                    label='Connection',
                ),
                layout='tabbed'),
            Group(
                Group(
                    Item(name='current_time'),
                    Item(name='time_step'),
                    Item(name='iteration'),
                    Item(name='pause_solver', enabled_when='live_mode'),
                    Item(name='interval', enabled_when='not live_mode'),
                    label='Solver',
                ),
                Group(
                    Item(name='record'),
                    Item(name='frame_interval'),
                    Item(name='movie_directory'),
                    label='Movie',
                ),
                layout='tabbed',
            ),
            Group(
                Item(name='particle_arrays',
                     style='custom',
                     show_label=False,
                     editor=ListEditor(use_notebook=True,
                                       deletable=False,
                                       page_name='.name')),
                Item(name='interpolator', style='custom', show_label=False),
                layout='tabbed'),
            Item(name='shell', show_label=False),
        ),
        Group(
            Item('scene',
                 editor=SceneEditor(scene_class=MayaviScene),
                 height=400,
                 width=600,
                 show_label=False),
        )),
        resizable=True,
        title='PySPH Particle Viewer',
        height=640,
        width=1024,
        handler=ViewerHandler)

    ######################################################################
    # `MayaviViewer` interface.
###################################################################### def on_close(self): self._handle_particle_array_updates() @on_trait_change('scene:activated') def start_timer(self): if not self.live_mode: # No need for the timer if we are rendering files. return # Just accessing the timer will start it. t = self.timer if not t.IsRunning(): t.Start(int(self.interval * 1000)) @on_trait_change('scene:activated') def update_plot(self): # No need to do this if files are being used. if not self.live_mode: return # do not update if solver is paused if self.pause_solver: return if self.client is None: self.host_changed = True controller = self.controller if controller is None: return self.current_time = t = controller.get_t() self.time_step = controller.get_dt() self.iteration = controller.get_count() arrays = [] for idx, name in enumerate(self.pa_names): pa = controller.get_named_particle_array(name) arrays.append(pa) pah = self.particle_arrays[idx] pah.set(particle_array=pa, time=t) self.interpolator.particle_arrays = arrays if self.record: self._do_snap() def run_script(self, path): """Execute a script in the namespace of the viewer. """ with open(path) as fp: data = fp.read() ns = self._get_shell_namespace() exec(compile(data, path, 'exec'), ns) ###################################################################### # Private interface. 
    ######################################################################
    def _do_snap(self):
        """Generate the animation."""
        p_arrays = self.particle_arrays
        if len(p_arrays) == 0:
            return
        if self.current_time == self._last_time:
            # Nothing new to capture.
            return

        if len(self.movie_directory) == 0:
            # Default to <solver output dir>/movie.
            controller = self.controller
            output_dir = controller.get_output_directory()
            movie_dir = os.path.join(output_dir, 'movie')
            self.movie_directory = movie_dir
        else:
            movie_dir = self.movie_directory
        if not os.path.exists(movie_dir):
            os.mkdir(movie_dir)

        interval = self.frame_interval
        count = self._count
        if count % interval == 0:
            fname = 'frame%06d.png' % (self._frame_count)
            p_arrays[0].scene.save_png(os.path.join(movie_dir, fname))
            self._frame_count += 1
            self._last_time = self.current_time
        self._count += 1

    @on_trait_change('host,port,authkey')
    def _mark_reconnect(self):
        if self.live_mode:
            self.host_changed = True

    @cached_property
    def _get_controller(self):
        ''' get the controller, also sets the iteration count '''
        if not self.live_mode:
            return None

        reconnect = self.host_changed

        if not reconnect:
            # Probe the existing connection; any failure forces a reconnect.
            try:
                c = self.client.controller
            except Exception as e:
                logger.info('Error: no connection or connection closed: '
                            'reconnecting: %s' % e)
                reconnect = True
                self.client = None
            else:
                try:
                    self.client.controller.get_count()
                except IOError:
                    self.client = None
                    reconnect = True

        if reconnect:
            self.host_changed = False
            try:
                if MultiprocessingClient.is_available((self.host, self.port)):
                    self.client = MultiprocessingClient(
                        address=(self.host, self.port),
                        authkey=self.authkey)
                else:
                    logger.info('Could not connect: Multiprocessing Interface'
                                ' not available on %s:%s'
                                % (self.host, self.port))
                    return None
            except Exception as e:
                logger.info('Could not connect: check if solver is '
                            'running:%s' % e)
                return None
            c = self.client.controller
            self.iteration = c.get_count()

        if self.client is None:
            return None
        else:
            return self.client.controller

    def _client_changed(self, old, new):
        if not self.live_mode:
            return

        self._clear()
        if new is None:
            return
        else:
            self.pa_names = self.client.controller.get_particle_array_names()

        self.particle_arrays = [
            self._make_particle_array_helper(self.scene, x)
            for x in self.pa_names
        ]
        self.interpolator = InterpolatorView(scene=self.scene)
        # Turn on the legend for the first particle array.
        if len(self.particle_arrays) > 0:
            self.particle_arrays[0].set(show_legend=True, show_time=True)

    def _timer_event(self):
        # catch all Exceptions else timer will stop
        try:
            self.update_plot()
        except Exception as e:
            logger.info('Exception: %s caught in timer_event' % e)

    def _interval_changed(self, value):
        t = self.timer
        if t is None:
            return
        if t.IsRunning():
            t.Stop()
            t.Start(int(value * 1000))

    def _timer_default(self):
        return Timer(int(self.interval * 1000), self._timer_event)

    def _pause_solver_changed(self, value):
        if self.live_mode:
            c = self.controller
            if c is None:
                return
            if value:
                c.pause_on_next()
            else:
                c.cont()

    def _record_changed(self, value):
        if value:
            self._do_snap()

    def _files_changed(self, value):
        if len(value) == 0:
            return
        else:
            d = os.path.dirname(os.path.abspath(value[0]))
            self.movie_directory = os.path.join(d, 'movie')
            # Avoid re-triggering _directory_changed.
            self.set(directory=d, trait_change_notify=False)
        self._n_files = len(value) - 1
        self._frame_count = 0
        self._count = 0
        self.frame_interval = 1
        fc = self.file_count
        self.file_count = 0
        if fc == 0:
            # Force an update when our original file count is 0.
            self._file_count_changed(fc)
        t = self.timer
        if not self.live_mode:
            if t.IsRunning():
                t.Stop()
        else:
            if not t.IsRunning():
                t.Stop()
                t.Start(self.interval * 1000)

    def _file_count_changed(self, value):
        # Save out any updates for the previous file if needed.
        self._handle_particle_array_updates()
        # Load the new file.
        fname = self.files[value]
        self._file_name = fname
        self.current_file = os.path.basename(fname)
        # Code to read the file, create particle array and setup the helper.
        data = load(fname)
        solver_data = data["solver_data"]
        arrays = data["arrays"]
        self._solver_data = solver_data
        self.current_time = t = float(solver_data['t'])
        self.time_step = float(solver_data['dt'])
        self.iteration = int(solver_data['count'])
        names = list(arrays.keys())
        pa_names = self.pa_names

        if len(pa_names) == 0:
            # First file ever loaded: build the helpers.
            self.interpolator = InterpolatorView(scene=self.scene)
            self.pa_names = names
            pas = []
            for name in names:
                pa = arrays[name]
                pah = self._make_particle_array_helper(self.scene, name)
                # Must set this after setting the scene.
                pah.set(particle_array=pa, time=t)
                pas.append(pah)
            self.particle_arrays = pas
        else:
            # Subsequent files: just refresh the existing helpers.
            for idx, name in enumerate(pa_names):
                pa = arrays[name]
                pah = self.particle_arrays[idx]
                pah.set(particle_array=pa, time=t)

        self.interpolator.particle_arrays = list(arrays.values())

        if self.record:
            self._do_snap()

    def _loop_changed(self, value):
        if value and self.play:
            self._play_changed(self.play)

    def _play_changed(self, value):
        # Swap the timer's callable between playback and live update.
        t = self.timer
        if value:
            t.Stop()
            t.callable = self._play_event
            t.Start(1000 * self.play_delay)
        else:
            t.Stop()
            t.callable = self._timer_event

    def _clear(self):
        self.pa_names = []
        self.scene.mayavi_scene.children[:] = []

    def _play_event(self):
        # Advance to the next file, looping or stopping at the end.
        nf = self._n_files
        pc = self.file_count
        pc += 1
        if pc > nf:
            if self.loop:
                pc = 0
            else:
                self.timer.Stop()
                pc = nf
        self.file_count = pc
        self._handle_particle_array_updates()

    def _play_delay_changed(self):
        if self.play:
            self._play_changed(self.play)

    def _scalar_changed(self, value):
        for pa in self.particle_arrays:
            pa.scalar = value

    def _update_files_fired(self):
        fc = self.file_count
        files = glob_files(self.files[fc])
        sort_file_list(files)
        self.files = files
        self.file_count = fc
        if self.play:
            self._play_changed(self.play)

    def _shell_fired(self):
        ns = self._get_shell_namespace()
        obj = PythonShellView(ns=ns)
        obj.edit_traits()

    def _get_shell_namespace(self):
        return dict(viewer=self, particle_arrays=self.particle_arrays,
                    interpolator=self.interpolator,
                    scene=self.scene, mlab=self.scene.mlab)

    def _directory_changed(self, d):
        # Re-scan the new directory for files with the same extension.
        ext = os.path.splitext(self.files[-1])[1]
        files = glob.glob(os.path.join(d, '*' + ext))
        if len(files) > 0:
            self._clear()
            sort_file_list(files)
            self.files = files
            self.file_count = min(self.file_count, len(files))
        else:
            pass

    def _live_mode_changed(self, value):
        if value:
            self._file_name = ''
            self.client = None
            self._clear()
            self._mark_reconnect()
            self.start_timer()
        else:
            self.client = None
            self._clear()
            self.timer.Stop()

    def _particle_array_helper_updated(self, value):
        self._particle_array_updated = True

    def _handle_particle_array_updates(self):
        # Called when the particle array helper fires an updated event.
        if self._particle_array_updated and self._file_name:
            sd = self._solver_data
            arrays = [x.particle_array for x in self.particle_arrays]
            detailed = self._requires_detailed_output(arrays)
            dump(self._file_name, arrays, sd, detailed_output=detailed,
                 only_real=False)
            self._particle_array_updated = False

    def _requires_detailed_output(self, arrays):
        # Detailed output is needed when any non-output property carries
        # non-constant data that would otherwise be lost on dump.
        detailed = False
        for pa in arrays:
            props = set(pa.properties.keys())
            output = set(pa.output_property_arrays)
            diff = props - output
            for prop in diff:
                array = pa.get(prop)
                if (array.max() - array.min()) > 0:
                    detailed = True
                    break
            if detailed:
                break
        return detailed

    def _make_particle_array_helper(self, scene, name):
        pah = ParticleArrayHelper(scene=scene, name=name, scalar=self.scalar)
        pah.on_trait_change(self._particle_array_helper_updated, 'updated')
        return pah
class PlotUI(HasTraits):
    """Chaco UI: contour plot of a 2D function with cross-section plots."""

    # Traits view definitions:
    traits_view = View(
        Group(Item('container',
                   editor=ComponentEditor(size=(800, 600)),
                   show_label=False)),
        buttons=NoButtons,
        resizable=True)

    plot_edit_view = View(Group(Item('num_levels'), Item('colormap')),
                          buttons=["OK", "Cancel"])

    # Number of contour levels to draw.
    num_levels = Int(15)
    colormap = Enum(list(color_map_name_dict.keys()))

    #-------------------------------------------------------------------------
    # Private Traits
    #-------------------------------------------------------------------------
    _image_index = Instance(GridDataSource)
    _image_value = Instance(ImageData)
    _cmap = Trait(jet, Callable)

    #-------------------------------------------------------------------------
    # Public View interface
    #-------------------------------------------------------------------------
    def __init__(self, *args, **kwargs):
        super(PlotUI, self).__init__(*args, **kwargs)
        self.create_plot()

    def create_plot(self):
        """Build the contour plots, cross plots, colorbar and container."""
        # Create the mapper, etc
        self._image_index = GridDataSource(array([]), array([]),
                                           sort_order=("ascending",
                                                       "ascending"))
        image_index_range = DataRange2D(self._image_index)
        # Listen for line-inspector selections written into the metadata.
        self._image_index.on_trait_change(self._metadata_changed,
                                          "metadata_changed")

        self._image_value = ImageData(data=array([]), value_depth=1)
        image_value_range = DataRange1D(self._image_value)

        # Create the contour plots
        self.polyplot = ContourPolyPlot(
            index=self._image_index,
            value=self._image_value,
            index_mapper=GridMapper(range=image_index_range),
            color_mapper=self._cmap(image_value_range),
            levels=self.num_levels)

        self.lineplot = ContourLinePlot(
            index=self._image_index,
            value=self._image_value,
            index_mapper=GridMapper(range=self.polyplot.index_mapper.range),
            levels=self.num_levels)

        # Add a left axis to the plot
        left = PlotAxis(orientation='left',
                        title="y",
                        mapper=self.polyplot.index_mapper._ymapper,
                        component=self.polyplot)
        self.polyplot.overlays.append(left)

        # Add a bottom axis to the plot
        bottom = PlotAxis(orientation='bottom',
                          title="x",
                          mapper=self.polyplot.index_mapper._xmapper,
                          component=self.polyplot)
        self.polyplot.overlays.append(bottom)

        # Add some tools to the plot
        self.polyplot.tools.append(
            PanTool(self.polyplot, constrain_key="shift"))
        self.polyplot.overlays.append(
            ZoomTool(component=self.polyplot, tool_mode="box",
                     always_on=False))
        # Two crossed line inspectors drive the cross-section plots below.
        self.polyplot.overlays.append(
            LineInspector(component=self.polyplot,
                          axis='index_x',
                          inspect_mode="indexed",
                          write_metadata=True,
                          is_listener=False,
                          color="white"))
        self.polyplot.overlays.append(
            LineInspector(component=self.polyplot,
                          axis='index_y',
                          inspect_mode="indexed",
                          write_metadata=True,
                          color="white",
                          is_listener=False))

        # Add these two plots to one container
        contour_container = OverlayPlotContainer(padding=20,
                                                 use_backbuffer=True,
                                                 unified_draw=True)
        contour_container.add(self.polyplot)
        contour_container.add(self.lineplot)

        # Create a colorbar
        cbar_index_mapper = LinearMapper(range=image_value_range)
        self.colorbar = ColorBar(index_mapper=cbar_index_mapper,
                                 plot=self.polyplot,
                                 padding_top=self.polyplot.padding_top,
                                 padding_bottom=self.polyplot.padding_bottom,
                                 padding_right=40,
                                 resizable='v',
                                 width=30)

        self.pd = ArrayPlotData(line_index=array([]),
                                line_value=array([]),
                                scatter_index=array([]),
                                scatter_value=array([]),
                                scatter_color=array([]))

        # Horizontal cross-section plot (value along a row).
        self.cross_plot = Plot(self.pd, resizable="h")
        self.cross_plot.height = 100
        self.cross_plot.padding = 20
        self.cross_plot.plot(("line_index", "line_value"),
                             line_style="dot")
        self.cross_plot.plot(
            ("scatter_index", "scatter_value", "scatter_color"),
            type="cmap_scatter",
            name="dot",
            color_mapper=self._cmap(image_value_range),
            marker="circle",
            marker_size=8)

        self.cross_plot.index_range = self.polyplot.index_range.x_range

        self.pd.set_data("line_index2", array([]))
        self.pd.set_data("line_value2", array([]))
        self.pd.set_data("scatter_index2", array([]))
        self.pd.set_data("scatter_value2", array([]))
        self.pd.set_data("scatter_color2", array([]))

        # Vertical cross-section plot (value along a column).
        self.cross_plot2 = Plot(self.pd, width=140, orientation="v",
                                resizable="v", padding=20,
                                padding_bottom=160)
        self.cross_plot2.plot(("line_index2", "line_value2"),
                              line_style="dot")
        self.cross_plot2.plot(
            ("scatter_index2", "scatter_value2", "scatter_color2"),
            type="cmap_scatter",
            name="dot",
            color_mapper=self._cmap(image_value_range),
            marker="circle",
            marker_size=8)

        self.cross_plot2.index_range = self.polyplot.index_range.y_range

        # Create a container and add components
        self.container = HPlotContainer(padding=40, fill_padding=True,
                                        bgcolor="white", use_backbuffer=False)
        inner_cont = VPlotContainer(padding=0, use_backbuffer=True)
        inner_cont.add(self.cross_plot)
        inner_cont.add(contour_container)
        self.container.add(self.colorbar)
        self.container.add(inner_cont)
        self.container.add(self.cross_plot2)

    def update(self, model):
        # NOTE(review): ``minz``/``maxz`` are plain attributes set here and
        # read by _metadata_changed; update() must run before the inspectors
        # fire — confirm against the caller.
        self.minz = model.minz
        self.maxz = model.maxz
        self.colorbar.index_mapper.range.low = self.minz
        self.colorbar.index_mapper.range.high = self.maxz
        self._image_index.set_data(model.xs, model.ys)
        self._image_value.data = model.zs
        self.pd.set_data("line_index", model.xs)
        self.pd.set_data("line_index2", model.ys)
        self.container.invalidate_draw()
        self.container.request_redraw()

    #-------------------------------------------------------------------------
    # Event handlers
    #-------------------------------------------------------------------------
    def _metadata_changed(self, old, new):
        """ This function takes out a cross section from the image data, based
        on the line inspector selections, and updates the line and scatter
        plots."""
        self.cross_plot.value_range.low = self.minz
        self.cross_plot.value_range.high = self.maxz
        self.cross_plot2.value_range.low = self.minz
        self.cross_plot2.value_range.high = self.maxz
        if "selections" in self._image_index.metadata:
            x_ndx, y_ndx = self._image_index.metadata["selections"]
            if y_ndx and x_ndx:
                self.pd.set_data("line_value",
                                 self._image_value.data[y_ndx, :])
                self.pd.set_data("line_value2",
                                 self._image_value.data[:, x_ndx])
                xdata, ydata = self._image_index.get_data()
                xdata, ydata = xdata.get_data(), ydata.get_data()
                self.pd.set_data("scatter_index", array([xdata[x_ndx]]))
                self.pd.set_data("scatter_index2", array([ydata[y_ndx]]))
                self.pd.set_data("scatter_value",
                                 array([self._image_value.data[y_ndx, x_ndx]]))
                self.pd.set_data("scatter_value2",
                                 array([self._image_value.data[y_ndx, x_ndx]]))
                self.pd.set_data("scatter_color",
                                 array([self._image_value.data[y_ndx, x_ndx]]))
                self.pd.set_data("scatter_color2",
                                 array([self._image_value.data[y_ndx, x_ndx]]))
            else:
                # Selection cleared: blank the cross-section data.
                self.pd.set_data("scatter_value", array([]))
                self.pd.set_data("scatter_value2", array([]))
                self.pd.set_data("line_value", array([]))
                self.pd.set_data("line_value2", array([]))

    def _colormap_changed(self):
        self._cmap = color_map_name_dict[self.colormap]
        if hasattr(self, "polyplot"):
            value_range = self.polyplot.color_mapper.range
            self.polyplot.color_mapper = self._cmap(value_range)
            value_range = self.cross_plot.color_mapper.range
            self.cross_plot.color_mapper = self._cmap(value_range)
            # FIXME: change when we decide how best to update plots using
            # the shared colormap in plot object
            self.cross_plot.plots["dot"][0].color_mapper = self._cmap(
                value_range)
            self.cross_plot2.plots["dot"][0].color_mapper = self._cmap(
                value_range)
            self.container.request_redraw()

    def _num_levels_changed(self):
        if self.num_levels > 3:
            self.polyplot.levels = self.num_levels
            self.lineplot.levels = self.num_levels
class PolarDiscr(HasTraits):
    '''
    Manager of the microplane arrays.

    This class is responsible for the generation and initialization
    and state management of an array of microplanes. Additionally, it
    can perform the setup of damage function parameters using the
    value of the microplane integrator object.
    '''
    mfn_class = Class(None)

    #-------------------------------------------------------------------------
    # Common parameters for isotropic and anisotropic damage function
    # specifications
    #-------------------------------------------------------------------------
    n_mp = Range(0, 50, 6, label='Number of microplanes', auto_set=False)

    E = Float(34e+3, label="E", desc="Young's Modulus", auto_set=False,
              enter_set=True)
    nu = Float(0.2, label='nu', desc="Poison's ratio", auto_set=False,
               enter_set=True)
    c_T = Float(
        0.0, label='c_T',
        desc='fraction of tangential stress accounted on each microplane',
        auto_set=False, enter_set=True)

    #-------------------------------------------------------------------------
    # list of angles
    #-------------------------------------------------------------------------
    alpha_list = Property(Array, depends_on='n_mp')

    @cached_property
    def _get_alpha_list(self):
        # Microplane orientations: n_mp angles evenly distributed over
        # [0, Pi), offset by half a sector width.
        return array(
            [Pi / self.n_mp * (i - 0.5) for i in range(1, self.n_mp + 1)])

    #-------------------------------------------------------------------------
    # Damage function specification
    #-------------------------------------------------------------------------
    phi_fn = EitherType(klasses=[
        PhiFnGeneral, PhiFnGeneralExtended, PhiFnGeneralExtendedExp,
        PhiFnStrainSoftening, PhiFnStrainHardening, PhiFnStrainHardeningLinear,
        PhiFnStrainHardeningBezier
    ])

    def _phi_fn_default(self):
        # FIX: the original used the Python 2 ``print`` *statement*, which is
        # a SyntaxError on Python 3.  The function form below behaves
        # identically on both Python 2 and 3.
        print('setting phi_fn default')
        return PhiFnStrainSoftening(polar_discr=self)

    def _phi_fn_changed(self):
        print('setting phi_fn changed')
        # Keep the back-reference from the damage function to this manager.
        self.phi_fn.polar_discr = self

    varied_params = List(Str, [])

    #-------------------------------------------------------------------------
    # Management of spatially varying parameters depending on the value of
    # mats_eval
    #-------------------------------------------------------------------------
    varpars = Dict

    def _varpars_default(self):
        return self._get_varpars()

    @on_trait_change('phi_fn,varied_params')
    def _update_varpars(self):
        self.varpars = self._get_varpars()

    def _get_varpars(self):
        '''
        reset the varpar list according to the current phi_fn object.
        '''
        params = self.phi_fn.identify_parameters()
        varset = {}
        for key in params:
            varset[key] = VariedParam(phi_fn=self.phi_fn,
                                      mats_eval=self,
                                      varname=key)
            # Switch on variation for parameters the user selected.
            if key in self.varied_params:
                varset[key].switched_on = True
        return varset

    varpar_list = Property(List(VariedParam), depends_on='varpars')

    @cached_property
    def _get_varpar_list(self):
        return [self.varpars[key]
                for key in self.phi_fn.identify_parameters()]

    # variable selectable in the table of varied params (just for viewing)
    current_varpar = Instance(VariedParam)

    def _current_varpar_default(self):
        if len(self.varpar_list) > 0:
            return self.varpar_list[0]
        return None

    @on_trait_change('phi_fn')
    def set_current_varpar(self):
        if len(self.varpar_list) > 0:
            self.current_varpar = self.varpar_list[0]

    #-------------------------------------------------------------------------
    # Get the damage state for all microplanes
    #-------------------------------------------------------------------------
    def get_phi_arr(self, sctx, e_max_arr):
        '''
        Return the damage coefficients
        '''
        # gather the coefficients for parameters depending on the orientation
        carr_list = [
            self.varpars[key].polar_fn_vectorized(self.alpha_list)
            for key in self.phi_fn.identify_parameters()
        ]
        # vectorize the damage function evaluation
        n_arr = 1 + len(carr_list)
        phi_fn_vectorized = frompyfunc(self.phi_fn.get_value, n_arr, 1)
        # damage parameter for each microplane
        return phi_fn_vectorized(e_max_arr, *carr_list)

    def get_polar_fn_fracture_energy_arr(self, sctx, e_max_arr):
        '''
        Return the fracture energy contributions
        '''
        carr_list = [
            self.varpars[key].polar_fn_vectorized(self.alpha_list)
            for key in self.phi_fn.identify_parameters()
        ]
        # vectorize the damage function evaluation
        n_arr = 1 + len(carr_list)
        integ_phi_fn_vectorized = frompyfunc(self.phi_fn.get_integ, n_arr, 1)
        return self.E * integ_phi_fn_vectorized(e_max_arr, *carr_list)

    polar_fn_group = Group(
        Group(Item('n_mp@', width=200),
              Item('E'),
              Item('nu'),
              Item('c_T'),
              Spring(),
              label='Elasticity parameters'),
        Group(Item('phi_fn@', show_label=False),
              label='Damage parameters'),
        Group(VSplit(
            Item('varpar_list', label='List of material variables',
                 show_label=False, editor=varpar_editor),
            Item('current_varpar', label='Selected variable',
                 show_label=False, style='custom', resizable=True),
            dock='tab',
        ),
            label='Angle-dependent variations'),
        Include('config_param_vgroup'),
        layout='tabbed',
        springy=True,
        dock='tab',
        id='ibvpy.mats.matsXD_cmdm.MATSXDPolarDiscr',
    )

    traits_view = View(Include('polar_fn_group'),
                       resizable=True,
                       scrollable=True,
                       width=0.6,
                       height=0.9)
class DigSource(HasPrivateTraits):
    """Expose digitization information from a file.

    Parameters
    ----------
    file : File
        Path to the BEM file (*.fif).

    Attributes
    ----------
    fid : Array, shape = (3, 3)
        Each row contains the coordinates for one fiducial point, in the
        order Nasion, RAP, LAP. If no file is set all values are 0.
    """

    file = File(exists=True, filter=['*.fif'])

    inst_fname = Property(Str, depends_on='file')
    inst_dir = Property(depends_on='file')
    _info = Property(depends_on='file')

    points_filter = Any(desc="Index to select a subset of the head shape "
                        "points")
    n_omitted = Property(Int, depends_on=['points_filter'])

    # head shape
    _hsp_points = Property(depends_on='_info',
                           desc="Head shape points in the file (n x 3 array)")
    points = Property(depends_on=['_hsp_points', 'points_filter'],
                      desc="Head shape points selected by the filter (n x 3 "
                      "array)")

    # fiducials
    lpa = Property(depends_on='_info',
                   desc="LPA coordinates (1 x 3 array)")
    nasion = Property(depends_on='_info',
                      desc="Nasion coordinates (1 x 3 array)")
    rpa = Property(depends_on='_info',
                   desc="RPA coordinates (1 x 3 array)")

    # EEG
    eeg_points = Property(depends_on='_info',
                          desc="EEG sensor coordinates (N x 3 array)")
    hpi_points = Property(depends_on='_info',
                          desc='HPI coil coordinates (N x 3 array)')

    view = View(
        VGroup(Item('file'),
               Item('inst_fname', show_label=False, style='readonly')))

    @cached_property
    def _get_n_omitted(self):
        # Number of head-shape points excluded by the boolean filter.
        if self.points_filter is None:
            return 0
        else:
            return np.sum(self.points_filter == False)  # noqa: E712

    @cached_property
    def _get__info(self):
        """Read measurement/digitization info from ``self.file``.

        Returns an Info-like object, or None (after resetting ``file`` and
        showing an error dialog) when no usable digitizer data is found.
        """
        if self.file:
            info = None
            fid, tree, _ = fiff_open(self.file)
            fid.close()
            # Prefer full measurement info; fall back to isotrak-only files.
            if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0:
                info = read_info(self.file, verbose=False)
            elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0:
                info = read_dig_montage(fif=self.file)

            if isinstance(info, DigMontage):
                # Convert the montage into a minimal Info with a dig list.
                info.transform_to_head()
                digs = list()
                _append_fiducials(digs, info.lpa, info.nasion, info.rpa)
                for idx, pos in enumerate(info.hsp):
                    dig = {'coord_frame': FIFF.FIFFV_COORD_HEAD,
                           'ident': idx,
                           'kind': FIFF.FIFFV_POINT_EXTRA,
                           'r': pos}
                    digs.append(dig)
                info = _empty_info(1)
                info['dig'] = digs

            elif info is None or info['dig'] is None:
                error(None, "The selected FIFF file does not contain "
                      "digitizer information. Please select a different "
                      "file.", "Error Reading FIFF File")
                self.reset_traits(['file'])
                return
            else:
                # check that all fiducial points are present
                has_point = {FIFF.FIFFV_POINT_LPA: False,
                             FIFF.FIFFV_POINT_NASION: False,
                             FIFF.FIFFV_POINT_RPA: False}
                for d in info['dig']:
                    if d['kind'] == FIFF.FIFFV_POINT_CARDINAL:
                        has_point[d['ident']] = True
                if not all(has_point.values()):
                    # Try to estimate the missing fiducials first.
                    points = _fiducial_coords(info['dig'])
                    if len(points) == 3:
                        _append_fiducials(info['dig'], *points.T)
                    else:
                        missing = []
                        if not has_point[FIFF.FIFFV_POINT_LPA]:
                            missing.append('LPA')
                        if not has_point[FIFF.FIFFV_POINT_NASION]:
                            missing.append('Nasion')
                        if not has_point[FIFF.FIFFV_POINT_RPA]:
                            missing.append('RPA')
                        error(None, "The selected FIFF file does not contain "
                              "all cardinal points (missing: %s). Please "
                              "select a different file."
                              % ', '.join(missing), "Error Reading FIFF File")
                        self.reset_traits(['file'])
                        return

            return info

    @cached_property
    def _get_inst_dir(self):
        return op.dirname(self.file)

    @cached_property
    def _get_inst_fname(self):
        if self.file:
            return op.basename(self.file)
        else:
            return '-'

    @cached_property
    def _get__hsp_points(self):
        if not self._info:
            return np.zeros((0, 3))

        points = np.array([d['r'] for d in self._info['dig']
                           if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
        # Normalize the empty case to shape (0, 3).
        points = np.empty((0, 3)) if len(points) == 0 else points
        return points

    @cached_property
    def _get_points(self):
        if self.points_filter is None:
            return self._hsp_points
        else:
            return self._hsp_points[self.points_filter]

    def _cardinal_point(self, ident):
        """Coordinates for a cardinal point."""
        if self._info:
            for d in self._info['dig']:
                if (d['kind'] == FIFF.FIFFV_POINT_CARDINAL and
                        d['ident'] == ident):
                    return d['r'][None, :]
        # Default to the origin when the point (or file) is missing.
        return np.zeros((1, 3))

    @cached_property
    def _get_nasion(self):
        return self._cardinal_point(FIFF.FIFFV_POINT_NASION)

    @cached_property
    def _get_lpa(self):
        return self._cardinal_point(FIFF.FIFFV_POINT_LPA)

    @cached_property
    def _get_rpa(self):
        return self._cardinal_point(FIFF.FIFFV_POINT_RPA)

    @cached_property
    def _get_eeg_points(self):
        if self._info:
            out = [d['r'] for d in self._info['dig']
                   if d['kind'] == FIFF.FIFFV_POINT_EEG and
                   d['coord_frame'] == FIFF.FIFFV_COORD_HEAD]
            out = np.empty((0, 3)) if len(out) == 0 else np.array(out)
            return out
        else:
            return np.empty((0, 3))

    @cached_property
    def _get_hpi_points(self):
        if self._info:
            out = [d['r'] for d in self._info['dig']
                   if d['kind'] == FIFF.FIFFV_POINT_HPI and
                   d['coord_frame'] == FIFF.FIFFV_COORD_HEAD]
            out = np.empty((0, 3)) if len(out) == 0 else np.array(out)
            return out
        else:
            return np.empty((0, 3))

    def _file_changed(self):
        # A new file invalidates any previously applied point selection.
        self.reset_traits(('points_filter', ))
html_editor = HTMLEditor() else: from traitsui.api import HTMLEditor html_editor = HTMLEditor(format_text=False) # Local imports import rest_html from python_function_info import PythonFunctionInfo html_view = View( Item('_html', show_label=False, editor=html_editor, springy= True, resizable=True, ), id='help_view', resizable=True, buttons=NoButtons, ) class HtmlInfoUI(HasTraits): """ Model for the a window to display Html in an application. This widget has the following APIs: set_text(text): display raw text. set_html(text): display html formatted text. set_function_help(function_name, module_name):
class InterpolatorView(HasTraits):
    """View that interpolates particle data onto a grid and shows it."""

    # The bounds on which to interpolate.
    bounds = Array(cols=3, dtype=float,
                   desc='spatial bounds for the interpolation '
                        '(xmin, xmax, ymin, ymax, zmin, zmax)')

    # The number of points to interpolate onto.
    num_points = Int(100000, enter_set=True, auto_set=False,
                     desc='number of points on which to interpolate')

    # The particle arrays to interpolate from.
    particle_arrays = List

    # The scalar to interpolate.
    scalar = Str('rho', desc='name of the active scalar to view')

    # Sync'd trait with the scalar lut manager.
    show_legend = Bool(False, desc='if the scalar legend is to be displayed')

    # Enable/disable the interpolation
    visible = Bool(False, desc='if the interpolation is to be displayed')

    # A button to use the set bounds.
    set_bounds = Button('Set Bounds')

    # A button to recompute the bounds.
    recompute_bounds = Button('Recompute Bounds')

    # Private traits. ######################################################

    # The interpolator we are a view for.
    interpolator = Instance(Interpolator)

    # The mlab plot for this particle array.
    plot = Instance(PipelineBase)

    scalar_list = List

    scene = Instance(MlabSceneModel)

    source = Instance(PipelineBase)

    # Set when particle_arrays changed but the interpolator was not yet told.
    _arrays_changed = Bool(False)

    # View definition ######################################################
    view = View(
        Item(name='visible'),
        Item(name='scalar', editor=EnumEditor(name='scalar_list')),
        Item(name='num_points'),
        Item(name='bounds'),
        Item(name='set_bounds', show_label=False),
        Item(name='recompute_bounds', show_label=False),
        Item(name='show_legend'),
    )

    # Private protocol ###################################################
    def _change_bounds(self):
        interp = self.interpolator
        if interp is not None:
            interp.set_domain(self.bounds, self.interpolator.shape)
            self._update_plot()

    def _setup_interpolator(self):
        # Lazily construct the interpolator; refresh its particle arrays
        # only when they have actually changed.
        if self.interpolator is None:
            interpolator = Interpolator(self.particle_arrays,
                                        num_points=self.num_points)
            self.bounds = interpolator.bounds
            self.interpolator = interpolator
        else:
            if self._arrays_changed:
                self.interpolator.update_particle_arrays(self.particle_arrays)
                self._arrays_changed = False

    # Trait handlers #####################################################
    def _particle_arrays_changed(self, pas):
        # Available scalars = union of all property names across arrays.
        if len(pas) > 0:
            all_props = reduce(set.union,
                               [set(x.properties.keys()) for x in pas])
        else:
            all_props = set()
        self.scalar_list = list(all_props)
        self._arrays_changed = True
        self._update_plot()

    def _num_points_changed(self, value):
        interp = self.interpolator
        if interp is not None:
            bounds = self.interpolator.bounds
            shape = get_nx_ny_nz(value, bounds)
            interp.set_domain(bounds, shape)
            self._update_plot()

    def _recompute_bounds_fired(self):
        bounds = get_bounding_box(self.particle_arrays)
        self.bounds = bounds
        self._change_bounds()

    def _set_bounds_fired(self):
        self._change_bounds()

    def _bounds_default(self):
        return [0, 1, 0, 1, 0, 1]

    @on_trait_change('scalar, visible')
    def _update_plot(self):
        """(Re)build or hide the mlab pipeline for the interpolated scalar."""
        if self.visible:
            mlab = self.scene.mlab
            self._setup_interpolator()
            interp = self.interpolator
            prop = interp.interpolate(self.scalar)
            if self.source is None:
                src = mlab.pipeline.scalar_field(interp.x, interp.y,
                                                 interp.z, prop)
                self.source = src
            else:
                # Reuse the existing source; just reset its data.
                self.source.mlab_source.reset(x=interp.x, y=interp.y,
                                              z=interp.z, scalars=prop)
                src = self.source
            if self.plot is None:
                # Cut plane for volumetric data, plain surface otherwise.
                if interp.dim == 3:
                    plot = mlab.pipeline.scalar_cut_plane(src)
                else:
                    plot = mlab.pipeline.surface(src)
                self.plot = plot
                scm = plot.module_manager.scalar_lut_manager
                scm.set(show_legend=self.show_legend,
                        use_default_name=False,
                        data_name=self.scalar)
                # Keep the legend toggle in sync with the LUT manager.
                self.sync_trait('show_legend', scm, mutual=True)
            else:
                self.plot.visible = True
                scm = self.plot.module_manager.scalar_lut_manager
                scm.data_name = self.scalar
        else:
            if self.plot is not None:
                self.plot.visible = False
class Visualization(HasTraits):
    '''
    Mayavi visualization
    '''
    scene = Instance(MlabSceneModel, ())

    view = View(
        Item('scene', editor=SceneEditor(scene_class=MayaviScene),
             height=250, width=300, show_label=False),
        resizable=True  # We need this to resize with the parent widget
    )

    def createOrbital(self):
        """Draw the orbital mesh from the shared ``points`` data."""
        global points
        self.scene.mlab.clf()
        x, y, z, psi = points.readPoints()
        self.mesh = self.scene.mlab.mesh(x, y, z, scalars=psi,
                                         colormap='hsv',
                                         vmax=np.pi, vmin=-np.pi)
        # This low res colormap is simpler and less distracting
        my_map = [[255, 0, 0, 255],
                  [140, 0, 140, 255],
                  [140, 0, 140, 255],
                  [0, 0, 255, 255],
                  [0, 0, 255, 255],
                  [255, 153, 18, 255],
                  [255, 153, 18, 255],
                  [255, 0, 0, 255]]
        # Comment out the next line to use a smoother high res colormap
        self.mesh.module_manager.scalar_lut_manager.lut.table = my_map
        self.axes = self.scene.mlab.axes()
        self.fig = self.scene.mlab.gcf()
        self.source = self.mesh.mlab_source
        if zoom.read():
            self.scene.mlab.view(0, 90, np.max((x, y, z)) * 5, (0, 0, 0))
            self.scene.reset_zoom()

    def updateOrbital(self):
        """Refresh the orbital mesh in place with the latest points."""
        global points, zoom
        x, y, z, psi = points.readPoints()
        self.source.set(x=x, y=y, z=z, scalars=psi)
        if zoom.read():
            self.scene.reset_zoom()
        # Rebuild the axes so their ranges track the new data.
        self.axes.remove()
        self.axes = self.scene.mlab.axes()

    def createCrossing(self):
        """Draw the two crossing surfaces plus the Cl and Br atoms."""
        self.scene.mlab.clf()
        bond_length, Low, High = points.readPoints()
        self.High_mesh = self.scene.mlab.mesh(High[0], High[1], High[2],
                                              color=(.9, .1, .1),
                                              opacity=0.95)
        self.Low_mesh = self.scene.mlab.mesh(Low[0], Low[1], Low[2],
                                             color=(.1, .1, .9),
                                             opacity=0.95)
        self.Cl_point = self.scene.mlab.points3d(0, 0, 0, resolution=12,
                                                 color=(0, 1, 0))
        self.Br_point = self.scene.mlab.points3d(0., 0., 1.5,
                                                 resolution=12,
                                                 color=(0.6, 0.4, 0.7))
        self.axes = self.scene.mlab.axes()
        self.fig = self.scene.mlab.gcf()
        self.Br_source = self.Br_point.mlab_source
        self.Low_source = self.Low_mesh.mlab_source
        self.High_source = self.High_mesh.mlab_source
        self.scene.mlab.view(0, 90, roll=0, focalpoint=(0, 0, 0))
        if zoom.read():
            self.scene.reset_zoom()

    def updateCrossing(self):
        """Refresh the crossing surfaces and move the Br atom."""
        bond_length, Low, High = points.readPoints()
        self.Low_source.set(x=Low[0], y=Low[1], z=Low[2])
        self.High_source.set(x=High[0], y=High[1], z=High[2])
        self.Br_source.set(x=0, y=0, z=bond_length)
        if zoom.read():
            self.scene.reset_zoom()
        # Rebuild the axes so their ranges track the new data.
        self.axes.remove()
        self.axes = self.scene.mlab.axes()
class ParticleArrayHelper(HasTraits):
    """
    This class manages a particle array and sets up the necessary
    plotting related information for it.
    """

    # The particle array we manage.
    particle_array = Instance(ParticleArray)

    # The name of the particle array.
    name = Str

    # Current time.
    time = Float(0.0)

    # The active scalar to view.
    scalar = Str('rho', desc='name of the active scalar to view')

    # The mlab scalar plot for this particle array.
    plot = Instance(PipelineBase)

    # The mlab vectors plot for this particle array.
    plot_vectors = Instance(PipelineBase)

    # List of available scalars in the particle array.
    scalar_list = List(Str)

    scene = Instance(MlabSceneModel)

    # Sync'd trait with the scalar lut manager.
    show_legend = Bool(False, desc='if the scalar legend is to be displayed')

    # Show all scalars.
    list_all_scalars = Bool(False, desc='if all scalars should be listed')

    # Sync'd trait with the dataset to turn on/off visibility.
    visible = Bool(True, desc='if the particle array is to be displayed')

    # Show the time of the simulation on screen.
    show_time = Bool(False, desc='if the current time is displayed')

    # Edit the scalars.
    edit_scalars = Button('More options ...')

    # Show vectors.
    show_vectors = Bool(False, desc='if vectors should be displayed')

    vectors = Str('u, v, w', enter_set=True, auto_set=False,
                  desc='the vectors to display')

    mask_on_ratio = Int(3, desc='mask one in specified points')

    scale_factor = Float(1.0, desc='scale factor for vectors',
                         enter_set=True, auto_set=False)

    edit_vectors = Button('More options ...')

    # Private attribute to store the Text module.
    _text = Instance(PipelineBase)

    # Extra scalars to show.  These will be added and saved to the data if
    # needed.
    extra_scalars = List(Str)

    # Set to True when the particle array is updated with a new property say.
    updated = Event

    # Private attribute to store old value of visibility in case of empty
    # arrays.
    _old_visible = Bool(True)

    ########################################
    # View related code.
    view = View(
        Item(name='name', show_label=False, editor=TitleEditor()),
        Group(
            Group(
                Group(
                    Item(name='visible'),
                    Item(name='show_legend'),
                    Item(name='scalar', editor=EnumEditor(name='scalar_list')),
                    Item(name='list_all_scalars'),
                    Item(name='show_time'),
                    columns=2,
                ),
                Item(name='edit_scalars', show_label=False),
                label='Scalars',
            ),
            Group(
                Item(name='show_vectors'),
                Item(name='vectors'),
                Item(name='mask_on_ratio'),
                Item(name='scale_factor'),
                Item(name='edit_vectors', show_label=False),
                label='Vectors',
            ),
            layout='tabbed'
        )
    )

    # Private protocol ############################################
    def _add_vmag(self, pa):
        """Add a 'vmag' (velocity magnitude) property to *pa* if missing."""
        if 'vmag' not in pa.properties:
            if 'vmag2' in pa.output_property_arrays:
                vmag = numpy.sqrt(pa.get('vmag2', only_real_particles=False))
            else:
                u, v, w = pa.get('u', 'v', 'w', only_real_particles=False)
                vmag = numpy.sqrt(u**2 + v**2 + w**2)
            pa.add_property(name='vmag', data=vmag)
            if len(pa.output_property_arrays) > 0:
                # We do not call add_output_arrays when the default is empty
                # as if it is empty, all arrays are saved anyway.  However,
                # adding just vmag in this case will mean that when the
                # particle array is saved it will only save vmag!  This is
                # not what we want, hence we add vmag *only* if the
                # output_property_arrays is non-zero length.
                pa.add_output_arrays(['vmag'])
            self.updated = True

    def _get_scalar(self, pa, scalar):
        """Return the requested scalar from the given particle array.

        Extra scalars (e.g. 'vmag') are computed on demand via the
        matching ``_add_<scalar>`` method before being fetched.
        """
        if scalar in self.extra_scalars:
            method_name = '_add_' + scalar
            method = getattr(self, method_name)
            method(pa)
        return pa.get(scalar, only_real_particles=False)

    # Traits handlers #############################################
    def _edit_scalars_fired(self):
        self.plot.edit_traits()

    def _edit_vectors_fired(self):
        self.plot_vectors.edit_traits()

    def _particle_array_changed(self, old, pa):
        """Rebuild or refresh the glyph plot for the new particle array."""
        self.name = pa.name

        self._list_all_scalars_changed(self.list_all_scalars)

        # Update the plot.
        x, y, z = pa.get('x', 'y', 'z', only_real_particles=False)
        s = self._get_scalar(pa, self.scalar)
        p = self.plot
        mlab = self.scene.mlab
        empty = len(x) == 0
        if old is None:
            old_empty = True
        else:
            old_x = old.get('x', only_real_particles=False)
            old_empty = len(old_x) == 0
        if p is None and not empty:
            src = mlab.pipeline.scalar_scatter(x, y, z, s)
            p = mlab.pipeline.glyph(src, mode='point', scale_mode='none')
            p.actor.property.point_size = 3
            scm = p.module_manager.scalar_lut_manager
            scm.set(show_legend=self.show_legend,
                    use_default_name=False,
                    data_name=self.scalar)
            self.sync_trait('visible', p, mutual=True)
            self.sync_trait('show_legend', scm, mutual=True)
            # set_arrays(p.mlab_source.m_data, pa)
            self.plot = p
        elif not empty:
            if len(x) == len(p.mlab_source.x):
                # Same number of points: update in place.
                p.mlab_source.set(x=x, y=y, z=z, scalars=s)
                if self.plot_vectors:
                    self._vectors_changed(self.vectors)
            else:
                # Point count changed: reset the whole source.
                if self.plot_vectors:
                    u, v, w = self._get_vectors_for_plot(self.vectors)
                    p.mlab_source.reset(x=x, y=y, z=z, scalars=s,
                                        u=u, v=v, w=w)
                else:
                    p.mlab_source.reset(x=x, y=y, z=z, scalars=s)
                p.mlab_source.update()

        if empty and not old_empty:
            if p is not None:
                src = p.parent.parent
                self._old_visible = src.visible
                src.visible = False
        if old_empty and not empty:
            if p is not None:
                p.parent.parent.visible = self._old_visible
        self._show_vectors_changed(self.show_vectors)

        # Setup the time.
        self._show_time_changed(self.show_time)

    def _scalar_changed(self, value):
        p = self.plot
        if p is not None:
            p.mlab_source.scalars = self._get_scalar(
                self.particle_array, value
            )
            p.module_manager.scalar_lut_manager.data_name = value

    def _list_all_scalars_changed(self, list_all_scalars):
        """Refresh ``scalar_list`` from the particle array's properties.

        Note: ``pa.properties.keys()`` is wrapped in ``list(...)`` since on
        Python 3 a dict view cannot be concatenated to a list (TypeError).
        """
        pa = self.particle_array
        if list_all_scalars:
            sc_list = list(pa.properties.keys())
            self.scalar_list = sorted(set(sc_list + self.extra_scalars))
        else:
            if len(pa.output_property_arrays) > 0:
                self.scalar_list = sorted(
                    set(pa.output_property_arrays + self.extra_scalars)
                )
            else:
                sc_list = list(pa.properties.keys())
                self.scalar_list = sorted(set(sc_list + self.extra_scalars))

    def _show_time_changed(self, value):
        """Show/hide the on-screen time text, creating it lazily."""
        txt = self._text
        mlab = self.scene.mlab
        if value:
            if txt is not None:
                txt.visible = True
            elif self.plot is not None:
                mlab.get_engine().current_object = self.plot
                txt = mlab.text(0.01, 0.01, 'Time = 0.0', width=0.35)
                self._text = txt
                self._time_changed(self.time)
        else:
            if txt is not None:
                txt.visible = False

    def _get_vectors_for_plot(self, vectors):
        """Return the (u, v, w) arrays named in *vectors* or None."""
        pa = self.particle_array
        comps = [x.strip() for x in vectors.split(',')]
        # Three components are needed; anything else returns None.
        if len(comps) == 3:
            try:
                vec = pa.get(*comps, only_real_particles=False)
            except AttributeError:
                return None
            else:
                return vec

    def _vectors_changed(self, value):
        vec = self._get_vectors_for_plot(value)
        if vec is not None:
            self.plot.mlab_source.set(
                vectors=numpy.c_[vec[0], vec[1], vec[2]]
            )

    def _show_vectors_changed(self, value):
        pv = self.plot_vectors
        if pv is not None:
            pv.visible = value
        elif self.plot is not None and value:
            # First time: push the vector data then create the module.
            self._vectors_changed(self.vectors)
            pv = self.scene.mlab.pipeline.vectors(
                self.plot.mlab_source.m_data,
                mask_points=self.mask_on_ratio,
                scale_factor=self.scale_factor
            )
            self.plot_vectors = pv

    def _mask_on_ratio_changed(self, value):
        pv = self.plot_vectors
        if pv is not None:
            pv.glyph.mask_points.on_ratio = value

    def _scale_factor_changed(self, value):
        pv = self.plot_vectors
        if pv is not None:
            pv.glyph.glyph.scale_factor = value

    def _time_changed(self, value):
        txt = self._text
        if txt is not None:
            txt.text = 'Time = %.3e' % (value)

    def _extra_scalars_default(self):
        return ['vmag']
def traits_view(self):
    """Build the analysis-edit view: tabbed intercept/baseline/IC tables,
    optional blanks and flux groups, plus revert/save buttons."""
    value_cols = [
        ObjectColumn(name='name', editable=False),
        ObjectColumn(name='value', format='%0.7f', width=100),
        ObjectColumn(name='error', format='%0.7f', width=100),
    ]
    # Detector tables show an extra read-only detector column.
    detector_cols = [
        ObjectColumn(name='name', editable=False),
        ObjectColumn(name='detector', editable=False),
        ObjectColumn(name='value', format='%0.7f', width=100),
        ObjectColumn(name='error', format='%0.7f', width=100),
    ]

    iso_grp = VGroup(
        UItem('isotopes',
              editor=TableEditor(columns=value_cols, sortable=False)),
        label='Intercepts',
        show_border=True)
    baseline_grp = VGroup(
        UItem('baselines',
              editor=TableEditor(sortable=False, columns=detector_cols)),
        label='Baselines',
        show_border=True)
    blank_grp = VGroup(
        UItem('blanks',
              editor=TableEditor(sortable=False, columns=value_cols)),
        label='Blanks',
        show_border=True,
        defined_when='blanks')
    icgrp = VGroup(
        UItem('ic_factors',
              editor=TableEditor(sortable=False, columns=detector_cols)),
        label='IC Factors',
        show_border=True,
        defined_when='ic_factors')

    # Revert/save toolbar.
    bgrp = HGroup(
        icon_button_editor('revert_button', 'arrow_undo',
                           tooltip='Undo changes',
                           enabled_when='dirty'),
        icon_button_editor('revert_original_button', 'arrow_left',
                           tooltip='Revert to original values'),
        icon_button_editor('save_button', 'disk',
                           enabled_when='dirty',
                           tooltip='Save changes'),
        spring)
    flux_grp = HGroup(
        UItem('object.flux.value'),
        Label(PLUSMINUS),
        UItem('object.flux.error'),
        label='Flux (J)',
        defined_when='object.flux',
        show_border=True)

    return View(
        VGroup(
            Group(iso_grp, baseline_grp, icgrp, layout='tabbed'),
            blank_grp,
            flux_grp,
            bgrp),
        buttons=['OK', 'Cancel'],
        handler=AnalysisEditViewHandler(),
        resizable=True,
        title=self.title,
        x=0.05,
        y=0.05)
def init(self, parent):
    """Finishes initializing the editor by creating the underlying
    toolkit widget: table view, models, context menus, optional filter
    toolbar, optional row-edit pane, and all trait synchronization."""
    factory = self.factory
    self.filter = factory.filter

    # Derive columns from the first object's editable traits when the
    # factory does not specify any.
    columns = factory.columns[:]
    if (len(columns) == 0) and (len(self.value) > 0):
        columns = [
            ObjectColumn(name=name)
            for name in self.value[0].editable_traits()
        ]
    self.columns = columns

    if factory.table_view_factory is not None:
        self.table_view = factory.table_view_factory(editor=self)
    if factory.source_model_factory is not None:
        self.source_model = factory.source_model_factory(editor=self)
    if factory.model_factory is not None:
        self.model = factory.model_factory(editor=self)

    # Create the table view and model
    self.model.setDynamicSortFilter(True)
    self.model.setSourceModel(self.source_model)
    self.table_view.setModel(self.model)

    # Create the vertical header context menu and connect to its signals
    self.header_menu = QtGui.QMenu(self.table_view)
    insertable = factory.row_factory is not None
    if factory.editable:
        if insertable:
            action = self.header_menu.addAction("Insert new item")
            action.triggered.connect(self._on_context_insert)
        if factory.deletable:
            action = self.header_menu.addAction("Delete item")
            action.triggered.connect(self._on_context_remove)
    if factory.reorderable:
        if factory.editable and (insertable or factory.deletable):
            self.header_menu.addSeparator()
        self.header_menu_up = self.header_menu.addAction("Move item up")
        self.header_menu_up.triggered.connect(self._on_context_move_up)
        self.header_menu_down = self.header_menu.addAction(
            "Move item down"
        )
        self.header_menu_down.triggered.connect(self._on_context_move_down)

    # Create the empty space context menu and connect its signals
    self.empty_menu = QtGui.QMenu(self.table_view)
    action = self.empty_menu.addAction("Add new item")
    action.triggered.connect(self._on_context_append)

    # When sorting is enabled, the first column is initially displayed
    # with the triangle indicating it is the sort index, even though no
    # sorting has actually been done.  Sort here for UI/model consistency.
    if self.factory.sortable and not self.factory.reorderable:
        self.model.sort(0, QtCore.Qt.AscendingOrder)

    # Connect to the mode specific selection handler and select the first
    # row/column/cell.  Do this before creating the edit_view to make
    # sure that it has a valid item to use when constructing its view.
    smodel = self.table_view.selectionModel()
    mode_slot = getattr(self, "_on_%s_selection" % factory.selection_mode)
    smodel.selectionChanged.connect(mode_slot)
    self.table_view.setCurrentIndex(self.model.index(0, 0))

    # Create the toolbar if necessary
    if factory.show_toolbar and len(factory.filters) > 0:
        main_view = QtGui.QWidget()
        layout = QtGui.QVBoxLayout(main_view)
        layout.setContentsMargins(0, 0, 0, 0)
        self.toolbar_ui = self.edit_traits(
            parent=parent,
            kind="subpanel",
            view=View(
                Group(
                    Item("filter{View}", editor=factory._filter_editor),
                    Item("filter_summary{Results}", style="readonly"),
                    spring,
                    orientation="horizontal",
                ),
                resizable=True,
            ),
        )
        self.toolbar_ui.parent = self.ui
        layout.addWidget(self.toolbar_ui.control)
        layout.addWidget(self.table_view)
    else:
        main_view = self.table_view

    # Create auxiliary editor and encompassing splitter if necessary
    mode = factory.selection_mode
    if (factory.edit_view == " ") or mode not in {"row", "rows"}:
        self.control = main_view
    else:
        if factory.orientation == "horizontal":
            self.control = QtGui.QSplitter(QtCore.Qt.Horizontal)
        else:
            self.control = QtGui.QSplitter(QtCore.Qt.Vertical)
        self.control.setSizePolicy(
            QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding
        )
        self.control.addWidget(main_view)
        self.control.setStretchFactor(0, 2)

        # Create the row editor below the table view
        editor = InstanceEditor(view=factory.edit_view, kind="subpanel")
        self._ui = self.edit_traits(
            parent=self.control,
            kind="subpanel",
            view=View(
                Item(
                    "selected_row",
                    style="custom",
                    editor=editor,
                    show_label=False,
                    resizable=True,
                    width=factory.edit_view_width,
                    height=factory.edit_view_height,
                ),
                resizable=True,
                handler=factory.edit_view_handler,
            ),
        )
        self._ui.parent = self.ui
        self.control.addWidget(self._ui.control)
        self.control.setStretchFactor(1, 1)

    # Connect to the click and double click handlers
    self.table_view.clicked.connect(self._on_click)
    self.table_view.doubleClicked.connect(self._on_dclick)

    # Make sure we listen for 'items' changes as well as complete list
    # replacements
    self.context_object.on_trait_change(
        self.update_editor, self.extended_name + "_items", dispatch="ui"
    )

    # Listen for changes to traits on the objects in the list
    self.context_object.on_trait_change(
        self.refresh_editor, self.extended_name + ".-", dispatch="ui"
    )

    # Listen for changes on column definitions
    self.on_trait_change(self._update_columns, "columns", dispatch="ui")
    self.on_trait_change(
        self._update_columns, "columns_items", dispatch="ui"
    )

    # Set up the required externally synchronized traits
    is_list = mode in ("rows", "columns", "cells")
    self.sync_value(factory.click, "click", "to")
    self.sync_value(factory.dclick, "dclick", "to")
    self.sync_value(factory.columns_name, "columns", is_list=True)
    self.sync_value(factory.selected, "selected", is_list=is_list)
    self.sync_value(
        factory.selected_indices, "selected_indices", is_list=is_list
    )
    self.sync_value(factory.filter_name, "filter", "from")
    self.sync_value(factory.filtered_indices, "filtered_indices", "to")
    self.sync_value(factory.update_filter_name, "update_filter", "from")

    self.auto_size = self.factory.auto_size

    # Initialize the ItemDelegates for each column
    self._update_columns()