Example #1
class XDOTSEval(DOTSEval):
    '''
    Domain with uniform FE-time-step-eval.
    '''
    elem_r_contours = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_elem_r_contours(self):
        '''
        Get the coordinates of the element vertices grouped into three subsets:
        1) vertices with a positive value of the level set function
        2) vertices with a negative value of the level set function
        3) intersections of the level set with the element edges

        This serves as the input for the triangulation.

        It has to be done in a loop, as the number of points changes from
        element to element. Works in 1D and 2D.
        '''

        corner_dof_r = array(self.fets_eval.dof_r)[self.dof_r_corner_idx]

        i_r = self.sdomain.ls_intersection_r

        # print 'dof r corner ',self.dof_r_corner_idx
        pos_r = []
        neg_r = []
        dn_ls_val = self.dof_node_ls_values[:, self.dof_r_corner_idx]
        for dof_vals in dn_ls_val:
            pos_r.append(corner_dof_r[dof_vals.flatten() > 0.])
            neg_r.append(corner_dof_r[dof_vals.flatten() < 0.])
        return [pos_r, neg_r, i_r]

    # @todo - use the vertex_X_map available in sdomain
    dof_r_corner_idx = Property(Array(bool))

    @cached_property
    def _get_dof_r_corner_idx(self):
        '''
        Extract the indices of the corner dofs for visualization.
        Works in 1D and 2D.

        @todo - this is done in cell_grid_spec - use the method from there
        '''
        dof_r = array(self.fets_eval.dof_r)
        abs_dof_r = abs(dof_r)
        if abs_dof_r.shape[1] == 1:
            return (abs_dof_r[:] == 1.).flatten()
        elif abs_dof_r.shape[1] == 2:
            return (abs_dof_r[:, 0] * abs_dof_r[:, 1]) == 1.
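
    # For illustration (hypothetical values): for a bilinear quad with one
    # interior node, dof_r = [[-1, -1], [1, -1], [1, 1], [-1, 1], [0, 0]]
    # gives abs_dof_r row products [1, 1, 1, 1, 0], so the mask evaluates to
    # [True, True, True, True, False] - only nodes with |r_0| * |r_1| == 1
    # are corners.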

    elem_triangulation = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_elem_triangulation(self):
        '''Get discretization for integration over the element.

        In 1D - the output is a set of line segments
        In 2D - a list of triangles is returned
        In 3D - does not work - probably some topological info needs to be added.
        '''
        division = []
        for pos_r, neg_r, i_r in zip(*self.elem_r_contours):
            point_set = [vstack((pos_r, i_r)), vstack((neg_r, i_r))]
            division.append(self.fets_eval.get_triangulation(point_set))
        return division

    def get_eps(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        X_mtx = sctx.X
        r_pnt = sctx.loc
        B_mtx = self.fets_eval.get_B_mtx(r_pnt, X_mtx,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        eps = dot(B_mtx, u)
        shape = eps.shape[0]
        if shape == 1:
            return eps
        elif shape == 3:
            return map2d_eps_eng_to_mtx(eps)
        elif shape == 6:
            return map3d_eps_eng_to_mtx(eps)

    def get_eps_m(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        X_mtx = sctx.X
        r_pnt = sctx.loc
        B_mtx = self.fets_eval.get_B_mtx(r_pnt, X_mtx,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        eps = dot(B_mtx, u)
        return array([[eps[0], eps[2]], [eps[2], eps[1]]])

    def get_eps_f(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        X_mtx = sctx.X
        r_pnt = sctx.loc
        B_mtx = self.fets_eval.get_B_mtx(r_pnt, X_mtx,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        eps = dot(B_mtx, u)
        return array([[eps[3], eps[5]], [eps[5], eps[4]]])

    def get_u(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        N_mtx = self.fets_eval.get_N_mtx(sctx.loc,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        #        print "N ",N_mtx
        #        print "u ",u
        #        print "x u",dot( N_mtx, u )
        return dot(N_mtx, u)

    def get_u_m(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        N_mtx = self.fets_eval.get_N_mtx(sctx.loc,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        return dot(N_mtx, u)[:2]

    def get_u_rf(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        N_mtx = self.fets_eval.get_N_mtx(sctx.loc,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        return dot(N_mtx, u)[1:2]

    def get_u_rm(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        N_mtx = self.fets_eval.get_N_mtx(sctx.loc,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        return dot(N_mtx, u)[:1]

    def get_u_f(self, sctx, u):
        e_id = sctx.e_id
        p_id = sctx.p_id
        N_mtx = self.fets_eval.get_N_mtx(sctx.loc,
                                         self.dof_node_ls_values[e_id],
                                         self.vtk_ls_values[e_id][p_id])
        return dot(N_mtx, u)[2:]

    def map_u(self, sctx, U):
        ix = sctx.elem.get_dof_map()
        sctx.dots = self  # todo: this is ugly
        #        sctx.r = fets_eval.map_to_local( sctx.elem, sctx.X )
        u = U[ix]
        return u

    rte_dict = Property(Dict, depends_on='fets_eval')

    @cached_property
    def _get_rte_dict(self):
        rte_dict = {}

        rte_dict.update({
            'eps':
            RTraceEvalUDomainFieldVar(eval=self.get_eps,
                                      ts=self,
                                      u_mapping=self.map_u),
            'u':
            RTraceEvalUDomainFieldVar(eval=self.get_u,
                                      ts=self,
                                      u_mapping=self.map_u),
            'u_m':
            RTraceEvalUDomainFieldVar(eval=self.get_u_m,
                                      ts=self,
                                      u_mapping=self.map_u),
            'u_f':
            RTraceEvalUDomainFieldVar(eval=self.get_u_f,
                                      ts=self,
                                      u_mapping=self.map_u),
            'eps_m':
            RTraceEvalUDomainFieldVar(eval=self.get_eps_m,
                                      ts=self,
                                      u_mapping=self.map_u),
            'eps_f':
            RTraceEvalUDomainFieldVar(eval=self.get_eps_f,
                                      ts=self,
                                      u_mapping=self.map_u),
            'u_rm':
            RTraceEvalUDomainFieldVar(eval=self.get_u_rm,
                                      ts=self,
                                      u_mapping=self.map_u),
            'u_rf':
            RTraceEvalUDomainFieldVar(eval=self.get_u_rf,
                                      ts=self,
                                      u_mapping=self.map_u),
        })
        for key, eval_ in list(self.fets_eval.rte_dict.items()):
            rte_dict[key] = RTraceEvalUDomainFieldVar(name=key,
                                                      u_mapping=self.map_u,
                                                      eval=eval_,
                                                      fets_eval=self.fets_eval)
        return rte_dict

    rt_triangles = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_rt_triangles(self):
        triangles = []
        for pos_r, neg_r, i_r in zip(*self.elem_r_contours):
            if i_r.shape[1] == 1:
                # 1D case: only a scalar shift and normal are defined here;
                # the shift_rs_pts branch below is not generalized to 1D yet
                shift = self.sdomain.rt_tol
                norm_vct = 1

            elif i_r.shape[1] == 2:
                # direction vector of the intersection
                dir_vct = i_r[1] - i_r[0]
                # normal_vector
                norm_vct = array([-dir_vct[1], dir_vct[0]], dtype=float)
                shift_vct = (norm_vct / linalg.norm(norm_vct)
                             * self.sdomain.rt_tol)
                # ???
                delta_rs = array([shift_vct[1] ** 2 / shift_vct[0],
                                  shift_vct[0] ** 2 / shift_vct[1]],
                                 dtype=float)
                # check for zero division
                i_inf = isinf(delta_rs)
                delta_rs[i_inf] = zeros(2)[i_inf]
                shift_rs = shift_vct + delta_rs
                # TODO:make for arbitrary number of i_r
                shift_rs_pts = vstack((shift_rs, shift_rs))
                # print 'new directions ',shift_rs
            pos_pts = i_r.copy()
            neg_pts = i_r.copy()
            # check which coordinate does not lie on the element edge
            edge_idx = where(abs(i_r) != 1)

            pos_dir = pos_r[0] - i_r[0]  # TODO:generalize also for 1d
            # check that the normal points out from the crack
            if dot(pos_dir, norm_vct) > 0.:
                #pos_pts =  i_r + shift_vct
                #neg_pts =  i_r - shift_vct
                pos_pts[edge_idx] += shift_rs_pts[edge_idx]
                neg_pts[edge_idx] -= shift_rs_pts[edge_idx]
            else:
                #pos_pts =  i_r - shift_vct
                #neg_pts =  i_r + shift_vct
                pos_pts[edge_idx] -= shift_rs_pts[edge_idx]
                neg_pts[edge_idx] += shift_rs_pts[edge_idx]

            if self.sdomain.ls_side_tag == 'both':
                point_set = [
                    vstack((pos_r, pos_pts)),
                    vstack((neg_r, neg_pts))
                ]
            elif self.sdomain.ls_side_tag == 'pos':
                point_set = [vstack((pos_r, pos_pts))]
            elif self.sdomain.ls_side_tag == 'neg':
                point_set = [vstack((neg_r, neg_pts))]
            triangles.append(self.fets_eval.get_triangulation(point_set))
        return triangles

    ip_X = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_ip_X(self):
        '''
        Integration points in global 3D coordinates, needed for the evaluation
        of the level set values; can also be used for postprocessing.
        '''
        ip_X = []
        for X_mtx, ip_addr0, ip_addr1 in zip(self.sdomain.elem_X_map,
                                             self.ip_offset[:-1],
                                             self.ip_offset[1:]):
            ip_slice = slice(ip_addr0, ip_addr1)
            ip_X.append(
                self.fets_eval.get_vtk_r_glb_arr(X_mtx,
                                                 self.ip_coords[ip_slice]))
        return vstack(ip_X)

    ip_offset = Property

    def _get_ip_offset(self):
        return self.integ_structure[0]

    ip_coords = Property

    def _get_ip_coords(self):
        return self.integ_structure[1]

    ip_weights = Property

    def _get_ip_weights(self):
        return self.integ_structure[2]

    state_start_elem_grid = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_state_start_elem_grid(self):
        # create the element grid to store the offsets of the elements
        state_elem_grid = self.sdomain.intg_elem_grid.copy()
        elem_grid_ix = self.sdomain.intg_grid_ix
        state_elem_grid[elem_grid_ix] = self.ip_offset[:-1]
        return state_elem_grid

    state_end_elem_grid = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_state_end_elem_grid(self):
        # create the element grid to store the offsets of the elements
        state_elem_grid = self.sdomain.intg_elem_grid.copy()
        elem_grid_ix = self.sdomain.intg_grid_ix
        state_elem_grid[elem_grid_ix] = self.ip_offset[1:]
        return state_elem_grid

    integ_structure = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_integ_structure(self):
        ip_off_list = [0]
        ip_coo_list = []
        ip_wei_list = []
        ip_offset = 0

        for elem_triangles in self.elem_triangulation:
            ip_coords = self.fets_eval.get_ip_coords(elem_triangles,
                                                     self.fets_eval.int_order)
            ip_offset += ip_coords.shape[0]
            ip_off_list.append(ip_offset)
            ip_coo_list.append(ip_coords)
            ip_wei_list.append(
                self.fets_eval.get_ip_weights(elem_triangles,
                                              self.fets_eval.int_order))
        ip_off_arr = array(ip_off_list, dtype=int)

        # handle the case of empty domain
        if len(ip_coo_list) == 0:
            raise ValueError(
                'empty subdomain - something wrong in the fe_domain management'
            )

        ip_coo_arr = vstack(ip_coo_list)[:, self.fets_eval.dim_slice]
        ip_w_arr = hstack(ip_wei_list)

        return (ip_off_arr, ip_coo_arr, ip_w_arr)

    # Integration over the discontinuity
    # discretize the integration points lying on the level set
    disc_integ_structure = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_disc_integ_structure(self):
        # nip_disc - number of integration points along the discontinuity
        if self.fets_eval.nip_disc == 0:
            return array([zeros_like(self.ip_offset)], dtype=int)
        ip_off_list = [0]
        ip_coo_list = []
        ip_wei_list = []
        ip_offset = 0
        id_map = array([arange(self.sdomain.fe_grid_slice.r_i.shape[1])],
                       dtype=int)

        # @todo - the r_i in fe_grid_slice ignores the boundary in the FEXSubdomain
        # change - to remove the irrelevant elements beyond the boundary.

        for segment in self.sdomain.fe_grid_slice.r_i:
            ip_coords = self.fets_eval.get_ip_coords([segment, id_map],
                                                     self.fets_eval.nip_disc)
            ip_offset += ip_coords.shape[0]
            ip_off_list.append(ip_offset)
            ip_coo_list.append(ip_coords)
            ip_wei_list.append(
                self.fets_eval.get_ip_weights([segment, id_map],
                                              self.fets_eval.nip_disc))
        return (array(ip_off_list, dtype=int),
                vstack(ip_coo_list)[:, self.fets_eval.dim_slice],
                vstack(ip_wei_list).flatten())

#        if self.fets_eval.nip_disc:
#            ip_off_list = [0,1]
#        else:
# ip_off_list = zeros_like(self.ip_offset)#necessary for consistent interface in corr pred
#
# ip_coord = self.sdomain.fe_grid_slice.r_i.flatten()#this can work just in 1D
# return array(ip_off_list),vstack(ip_coo_list)[:, self.fets_eval.dim_slice],vstack(ip_wei_list).flatten()
# return array(ip_off_list , dtype =
# int),array([ip_coord],dtype=float),array([2.],dtype=float)

    ip_disc_coords = Property

    def _get_ip_disc_coords(self):
        return self.disc_integ_structure[1]

    ip_disc_weights = Property

    def _get_ip_disc_weights(self):
        return self.disc_integ_structure[2]

    ip_disc_offset = Property

    def _get_ip_disc_offset(self):
        return self.disc_integ_structure[0]

    ip_ls_values = Property(Array(Float))

    def _get_ip_ls_values(self):
        # TODO: define the interaction with the level set
        ls_fn = frompyfunc(self.sdomain.ls_fn_X, 2, 1)

        X, Y, Z = self.ip_X.T  # 3d coords - vtk
        return ls_fn(X, Y).flatten()

    ip_normal = Property(Array(Float))

    def _get_ip_normal(self):
        ir_shape = self.sdomain.ls_intersection_r.shape[2]
        if ir_shape == 1:  # 1D
            # assuming that the first node is the left-most
            if self.dof_node_ls_values[0, 0] < 0.:
                normal = [1.]
            else:
                normal = [-1.]
        elif ir_shape == 2:  # 2D
            for i_r in self.sdomain.ls_intersection_r:
                # direction vector of the intersection
                dir_vct = i_r[1] - i_r[0]
                normal = array([-dir_vct[1], dir_vct[0]])  # normal_vector
        return normal

    dof_node_ls_values = Property(Array(Float))

    def _get_dof_node_ls_values(self):
        return self.sdomain.dof_node_ls_values

    def get_vtk_X(self, position):
        return vstack(self.vtk_X)

    vtk_X = Property(
        depends_on='sdomain.changed_structure, sdomain.+changed_geometry')

    @cached_property
    def _get_vtk_X(self):
        '''Get the discretization points based on the fets_eval 
        associated with the current domain.
        '''
        vtk_X = []
        for triangle, e in zip(self.rt_triangles, self.sdomain.elements):
            X_mtx = e.get_X_mtx()
            # TODO:slicing works just for 2D
            vtk_X.append(
                self.fets_eval.get_vtk_r_glb_arr(X_mtx, triangle[0][:, :2]))
        # print 'vtk_X ',vtk_X
        # return array(vtk_X)
        return vtk_X  # has to stay a list for an arbitrary number of points per element

    debug_cell_data = Bool(False)

    def get_vtk_cell_data(self, position, point_offset, cell_offset):
        cells = []
        for triangle in self.rt_triangles:  # TODO: the offset can be done more simply
            cells.append(triangle[1] + point_offset)
            point_offset += triangle[0].shape[0]
        vtk_cells = vstack(cells)
        #        vtk_cells = vstack([triangle[1]
        #                     for triangle in self.rt_triangles])
        # print "vtk_cells_array", vtk_cells
        n_cell_points = vtk_cells.shape[1]
        n_cells = vtk_cells.shape[0]
        vtk_cell_array = hstack((ones(
            (n_cells, 1), dtype=int) * n_cell_points, vtk_cells))
        vtk_cell_offsets = arange(
            n_cells, dtype=int) * (n_cell_points + 1) + cell_offset
        if n_cell_points == 3:
            cell_str = 'Triangle'
        elif n_cell_points == 2:
            cell_str = 'Line'
        cell_class = tvtk_helper.get_class(cell_str)
        cell_type = cell_class().cell_type
        vtk_cell_types = ones(n_cells, dtype=int) * cell_type
        if self.debug_cell_data:
            print('vtk_cells_array', vtk_cell_array)
            print('vtk_cell_offsets', vtk_cell_offsets)
            print('vtk_cell_types', vtk_cell_types)

        return vtk_cell_array.flatten(), vtk_cell_offsets, vtk_cell_types

    def get_vtk_r_arr(self, idx):
        return self.rt_triangles[idx][0]

    def get_vtk_pnt_ip_map(self, idx):
        return self.fets_eval.get_vtk_pnt_ip_map_data(
            self.rt_triangles[idx][0])

    vtk_ls_values = Property(
        depends_on='sdomain.changed_structure, sdomain.+changed_geometry')

    @cached_property
    def _get_vtk_ls_values(self):
        vtk_val = []
        # TODO: define the interaction with the level set
        ls_fn = frompyfunc(self.sdomain.ls_fn_X, 2, 1)
        for ip_coords_X_e in self.vtk_X:
            X, Y, Z = ip_coords_X_e.T
            vtk_val.append(ls_fn(X, Y))  # TODO:3D
        return vtk_val

    def _apply_on_ip_pnts(self, fn):
        X_el = self.sdomain.elem_X_map

        # Prepare the result array with the same dimensions as the result of
        # one call to fn (for a single ip) - must get the first ip of the
        # first element
        ###

        # test call to the function with single output - to get the shape of
        # the result.
        out_single = fn(self.ip_coords[0], X_el[0], self.dof_node_ls_values[0],
                        self.ip_ls_values[0])
        out_grid_shape = (self.ip_coords.shape[0], ) + out_single.shape
        out_grid = zeros(out_grid_shape)

        # loop over elements
        i_el = 0
        for ip_addr0, ip_addr1, X_e, node_ls in zip(self.ip_offset[:-1],
                                                    self.ip_offset[1:], X_el,
                                                    self.dof_node_ls_values):
            ip_slice = slice(ip_addr0, ip_addr1)
            for ip_r, ip_ls in zip(
                    self.ip_coords[ip_slice], self.ip_ls_values[ip_slice]
            ):  # this could be dangerous when the discontinuity has more int pts than the 'volume'; otherwise it is just overwritten in the procedure
                out_grid[i_el] = fn(ip_r, X_e, node_ls, ip_ls)
                i_el += 1
        return out_grid

    B_mtx_grid = Property(
        Array,
        depends_on='sdomain.changed_structure, sdomain.+changed_geometry')

    @cached_property
    def _get_B_mtx_grid(self):
        B_mtx_grid = self._apply_on_ip_pnts(self.fets_eval.get_B_mtx)
        return B_mtx_grid

    J_det_grid = Property(
        Array,
        depends_on='sdomain.changed_structure, sdomain.+changed_geometry')

    @cached_property
    def _get_J_det_grid(self):
        return self._apply_on_ip_pnts(self.fets_eval.get_J_det)

    # Integration over the discontinuity domain
    #
    def _apply_on_ip_disc(self, fn):
        X_el = self.sdomain.elem_X_map
        X_d = self.X_i

        # Prepare the result array with the same dimensions as the result of
        # one call to fn (for a single ip) - must get the first ip of the
        # first element
        ###

        # test call to the function with single output - to get the shape of
        # the result.
        out_single = fn(self.ip_disc_coords[0], X_d[0], X_el[0],
                        self.dof_node_ls_values[0], self.ip_normal[0])
        out_grid_shape = (self.ip_disc_coords.shape[0], ) + out_single.shape
        out_grid = zeros(out_grid_shape)

        # loop over elements
        i_el = 0
        for ip_addr0, ip_addr1, X_ed, X_e, node_ls in zip(
                self.ip_disc_offset[:-1], self.ip_disc_offset[1:], X_d, X_el,
                self.dof_node_ls_values):
            ip_slice = slice(ip_addr0, ip_addr1)
            for ip_r, ip_norm in zip(self.ip_disc_coords[ip_slice],
                                     self.ip_normal[ip_slice]):
                out_grid[i_el] = fn(ip_r, X_ed, X_e, node_ls, ip_norm)
                i_el += 1
        return out_grid

    # Cached terms for the integration points in the discontinuity domain.
    B_disc_grid = Property(
        Array,
        depends_on='sdomain.changed_structure, sdomain.+changed_geometry')

    @cached_property
    def _get_B_disc_grid(self):
        return self._apply_on_ip_disc(self.fets_eval.get_B_disc)

    J_disc_grid = Property(
        Array,
        depends_on='sdomain.changed_structure, sdomain.+changed_geometry')

    @cached_property
    def _get_J_disc_grid(self):
        return self._apply_on_ip_disc(self.fets_eval.get_J_det_disc)

    X_i = Property(
        depends_on='sdomain.changed_structure, sdomain.+changed_geometry')

    @cached_property
    def _get_X_i(self):
        '''
        Intersection points in global 3D coordinates.
        '''
        X_i = []
        for e, r_i_e in zip(self.sdomain.elements,
                            self.sdomain.ls_intersection_r):
            X_mtx = e.get_X_mtx()
            X_i.append(self.fets_eval.get_vtk_r_glb_arr(X_mtx, r_i_e))
        return array(X_i)

    #########################################################################
    # STATE ARRAY MANAGEMENT
    #########################################################################
    state_array_size = Property(depends_on='sdomain.changed_structure')

    @cached_property
    def _get_state_array_size(self):
        '''
        overloading the default method
        as the number of ip differs from element to element
        '''
        n_ip = self.ip_weights.shape[0]  # number of ip on the domain
        mats_arr_size = self.fets_eval.m_arr_size
        dots_arr_size = n_ip * mats_arr_size
        return dots_arr_size

    #---------------------------------------------------
    # backup state arrays storing the values of the previous discretization
    # --------------------------------------------------
    old_state_start_elem_grid = Array
    old_state_end_elem_grid = Array
    old_state_array = Array
    old_state_grid_ix = Tuple

    state_array = Property(Array, depends_on='sdomain.changed_structure')

    @cached_property
    def _get_state_array(self):
        '''
        overloading the default method
        state array of fets has to account for number of ip
        '''
        state_array = zeros((self.state_array_size, ), dtype='float_')

        sctx = self.sdomain.domain.new_scontext()
        # Run the setup of sub-evaluator
        #
        mats_arr_size = self.fets_eval.m_arr_size
        # print 'mats_arr_size ', mats_arr_size
        # print 'self.ip_offset ', self.ip_offset
        for e_id, elem in enumerate(self.sdomain.elements):
            sctx.elem = elem
            sctx.elem_state_array = state_array[self.ip_offset[e_id] *
                                                mats_arr_size:self.ip_offset[
                                                    (e_id + 1)] *
                                                mats_arr_size]
            # print 'elem_state_array ', sctx.elem_state_array
            self.fets_eval.setup(
                sctx, (self.ip_offset[(e_id + 1)] - self.ip_offset[e_id]))

        # Transfer the values from the old state array - if an old_array was
        # there
        if len(self.old_state_array):

            # get the start-end indices of the elements in the old grid
            old_start_arr = self.old_state_start_elem_grid[
                self.old_state_grid_ix]
            old_end_arr = self.old_state_end_elem_grid[self.old_state_grid_ix]

            # get the start-end indices of the elements in the new grid
            new_start_arr = self.state_start_elem_grid[self.old_state_grid_ix]
            new_end_arr = self.state_end_elem_grid[self.old_state_grid_ix]

            # the elements in the new grid might be masked and should then be
            # skipped - what should happen with their state? actually, a
            # state transfer should be started.
            for new_masked, ns, ne, os, oe in zip(new_start_arr.mask,
                                                  new_start_arr, new_end_arr,
                                                  old_start_arr, old_end_arr):
                if new_masked:
                    # The element has been overloaded - the old state must be
                    # transferred to the new state - this depends on the adaptive
                    # strategy at hand. Either the new state must be reiterated
                    # for the current time once again using zero state as a
                    # start vector - or the old values could be reused as start
                    # value.
                    pass
                else:
                    # The element is present also in the changed grid - copy
                    # the state to its new place in the state array
                    state_array[ns:ne] = self.old_state_array[os:oe]

        # backup the reference to the array for the case that the discretization
        # changes and a transfer of state variables is required
        self.old_state_array = state_array
        self.old_state_start_elem_grid = self.state_start_elem_grid
        self.old_state_end_elem_grid = self.state_end_elem_grid
        self.old_state_grid_ix = self.sdomain.intg_grid_ix

        # return the new state array
        #
        return state_array

    def get_elem_state_array(self, e_id):
        '''
        used for response tracing
        @param e_id: element id
        '''
        mats_arr_size = self.fets_eval.m_arr_size
        return self.state_array[self.ip_offset[e_id] *
                                mats_arr_size:self.ip_offset[(e_id + 1)] *
                                mats_arr_size]

    def get_corr_pred(self, sctx, u, du, tn, tn1, F_int):

        # in order to avoid allocation of the array in every time step
        # of the computation
        k_arr = self.k_arr
        k_arr[...] = 0.0
        #k_con = zeros( ( self.fets_eval.n_e_dofs, self.fets_eval.n_e_dofs ) )
        mats_arr_size = self.fets_eval.m_arr_size

        if self.cache_geo_matrices:
            B_mtx_grid = self.B_mtx_grid
            J_det_grid = self.J_det_grid
            if self.fets_eval.nip_disc:
                B_disc_grid = self.B_disc_grid
                J_disc_grid = self.J_disc_grid

        Be_mtx_grid = None
        Je_det_grid = None

        state_array = self.state_array

        tstepper = self.sdomain.tstepper
        U = tstepper.U_k
        d_U = tstepper.d_U

        for e_id, elem, ip_addr0, ip_addr1, ip_disc_addr0, ip_disc_addr1 in zip(
                self.sdomain.idx_active_elems, self.sdomain.elements,
                self.ip_offset[:-1], self.ip_offset[1:],
                self.ip_disc_offset[:-1], self.ip_disc_offset[1:]):
            ip_slice = slice(ip_addr0, ip_addr1)
            ip_disc_slice = slice(ip_disc_addr0, ip_disc_addr1)
            ix = elem.get_dof_map()
            sctx.elem = elem
            #sctx.elem_state_array = state_array[ e_id * e_arr_size : ( e_id + 1 ) * e_arr_size ]
            # print 'sctx.elem_state_array ', sctx.elem_state_array
            sctx.elem_state_array = state_array[
                self.ip_offset[e_id] *
                mats_arr_size:self.ip_offset[(e_id + 1)] *
                mats_arr_size]  # differs from the homogeneous case
            sctx.X = elem.get_X_mtx()
            sctx.x = elem.get_x_mtx()
            if self.cache_geo_matrices:
                # differs from the homogeneous case
                Be_mtx_grid = B_mtx_grid[ip_slice, ...]
                Je_det_grid = J_det_grid[ip_slice, ...]
                if self.fets_eval.nip_disc:
                    Be_disc_grid = B_disc_grid[ip_disc_slice, ...]
                    Je_disc_grid = J_disc_grid[ip_disc_slice, ...]
            sctx.ls_val = self.ip_ls_values[ip_slice]  # values of ls in ip
            f, k = self.fets_eval.get_corr_pred(
                sctx,
                U[ix_(ix)],
                d_U[ix_(ix)],
                tn,
                tn1,
                B_mtx_grid=Be_mtx_grid,
                J_det_grid=Je_det_grid,
                ip_coords=self.ip_coords[ip_slice],
                ip_weights=self.ip_weights[ip_slice])

            # debug hooks (disabled):
            # id = [0, 2, 8, 10]
            # print 'k before \n', k[meshgrid(id, id)]
            # print 'k before \n', k
            k_arr[e_id] = k
            #k_con[:, :] = k
            F_int[ix_(ix)] += f
            # print 'f before \n',F_int
            if self.fets_eval.nip_disc:
                k_c, f_int_c = self.fets_eval.get_corr_pred_disc(
                    sctx,
                    U[ix_(ix)],
                    B_mtx_grid=Be_disc_grid,
                    J_det_grid=Je_disc_grid,
                    ip_coords=self.ip_disc_coords[ip_disc_slice],
                    ip_weights=self.ip_disc_weights[ip_disc_slice])
                k_arr[e_id] += k_c
                # print 'f_int_c ',f_int_c
                F_int[ix_(ix)] += f_int_c

            # print 'k_con ', k_con
            # print 'K__mtx', k_arr

        return SysMtxArray(mtx_arr=k_arr,
                           dof_map_arr=self.sdomain.elem_dof_map)
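
The core step of _get_elem_r_contours above - splitting the element vertices
by the sign of the level set function and appending the edge intersections as
input for the triangulation - can be exercised in isolation. A minimal
standalone sketch (all values hypothetical, numpy only):

import numpy as np

# local vertex coordinates of a bilinear quad
corner_r = np.array([[-1., -1.], [1., -1.], [1., 1.], [-1., 1.]])
# level set values at the vertices, e.g. for ls(r_0, r_1) = r_0 - 0.2
ls_vals = corner_r[:, 0] - 0.2

pos_r = corner_r[ls_vals > 0.]  # vertices on the positive side
neg_r = corner_r[ls_vals < 0.]  # vertices on the negative side
# intersections of ls = 0 with the element edges (the line r_0 = 0.2)
i_r = np.array([[0.2, -1.], [0.2, 1.]])

# the two point sets that would be handed to get_triangulation
point_set = [np.vstack((pos_r, i_r)), np.vstack((neg_r, i_r))]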
Example #2
class MarkerPointDest(MarkerPoints):  # noqa: D401
    """MarkerPoints subclass that serves for derived points."""

    src1 = Instance(MarkerPointSource)
    src2 = Instance(MarkerPointSource)

    name = Property(Str, depends_on='src1.name,src2.name')
    dir = Property(Str, depends_on='src1.dir,src2.dir')

    points = Property(Array(float, (5, 3)),
                      depends_on=['method', 'src1.points', 'src1.use',
                                  'src2.points', 'src2.use'])
    enabled = Property(Bool, depends_on=['points'])

    method = Enum('Transform', 'Average', desc="Transform: estimate a rotation"
                  "/translation from mrk1 to mrk2; Average: use the average "
                  "of the mrk1 and mrk2 coordinates for each point.")

    view = View(VGroup(Item('method', style='custom'),
                       Item('save_as', enabled_when='can_save',
                            show_label=False)))

    @cached_property
    def _get_dir(self):
        return self.src1.dir

    @cached_property
    def _get_name(self):
        n1 = self.src1.name
        n2 = self.src2.name

        if not n1:
            if n2:
                return n2
            else:
                return ''
        elif not n2:
            return n1

        if n1 == n2:
            return n1

        i = 0
        l1 = len(n1) - 1
        l2 = len(n2) - 1
        while n1[i] == n2[i]:
            if i == l1:
                return n1
            elif i == l2:
                return n2

            i += 1

        return n1[:i]

    @cached_property
    def _get_enabled(self):
        return np.any(self.points)

    @cached_property
    def _get_points(self):
        # in case only one or no source is enabled
        if not (self.src1 and self.src1.enabled):
            if (self.src2 and self.src2.enabled):
                return self.src2.points
            else:
                return np.zeros((5, 3))
        elif not (self.src2 and self.src2.enabled):
            return self.src1.points

        # Average method
        if self.method == 'Average':
            if len(np.union1d(self.src1.use, self.src2.use)) < 5:
                error(None, "Need at least one source for each point.",
                      "Marker Average Error")
                return np.zeros((5, 3))

            pts = (self.src1.points + self.src2.points) / 2.
            for i in np.setdiff1d(self.src1.use, self.src2.use):
                pts[i] = self.src1.points[i]
            for i in np.setdiff1d(self.src2.use, self.src1.use):
                pts[i] = self.src2.points[i]

            return pts

        # Transform method
        idx = np.intersect1d(np.array(self.src1.use),
                             np.array(self.src2.use), assume_unique=True)
        if len(idx) < 3:
            error(None, "Need at least three shared points for trans"
                  "formation.", "Marker Interpolation Error")
            return np.zeros((5, 3))

        src_pts = self.src1.points[idx]
        tgt_pts = self.src2.points[idx]
        est = fit_matched_points(src_pts, tgt_pts, out='params')
        rot = np.array(est[:3]) / 2.
        tra = np.array(est[3:]) / 2.

        if len(self.src1.use) == 5:
            trans = np.dot(translation(*tra), rotation(*rot))
            pts = apply_trans(trans, self.src1.points)
        elif len(self.src2.use) == 5:
            trans = np.dot(translation(*-tra), rotation(*-rot))
            pts = apply_trans(trans, self.src2.points)
        else:
            trans1 = np.dot(translation(*tra), rotation(*rot))
            pts = apply_trans(trans1, self.src1.points)
            trans2 = np.dot(translation(*-tra), rotation(*-rot))
            for i in np.setdiff1d(self.src2.use, self.src1.use):
                pts[i] = apply_trans(trans2, self.src2.points[i])

        return pts
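
The common-prefix logic of _get_name above can be exercised on its own. A
minimal standalone sketch mirroring that loop (hypothetical inputs):

def common_name(n1, n2):
    # walk both strings while they agree; if one string ends first it is a
    # prefix of the other and is returned whole
    if not n1:
        return n2 or ''
    if not n2:
        return n1
    if n1 == n2:
        return n1
    i = 0
    l1, l2 = len(n1) - 1, len(n2) - 1
    while n1[i] == n2[i]:
        if i == l1:
            return n1
        elif i == l2:
            return n2
        i += 1
    return n1[:i]

assert common_name('subj01_mrk1', 'subj01_mrk2') == 'subj01_mrk'
assert common_name('run', 'run_pre') == 'run'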
Example #3
class FETS1D52ULRH(FETSEval):

    '''
    FE bar element with 2 nodes, deformation formulation.
    '''

    implements(IFETSEval)

    debug_on = True

    # Dimensional mapping
    dim_slice = slice(0, 1)

    n_nodal_dofs = Int(2)

    dof_r = Array(value=[[-1], [1]])
    geo_r = Array(value=[[-1], [1]])
    vtk_r = Array(value=[[-1.], [1.]])
    vtk_cells = [[0, 1]]
    vtk_cell_types = 'Line'

    n_dof_r = Property
    '''Number of node positions associated with degrees of freedom. 
    '''
    @cached_property
    def _get_n_dof_r(self):
        return len(self.dof_r)

    n_e_dofs = Property
    '''Number of element degrees of freedom.
    '''
    @cached_property
    def _get_n_e_dofs(self):
        return self.n_nodal_dofs * self.n_dof_r

    def _get_ip_coords(self):
        offset = 1e-6
        return np.array([[-1 + offset, 0., 0.], [1 - offset, 0., 0.]])

    def _get_ip_weights(self):
        return np.array([1., 1.], dtype=float)

    # Integration parameters
    #
    ngp_r = 2

    def get_N_geo_mtx(self, r_pnt):
        '''
        Return geometric shape functions
        @param r_pnt:
        '''
        r = r_pnt[0]
        N_mtx = np.array([[0.5 - r / 2., 0.5 + r / 2.]])
        return N_mtx

    def get_dNr_geo_mtx(self, r_pnt):
        '''
        Return the matrix of shape function derivatives.
        Used for the construction of the Jacobi matrix.
        '''
        return np.array([[-1. / 2, 1. / 2]])

    def get_N_mtx(self, r_pnt):
        '''
        Return the shape functions.
        @param r_pnt: local coordinates
        '''
        return self.get_N_geo_mtx(r_pnt)

    def get_dNr_mtx(self, r_pnt):
        '''
        Return the derivatives of the shape functions
        '''
        return self.get_dNr_geo_mtx(r_pnt)
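
A quick standalone check of the linear shape functions defined above (numpy
only; the assertions just restate the nodal interpolation property):

import numpy as np

def N(r):
    # shape functions of the 2-node bar, as in get_N_geo_mtx
    return np.array([[0.5 - r / 2., 0.5 + r / 2.]])

assert np.allclose(N(-1.), [[1., 0.]])  # unity at node 1, zero at node 2
assert np.allclose(N(1.), [[0., 1.]])   # unity at node 2, zero at node 1
assert np.isclose(N(0.3).sum(), 1.0)    # partition of unity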
Example #4
class DataPlot(Component):
    '''An individual data plot to be presented in the LogPlot'''
    t = Array(value=np.linspace(0, 20, 100))
    data = Instance(ArrayPlotData)

    plot = Instance(Plot)

    eqn = Str("t")

    def _data_default(self):
        return ArrayPlotData(t=self.t, y0=eval(self.eqn, variables))

    def _t_changed(self):
        self.plot.data.set_data("t", self.t)

    def _eqn_changed(self):
        try:
            y = eval(self.eqn, variables)
        except Exception:
            # report the error without interrupting the UI
            import traceback
            traceback.print_exc()
        else:
            #Start by hiding all the current plots
            #TODO: this doesn't really work yet
            for p in self.plot.plots.values():
                p.visible = False

            if isinstance(y, tuple):
                for i, d in enumerate(y):
                    #Type check the output
                    if isinstance(d, np.ndarray) and (d.dtype != object):
                        self.data.set_data("y%s" % i, d)

                        if "plot%s" % i not in self.plot.plots:
                            self.plot.plot(('t', 'y%s' % i),
                                           name="plot%s" % i,
                                           color=colors[i])
                        else:
                            self.plot.plots['plot%s' % i].visible = True
            else:
                if isinstance(y, np.ndarray) and (y.dtype != object):
                    self.data.set_data("y0", y)
                    self.plot.plots['plot0'].visible = True

    def _plot_default(self):
        plot = Plot(self.data)
        plot.plot(('t', 'y0'), color=colors[0])
        plot.padding = 20
        plot.padding_left = 40

        plot.tools.append(
            PanTool(plot))  #, constrain=True, constrain_direction="y"))
        #TODO: zoomtool works on both axes, should only affect y
        plot.tools.append(
            ZoomTool(plot,
                     tool_mode="range",
                     constrain=True,
                     constrain_direction="y"))
        return plot

    traits_view = View(
        Item('plot',
             editor=ComponentEditor(),
             height=200,
             show_label=False,
             padding=0),
        Item('eqn', style="simple"),
        width=700,
        height=200,
    )
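
The example assumes a module-level dictionary named variables that serves as
the namespace for eval. A minimal sketch of what such a namespace could look
like (hypothetical, not shown in the snippet):

import numpy as np

t = np.linspace(0, 20, 100)
# names made available to the user-entered equation string
variables = {'t': t, 'sin': np.sin, 'cos': np.cos, 'exp': np.exp}

y = eval('sin(t) * exp(-t / 10.)', variables)  # ndarray of shape (100,)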
Example #5
class FEQ8U(FETSEval):
    debug_on = True

    # Dimensional mapping
    dim_slice = slice(0, 2)

    n_e_dofs = Int(8 * 2)
    t = Float(1.0, label='thickness')
    E = Float(1.0, label="Young's modulus")
    nu = Float(0.2, label="Poisson's ratio")

    # Integration parameters
    #
    ngp_r = 3
    ngp_s = 3

    field_entity_type = 'quad'
    # 4 corner nodes, 4 edge nodes and 1 interior node
    vtk_r = [[-1., -1.], [0., -1.], [1., -1.], [-1., 0.], [0., 0.], [1., 0.],
             [-1., 1.], [0., 1.], [1., 1.]]
    field_faces = [[0, 1, 4, 3], [1, 2, 5, 4], [3, 4, 7, 6], [4, 5, 8, 7]]

    n_nodal_dofs = Int(2)

    # Order of node positions for the formulation of shape function
    #
    _node_coord_map = Array(Float, (8, 2),
                            [[-1., -1.], [1., -1.], [1., 1.], [-1., 1.],
                             [0., -1.], [1., 0.], [0., 1.], [-1., 0.]])

    # Order of node positions for the geometry approximation
    #
    # @TODO is it possible to use 'vtk_r' instead ?
    _node_coord_map_geo = Array('float_', (4, 2),
                                [[-1., -1.], [1., -1.], [1., 1.], [-1., 1.]])

    #---------------------------------------------------------------------
    # Method required to represent the element geometry
    #---------------------------------------------------------------------
    def get_N_geo_mtx(self, r):
        '''
        Return the value of shape functions for the specified local coordinate r
        '''
        cx = self._node_coord_map_geo
        N_geo = array([[
            1 / 4. * (1 + r[0] * cx[i, 0]) * (1 + r[1] * cx[i, 1])
            for i in range(0, 4)
        ]])
        return N_geo

    def get_dNr_geo_mtx(self, r):
        '''
        Return the matrix of shape function derivatives.
        Used for the construction of the Jacobi matrix.

        @TODO - the B matrix is used
        just for uniaxial bar here with a trivial differential
        operator.
        '''
        cx = self._node_coord_map_geo
        dNr_geo = array(
            [[1 / 4. * cx[i, 0] * (1 + r[1] * cx[i, 1]) for i in range(0, 4)],
             [1 / 4. * cx[i, 1] * (1 + r[0] * cx[i, 0]) for i in range(0, 4)]])
        return dNr_geo

    #---------------------------------------------------------------------
    # Method delivering the shape functions for the field variables and their derivatives
    #---------------------------------------------------------------------
    def get_N_mtx(self, r):
        '''
        Returns the matrix of the shape functions used for the field approximation
        containing zero entries. The number of rows corresponds to the number of nodal
        dofs. The matrix is evaluated for the specified local coordinate r.
        '''
        cx = self._node_coord_map

        N_c = [
            1 / 4. * (1 + r[0] * cx[i, 0]) * (1 + r[1] * cx[i, 1]) *
            (r[0] * cx[i, 0] + r[1] * cx[i, 1] - 1) for i in range(0, 4)
        ]

        N_0m1 = 1 / 2. * (1 - r[0]**2) * (1 + cx[4, 1] * r[1])
        N_10 = 1 / 2. * (1 + cx[5, 0] * r[0]) * (1 - r[1]**2)
        N_01 = 1 / 2. * (1 - r[0]**2) * (1 + cx[6, 1] * r[1])
        N_m10 = 1 / 2. * (1 + cx[7, 0] * r[0]) * (1 - r[1]**2)

        N = array([N_c + [N_0m1, N_10, N_01, N_m10]])

        I_mtx = identity(self.n_nodal_dofs, float)
        N_mtx_list = [I_mtx * N[0, i] for i in range(0, N.shape[1])]
        N_mtx = hstack(N_mtx_list)
        return N_mtx
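
    # Quick sanity check (hypothetical, not part of the element API): the
    # eight serendipity functions assembled above satisfy the partition of
    # unity, so every row of N_mtx sums to one, e.g.
    #   N_mtx = FEQ8U().get_N_mtx(array([0.3, -0.2]))
    #   assert allclose(N_mtx.sum(axis=1), ones(2))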

    def get_dNr_mtx(self, r):
        '''
        Return the derivatives of the shape functions used for the field approximation.
        '''
        # NOTE: this is a stub - the Q8 field derivatives are not implemented
        # here; a zero matrix is returned
        return zeros((2, 8))

    def get_B_mtx(self, r, X):
        J_mtx = self.get_J_mtx(r, X)
        dNr_mtx = self.get_dNr_mtx(r)
        dNx_mtx = dot(inv(J_mtx), dNr_mtx)
        Bx_mtx = zeros((3, 16), dtype='float_')
        for i in range(0, 8):
            Bx_mtx[0, i * 2] = dNx_mtx[0, i]
            Bx_mtx[1, i * 2 + 1] = dNx_mtx[1, i]
            Bx_mtx[2, i * 2] = dNx_mtx[1, i]
            Bx_mtx[2, i * 2 + 1] = dNx_mtx[0, i]
        return Bx_mtx

    def get_mtrl_corr_pred(self, sctx, eps_mtx):
        D_mtx = zeros((3, 3), dtype='float_')
        E = self.E
        nu = self.nu
        D_factor = E / (1 - nu * nu)
        D_mtx[0, 0] = D_factor * 1
        D_mtx[1, 0] = D_factor * nu
        D_mtx[0, 1] = D_factor * nu
        D_mtx[1, 1] = D_factor * 1
        D_mtx[2, 2] = D_factor * (1 - nu) / 2.
        sig_mtx = dot(D_mtx, eps_mtx)
        return sig_mtx, D_mtx

    def get_x_mtx(self, X):
        # NOTE: truncated in the source; presumably delegates to the element
        # geometry coordinates
        return self._get_X_mtx()
Example #6
class CombineMarkersPanel(HasTraits):
    """Has two marker points sources and interpolates to a third one"""
    model = Instance(CombineMarkersModel, ())

    # model references for UI
    mrk1 = Instance(MarkerPointSource)
    mrk2 = Instance(MarkerPointSource)
    mrk3 = Instance(MarkerPointDest)
    distance = Str

    # Visualization
    scene = Instance(MlabSceneModel)
    scale = Float(5e-3)
    mrk1_obj = Instance(PointObject)
    mrk2_obj = Instance(PointObject)
    mrk3_obj = Instance(PointObject)
    trans = Array()

    view = View(VGroup(VGroup(Item('mrk1', style='custom'),
                              Item('mrk1_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 1", show_border=True),
                       VGroup(Item('mrk2', style='custom'),
                              Item('mrk2_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 2", show_border=True),
                       VGroup(Item('distance', style='readonly'),
                              label='Stats', show_border=True),
                       VGroup(Item('mrk3', style='custom'),
                              Item('mrk3_obj', style='custom'),
                              show_labels=False,
                              label="New Marker", show_border=True),
                       ))

    def _mrk1_default(self):
        return self.model.mrk1

    def _mrk2_default(self):
        return self.model.mrk2

    def _mrk3_default(self):
        return self.model.mrk3

    def __init__(self, *args, **kwargs):
        super(CombineMarkersPanel, self).__init__(*args, **kwargs)

        m = self.model
        m.sync_trait('distance', self, 'distance', mutual=False)

        self.mrk1_obj = PointObject(scene=self.scene, color=(155, 55, 55),
                                    point_scale=self.scale)
        self.sync_trait('trans', self.mrk1_obj, mutual=False)
        m.mrk1.sync_trait('points', self.mrk1_obj, 'points', mutual=False)
        m.mrk1.sync_trait('enabled', self.mrk1_obj, 'visible',
                          mutual=False)

        self.mrk2_obj = PointObject(scene=self.scene, color=(55, 155, 55),
                                    point_scale=self.scale)
        self.sync_trait('trans', self.mrk2_obj, mutual=False)
        m.mrk2.sync_trait('points', self.mrk2_obj, 'points', mutual=False)
        m.mrk2.sync_trait('enabled', self.mrk2_obj, 'visible',
                          mutual=False)

        self.mrk3_obj = PointObject(scene=self.scene, color=(150, 200, 255),
                                    point_scale=self.scale)
        self.sync_trait('trans', self.mrk3_obj, mutual=False)
        m.mrk3.sync_trait('points', self.mrk3_obj, 'points', mutual=False)
        m.mrk3.sync_trait('enabled', self.mrk3_obj, 'visible', mutual=False)
Example #7
class VolumeSlicer(HasTraits):
    # The data to plot
    data = Array()

    # The 4 views displayed
    scene3d = Instance(MlabSceneModel, ())
    scene_x = Instance(MlabSceneModel, ())
    scene_y = Instance(MlabSceneModel, ())
    scene_z = Instance(MlabSceneModel, ())

    # The data source
    data_src3d = Instance(Source)

    # The image plane widgets of the 3D scene
    ipw_3d_x = Instance(PipelineBase)
    ipw_3d_y = Instance(PipelineBase)
    ipw_3d_z = Instance(PipelineBase)

    _axis_names = dict(x=0, y=1, z=2)


    #---------------------------------------------------------------------------
    def __init__(self, **traits):
        super(VolumeSlicer, self).__init__(**traits)
        # Force the creation of the image_plane_widgets:
        self.ipw_3d_x
        self.ipw_3d_y
        self.ipw_3d_z


    #---------------------------------------------------------------------------
    # Default values
    #---------------------------------------------------------------------------
    def _data_src3d_default(self):
        return mlab.pipeline.scalar_field(self.data,
                            figure=self.scene3d.mayavi_scene)

    def make_ipw_3d(self, axis_name):
        ipw = mlab.pipeline.image_plane_widget(self.data_src3d,
                        figure=self.scene3d.mayavi_scene,
                        plane_orientation='%s_axes' % axis_name)
        return ipw

    def _ipw_3d_x_default(self):
        return self.make_ipw_3d('x')

    def _ipw_3d_y_default(self):
        return self.make_ipw_3d('y')

    def _ipw_3d_z_default(self):
        return self.make_ipw_3d('z')


    #---------------------------------------------------------------------------
    # Scene activation callbacks
    #---------------------------------------------------------------------------
    @on_trait_change('scene3d.activated')
    def display_scene3d(self):
        outline = mlab.pipeline.outline(self.data_src3d,
                        figure=self.scene3d.mayavi_scene,
                        )
        self.scene3d.mlab.view(40, 50)
        # Interaction properties can only be changed after the scene
        # has been created, and thus the interactor exists
        for ipw in (self.ipw_3d_x, self.ipw_3d_y, self.ipw_3d_z):
            # Turn the interaction off
            ipw.ipw.interaction = 0
        self.scene3d.scene.background = (0, 0, 0)
        # Keep the view always pointing up
        self.scene3d.scene.interactor.interactor_style = \
                                 tvtk.InteractorStyleTerrain()


    def make_side_view(self, axis_name):
        scene = getattr(self, 'scene_%s' % axis_name)

        # To avoid copying the data, we take a reference to the
        # raw VTK dataset, and pass it on to mlab. Mlab will create
        # a Mayavi source from the VTK without copying it.
        # We have to specify the figure so that the data gets
        # added to the figure we are interested in.
        outline = mlab.pipeline.outline(
                            self.data_src3d.mlab_source.dataset,
                            figure=scene.mayavi_scene,
                            )
        ipw = mlab.pipeline.image_plane_widget(
                            outline,
                            plane_orientation='%s_axes' % axis_name)
        setattr(self, 'ipw_%s' % axis_name, ipw)

        # Synchronize positions between the corresponding image plane
        # widgets on different views.
        ipw.ipw.sync_trait('slice_position',
                            getattr(self, 'ipw_3d_%s'% axis_name).ipw)

        # Make left-clicking create a crosshair
        ipw.ipw.left_button_action = 0
        # Add a callback on the image plane widget interaction to
        # move the others
        def move_view(obj, evt):
            position = obj.GetCurrentCursorPosition()
            for other_axis, axis_number in self._axis_names.items():
                if other_axis == axis_name:
                    continue
                ipw3d = getattr(self, 'ipw_3d_%s' % other_axis)
                ipw3d.ipw.slice_position = position[axis_number]

        ipw.ipw.add_observer('InteractionEvent', move_view)
        ipw.ipw.add_observer('StartInteractionEvent', move_view)

        # Center the image plane widget
        ipw.ipw.slice_position = 0.5*self.data.shape[
                    self._axis_names[axis_name]]

        # Position the view for the scene
        views = dict(x=( 0, 90),
                     y=(90, 90),
                     z=( 0,  0),
                     )
        scene.mlab.view(*views[axis_name])
        # 2D interaction: only pan and zoom
        scene.scene.interactor.interactor_style = \
                                 tvtk.InteractorStyleImage()
        scene.scene.background = (0, 0, 0)


    @on_trait_change('scene_x.activated')
    def display_scene_x(self):
        return self.make_side_view('x')

    @on_trait_change('scene_y.activated')
    def display_scene_y(self):
        return self.make_side_view('y')

    @on_trait_change('scene_z.activated')
    def display_scene_z(self):
        return self.make_side_view('z')


    #---------------------------------------------------------------------------
    # The layout of the dialog created
    #---------------------------------------------------------------------------
    view = View(HGroup(
                  Group(
                       Item('scene_y',
                            editor=SceneEditor(scene_class=Scene),
                            height=250, width=300),
                       Item('scene_z',
                            editor=SceneEditor(scene_class=Scene),
                            height=250, width=300),
                       show_labels=False,
                  ),
                  Group(
                       Item('scene_x',
                            editor=SceneEditor(scene_class=Scene),
                            height=250, width=300),
                       Item('scene3d',
                            editor=SceneEditor(scene_class=MayaviScene),
                            height=250, width=300),
                       show_labels=False,
                  ),
                ),
                resizable=True,
                title='Volume Slicer',
                )
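
A usage sketch for the slicer above, along the lines of the original Mayavi
volume_slicer example (the synthetic volume is just a placeholder):

import numpy as np

x, y, z = np.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
data = np.sin(3 * x) / x + 0.05 * z ** 2 + np.cos(3 * y)

m = VolumeSlicer(data=data)
m.configure_traits()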
Example #8
class ForestView(HasTraits):
    # UI Elements
    day = Button("Advance 1 Day")
    histograms = Instance(Plot)
    fire_time_plot = Instance(Plot)
    forest_plot = Instance(Plot)
    forest_image = Property(Array, depends_on="forest")
    run_label = Property(String, depends_on="run")
    run_button = Button
    time_plots = Instance(VPlotContainer)
    trait_to_histogram = Property(depends_on="which_histogram")
    tree_time_plot = Instance(Plot)
    which_histogram = Enum("trees", "fire")

    # ModelView Elements
    density_function = Property(Array)
    fractions = Property(Array(dtype=float))
    fire_history = Array(dtype=float)
    forest = Instance(Forest)
    p_sapling = DelegatesTo("forest", "p_sapling")
    p_lightning = DelegatesTo("forest", "p_lightning")
    plot_data = Instance(ArrayPlotData)
    time = Array(dtype=int)
    tree_history = Array(dtype=float)

    run = Bool

    traits_view = View(
        HGroup(
            VGroup(
                VGroup(
                    Item("forest_plot",
                         editor=ComponentEditor(),
                         show_label=False), ),
                Item("p_sapling", label="trees"),
                Item("p_lightning", label="fires"),
            ),
            VGroup(
                Item("time_plots", editor=ComponentEditor(), show_label=False),
                HGroup(
                    Item("run_button",
                         editor=ButtonEditor(label_value="run_label"),
                         show_label=False),
                    Item("which_histogram", show_label=False),
                    Item("day", show_label=False),
                ),
            ),
        ),
        resizable=True,
    )

    def update_fire_history(self):
        self.fire_history[1:] = self.fire_history[:-1]
        self.fire_history[0] = float(np.sum(self.forest.forest_fires)) / \
            self.forest.forest_fires.size

    def update_tree_history(self):
        self.tree_history[1:] = self.tree_history[:-1]
        self.tree_history[0] = float(np.sum(self.forest.forest_trees)) / \
            self.forest.forest_trees.size

    def update_time(self):
        self.time[1:] = self.time[:-1]
        self.time[0] = self.time[1] + 1

    def _advance(self):
        self.forest.advance_one_day()
        self.update_fire_history()
        self.update_tree_history()
        self.update_time()
        self.plot_data.set_data("forest_image", self.forest_image)
        self.plot_data.set_data("fire_history", self.fire_history)
        self.plot_data.set_data("tree_history", self.tree_history)
        self.plot_data.set_data("density_function", self.density_function)
        self.plot_data.set_data("time", self.time)
        self.plot_data.set_data("fractions", self.fractions)

    def _day_fired(self):
        self._advance()

    def _fire_history_default(self):
        return np.zeros((history_length, ), dtype=float)

    def _fire_time_plot_default(self):
        plot = Plot(self.plot_data, title="Fractional area with fires")
        plot.plot(["time", "fire_history"])
        return plot

    def _forest_plot_default(self):
        plot = Plot(self.plot_data)
        plot.img_plot("forest_image")
        plot.bounds = [0., 2.0]
        return plot

    def _get_fractions(self):
        data = self.trait_to_histogram
        return np.linspace(data.min(), data.max(), 50)

    def _get_fire_density_function(self):
        hist, bins = np.histogram(self.fire_history,
                                  bins=self.fractions,
                                  normed=True)
        tot = np.sum(hist)
        if tot > 0:
            hist /= tot
        return hist

    def _get_density_function(self):
        time_since_start = self.time > 0
        data = self.trait_to_histogram
        hist, bins = np.histogram(data[time_since_start],
                                  bins=self.fractions,
                                  normed=True)
        tot = np.sum(hist)
        if tot > 0:
            hist /= tot
        return hist

    def _get_forest_image(self):
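        # encode the cell states as an RGB image: burning cells light up the
        # red channel, tree-covered cells the green channel; empty cells stay black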
        image = np.zeros((self.forest.size_x, self.forest.size_y, 3),
                         dtype=np.uint8)
        image[:, :, 0] = 255 * self.forest.forest_fires
        image[:, :, 1] = 128 * self.forest.forest_trees
        return image

    def _get_run_label(self):
        if self.run:
            label = "Stop"
        else:
            label = "Run"
        return label

    def _get_trait_to_histogram(self):
        trait_to_histogram = {
            "trees": self.tree_history,
            "fire": self.fire_history,
        }
        return trait_to_histogram[self.which_histogram]

    def _histograms_default(self):
        plot = Plot(self.plot_data)
        plot.plot(["fractions", "density_function"], color="green")
        return plot

    def _plot_data_default(self):
        data = ArrayPlotData(forest_image=self.forest_image,
                             tree_history=self.tree_history,
                             fire_history=self.fire_history,
                             fractions=self.fractions,
                             density_function=self.density_function,
                             time=self.time)
        return data

    def _run_button_fired(self):
        self.run = not self.run

    def _run_changed(self):
        if self.run:
            self.timer.Start()
        else:
            self.timer.Stop()

    def _run_default(self):
        self.timer = Timer(150, self._timer_tick)
        return False

    def _time_plots_default(self):
        return VPlotContainer(self.fire_time_plot,
                              self.tree_time_plot,
                              self.histograms,
                              spacing=0.)

    def _timer_tick(self):
        if not self.run:
            raise StopIteration
        else:
            self._advance()

    def _tree_history_default(self):
        return np.zeros((history_length, ), dtype=float)

    def _tree_time_plot_default(self):
        plot = Plot(self.plot_data, title="Fractional area covered by trees")
        plot.plot(["time", "tree_history"])
        return plot

    def _time_default(self):
        time = np.zeros((history_length, ), dtype=int)
        time[0] = 1
        return time
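
A minimal usage sketch for the view above. It assumes the Forest model from the
same example (boolean forest_trees / forest_fires arrays, size_x / size_y,
advance_one_day(), p_sapling, p_lightning) and a module-level history_length;
the constructor arguments shown here are illustrative, not the original API.

if __name__ == "__main__":
    history_length = 100
    forest = Forest(size_x=100, size_y=100)   # hypothetical constructor
    view = ForestView(forest=forest)
    view.configure_traits()                   # opens the TraitsUI window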
Exemple #9
0
class ExType(SimDBClass):

    '''Read the data from the directory
    '''

    implements(IExType)

    data_file = File

    file_ext = Str('DAT')

    def validate(self):
        '''Validate the input data and return whether or not
        the input is valid. This is the precondition for processing
        the derived data.
        '''
        return True

    # set a flag for the view to check whether derived data is available
    #
    derived_data_available = Bool(False)

    # specify inputs
    #
    key = Property(Str, transient=True, depends_on='data_file')

    def _get_key(self):
        return split(os.path.basename(self.data_file), '.')[0]

    def _set_key(self, value):
        genkey = split(os.path.basename(self.data_file), '.')[0]
        if genkey != value:
            raise KeyError, 'key mismatch %s != %s' % (genkey, value)
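
    # Example: a (hypothetical) data_file '.../exdata/BT-4PT-V1.DAT'
    # yields the generated key 'BT-4PT-V1'.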

    def __setstate__(self, state, kw={}):
        if 'key' in state:
            del state['key']
        super(SimDBClass, self).__setstate__(state, **kw)

    # indicate whether the test is suitable and prepared for
    # calibration.
    ready_for_calibration = Property(Bool)

    def _get_ready_for_calibration(self):
        # return False by default
        # the subclasses shall overload this
        # and define the rules
        return False

    # specify plot templates that can be chosen for viewing
    #
    plot_templates = Dict(transient=True)

    # define processing
    #
    processed_data_array = Array('float_', transient=True)

    def process_source_data(self):
        '''process the source data and assign
        attributes to the DAT-file channel names.
        '''
        print '*** process data ***'
        self._import_processor()
        self._apply_data_reader()
        self.processed_data_array = self.data_array
        self._set_array_attribs()
        self._apply_data_processor()

    data_columns = Callable(None)
    '''Specification of the measured data columns in the data array 
    '''
    data_units = Callable(None)
    '''Specification of the measured data units in the data array 
    '''
    data_reader = Callable(None)
    '''Function reading the data into the self.data_array
    '''
    data_processor = Callable(None)
    '''Function preparing the data for evaluation - convert the measured
    data to a standard format, smooth the data, remove jumps etc. 
    '''

    def _import_processor(self):
        '''Check to see if there is a data processor in the data directory.
        The processor module is assumed to be named data_processor.py.

        '''
        dp_file = os.path.join(os.path.dirname(self.data_file),
                               'data_processor.py')
        dp_modpath = os.path.join(os.path.dirname(self.data_file),
                                  'data_processor').replace(simdb.pathchar, '.')[1:]
        exdata_dir = simdb.exdata_dir
        print 'dp_modpath', dp_modpath
        print 'exdata_dir', exdata_dir
        dp_mod = dp_modpath[len(exdata_dir):]
        print 'dp_mod', dp_mod
        print 'sys.path', sys.path
        if os.path.exists(dp_file):
            mod = importlib.import_module(dp_mod)
            print 'simdb-data processor used'
            if hasattr(mod, 'data_columns'):
                self.data_columns = mod.data_columns
            if hasattr(mod, 'data_units'):
                self.data_units = mod.data_units
            if hasattr(mod, 'data_processor'):
                self.data_processor = mod.data_processor
            if hasattr(mod, 'data_reader'):
                self.data_reader = mod.data_reader

    processing_done = Bool(False)

    def _apply_data_processor(self):
        '''Make a call to a test-specific data processor
        transforming the data_array to standard response variables.
        of the test setup.

        An example of data processing for a tensile test is the calculation
        of average displacement from several gauges placed on different sides
        of the specimen. 
        '''
        if self.data_processor:
            self.data_processor(self)
            self.processing_done = True

    def _apply_data_reader(self):
        if self.data_reader:
            self.data_reader(self)
        else:
            self._read_data_array()

    data_array = Array(float, transient=True)

    unit_list = Property(depends_on='data_file')

    def _get_unit_list(self):
        if self.data_units:
            return self.data_units(self)
        else:
            return self.names_and_units[1]

    factor_list = Property(depends_on='data_file')

    def _get_factor_list(self):
        if self.data_columns:
            return self.data_columns(self)
        else:
            return self.names_and_units[0]

    names_and_units = Property(depends_on='data_file')

    @cached_property
    def _get_names_and_units(self):
        ''' Extract the names and units of the measured data.
        The order of the names in the .DAT-file corresponds
        to the order of the .ASC-file.
        '''
        # for data exported into DAT and ASC-files
        file_ = open(self.data_file, 'r')
        lines = file_.read().split()
        names = []
        units = []
        for i in range(len(lines)):
            if lines[i] == '#BEGINCHANNELHEADER':
                print 'names and units are defined in DAT-file'
                name = lines[i + 1].split(',')[1]
                unit = lines[i + 3].split(',')[1]
                names.append(name)
                units.append(unit)

        # for data exported into a single csv-file
        file_split = self.data_file.split('.')
        if os.path.exists(file_split[0] + '.csv'):
            file_ = open(file_split[0] + '.csv', 'r')
            header_line_1 = file_.readline().strip()
            if header_line_1.split(';')[0] == 'Datum/Uhrzeit':
                print 'csv-file with header exists'
                header_line_2 = file_.readline().strip()
                names = header_line_1.split(';')
                units = header_line_2.split(';')
                names[0] = 'Bezugskanal'
                units[0] = 'sec'

        return names, units
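
    # Sketch of the channel-header layout the parser above relies on. Note
    # that read().split() tokenizes on whitespace, so each entry below is one
    # token; the field positions are inferred from the indexing in the code,
    # not from a format specification:
    #
    #   #BEGINCHANNELHEADER
    #   <id>,Kraft     <- token i + 1: channel name in field 1
    #   <id>,<...>
    #   <id>,kN        <- token i + 3: channel unit in field 1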

    def _names_and_units_default(self):
        ''' Extract the names and units of the measured data.
        The order of the names in the .DAT-file corresponds
        to the order of the .ASC-file.
        '''
        # for data exported into DAT and ASC-files
        file_ = open(self.data_file, 'r')
        lines = file_.read().split()
        names = []
        units = []
        for i in range(len(lines)):
            if lines[i] == '#BEGINCHANNELHEADER':
                name = lines[i + 1].split(',')[1]
                unit = lines[i + 3].split(',')[1]
                names.append(name)
                units.append(unit)

        # for data exported into a single csv-file
        file_split = self.data_file.split('.')
        if os.path.exists(file_split[0] + '.csv'):
            file_ = open(file_split[0] + '.csv', 'r')
            header_line_1 = file_.readline()
            if header_line_1.split(';')[0] == 'Datum/Uhrzeit':
                header_line_2 = file_.readline()
                names = header_line_1.split(';')
                units = header_line_2.split(';')
                names[0] = 'Bezugskanal'
                units[0] = 'sec'
                # cut off trailing '\r\n' at end of header line
                names[-1] = names[-1][:-2]
                units[-1] = units[-1][:-2]

        print 'names, units (default)', names, units
        return names, units

    def _set_array_attribs(self):
        '''Set the measured data as named attributes defining slices into
        the processed data array.
        '''
        for i, factor in enumerate(self.factor_list):
            self.add_trait(
                factor, Array(value=self.processed_data_array[:, i],
                              transient=True))

    # ------------------

    def _read_data_array(self):
        ''' Read the experiment data.
        '''
        if os.path.exists(self.data_file):

            print 'READ FILE'
            # derive candidate file names: try .csv first, then fall back to .ASC
            file_split = self.data_file.split('.')
            file_name = file_split[0] + '.csv'

            # for data exported into a single csv-file
            if os.path.exists(file_name):
                print 'check csv-file'
                file_ = open(file_name, 'r')
                header_line_1 = file_.readline().split()

                if header_line_1[0].split(';')[0] == 'Datum/Uhrzeit':
                    print 'read csv-file'
                    # for data exported into down sampled data array
                    try:
                        _data_array = np.loadtxt(file_name,
                                                 delimiter=';',
                                                 skiprows=2)
                        # reset time[sec] in order to start at 0.
                        _data_array[:, 0] -= _data_array[0, 0]
                    except ValueError:
                        # for first column use converter method 'time2sec';
                        converters = {0: time2sec}
                        # for all other columns use converter method
                        # 'comma2dot'
                        for i in range(len(header_line_1[0].split(';')) - 1):
                            converters[i + 1] = comma2dot
                        _data_array = np.loadtxt(
                            file_name, delimiter=";", skiprows=2, converters=converters)

                        # reset time[sec] in order to start at 0.
                        _data_array[:, 0] -= _data_array[0, 0]

                else:
                    # for data exported into DAT and ASC-files
                    # try to use loadtxt to read data file
                    try:
                        _data_array = np.loadtxt(file_name,
                                                 delimiter=';')

                    # loadtxt returns an error if the data file contains
                    # 'NOVALUE' entries. In this case use the special
                    # method 'loadtxt_novalue'
                    except ValueError:
                        _data_array = loadtxt_novalue(file_name)

            if not os.path.exists(file_name):
                file_name = file_split[0] + '.ASC'
                if not os.path.exists(file_name):
                    raise IOError, 'file %s does not exist' % file_name

                # for data exported into DAT and ASC-files
                # try to use loadtxt to read data file
                try:
                    _data_array = np.loadtxt(file_name,
                                             delimiter=';')

                # loadtxt returns an error if the data file contains
                # 'NOVALUE' entries. In this case use the special
                # method 'loadtxt_novalue'
                except ValueError:
                    _data_array = loadtxt_novalue(file_name)

            self.data_array = _data_array

    data_dir = Property()
    '''Local directory path of the data file.
    '''

    def _get_data_dir(self):
        return os.path.dirname(self.data_file)

    relative_path = Property
    '''Relative path inside database structure - the path is same for experiment
    in both database structures (remote and local)
    '''

    def _get_relative_path(self):
        return self.data_dir.replace(simdb.simdb_dir, '')[1:]

    hook_up_file = Property
    '''File specifying the access to extended data.
    The cfg file is used to hook up arbitrary type
    of data stored anywhere that can be downloaded
    on demand to the local cache.
    '''

    def _get_hook_up_file(self):
        dir_path = os.path.dirname(self.data_file)
        file_name = os.path.basename(self.data_file)
        file_split = file_name.split('.')
        file_name = os.path.join(dir_path,
                                 file_split[0] + '.cfg')
        if not os.path.exists(file_name):
            file_name = ''
        return file_name
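
    # The hookup file is a plain ConfigParser .cfg stored next to the data
    # file. Judging from the options read below, a minimal (hypothetical)
    # example could look like:
    #
    #   [aramis_data]
    #   aramis_start_offset = 12.5
    #   aramis_files = TT-V1-Xf15s13-Yf15s13,
    #       TT-V1-Xf10s10-Yf10s10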

    aramis_start_offset = Property(Float, depends_on='data_file')
    '''Get time offset of aramis start specified in the hookup file.
    '''
    @cached_property
    def _get_aramis_start_offset(self):
        # hook_up an extended file if available.
        aramis_start_offset = 0.0
        if self.hook_up_file:
            config = ConfigParser.ConfigParser()
            config.read(self.hook_up_file)
            try:
                aramis_start_offset = config.get('aramis_data',
                                                 'aramis_start_offset')
            except ConfigParser.NoOptionError:
                pass
        return float(aramis_start_offset)

    aramis_files = Property(depends_on='data_file')
    '''Get the list of available aramis files specified in the hookup file.
    '''
    @cached_property
    def _get_aramis_files(self):
        # hook_up an extended file if available.
        aramis_files = []
        if self.hook_up_file:
            config = ConfigParser.ConfigParser()
            config.read(self.hook_up_file)
            aramis_files = config.get(
                'aramis_data', 'aramis_files').split(',\n')
        return aramis_files

    aramis_dict = Property(depends_on='data_file')
    '''Use the last two specifiers of the aramis file name
    as a key to access the proper file.
    '''
    @cached_property
    def _get_aramis_dict(self):
        # hook_up an extended file if available.
        af_dict = {}
        for af in self.aramis_files:
            fx, fy = af.split('-')[-2:]
            af_dict[fx + '-' + fy] = af
        return af_dict
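
    # Example: a (hypothetical) aramis file name 'TT-V1-Xf15s13-Yf15s13'
    # is keyed as 'Xf15s13-Yf15s13' -- its last two dash-separated specifiers.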

    def download_aramis_file(self, arkey):
        af = self.aramis_dict[arkey]
        af_rel_dir = os.path.join(self.relative_path, 'aramis')
        af_local_dir = os.path.join(simdb.simdb_cache_dir, af_rel_dir)
        if not os.path.exists(af_local_dir):
            os.makedirs(af_local_dir)
        try:
            s = SFTPServer(simdb.server_username, '', simdb.server_host)
            if hasattr(s, 'sftp'):
                zip_filename = af + '.zip'
                zipfile_server = os.path.join(
                    simdb.simdb_cache_remote_dir, af_rel_dir, zip_filename)

                zipfile_server = string.replace(zipfile_server, '\\', '/')
                zipfile_local = os.path.join(af_local_dir, zip_filename)

                print 'downloading', zipfile_server
                print 'destination', zipfile_local

                s.download(zipfile_server, zipfile_local)
                s.sftp.stat(zipfile_server)
                s.close()
        except IOError, e:
            raise IOError(e)
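
The _import_processor hook above lets each test directory ship its own
data_processor.py module. The following is a minimal sketch of such a module,
under the assumption (suggested by the calls in _apply_data_reader and
_apply_data_processor) that each callable receives the ExType instance; the
channel names shown are hypothetical.

# data_processor.py -- illustrative plug-in module for ExType._import_processor()

def data_columns(ex):
    # names of the measured channels, one per column of ex.data_array
    return ['Bezugskanal', 'Kraft', 'Weg']

def data_units(ex):
    # units corresponding to the columns above
    return ['sec', 'kN', 'mm']

def data_processor(ex):
    # bring the data into a standard format, e.g. let the force start at zero
    ex.Kraft -= ex.Kraft[0]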
Exemple #10
0
class ExpBT4PT(ExType):
    '''Experiment: Bending Test Four Point
    '''
    #    label = Str('four point bending test')

    implements(IExType)

    file_ext = 'DAT'

    #--------------------------------------------------------------------
    # register a change of the traits with metadata 'input'
    #--------------------------------------------------------------------

    input_change = Event

    @on_trait_change('+input, ccs.input_change, +ironing_param')
    def _set_input_change(self):
        self.input_change = True

    #-------------------------------------------------------------------------
    # specify inputs:
    #-------------------------------------------------------------------------

    # effective length of the bending test specimen
    # (does not include the 5 cm overhang beyond the support lines at each end)
    #
    length = Float(1.75,
                   unit='m',
                   input=True,
                   table_field=True,
                   auto_set=False,
                   enter_set=True)
    width = Float(0.2,
                  unit='m',
                  input=True,
                  table_field=True,
                  auto_set=False,
                  enter_set=True)
    thickness = Float(0.055,
                      unit='m',
                      input=True,
                      table_field=True,
                      auto_set=False,
                      enter_set=True)

    # age of the concrete at the time of testing
    age = Int(28,
              unit='d',
              input=True,
              table_field=True,
              auto_set=False,
              enter_set=True)
    loading_rate = Float(1.0,
                         unit='mm/min',
                         input=True,
                         table_field=True,
                         auto_set=False,
                         enter_set=True)

    #--------------------------------------------------------------------------
    # composite cross section
    #--------------------------------------------------------------------------

    ccs = Instance(CompositeCrossSection)

    def _ccs_default(self):
        '''default settings
        '''
        # SFB 532 - demonstrator textile and concrete:
        fabric_layout_key = '2D-05-11'
        concrete_mixture_key = 'FIL-10-09'
        orientation_fn_key = 'all0'
        n_layers = 1
        #         s_tex_z = 0.060 / (n_layers + 1)
        ccs = CompositeCrossSection(
            fabric_layup_list=[
                #                             plain_concrete(s_tex_z * 0.5),
                FabricLayUp(
                    n_layers=n_layers,
                    orientation_fn_key=orientation_fn_key,
                    #                                    s_tex_z=s_tex_z,
                    s_tex_z=0.030,  # [m]
                    fabric_layout_key=fabric_layout_key),
                plain_concrete(0.025),  # [m]
                #                             plain_concrete(s_tex_z * 0.5)
            ],
            concrete_mixture_key=concrete_mixture_key)
        return ccs

    #--------------------------------------------------------------------------
    # Get properties of the composite
    #--------------------------------------------------------------------------

    # E-modulus of the composite at the time of testing
    E_c = Property(Float,
                   unit='MPa',
                   depends_on='input_change',
                   table_field=True)

    def _get_E_c(self):
        return self.ccs.get_E_c_time(self.age)

    # E-modulus of the composite after 28 days
    E_c28 = DelegatesTo('ccs', listenable=False)

    # reinforcement ration of the composite
    rho_c = DelegatesTo('ccs', listenable=False)

    #-------------------------------------------------------------------------
    # define processing
    #-------------------------------------------------------------------------

    # put this into the ironing procedure processor
    #
    jump_rtol = Float(0.9, auto_set=False, enter_set=True, ironing_param=True)

    data_array_ironed = Property(
        Array(float), depends_on='data_array, +ironing_param, +axis_selection')

    @cached_property
    def _get_data_array_ironed(self):
        '''remove the jumps in the displacement curves
        due to resetting the displacement gauges.
        '''
        print '*** curve ironing activated ***'

        # each column of the data array corresponds to a measured quantity,
        # e.g. the displacement at a given point as a function of time, u = f(t)
        #
        data_array_ironed = copy(self.data_array)

        for idx in range(self.data_array.shape[1]):

            # use ironing method only for columns of the displacement gauges.
            #
            #            print 'self.names_and_units[0]',self.names_and_units[0]
            #            print 'self.names_and_units',self.names_and_units
            if self.names_and_units[0][idx] not in {
                    'Kraft',
                    'Bezugskanal',
                    'DMS_o',
                    # for PEEKEL software check
                    # the following names:
                    'Datum/Uhrzeit',
                    'Kraft',
                    'Weg',
                    'DMS'
            }:

                # 1d-array corresponding to column in data_array
                data_arr = copy(data_array_ironed[:, idx])

                # get the difference between each point and its successor
                jump_arr = data_arr[1:] - data_arr[0:-1]

                # get the range of the measured data
                data_arr_range = max(data_arr) - min(data_arr)

                # determine the relevant criteria for a jump
                # based on the data range and the specified tolerances:
                jump_crit = self.jump_rtol * data_arr_range

                # get the indexes in 'data_column' after which a
                # jump exceeds the defined tolerance criteria
                jump_idx = where(fabs(jump_arr) > jump_crit)[0]

                #                 print 'number of jumps removed in data_arr_ironed for', self.names_and_units[0][ idx ], ': ', jump_idx.shape[0]
                #                 print 'force', unique(around(-self.data_array[jump_idx, 1], 2))
                # glue the curve at each jump together
                for jidx in jump_idx:
                    # get the offsets at each jump of the curve
                    shift = data_arr[jidx + 1] - data_arr[jidx]
                    # shift all succeeding values by the calculated offset
                    data_arr[jidx + 1:] -= shift

                data_array_ironed[:, idx] = data_arr[:]

        return data_array_ironed
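
    # Worked example of the ironing step above: for a gauge column
    #   data_arr = [0., 1., 2., -8., -7.]        (data range = 10)
    # and jump_rtol = 0.9 the jump criterion is 9.0; the difference of -10
    # between indices 2 and 3 exceeds it, so all values from index 3 onwards
    # are shifted by +10, giving the glued curve [0., 1., 2., 2., 3.].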

    def process_source_data(self):
        '''read in the measured data from file and assign
        attributes after array processing.
        If necessary modify the assigned data, i.e. change
        the sign or specify an offset for the specific test setup.
        '''
        print '*** process source data ***'

        super(ExpBT4PT, self).process_source_data()

        self._read_data_array()

        # curve ironing:
        #
        self.processed_data_array = self.data_array_ironed

        # set attributes:
        #
        self._set_array_attribs()

        # DIADAGO-measuring software:
        #
        if hasattr(self, "Kraft") and hasattr(self, "DB_mi") \
                and hasattr(self, "DB_re") and hasattr(self, "DB_li") \
                and hasattr(self, "W10_u") and hasattr(self, "DMS_o"):

            # convert units and change signs
            #
            self.Kraft -= self.Kraft[0]
            self.Kraft *= -1
            # vertical displacement at midspan [mm]:
            # (reset displacement gauge by its initial value and change sign
            # in order to return a positive value for a displacement)
            self.DB_mi -= self.DB_mi[0]
            self.DB_mi *= -1
            # vertical displacements at one third of the span (displacement under loading point) [mm]:
            # (left)
            self.DB_li -= self.DB_li[0]
            self.DB_li *= -1
            # (right)
            self.DB_re -= self.DB_re[0]
            self.DB_re *= -1
            # horizontal displacements at the bottom side of the bending specimen [mm]
            # (measuring length l_0 = 0.45 m)
            self.W10_u -= self.W10_u[0]
            self.W10_u *= -1
            # compressive strain at the upper side of the bending specimen
            # at midspan
            self.DMS_o -= self.DMS_o[0]
            # convert unit from [µm/m], i.e. [10^(-6) m/m], to [per mille]
            self.DMS_o /= 1000.

            # set attributes of displacement (original data before ironing):
            #
            DB_mi_orig = np.copy(self.data_array[:, 2])
            DB_mi_orig -= DB_mi_orig[0]
            DB_mi_orig *= -1
            self.add_trait("DB_mi_orig", Array(value=DB_mi_orig,
                                               transient=True))

            DB_li_orig = np.copy(self.data_array[:, 4])
            DB_li_orig -= DB_li_orig[0]
            DB_li_orig *= -1
            self.add_trait("DB_li_orig", Array(value=DB_li_orig,
                                               transient=True))

            DB_re_orig = np.copy(self.data_array[:, 5])
            DB_re_orig -= DB_re_orig[0]
            DB_re_orig *= -1
            self.add_trait("DB_re_orig", Array(value=DB_re_orig,
                                               transient=True))

        # PEEKEL-measuring software:
        #
        if hasattr(self, "Kraft") and hasattr(self, "Weg") \
                and hasattr(self, "WA_Links") and hasattr(self, "WA_Mitte_Links") \
                and hasattr(self, "WA_Mitte") and hasattr(self, "WA_Mitte_Rechts") \
                and hasattr(self, "WA_Rechts") and hasattr(self, "WA_unten") and hasattr(self, "DMS"):

            # convert units and change signs
            #
            self.Kraft -= self.Kraft[0]
            self.Kraft *= -1
            # vertical displacements [mm]:
            # (reset displacement gauges by their initial values and change sign
            # in order to return a positive value for a displacement)
            self.WA_Links -= self.WA_Links[0]
            self.WA_Links *= -1
            self.DB_li = copy(self.WA_Links)  # set alias
            self.WA_Mitte -= self.WA_Mitte[0]
            self.WA_Mitte *= -1
            self.DB_mi = copy(self.WA_Mitte)  # set alias
            self.WA_Rechts -= self.WA_Rechts[0]
            self.WA_Rechts *= -1
            self.DB_re = copy(self.WA_Rechts)  # set alias
            self.WA_Mitte_Links -= self.WA_Mitte_Links[0]
            self.WA_Mitte_Links *= -1
            self.WA_Mitte_Rechts -= self.WA_Mitte_Rechts[0]
            self.WA_Mitte_Rechts *= -1

            # horizontal displacements at the bottom side of the bending specimen [mm]
            # (measuring length l_0 = 0.30 m)
            self.WA_unten -= self.WA_unten[0]
            self.WA_unten *= -1
            # compressive strain at the upper side of the bending specimen
            # at midspan
            self.DMS -= self.DMS[0]
            # convert unit from [µm/m], i.e. [10^(-6) m/m], to [per mille]
            self.DMS /= 1000.
            self.DMS_o = copy(self.DMS)  # set alias

            # set attributes of displacement (original data before ironing):
            #
            DB_mi_orig = np.copy(self.data_array[:, 5])
            DB_mi_orig -= DB_mi_orig[0]
            DB_mi_orig *= -1
            self.add_trait("DB_mi_orig", Array(value=DB_mi_orig,
                                               transient=True))

            DB_li_orig = np.copy(self.data_array[:, 3])
            DB_li_orig -= DB_li_orig[0]
            DB_li_orig *= -1
            self.add_trait("DB_li_orig", Array(value=DB_li_orig,
                                               transient=True))

            DB_re_orig = np.copy(self.data_array[:, 7])
            DB_re_orig -= DB_re_orig[0]
            DB_re_orig *= -1

            self.add_trait("DB_re_orig", Array(value=DB_re_orig,
                                               transient=True))

    K_bending_elast_c = Property(Array('float_'), depends_on='input_change')

    @cached_property
    def _get_K_bending_elast_c(self):
        '''calculate the analytical bending stiffness of the beam (4 point bending)
        relation between center deflection and 2 * load/2 in the thirdpoints (sum up to F)
        '''
        t = self.thickness
        w = self.width
        L = self.length

        # composite E-modulus
        #
        E_c = self.E_c

        # moment of inertia
        #
        I_yy = t**3 * w / 12.

        delta_11 = (L**3) / 56.348 / E_c / I_yy

        # [MN/m]=[kN/mm] bending stiffness with respect to a force applied at center of the beam
        #
        K_bending_elast_c = 1 / delta_11

        print 'K_bending_elast_c', K_bending_elast_c
        return K_bending_elast_c

    K_bending_elast_thirdpoints = Property(Array('float_'),
                                           depends_on='input_change')

    @cached_property
    def _get_K_bending_elast_thirdpoints(self):
        '''calculate the analytical bending stiffness of the beam (4 point bending)
        relation between thirdpoint deflection and 2 * load/2 in the thirdpoints (sum up to F)
        '''
        t = self.thickness
        w = self.width
        L = self.length

        # composite E-modulus
        #
        E_c = self.E_c

        # moment of inertia
        #
        I_yy = t**3 * w / 12.

        delta_11 = (L**3) * 5 / 324 / E_c / I_yy

        # [MN/m]=[kN/mm] bending stiffness with respect to a force applied at center of the beam
        #
        K_bending_elast_thirdpoints = 1 / delta_11

        print 'K_bending_elast_thirdpoints', K_bending_elast_thirdpoints
        return K_bending_elast_thirdpoints
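
    # Origin of the two constants above (simply supported beam, two loads of
    # F/2 applied at the third points):
    #   center deflection:      delta = 23 * F * L^3 / (1296 * E_c * I_yy),
    #                           with 1296 / 23 = 56.348
    #   load-point deflection:  delta =  5 * F * L^3 / ( 324 * E_c * I_yy)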

    #-------------------------------------------------------------------------
    # plot templates
    #-------------------------------------------------------------------------

    plot_templates = {
        'force / deflection (center)':
        '_plot_force_deflection_center',
        'force / deflection (center) - original':
        '_plot_force_deflection_center_orig',
        'smoothed force / deflection (center)':
        '_plot_smoothed_force_deflection_center',
        'force / deflection (thirdpoints)':
        '_plot_force_deflection_thirdpoints',
        'strain (top/bottom) / force':
        '_plot_strain_top_bottom_force',
        'displacement (ironed/original - center)':
        '_plot_ironed_orig_force_deflection_center',
        'displacement (ironed/original - left)':
        '_plot_ironed_orig_force_deflection_left',
        'displacement (ironed/original - right)':
        '_plot_ironed_orig_force_deflection_right',
        'displacement (center, thirdpoints) (ironed, average)':
        '_plot_ironed_force_deflection_avg',
        'stress / deflection (center) - original':
        '_plot_stress_deflection_center_orig'
    }

    default_plot_template = 'force / deflection (center)'

    # get only the ascending branch of the response curve
    #
    max_force_idx = Property(Int)

    def _get_max_force_idx(self):
        '''get the index of the maximum force'''
        # NOTE: processed data returns positive values for force and
        # displacement
        return argmax(self.Kraft)

    def _plot_force_deflection_center(self,
                                      axes,
                                      offset_w=0.,
                                      color='black',
                                      linewidth=1.,
                                      label=None):
        # get only the ascending branch of the response curve
        f_asc = self.Kraft[:self.max_force_idx + 1]
        w_asc = self.DB_mi[:self.max_force_idx + 1]

        # add curves
        #
        axes.plot(w_asc, f_asc, linewidth=linewidth, label=label, color=color)

        # add axes labels
        #
        xkey = 'deflection [mm]'
        ykey = 'force [kN]'
        #        axes.set_xlabel('%s' % (xkey,))
        #        axes.set_ylabel('%s' % (ykey,))
        # draw the analytical elastic stiffness as a dashed reference line
        f_max = f_asc[-1]
        K_c = self.K_bending_elast_c
        w_linear = np.array([0., f_max / K_c])
        F_linear = np.array([0., f_max])
        axes.plot(w_linear,
                  F_linear,
                  linestyle='--',
                  color='black',
                  linewidth=linewidth)

    def _plot_force_deflection_center_orig(self,
                                           axes,
                                           offset_w=0.,
                                           color='black',
                                           linewidth=1.,
                                           label=None):
        '''plot the original data before the jumps have been ironed out
        '''
        # get only the ascending branch of the response curve
        f_asc = self.Kraft[:self.max_force_idx + 1]
        w_asc = self.DB_mi_orig[:self.max_force_idx + 1]

        # add curves
        #
        axes.plot(w_asc, f_asc, linewidth=linewidth, label=label, color=color)

        # add axes labels
        #
        xkey = 'deflection [mm]'
        ykey = 'force [kN]'
        #        axes.set_xlabel('%s' % (xkey,))
        #        axes.set_ylabel('%s' % (ykey,))
        # draw the analytical elastic stiffness as a dashed reference line
        f_max = f_asc[-1]
        K_c = self.K_bending_elast_c
        w_linear = np.array([0., f_max / K_c])
        F_linear = np.array([0., f_max])
        axes.plot(w_linear,
                  F_linear,
                  linestyle='--',
                  color='black',
                  linewidth=linewidth)

    n_fit_window_fraction = Float(0.1)

    def _plot_stress_deflection_center_orig(self,
                                            axes,
                                            n_roving,
                                            A_roving,
                                            deff,
                                            offset_w=0.,
                                            color='black',
                                            linewidth=1.,
                                            label=None):
        '''calculate and plot the reinforcement stress and the deflection for comparison of different textiles
        '''
        # get only the ascending branch of the response curve
        L = self.length
        s_asc = self.Kraft[:self.max_force_idx + 1] * (L / 5) * 1000 / (
            deff * 0.95 * n_roving * A_roving)
        w_asc = self.DB_mi_orig[:self.max_force_idx + 1]

        # add curves
        #
        axes.plot(w_asc, s_asc, linewidth=linewidth, label=label, color=color)

        # add axes labels
        #
        xkey = 'deflection [mm]'
        ykey = 'stress [MPa]'
#        axes.set_xlabel('%s' % (xkey,))
#        axes.set_ylabel('%s' % (ykey,))

    def _plot_smoothed_force_deflection_center(self, axes):
        # get only the ascending branch of the response curve
        f_asc = self.Kraft[:self.max_force_idx + 1]
        w_asc = self.DB_mi[:self.max_force_idx + 1]

        # add axes labels
        #
        n_points = int(self.n_fit_window_fraction * len(w_asc))
        f_smooth = smooth(f_asc, n_points, 'flat')
        w_smooth = smooth(w_asc, n_points, 'flat')

        # add curves
        #
        axes.plot(w_smooth, f_smooth, color='blue', linewidth=2)

    def _plot_force_deflection_thirdpoints(self, axes):
        '''deflection at the third points (under the loading points)
        '''
        # get only the ascending branch of the response curve
        f_asc = self.Kraft[:self.max_force_idx + 1]
        # displacement left
        w_l_asc = self.DB_li[:self.max_force_idx + 1]
        # displacement right
        w_r_asc = self.DB_re[:self.max_force_idx + 1]

        #        # average
        #        w_lr_asc = (w_l_asc + w_r_asc) / 2

        #        axes.plot( w_lr_asc, f_asc, color = 'green', linewidth = 2 )
        axes.plot(w_l_asc, f_asc, color='green', linewidth=1)
        axes.plot(w_r_asc, f_asc, color='green', linewidth=1)
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast_thirdpoints])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_strain_top_bottom_force(self, axes):
        '''strain at the top and the bottom side of the specimen versus force
        '''
        # get only the ascending branch of the response curve
        f_asc = self.Kraft[:self.max_force_idx + 1]
        # compressive strain (top) [per mille]
        eps_c = self.DMS_o[:self.max_force_idx + 1]
        # tensile strain (bottom) [per mille]

        # NOTE: measuring length is not specified in 'exp_bt'-setup!
        # @todo: add this as configurable trait in the 'exp_bt' setup!
        if hasattr(self, "W10_u"):
            # NOTE: only valid for constant measuring length l_0 = 0.45m
            eps_t = self.W10_u[:self.max_force_idx + 1] / 0.45

        if hasattr(self, "WA_unten"):
            # NOTE: only valid for constant measuring length l_0 = 0.30m
            eps_t = self.WA_unten[:self.max_force_idx + 1] / 0.30

        # add curves
        #
        axes.plot(eps_c, f_asc, color='blue', linewidth=1)
        axes.plot(eps_t, f_asc, color='red', linewidth=1)

        # add axes labels
        #
        xkey = 'strain [1e-3]'
        ykey = 'force [kN]'
        axes.set_xlabel('%s' % (xkey, ))
        axes.set_ylabel('%s' % (ykey, ))

    def _plot_ironed_orig_force_deflection_center(self, axes):
        '''plot the original center displacement as measured by the displacement
        gauge and compare it with the curve after the ironing procedure
        '''
        # get only the ascending branch of the response curve
        F_asc = self.Kraft[:self.max_force_idx + 1]
        w_ironed_asc = self.DB_mi[:self.max_force_idx + 1]
        w_orig_asc = self.DB_mi_orig[:self.max_force_idx + 1]

        # add curves
        #
        axes.plot(w_ironed_asc, F_asc, color='blue', linewidth=1.5)
        axes.plot(w_orig_asc, F_asc, color='grey', linewidth=1.5)

        # add axes labels
        #
        xkey = 'deflection (original data / ironed data) [mm]'
        ykey = 'force [kN]'
#        axes.set_xlabel('%s' % (xkey,))
#        axes.set_ylabel('%s' % (ykey,))

#        fw_arr = np.hstack([F_asc[:, None], w_ironed_asc[:, None]])
#        print 'fw_arr.shape', fw_arr.shape
#        np.savetxt('BT-4PT-12c-6cm-TU-SH4-V1_f-w_interpolated.csv', fw_arr, delimiter='    ')

    def _plot_ironed_orig_force_deflection_left(self, axes):
        '''plot the original displacement (left) as measured by the displacement
        gauge and compare it with the curve after the ironing procedure
        '''
        w_ironed = self.DB_li
        w_orig = self.DB_li_orig
        F = self.Kraft
        axes.plot(w_ironed, F)
        axes.plot(w_orig, F)
        xkey = 'deflection (original data / ironed data) [mm]'
        ykey = 'force [kN]'
#        axes.set_xlabel('%s' % (xkey,))
#        axes.set_ylabel('%s' % (ykey,))

    def _plot_ironed_orig_force_deflection_right(self, axes):
        '''plot the original displacement (right) as measured by the displacement
        gauge and compare it with the curve after the ironing procedure
        '''
        w_ironed = self.DB_re
        w_orig = self.DB_re_orig
        F = self.Kraft
        axes.plot(w_ironed, F)
        axes.plot(w_orig, F)
        xkey = 'deflection (original data / ironed data) [mm]'
        ykey = 'force [kN]'
#        axes.set_xlabel('%s' % (xkey,))
#        axes.set_ylabel('%s' % (ykey,))

    def _plot_ironed_force_deflection_avg(self, axes):
        '''plot the center displacement and the average of the third-point
        displacements after the ironing procedure has removed the resetting
        jumps of the displacement gauges
        '''
        # get only the ascending branch of the response curve
        F_asc = self.Kraft[:self.max_force_idx + 1]
        w_c_ironed_asc = self.DB_mi[:self.max_force_idx + 1]
        w_l_ironed_asc = self.DB_li[:self.max_force_idx + 1]
        w_r_ironed_asc = self.DB_re[:self.max_force_idx + 1]
        w_lr_avg = (w_l_ironed_asc + w_r_ironed_asc) / 2.
        axes.plot(w_lr_avg, F_asc, color='black', linewidth=1.5, linestyle='-')
        axes.plot(w_c_ironed_asc,
                  F_asc,
                  color='black',
                  linewidth=1.5,
                  linestyle='-')
        xkey = 'deflection [mm]'
        ykey = 'force [kN]'


#        axes.set_xlabel('%s' % (xkey,))
#        axes.set_ylabel('%s' % (ykey,))

#-------------------------------------------------------------------------
# view
#-------------------------------------------------------------------------

    traits_view = View(VGroup(
        Group(Item('length', format_str="%.3f"),
              Item('width', format_str="%.3f"),
              Item('thickness', format_str="%.3f"),
              label='geometry'),
        Group(Item('loading_rate'), Item('age'), label='loading rate and age'),
        Group(Item('E_c', show_label=True, style='readonly',
                   format_str="%.0f"),
              Item('ccs@', show_label=False),
              label='composite cross section')),
                       scrollable=True,
                       resizable=True,
                       height=0.8,
                       width=0.6)
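
A short (illustrative) driver for the experiment class above; the file path is
a placeholder, and the matplotlib usage is an assumption about how the
'_plot_*' templates are meant to be called -- each template takes an axes
object as its first argument.

import matplotlib.pyplot as plt

ex = ExpBT4PT(data_file='/path/to/BT-4PT-V1.DAT')   # hypothetical path
ex.process_source_data()

fig, ax = plt.subplots()
plot_method = getattr(ex, ex.plot_templates[ex.default_plot_template])
plot_method(ax)
plt.show()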
Exemple #11
0
class LUTManager(Base):

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The lookup table.
    lut = Instance(tvtk.LookupTable, (), record=False)
    # The scalar bar.
    scalar_bar = Instance(tvtk.ScalarBarActor, (), record=True)
    # The scalar_bar_widget
    scalar_bar_widget = Instance(tvtk.ScalarBarWidget, ())

    # The representation associated with the scalar_bar_widget.  This
    # only exists in VTK versions above about 5.2.
    scalar_bar_representation = Instance(tvtk.Object,
                                         allow_none=True,
                                         record=True)

    # The title text property of the axes.
    title_text_property = Property(record=True)

    # The label text property of the axes.
    label_text_property = Property(record=True)

    # The current mode of the LUT.
    lut_mode = Enum('blue-red',
                    lut_mode_list(),
                    desc='the type of the lookup table')

    # File name of the LUT file to use.
    file_name = Str('',
                    editor=FileEditor,
                    desc='the filename containing the LUT')

    # Reverse the colors of the LUT.
    reverse_lut = Bool(False, desc='if the lut is to be reversed')

    # Turn on/off the visibility of the scalar bar.
    show_scalar_bar = Bool(False, desc='if scalar bar is shown or not')

    # This is an alias for show_scalar_bar.
    show_legend = Property(Bool, desc='if legend is shown or not')

    # The number of labels to use for the scalar bar.
    number_of_labels = Range(0,
                             64,
                             8,
                             enter_set=True,
                             auto_set=False,
                             desc='the number of labels to display')

    # Number of colors for the LUT.
    number_of_colors = Range(2,
                             2147483647,
                             256,
                             enter_set=True,
                             auto_set=False,
                             desc='the number of colors for the LUT')

    # Enable shadowing of the labels and text.
    shadow = Bool(False, desc='if the labels and text have shadows')

    # Use the default data name or the user specified one.
    use_default_name = Bool(True,
                            desc='if the default data name is to be used')

    # The default data name -- set by the module manager.
    default_data_name = Str('data',
                            enter_set=True,
                            auto_set=False,
                            desc='the default data name')

    # The optionally user specified name of the data.
    data_name = Str('',
                    enter_set=True,
                    auto_set=False,
                    desc='the title of the legend')

    # Use the default range or user specified one.
    use_default_range = Bool(True,
                             desc='if the default data range is to be used')
    # The default data range -- this is computed and set by the
    # module manager.
    default_data_range = Array(shape=(2, ),
                               value=[0.0, 1.0],
                               dtype=float,
                               enter_set=True,
                               auto_set=False,
                               desc='the default range of the data mapped')

    # The optionally user defined range of the data.
    data_range = Array(shape=(2, ),
                       value=[0.0, 1.0],
                       dtype=float,
                       enter_set=True,
                       auto_set=False,
                       desc='the range of the data mapped')

    # Create a new LUT.
    create_lut = Button('Launch LUT editor',
                        desc='if we launch a Lookup table editor in'
                        ' a separate process')

    ########################################
    ## Private traits.
    # The original range of the data.
    _orig_data_range = Array(shape=(2, ), value=[0.0, 1.0], dtype=float)
    _title_text_property = Instance(tvtk.TextProperty)
    _label_text_property = Instance(tvtk.TextProperty)

    ######################################################################
    # `object` interface
    ######################################################################
    def __init__(self, **traits):
        super(LUTManager, self).__init__(**traits)

        # Initialize the scalar bar.
        sc_bar = self.scalar_bar
        sc_bar.trait_set(lookup_table=self.lut,
                         title=self.data_name,
                         number_of_labels=self.number_of_labels,
                         orientation='horizontal',
                         width=0.8,
                         height=0.17)
        pc = sc_bar.position_coordinate
        pc.trait_set(coordinate_system='normalized_viewport',
                     value=(0.1, 0.01, 0.0))
        self._shadow_changed(self.shadow)

        # Initialize the lut.
        self._lut_mode_changed(self.lut_mode)

        # Set the private traits.
        ttp = self._title_text_property = sc_bar.title_text_property
        ltp = self._label_text_property = sc_bar.label_text_property

        # Call render when the text properties are changed.
        ttp.on_trait_change(self.render)
        ltp.on_trait_change(self.render)

        # Initialize the scalar_bar_widget
        self.scalar_bar_widget.trait_set(scalar_bar_actor=self.scalar_bar,
                                         key_press_activation=False)
        self._number_of_colors_changed(self.number_of_colors)

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.
        """
        # Do nothing if we are already running.
        if self.running:
            return

        # Show the legend if necessary.
        self._show_scalar_bar_changed(self.show_scalar_bar)

        # Call parent method to set the running state.
        super(LUTManager, self).start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.
        """
        if not self.running:
            return

        # Hide the scalar bar.
        sbw = self.scalar_bar_widget
        if sbw.interactor is not None:
            sbw.off()

        # Call parent method to set the running state.
        super(LUTManager, self).stop()

    ######################################################################
    # Non-public interface
    ######################################################################
    def _lut_mode_changed(self, value):

        if value == 'file':
            if self.file_name:
                self.load_lut_from_file(self.file_name)
            #self.lut.force_build()
            return

        reverse = self.reverse_lut
        if value in pylab_luts:
            lut = pylab_luts[value]
            if reverse:
                lut = lut[::-1, :]
            n_total = len(lut)
            n_color = self.number_of_colors
            if n_color < n_total:
                lut = lut[::int(round(n_total / float(n_color)))]
            self.load_lut_from_list(lut.tolist())
            #self.lut.force_build()
            return
        elif value == 'blue-red':
            if reverse:
                hue_range = 0.0, 0.6667
                saturation_range = 1.0, 1.0
                value_range = 1.0, 1.0
            else:
                hue_range = 0.6667, 0.0
                saturation_range = 1.0, 1.0
                value_range = 1.0, 1.0
        elif value == 'black-white':
            if reverse:
                hue_range = 0.0, 0.0
                saturation_range = 0.0, 0.0
                value_range = 1.0, 0.0
            else:
                hue_range = 0.0, 0.0
                saturation_range = 0.0, 0.0
                value_range = 0.0, 1.0
        lut = self.lut
        lut.trait_set(hue_range=hue_range,
                      saturation_range=saturation_range,
                      value_range=value_range,
                      number_of_table_values=self.number_of_colors,
                      ramp='sqrt')
        lut.modified()
        lut.force_build()

        self.render()

    def _scene_changed(self, value):
        sbw = self.scalar_bar_widget
        if value is None:
            return
        if sbw.interactor is not None:
            sbw.off()
        value.add_widgets(sbw, enabled=False)
        if self.show_scalar_bar:
            sbw.on()
        self._foreground_changed_for_scene(None, value.foreground)

    def _foreground_changed_for_scene(self, old, new):
        # Change the default color for the text.
        self.title_text_property.color = new
        self.label_text_property.color = new
        self.render()

    def _number_of_colors_changed(self, value):
        if self.lut_mode == 'file':
            return
        elif self.lut_mode in pylab_luts:
            # We can't interpolate these LUTs, as they are defined from a
            # table. We hack around this limitation
            reverse = self.reverse_lut
            lut = pylab_luts[self.lut_mode]
            if reverse:
                lut = lut[::-1, :]
            n_total = len(lut)
            if value > n_total:
                return
            lut = lut[::int(round(n_total / float(value)))]
            self.load_lut_from_list(lut.tolist())
        else:
            lut = self.lut
            lut.number_of_table_values = value
            lut.modified()
            lut.build()
            self.render()  # necessary to flush.
        sc_bar = self.scalar_bar
        sc_bar.maximum_number_of_colors = value
        sc_bar.modified()
        self.render()

    def _number_of_labels_changed(self, value):
        sc_bar = self.scalar_bar
        sc_bar.number_of_labels = value
        sc_bar.modified()
        self.render()

    def _file_name_changed(self, value):
        if self.lut_mode == 'file':
            self.load_lut_from_file(value)
        else:
            # This will automagically load the LUT from the file.
            self.lut_mode = 'file'

    def _reverse_lut_changed(self, value):
        # This will do the needful.
        self._lut_mode_changed(self.lut_mode)

    def _show_scalar_bar_changed(self, value):
        if self.scene is not None:
            # Without a title for scalar bar actor, vtkOpenGLTexture logs this:
            # Error: No scalar values found for texture input!
            if self.scalar_bar.title == '':
                self.scalar_bar.title = ' '
            self.scalar_bar_widget.enabled = value
            self.render()

    def _get_show_legend(self):
        return self.show_scalar_bar

    def _set_show_legend(self, value):
        old = self.show_scalar_bar
        if value != old:
            self.show_scalar_bar = value
            self.trait_property_changed('show_legend', old, value)

    def _shadow_changed(self, value):
        sc_bar = self.scalar_bar
        sc_bar.title_text_property.shadow = self.shadow
        sc_bar.label_text_property.shadow = self.shadow
        self.render()

    def _use_default_name_changed(self, value):
        self._default_data_name_changed(self.default_data_name)

    def _data_name_changed(self, value):
        sc_bar = self.scalar_bar
        sc_bar.title = value
        sc_bar.modified()
        self.render()

    def _default_data_name_changed(self, value):
        if self.use_default_name:
            self.data_name = value

    def _use_default_range_changed(self, value):
        self._default_data_range_changed(self.default_data_range)

    def _data_range_changed(self, value):
        # should be guaranteed by callers, otherwise VTK will print an error
        assert value[0] <= value[1]
        try:
            self.lut.set_range(value[0], value[1])
        except TypeError:
            self.lut.set_range((value[0], value[1]))
        except AttributeError:
            self.lut.range = value
        self.scalar_bar.modified()
        self.render()

    def _default_data_range_changed(self, value):
        if self.use_default_range:
            self.data_range = value

    def _visible_changed(self, value):
        state = self.show_scalar_bar and value
        self._show_scalar_bar_changed(state)
        super(LUTManager, self)._visible_changed(value)

    def load_lut_from_file(self, file_name):
        lut_list = []
        if len(file_name) > 0:
            try:
                f = open(file_name, 'r')
            except IOError:
                msg = "Cannot open Lookup Table file: %s\n" % file_name
                error(msg)
            else:
                f.close()
                try:
                    lut_list = parse_lut_file(file_name)
                except IOError as err_msg:
                    msg = "Sorry could not parse LUT file: %s\n" % file_name
                    msg += str(err_msg)
                    error(msg)
                else:
                    if self.reverse_lut:
                        lut_list.reverse()
                    self.lut = set_lut(self.lut, lut_list)
                    self.render()

    def load_lut_from_list(self, lut_list):
        self.lut = set_lut(self.lut, lut_list)
        self.render()

    def _get_title_text_property(self):
        return self._title_text_property

    def _get_label_text_property(self):
        return self._label_text_property

    def _create_lut_fired(self):
        from tvtk import util
        script = os.path.join(os.path.dirname(util.__file__),
                              'wx_gradient_editor.py')
        subprocess.Popen([sys.executable, script])
        auto_close_message('Launching LUT editor in separate process ...')

    def _scalar_bar_representation_default(self):
        w = self.scalar_bar_widget
        if hasattr(w, 'representation'):
            r = w.representation
            r.on_trait_change(self.render)
            return r
        else:
            return None
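
All of the handlers above rely on the Traits static-notification convention: a method named _<trait>_changed(self, value) is invoked automatically whenever that trait is assigned. A minimal self-contained sketch of the pattern (the class and trait names here are illustrative, not taken from the example above):

from traits.api import HasTraits, Int

class ScalarBarDemo(HasTraits):
    # assigning to this trait triggers the handler below
    number_of_labels = Int(8)

    def _number_of_labels_changed(self, value):
        # called automatically with the newly assigned value
        print('number_of_labels ->', value)

demo = ScalarBarDemo()
demo.number_of_labels = 4   # prints: number_of_labels -> 4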
Example #12
    def process_source_data(self):
        '''Read in the measured data from file and assign
        attributes after array processing.
        If necessary, modify the assigned data, i.e. change
        the sign or specify an offset for the specific test setup.
        '''
        print('*** process source data ***')

        super(ExpBT4PT, self).process_source_data()

        self._read_data_array()

        # curve ironing:
        #
        self.processed_data_array = self.data_array_ironed

        # set attributes:
        #
        self._set_array_attribs()

        # DIADAGO-measuring software:
        #
        if hasattr(self, "Kraft") and hasattr(self, "DB_mi") \
                and hasattr(self, "DB_re") and hasattr(self, "DB_li") \
                and hasattr(self, "W10_u") and hasattr(self, "DMS_o"):

            # convert units and change signs
            #
            self.Kraft -= self.Kraft[0]
            self.Kraft *= -1
            # vertical displacement at midspan [mm]:
            # (reset displacement gauge by its initial value and change sign
            # in order to return a positive value for a displacement)
            self.DB_mi -= self.DB_mi[0]
            self.DB_mi *= -1
            # vertical displacements at one third of the span (displacement under loading point) [mm]:
            # (left)
            self.DB_li -= self.DB_li[0]
            self.DB_li *= -1
            # (right)
            self.DB_re -= self.DB_re[0]
            self.DB_re *= -1
            # horizontal displacements at the bottom side of the bending specimen [mm]
            # (measuring length l_0 = 0.45 m)
            self.W10_u -= self.W10_u[0]
            self.W10_u *= -1
            # compressive strain at the upper side of the bending specimen at
            # midspan [µm/m]
            self.DMS_o -= self.DMS_o[0]
            # change unit from [µm/m], i.e. [10^(-6) m/m], to [mm/m]
            self.DMS_o /= 1000.

            # set attributes of displacement (original data before ironing):
            #
            DB_mi_orig = np.copy(self.data_array[:, 2])
            DB_mi_orig -= DB_mi_orig[0]
            DB_mi_orig *= -1
            self.add_trait("DB_mi_orig", Array(value=DB_mi_orig,
                                               transient=True))

            DB_li_orig = np.copy(self.data_array[:, 4])
            DB_li_orig -= DB_li_orig[0]
            DB_li_orig *= -1
            self.add_trait("DB_li_orig", Array(value=DB_li_orig,
                                               transient=True))

            DB_re_orig = np.copy(self.data_array[:, 5])
            DB_re_orig -= DB_re_orig[0]
            DB_re_orig *= -1
            self.add_trait("DB_re_orig", Array(value=DB_re_orig,
                                               transient=True))

        # PEEKEL-measuring software:
        #
        if hasattr(self, "Kraft") and hasattr(self, "Weg") \
                and hasattr(self, "WA_Links") and hasattr(self, "WA_Mitte_Links") \
                and hasattr(self, "WA_Mitte") and hasattr(self, "WA_Mitte_Rechts") \
                and hasattr(self, "WA_Rechts") and hasattr(self, "WA_unten") and hasattr(self, "DMS"):

            # convert units and change signs
            #
            self.Kraft -= self.Kraft[0]
            self.Kraft *= -1
            # vertical displacements [mm]:
            # (reset displacement gauges by their initial values and change sign
            # in order to return a positive value for a displacement)
            self.WA_Links -= self.WA_Links[0]
            self.WA_Links *= -1
            self.DB_li = copy(self.WA_Links)  # set alias
            self.WA_Mitte -= self.WA_Mitte[0]
            self.WA_Mitte *= -1
            self.DB_mi = copy(self.WA_Mitte)  # set alias
            self.WA_Rechts -= self.WA_Rechts[0]
            self.WA_Rechts *= -1
            self.DB_re = copy(self.WA_Rechts)  # set alias
            self.WA_Mitte_Links -= self.WA_Mitte_Links[0]
            self.WA_Mitte_Links *= -1
            self.WA_Mitte_Rechts -= self.WA_Mitte_Rechts[0]
            self.WA_Mitte_Rechts *= -1

            # horizontal displacements at the bottom side of the bending specimen [mm]
            # (measuring length l_0 = 0.30 m)
            self.WA_unten -= self.WA_unten[0]
            self.WA_unten *= -1
            # compressive strain at the upper side of the bending specimen at
            # midspan [µm/m]
            self.DMS -= self.DMS[0]
            # change unit from [µm/m], i.e. [10^(-6) m/m], to [mm/m]
            self.DMS /= 1000.
            self.DMS_o = copy(self.DMS)  # set alias

            # set attributes of displacement (original data before ironing):
            #
            DB_mi_orig = np.copy(self.data_array[:, 5])
            DB_mi_orig -= DB_mi_orig[0]
            DB_mi_orig *= -1
            self.add_trait("DB_mi_orig", Array(value=DB_mi_orig,
                                               transient=True))

            DB_li_orig = np.copy(self.data_array[:, 3])
            DB_li_orig -= DB_li_orig[0]
            DB_li_orig *= -1
            self.add_trait("DB_li_orig", Array(value=DB_li_orig,
                                               transient=True))

            DB_re_orig = np.copy(self.data_array[:, 7])
            DB_re_orig -= DB_re_orig[0]
            DB_re_orig *= -1

            self.add_trait("DB_re_orig", Array(value=DB_re_orig,
                                               transient=True))
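
Both measuring-software branches repeat the same preprocessing idiom: shift each channel so it starts at zero, then flip the sign so displacements come out positive. A small hedged helper capturing that pattern (the name reset_and_flip is ours, not from the source):

import numpy as np

def reset_and_flip(channel):
    '''Return a copy shifted to start at zero, with the sign inverted.'''
    channel = np.asarray(channel, dtype=float)
    # equivalent to (channel - channel[0]) * -1 from the example above
    return channel[0] - channel

w = np.array([0.5, 0.3, -0.2])
print(reset_and_flip(w))   # -> [0.  0.2 0.7]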
Example #13
class CellGrid(HasTraits):
    '''
    Manage an array of cells defined within a structured grid.

    The distinction between the coordinate information supplied 
    in the arrays is done using the following naming convention:

    point - geometric points within the regular grid

    node - point specified in the grid_cell specification

    vertex - node with topological function (corner nodes)

    base_node - the first node of an element

    point_idx_grid ... enumeration of points within the point_grid

    cell_idx_grid ... enumeration of cells in the grid maps the 
                      topological index to the flattened index
                      ( ix, iy, iz ) -> ii

    cell_node_map ... array mapping the cell idx to the  point idx 
                      representing a node of the cell

    '''

    # Everything depends on the grid_cell_specification
    # defining the distribution of nodes within the cell.
    #
    grid_cell_spec = Instance(CellSpec)

    def _grid_cell_spec_default(self):
        return CellSpec()

    n_dims = Delegate('grid_cell_spec')

    # Grid cell template - gets repeated according to the shape
    # specified within the grid geometry specification
    #
    grid_cell = Property(depends_on='grid_cell_spec.+')

    @cached_property
    def _get_grid_cell(self):
        return GridCell(grid_cell_spec=self.grid_cell_spec)

    # Grid geometry specification
    #
    coord_min = Array(Float, value=[0., 0., 0.])
    coord_max = Array(Float, value=[1., 1., 1.])

    # Remark[rch]:
    # beware - the Int type is not regarded as a normal int
    # within an array and must first be converted to an int array
    #
    # Had we defined int as the dtype of the array, there would
    # be errors during editing
    #
    shape = Array(Int, value=[1, 1, 1])

    # Derived specifier for element grid shape
    # It converts the Int array to int so that it can be
    # used by general numpy operators
    #
    cell_idx_grid_shape = Property(Array(int), depends_on='shape')

    @cached_property
    def _get_cell_idx_grid_shape(self):
        return array(self.shape, dtype=int)

    cell_idx_grid_size = Property(Int, depends_on='shape')

    @cached_property
    def _get_cell_idx_grid_size(self):
        return reduce(lambda x, y: x * y, self.cell_idx_grid_shape)

    # Grid with the enumeration of the cells respecting
    # the dimensionality. This grid is used to implement
    # the mapping between the cells and nodes.
    #
    cell_idx_grid = Property(Array(int), depends_on='shape')

    @cached_property
    def _get_cell_idx_grid(self):
        return arange(self.cell_idx_grid_size).reshape(
            self.cell_idx_grid_shape)

    def __getitem__(self, idx):
        '''High level access and slicing to the cells within the grid.

        The return value is a CellGridSlice giving access to
        1. the array of cell indices
        2. the array of nodes for each element
        3. the array of coordinates for each node.
        '''
        return CellGridSlice(cell_grid=self, grid_slice=idx)

    #-------------------------------------------------------------------------
    # Shape and size characteristics used for both the idx_grid and point_grid
    #-------------------------------------------------------------------------
    def _get_point_grid_shape(self):
        '''Get the grid shape for the full index and point grids.

        This is the background grid. Some of the nodes can be unused by the
        cells depending on the specification in the grid_cell_spec.
        '''
        cell_shape = self.grid_cell_spec.get_cell_shape().astype(int)
        cell_idx_grid_shape = self.cell_idx_grid_shape
        return multiply(cell_shape - 1, cell_idx_grid_shape) + 1

    point_grid_size = Property(depends_on='shape,grid_cell_spec.node_coords')

    def _get_point_grid_size(self):
        '''Get the size of the full index and point grids
        '''
        shape = self._get_point_grid_shape()
        return reduce(lambda i, j: i * j, shape)

    #-------------------------------------------------------------------------
    # point_idx_grid - shaping and slicing methods for construction and orientation
    # point_idx_grid represents the enumeration of the nodes of the cell grid. It
    # serves for constructing the mappings between cells and nodes.
    #-------------------------------------------------------------------------
    def _get_point_idx_grid_slices(self):
        '''Get slices defining the index grid in a format suitable 
        for mgrid generation.
        '''
        subcell_shape = self.grid_cell_spec.get_cell_shape() - 1
        cell_idx_grid_shape = self.cell_idx_grid_shape
        return tuple([
            slice(0, c * g + 1)
            for c, g in zip(subcell_shape, cell_idx_grid_shape)
        ])

    point_idx_grid = Property(Array,
                              depends_on='shape,grid_cell_spec.node_coords')

    @cached_property
    def _get_point_idx_grid(self):
        '''Get the index numbers of the points within the grid
        '''
        return arange(self.point_grid_size).reshape(
            self._get_point_grid_shape())

    #-------------------------------------------------------------------------
    # Unit cell enumeration - used as a template for enumerations in 3D
    #-------------------------------------------------------------------------
    idx_cell_slices = Property(Tuple)

    def _get_idx_cell_slices(self):
        '''Get slices extracting the first cell from the point index grid
        '''
        cell_shape = self.grid_cell_spec.get_cell_shape()
        return tuple([slice(0, c) for c in cell_shape])

    idx_cell = Property(Array)

    def _get_idx_cell(self):
        '''Get the node map within a cell of a 1-3 dimensional grid

        The enumeration of nodes within a single cell is managed by the
        self.grid_cell. This must be adapted to the global enumeration of the grid.

        The innermost index runs over the z-axis. Thus, the indices of the points
        on the z axis are [0, 1, ..., shape[2]-1]. The next row of nodes along the
        y axis has the numbers [shape[2], shape[2]+1, ..., 2*shape[2]-1].
        '''
        return self.point_idx_grid[self.idx_cell_slices]

    def get_cell_idx(self, offset):
        '''Get the address of the cell within the cell grid.
        '''
        idx_tuple = zeros((self.n_dims, ), dtype='int_')
        roof = offset
        for i, n in enumerate(self.shape[-1:0:-1]):
            idx = roof // n  # integer division (correct under Python 2 and 3)
            roof -= idx * n
            idx_tuple[i] = idx
        idx_tuple[self.n_dims - 1] = roof
        return tuple(idx_tuple)

    def get_cell_offset(self, idx_tuple):
        '''Get the index of the cell within the flattened list.
        '''
        return self.cell_idx_grid[idx_tuple]

    #-------------------------------------------------------------------------
    # vertex slices
    #-------------------------------------------------------------------------
    vertex_slices = Property

    def _get_vertex_slices(self):
        cell_shape = self.grid_cell_spec.get_cell_shape()
        return tuple([slice(0, None, c - 1) for c in cell_shape])

    #-------------------------------------------------------------------------
    # point_grid - shaping and slicing methods for construction and orientation
    #-------------------------------------------------------------------------
    point_x_grid_slices = Property

    def _get_point_x_grid_slices(self):
        '''Get the slices to be used for the mgrid tool 
        to generate the point grid.
        '''
        ndims = self.n_dims
        shape = self._get_point_grid_shape()
        return tuple([
            slice(float(self.coord_min[i]), float(self.coord_max[i]),
                  complex(0, shape[i])) for i in range(ndims)
        ])

    #-------------------------------------------------------------------------
    # Geometry transformation
    #-------------------------------------------------------------------------
    geo_transform = Callable

    geo_transform_vfn = Property

    def _get_geo_transform_vfn(self):
        # vectorized version of the geo_transform callable
        return frompyfunc(self.geo_transform, 2, 3)

    #-------------------------------------------------------------------------
    # Point coordinates x - is parametric, X - is global
    #-------------------------------------------------------------------------
    point_x_grid = Property(
        depends_on='grid_cell_spec.+,shape,coord_min,coord_max')

    @cached_property
    def _get_point_x_grid(self):
        '''
        Construct the point grid underlying the mesh grid structure.
        '''
        return mgrid[self._get_point_x_grid_slices()]

    point_X_grid = Property

    def _get_point_X_grid(self):
        '''
        Construct the point grid underlying the mesh grid structure.
        '''
        x_dim_shape = self.point_x_grid.shape[1:]
        return array([
            self.point_X_arr[:, i].reshape(x_dim_shape)
            for i in range(self.n_dims)
        ],
                     dtype='float_')

    point_x_arr = Property

    def _get_point_x_arr(self):
        return c_[tuple([x.flatten() for x in self.point_x_grid])]

    point_X_arr = Property(
        depends_on='grid_cell_spec,shape,coord_min,coord_max, geo_transform')

    @cached_property
    def _get_point_X_arr(self):
        '''Get the (n,3) array with point coordinates.
        '''
        # If the geo transform has been set, perform the mapping
        #
        if self.geo_transform:
            return self.geo_transform(self.point_x_arr)
        else:
            return self.point_x_arr

    #-------------------------------------------------------------------------
    # Vertex manipulations
    #-------------------------------------------------------------------------
    vertex_idx_grid = Property

    def _get_vertex_idx_grid(self):
        '''
        Construct the base node grid. The base node has the lowest number within a cell.
        All relevant cell_node numbers can be derived just by adding an array
        of relative node offsets within the cell to the base node
        (see the method get_cell_nodes(cell_num) below).
        '''
        # get the cell shape - number of cell points without the next base
        # point
        subcell_shape = self.grid_cell_spec.get_cell_shape().astype(int) - 1
        # get the element grid shape (number of elements in each dimension)
        cell_idx_grid_shape = self.cell_idx_grid_shape

        # The following code determines the offsets between two neighbouring nodes
        # along each axis. It loops over the axes so that 1- and 2-dimensional
        # grids are covered as well. For a 3D grid, the code below shows what happens
        #
        # 1) get the index offset between two neighboring points on the z-axis
        #
        # z_offset = subcell_shape[2]
        #
        # 2) get the index offset between two neighboring points on the y-axis
        #
        # y_offset = ( cell_idx_grid_shape[2] * subcell_shape[2] + 1) * subcell_shape[1]
        #
        # 3) get the index offset between two neighboring points on the x-axis
        #
        # x_offset = ( cell_idx_grid_shape[2] * subcell_shape[2] + 1 ) * \
        #            ( cell_idx_grid_shape[1] * subcell_shape[1] + 1 ) * \
        #            subcell_shape[0]

        offsets = zeros(self.n_dims, dtype='int_')

        for i in range(self.n_dims - 1, -1, -1):
            offsets[i] = subcell_shape[i]
            for j in range(i + 1, self.n_dims):
                offsets[i] *= (cell_idx_grid_shape[j] * subcell_shape[j] + 1)

        # grid shape (shape + 1)
        gshape = cell_idx_grid_shape + 1

        # Determine the offsets of all base nodes on each axis by multiplying
        # the respective offset with a point enumeration on that axis
        #
        # In 3D corresponds to the following
        #
        #        xi_offsets = x_offset * arange( gshape[0] )
        #        yi_offsets = y_offset * arange( gshape[1] )
        #        zi_offsets = z_offset * arange( gshape[2] )

        all_offsets = [
            offsets[n] * arange(gshape[n]) for n in range(self.n_dims)
        ]

        # Construct the broadcastable slices used for the construction of the
        # base node index grid. In 3D this corresponds to the following:
        #
        # Expand the dimensions of the offsets and sum them up. Using
        # broadcasting, this generates the grid of the base nodes (the last
        # node is cut away as it does not hold any element):
        #
        #        idx_grid = xi_offsets[:-1,None,None] + \
        #                   yi_offsets[None,:-1,None] + \
        #                   zi_offsets[None,None,:-1]

        slices = []
        for i in range(self.n_dims):
            s = [None for j in range(self.n_dims)]
            s[i] = slice(None)
            slices.append(tuple(s))

        vertex_offsets = [
            all_offsets[i][slices[i]] for i in range(self.n_dims)
        ]
        vertex_idx_grid = reduce(add, vertex_offsets)

        # return the indexes of the vertex nodes
        return vertex_idx_grid

    vertex_idx_arr = Property

    def _get_vertex_idx_arr(self):
        '''Get the indices of the nodes at the vertices of the cells.
        The result is a sorted flat array. The base node position within
        the returned array defines the index of the cell.
        '''
        vertex_idx_grid = self.vertex_idx_grid
        return sort(vertex_idx_grid.flatten())

    vertex_x_grid = Property

    def _get_vertex_x_grid(self):
        return array(
            [x_grid[self.vertex_slices] for x_grid in self.point_x_grid])

    vertex_X_grid = Property

    def _get_vertex_X_grid(self):
        return array(
            [X_grid[self.vertex_slices] for X_grid in self.point_X_grid])

    vertex_x_arr = Property

    def _get_vertex_x_arr(self):
        return c_[tuple([x.flatten() for x in self.vertex_x_grid])]

    vertex_X_arr = Property

    def _get_vertex_X_arr(self):
        return c_[tuple([X.flatten() for X in self.vertex_X_grid])]

    #-------------------------------------------------------------------------
    # Cell manipulations
    #-------------------------------------------------------------------------
    base_nodes = Property

    def _get_base_nodes(self):
        '''Get the indices of the nodes that are the bottom-left-front
        vertices of the cells. The result is a sorted flat array. The base
        node position within the returned array defines the index of the cell.
        '''
        vertex_idx_grid = self.vertex_idx_grid
        cutoff_last = [slice(0, -1) for i in range(self.n_dims)]
        base_node_grid = vertex_idx_grid[tuple(cutoff_last)]
        return sort(base_node_grid.flatten())

    cell_node_map = Property(depends_on='shape,grid_cell_spec.+')

    @cached_property
    def _get_cell_node_map(self):
        '''
        Construct an array with the mapping between elements and nodes. 
        Returns the dof for [ cell_idx, node, dim ]
        '''
        idx_cell = self.idx_cell
        node_map = idx_cell.flatten()[self.grid_cell.node_map]
        base_nodes = self.base_nodes

        # Use broadcasting to construct the node map for all elements
        #
        cell_node_map = base_nodes[:, None] + node_map[None, :]
        return cell_node_map

    cell_grid_node_map = Property(depends_on='shape,grid_cell_spec.+')

    @cached_property
    def _get_cell_grid_node_map(self):
        '''
        Return the dof for [ cell_x, cell_y, node, dim ]
        where 
        - cell_x - is the cell index in the first dimension
        - cell_y - is the cell index in the second dimension
        - node - is the node index within the cell
        - dim - is the index of the dof within the node
        '''
        new_shape = tuple(self.shape) + self.cell_node_map.shape[1:]
        return self.cell_node_map.reshape(new_shape)

    def get_cell_point_x_arr(self, cell_idx):
        '''Return the node coordinates included in the cell cell_idx. 
        '''
        iexp = index_exp[self.cell_node_map[cell_idx]]
        return self.point_x_arr[iexp]

    def get_cell_point_X_arr(self, cell_idx):
        '''Return the node coordinates included in the cell cell_idx. 
        '''
        iexp = index_exp[self.cell_node_map[cell_idx]]
        return self.point_X_arr[iexp]

    #-------------------------------------------------------------------------
    # @todo - candidate for deletion - the slicing operator [] is doing this more generally
    #-------------------------------------------------------------------------
    boundary_slices = Property(depends_on='grid_cell_spec.+')

    @cached_property
    def _get_boundary_slices(self):
        '''Get the slices to get the boundary nodes.
        '''
        # slices must correspond to the dimensions
        slices = []
        for i in range(self.n_dims):
            s_low = [slice(None) for j in range(self.n_dims)]
            s_low[i] = 0
            s_high = [slice(None) for j in range(self.n_dims)]
            s_high[i] = -1
            slices.append(s_low)
            slices.append(s_high)
        return slices

    #--------------------------------------------------------------------------
    # Wrappers exporting the grid data to mayavi pipelines
    #--------------------------------------------------------------------------

    def get_cell_mvpoints(self, cell_idx):
        '''Return the node coordinates included in the cell cell_idx. 
        In case that the grid is in reduced dimensions - blow it up with zeros.
        '''
        points = self.get_cell_point_X_arr(cell_idx)

        # augment the points to be 3D
        if self.n_dims < 3:
            mvpoints = zeros((points.shape[0], 3), dtype='float_')
            mvpoints[:, :self.n_dims] = points
            return mvpoints
        else:
            return points

    def _get_mvpoints_grid_shape(self):
        '''Shape of point grid in 3D.
        The information is needed by the mayavi pipeline to use the
        StructuredGrid source - tvtk class.
        '''
        shape = self._get_point_grid_shape()
        return tuple(list(shape) + [1 for n in range(3 - len(shape))])

    def _get_mvpoints(self, swap=True):
        '''Get the points with the deepest index running along the x axis.
        This format is required when putting the point data into the
        vtk structured grid.
        '''
        point_X_grid = self.point_X_grid
        if swap:
            point_X_grid = point_X_grid.swapaxes(1, self.n_dims)

        point_X_arr = c_[tuple([X.flatten() for X in point_X_grid])]

        # augment the points to be 3D
        if self.n_dims < 3:
            mv_points = zeros((point_X_arr.shape[0], 3), dtype='float_')
            mv_points[:, :self.n_dims] = point_X_arr
            return mv_points
        else:
            return point_X_arr

    #-----------------------------------------------------------------
    # Visualization-related methods
    #-----------------------------------------------------------------

    # mvp_point_grid = Trait(MVStructuredGrid)
    #
    # def _mvp_point_grid_default(self):
    #     return MVStructuredGrid(name='Point grid',
    #                             dims=self._get_mvpoints_grid_shape,
    #                             points=self._get_mvpoints)

    refresh_button = Button('Draw')

    @on_trait_change('refresh_button')
    def redraw(self):
        '''Redraw the point grid.
        '''
        self.mvp_point_grid.redraw()

    cell_array = Button('Browse cell array')

    def _cell_array_fired(self):
        cell_array = self.cell_node_map
        self.show_array = CellArray(data=cell_array,
                                    cell_view=CellView(cell_grid=self))
        self.show_array.configure_traits(kind='live')

    #------------------------------------------------------------------
    # UI - related methods
    #------------------------------------------------------------------
    traits_view = View(Item('grid_cell_spec'),
                       Item('shape@'),
                       Item('coord_min'),
                       Item('coord_max'),
                       Item('refresh_button'),
                       Item('cell_array'),
                       resizable=True,
                       scrollable=True,
                       height=0.5,
                       width=0.5)
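
Two numpy idioms carry most of CellGrid: mgrid with a complex step to request a point count instead of a step size, and broadcasting base-node indices against an in-cell node map. A minimal sketch of both for a 2x2 grid of linear quads (all values below are ours, chosen for illustration):

import numpy as np

# complex step: 3j means "3 points, inclusive of both ends"
x, y = np.mgrid[0.:1.:3j, 0.:1.:3j]
print(x.shape)                        # (3, 3): 3x3 points for a 2x2 cell grid

# point enumeration, as in point_idx_grid
point_idx = np.arange(9).reshape(3, 3)

# base node of each cell = its lowest point index (cut the last row/column)
base_nodes = np.sort(point_idx[:-1, :-1].flatten())   # [0 1 3 4]

# in-cell node map of a linear quad, relative to the base node
node_map = np.array([0, 1, 3, 4])

# broadcast: one row of node indices per cell, as in cell_node_map
cell_node_map = base_nodes[:, None] + node_map[None, :]
print(cell_node_map)
# [[0 1 3 4]
#  [1 2 4 5]
#  [3 4 6 7]
#  [4 5 7 8]]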
Example #14
class DensityGateOp(HasStrictTraits):
    """
    This module computes a gate based on a 2D density plot.  The user chooses
    what proportion of events to keep, and the module creates a gate that selects
    that proportion of events in the highest-density bins of the 2D density
    histogram.
    
    Attributes
    ----------
    name : Str
        The operation name; determines the name of the new metadata column
        
    xchannel : Str
        The X channel to apply the binning to.
        
    ychannel : Str
        The Y channel to apply the binning to.

    xscale : {"linear", "logicle", "log"} (default = "linear")
        Re-scale the data on the X axis before fitting the data?  

    yscale : {"linear", "logicle", "log"} (default = "linear")
        Re-scale the data on the Y axis before fitting the data?  
        
    keep : Float (default = 0.9)
        What proportion of events to keep?  Must be ``>0`` and ``<1`` 
        
    bins : Int (default = 100)
        How many bins should there be on each axis?  Must be positive.
        
    min_quantile : Float (default = 0.001)
        Clip values below this quantile
        
    max_quantile : Float (default = 1.0)
        Clip values above this quantile

    sigma : Float (default = 1.0)
        What standard deviation to use for the gaussian blur?
    
    by : List(Str)
        A list of metadata attributes to aggregate the data before estimating
        the gate.  For example, if the experiment has two pieces of metadata,
        ``Time`` and ``Dox``, setting ``by = ["Time", "Dox"]`` will fit a 
        separate gate to each subset of the data with a unique combination of
        ``Time`` and ``Dox``.
        
    Notes
    -----
    This gating method was developed by John Sexton, in Jeff Tabor's lab at
    Rice University.  
    
    From http://taborlab.github.io/FlowCal/fundamentals/density_gate.html,
    the method is as follows:
    
    1. Determines the number of events to keep, based on the user specified 
       gating fraction and the total number of events of the input sample.
       
    2. Divides the 2D channel space into a rectangular grid, and counts the 
       number of events falling within each bin of the grid. The number of 
       counts per bin across all bins comprises a 2D histogram, which is a 
       coarse approximation of the underlying probability density function.
       
    3. Smoothes the histogram generated in Step 2 by applying a Gaussian Blur. 
       Theoretically, the proper amount of smoothing results in a better 
       estimate of the probability density function. Practically, smoothing 
       eliminates isolated bins with high counts, most likely corresponding to 
       noise, and smoothes the contour of the gated region.
       
    4. Selects the bins with the greatest number of events in the smoothed 
       histogram, starting with the highest and proceeding downward until the 
       desired number of events to keep, calculated in step 1, is achieved.
    
    Examples
    --------
    
    .. plot::
        :context: close-figs
        
        Make a little data set.
    
        >>> import cytoflow as flow
        >>> import_op = flow.ImportOp()
        >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs",
        ...                              conditions = {'Dox' : 10.0}),
        ...                    flow.Tube(file = "Plate01/CFP_Well_A4.fcs",
        ...                              conditions = {'Dox' : 1.0})]
        >>> import_op.conditions = {'Dox' : 'float'}
        >>> ex = import_op.apply()
    
    Create and parameterize the operation.
    
    .. plot::
        :context: close-figs
        
        >>> dens_op = flow.DensityGateOp(name = 'Density',
        ...                              xchannel = 'FSC-A',
        ...                              xscale = 'log',
        ...                              ychannel = 'SSC-A',
        ...                              yscale = 'log',
        ...                              keep = 0.5)
        
    Find the bins to keep
    
    .. plot::
        :context: close-figs
        
        >>> dens_op.estimate(ex)
        
    Plot a diagnostic view
    
    .. plot::
        :context: close-figs
        
        >>> dens_op.default_view().plot(ex)
        
    Apply the gate
    
    .. plot::
        :context: close-figs
        
        >>> ex2 = dens_op.apply(ex)
        
    """

    id = Constant('edu.mit.synbio.cytoflow.operations.density')
    friendly_id = Constant("Density Gate")

    name = Str
    xchannel = Str
    ychannel = Str
    xscale = util.ScaleEnum
    yscale = util.ScaleEnum
    keep = util.PositiveFloat(0.9, allow_zero=False)
    bins = util.PositiveInt(100, allow_zero=False)
    min_quantile = util.PositiveFloat(0.001, allow_zero=True)
    max_quantile = util.PositiveFloat(1.0, allow_zero=False)
    sigma = util.PositiveFloat(1.0, allow_zero=False)
    by = List(Str)

    _xscale = Instance(util.IScale, transient=True)
    _yscale = Instance(util.IScale, transient=True)

    _xbins = Array(transient=True)
    _ybins = Array(transient=True)

    _keep_xbins = Dict(Any, Array, transient=True)
    _keep_ybins = Dict(Any, Array, transient=True)
    _histogram = Dict(Any, Array, transient=True)

    def estimate(self, experiment, subset=None):
        """
        Split the data set into bins and determine which ones to keep.
        
        Parameters
        ----------
        experiment : Experiment
            The :class:`.Experiment` to use to estimate the gate parameters.
            
        subset : Str (default = None)
            If set, determine the gate parameters on only a subset of the
            ``experiment`` parameter.
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if self.xchannel not in experiment.data:
            raise util.CytoflowOpError(
                'xchannel',
                "Column {0} not found in the experiment".format(self.xchannel))

        if self.ychannel not in experiment.data:
            raise util.CytoflowOpError(
                'ychannel',
                "Column {0} not found in the experiment".format(self.ychannel))

        if self.min_quantile > 1.0:
            raise util.CytoflowOpError('min_quantile',
                                       "min_quantile must be <= 1.0")

        if self.max_quantile > 1.0:
            raise util.CytoflowOpError('max_quantile',
                                       "max_quantile must be <= 1.0")

        if not (self.max_quantile > self.min_quantile):
            raise util.CytoflowOpError('max_quantile',
                                       "max_quantile must be > min_quantile")

        if self.keep > 1.0:
            raise util.CytoflowOpError('keep', "keep must be <= 1.0")

        for b in self.by:
            if b not in experiment.conditions:
                raise util.CytoflowOpError(
                    'by', "Aggregation metadata {} not found, "
                    "must be one of {}".format(b, experiment.conditions))

        if subset:
            try:
                experiment = experiment.query(subset)
            except Exception:
                raise util.CytoflowOpError(
                    'subset', "Subset string '{0}' isn't valid".format(subset))

            if len(experiment) == 0:
                raise util.CytoflowOpError(
                    'subset',
                    "Subset string '{0}' returned no events".format(subset))

        if self.by:
            groupby = experiment.data.groupby(self.by)
        else:
            # use a lambda expression to return a group that contains
            # all the events
            groupby = experiment.data.groupby(lambda _: True)

        # get the scale. estimate the scale params for the ENTIRE data set,
        # not subsets we get from groupby().  And we need to save it so that
        # the data is transformed the same way when we apply()
        self._xscale = xscale = util.scale_factory(self.xscale,
                                                   experiment,
                                                   channel=self.xchannel)
        self._yscale = yscale = util.scale_factory(self.yscale,
                                                   experiment,
                                                   channel=self.ychannel)

        xlim = (xscale.clip(experiment[self.xchannel].quantile(
            self.min_quantile)),
                xscale.clip(experiment[self.xchannel].quantile(
                    self.max_quantile)))

        ylim = (yscale.clip(experiment[self.ychannel].quantile(
            self.min_quantile)),
                yscale.clip(experiment[self.ychannel].quantile(
                    self.max_quantile)))

        self._xbins = xbins = xscale.inverse(
            np.linspace(xscale(xlim[0]), xscale(xlim[1]), self.bins))
        self._ybins = ybins = yscale.inverse(
            np.linspace(yscale(ylim[0]), yscale(ylim[1]), self.bins))

        histogram = {}
        for group, group_data in groupby:
            if len(group_data) == 0:
                raise util.CytoflowOpError(
                    'by', "Group {} had no data".format(group))

            h, _, _ = np.histogram2d(group_data[self.xchannel],
                                     group_data[self.ychannel],
                                     bins=[xbins, ybins])

            h = scipy.ndimage.gaussian_filter(h, sigma=self.sigma)

            i = scipy.stats.rankdata(h, method="ordinal") - 1
            i = np.unravel_index(np.argsort(-i), h.shape)

            goal_count = self.keep * len(group_data)
            curr_count = 0
            num_bins = 0

            while (curr_count < goal_count and num_bins < i[0].size):
                curr_count += h[i[0][num_bins], i[1][num_bins]]
                num_bins += 1

            self._keep_xbins[group] = i[0][0:num_bins]
            self._keep_ybins[group] = i[1][0:num_bins]
            histogram[group] = h

        self._histogram = histogram

    def apply(self, experiment):
        """
        Creates a new condition based on membership in the gate that was
        parameterized with :meth:`estimate`.
        
        Parameters
        ----------
        experiment : Experiment
            the :class:`.Experiment` to apply the gate to.
            
        Returns
        -------
        Experiment
            a new :class:`.Experiment` with the new gate applied.
        """

        if experiment is None:
            raise util.CytoflowOpError('experiment', "No experiment specified")

        if not self.xchannel:
            raise util.CytoflowOpError('xchannel', "Must set X channel")

        if not self.ychannel:
            raise util.CytoflowOpError('ychannel', "Must set Y channel")

        # make sure name got set!
        if not self.name:
            raise util.CytoflowOpError(
                'name', "You have to set the gate's name "
                "before applying it!")

        if self.name != util.sanitize_identifier(self.name):
            raise util.CytoflowOpError(
                'name',
                "Name can only contain letters, numbers and underscores.")

        if self.name in experiment.data.columns:
            raise util.CytoflowOpError(
                'name',
                "Experiment already has a column named {0}".format(self.name))

        if not (self._xbins.size and self._ybins.size and self._keep_xbins):
            raise util.CytoflowOpError(
                None, "No gate estimate found.  Did you forget to "
                "call estimate()?")

        if not self._xscale:
            raise util.CytoflowOpError(
                None, "Couldn't find _xscale.  What happened??")

        if not self._yscale:
            raise util.CytoflowOpError(
                None, "Couldn't find _yscale.  What happened??")

        if self.xchannel not in experiment.data:
            raise util.CytoflowOpError(
                'xchannel',
                "Column {0} not found in the experiment".format(self.xchannel))

        if self.ychannel not in experiment.data:
            raise util.CytoflowOpError(
                'ychannel',
                "Column {0} not found in the experiment".format(self.ychannel))

        for b in self.by:
            if b not in experiment.conditions:
                raise util.CytoflowOpError(
                    'by', "Aggregation metadata {} not found, "
                    "must be one of {}".format(b, experiment.conditions))

        if self.by:
            groupby = experiment.data.groupby(self.by)
        else:
            # use a lambda expression to return a group that
            # contains all the events
            groupby = experiment.data.groupby(lambda _: True)

        event_assignments = pd.Series([False] * len(experiment), dtype="bool")

        for group, group_data in groupby:
            if group not in self._keep_xbins:
                # there weren't any events in this group, so we didn't get
                # an estimate
                continue

            group_idx = groupby.groups[group]

            cX = pd.cut(group_data[self.xchannel],
                        self._xbins,
                        include_lowest=True,
                        labels=False).reset_index(drop=True)
            cY = pd.cut(group_data[self.ychannel],
                        self._ybins,
                        include_lowest=True,
                        labels=False).reset_index(drop=True)

            group_keep = pd.Series([False] * len(group_data))

            keep_x = self._keep_xbins[group]
            keep_y = self._keep_ybins[group]

            for (xbin, ybin) in zip(keep_x, keep_y):
                group_keep = group_keep | ((cX == xbin) & (cY == ybin))

            event_assignments.iloc[group_idx] = group_keep

        new_experiment = experiment.clone()

        new_experiment.add_condition(self.name, "bool", event_assignments)

        new_experiment.history.append(
            self.clone_traits(transient=lambda _: True))
        return new_experiment

    def default_view(self, **kwargs):
        """
        Returns a diagnostic plot of the density gate.
         
        Returns
        -------
        IView
            a diagnostic view, call :meth:`~DensityGateView.plot` to see the 
            diagnostic plot.
        """
        v = DensityGateView(op=self)
        v.trait_set(**kwargs)
        return v
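
Stripped of the Traits plumbing, steps 2-4 of the Notes above reduce to a few array operations. A hedged standalone sketch of the bin-selection core on synthetic data (a vectorized restatement, not the library's exact code path):

import numpy as np
import scipy.ndimage

rng = np.random.default_rng(0)
x = rng.normal(0, 1, 10000)
y = rng.normal(0, 1, 10000)
keep, bins, sigma = 0.5, 100, 1.0

h, xedges, yedges = np.histogram2d(x, y, bins=bins)   # step 2: 2D histogram
h = scipy.ndimage.gaussian_filter(h, sigma=sigma)     # step 3: Gaussian blur

# step 4: visit bins from densest to sparsest until `keep` is reached
order = np.unravel_index(np.argsort(h, axis=None)[::-1], h.shape)
counts = np.cumsum(h[order])
num_bins = min(np.searchsorted(counts, keep * len(x)) + 1, counts.size)
keep_x, keep_y = order[0][:num_bins], order[1][:num_bins]
print(num_bins, 'bins keep ~%.0f%% of events'
      % (100 * counts[num_bins - 1] / len(x)))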
Example #15
class NVFinder(ManagedJob, HasTraits):

    submit_button = Button(
        label='correct targets',
        desc=
        'Performs a refocus for all targets in auto focus (without recording the drift).'
    )
    remove_button = Button(label='abort',
                           desc='Stop the running refocus measurement.')

    Sigma = Range(low=0.01,
                  high=10.,
                  value=0.1,
                  desc='Sigma of Gaussian for smoothing [micron]',
                  label='sigma [micron]',
                  mode='slider',
                  auto_set=False,
                  enter_set=True)
    Threshold = Range(low=0,
                      high=1000000,
                      value=50000,
                      desc='Threshold [counts/s]',
                      label='threshold [counts/s]',
                      auto_set=False,
                      enter_set=True)
    Margin = Range(low=0.0,
                   high=100.,
                   value=4.,
                   desc='Margin [micron]',
                   label='margin [micron]',
                   auto_set=False,
                   enter_set=True)

    RawPlot = Instance(Component)
    SmoothPlot = Instance(Component)
    RegionsPlot = Instance(Component)

    X = Array(dtype=float)
    Y = Array(dtype=float)
    z = Float()
    Raw = Array(dtype=float)
    Smooth = Property(trait=Array(), depends_on='Raw,Sigma')
    Thresh = Property(trait=Array(), depends_on='Raw,Smooth,Sigma,Threshold')
    RegionsAndLabels = Property(trait=Tuple(Array(), Array()),
                                depends_on='Raw,Smooth,Thresh,Sigma,Threshold')
    Positions = Property(trait=Array(), depends_on='Raw,X,Y,Sigma,Threshold')
    XPositions = Property(trait=Array())
    YPositions = Property(trait=Array())

    ImportData = Button()
    ExportTargets = Button()

    traits_view = View(HGroup(
        Item('submit_button', show_label=False),
        Item('remove_button', show_label=False),
        Item('priority', enabled_when='state != "run"'),
        Item('state', style='readonly'),
    ),
                       Tabbed(
                           Item('RawPlot',
                                editor=ComponentEditor(),
                                show_label=False,
                                resizable=True),
                           Item('SmoothPlot',
                                editor=ComponentEditor(),
                                show_label=False,
                                resizable=True),
                           Item('RegionsPlot',
                                editor=ComponentEditor(),
                                show_label=False,
                                resizable=True)),
                       HGroup(Item('ImportData', show_label=False),
                              Item('ExportTargets', show_label=False),
                              Item('Margin')),
                       Item('Sigma'),
                       Item('Threshold'),
                       title='NV Finder',
                       width=800,
                       height=700,
                       buttons=['OK'],
                       resizable=True)

    def __init__(self, confocal, auto_focus):
        super(NVFinder, self).__init__()
        self.confocal = confocal
        self.auto_focus = auto_focus

    def _run(self):

        try:  # refocus all targets
            self.state = 'run'

            af = self.auto_focus
            confocal = af.confocal

            af.periodic_focus = False
            af.forget_drift()
            af.current_target = None
            for target in af.targets:
                if threading.current_thread().stop_request.isSet():
                    break
                coordinates = af.targets[target]
                confocal.x, confocal.y, confocal.z = coordinates
                corrected_coordinates = af.focus()
                af.targets[target] = corrected_coordinates
                af.trait_property_changed('targets', af.targets)
                logging.getLogger().debug('NV finder: auto focus target ' +
                                          str(target) + ': %.2f, %.2f, %.2f' %
                                          tuple(corrected_coordinates))
            self.state = 'idle'

        except Exception:  # if anything fails, recover
            logging.getLogger().exception('Error in NV finder.')
            self.state = 'error'

    @cached_property
    def _get_RegionsAndLabels(self):
        s = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
        regions, labels = ndimage.label(self.Thresh, s)
        return regions, labels

    @cached_property
    def _get_Positions(self):
        positions = []
        for i in range(1, self.RegionsAndLabels[1] + 1):
            y, x = ndimage.center_of_mass(
                (self.RegionsAndLabels[0] == i).astype(int))
            if y < 0:
                y = 0
            if y >= len(self.Y):
                y = len(self.Y) - 1
            if x < 0:
                x = 0
            if x >= len(self.X):
                x = len(self.X) - 1
            positions.append((self.Y[int(y)], self.X[int(x)]))
        return numpy.array(positions)

    def _get_XPositions(self):
        if len(self.Positions) == 0:
            return numpy.array(())
        else:
            return self.Positions[:, 1]

    def _get_YPositions(self):
        if len(self.Positions) == 0:
            return numpy.array(())
        else:
            return self.Positions[:, 0]

    @cached_property
    def _get_Thresh(self):
        return (self.Smooth > self.Threshold).astype(int)

    @cached_property
    def _get_Smooth(self):
        return ndimage.gaussian_filter(
            self.Raw, int(self.Sigma / (self.X[1] - self.X[0])))

    def _Raw_default(self):
        return self.confocal.image
        #return numpy.asarray(Image.open('original.png'))[:,:,0]

    def _X_default(self):
        return self.confocal.X
        #return numpy.arange(self.Raw.shape[1])/100.

    def _Y_default(self):
        return self.confocal.Y
        #return numpy.arange(self.Raw.shape[0])/100.

    def _z_default(self):
        return self.confocal.z
        #return 0.0

    def _RawPlot_default(self):

        plotdata = ArrayPlotData(imagedata=self.Raw,
                                 x=self.XPositions,
                                 y=self.YPositions)
        plot = Plot(plotdata, width=500, height=500, resizable='hv')
        RawImage = plot.img_plot(
            'imagedata',
            colormap=gray,
            xbounds=(self.X[0], self.X[-1]),
            ybounds=(self.Y[0], self.Y[-1]),
        )[0]
        RawImage.x_mapper.domain_limits = (self.X[0], self.X[-1])
        RawImage.y_mapper.domain_limits = (self.Y[0], self.Y[-1])
        RawImage.overlays.append(ZoomTool(RawImage))
        scatterplot = plot.plot(('x', 'y'),
                                type='scatter',
                                marker='plus',
                                color='yellow')
        colormap = RawImage.color_mapper
        colorbar = ColorBar(index_mapper=LinearMapper(range=colormap.range),
                            color_mapper=colormap,
                            plot=plot,
                            orientation='v',
                            resizable='v',
                            width=10,
                            padding=20)
        colorbar.padding_top = plot.padding_top
        colorbar.padding_bottom = plot.padding_bottom

        self.RawData = plotdata
        self.RawImage = RawImage

        container = HPlotContainer(padding=20,
                                   fill_padding=True,
                                   use_backbuffer=True)
        container.add(colorbar)
        container.add(plot)

        return container

    def _SmoothPlot_default(self):

        plotdata = ArrayPlotData(imagedata=self.Smooth)
        plot = Plot(plotdata, width=500, height=500, resizable='hv')
        SmoothImage = plot.img_plot(
            'imagedata',
            colormap=gray,
            xbounds=(self.X[0], self.X[-1]),
            ybounds=(self.Y[0], self.Y[-1]),
        )[0]
        SmoothImage.x_mapper.domain_limits = (self.X[0], self.X[-1])
        SmoothImage.y_mapper.domain_limits = (self.Y[0], self.Y[-1])
        SmoothImage.overlays.append(ZoomTool(SmoothImage))

        colormap = SmoothImage.color_mapper
        colorbar = ColorBar(index_mapper=LinearMapper(range=colormap.range),
                            color_mapper=colormap,
                            plot=plot,
                            orientation='v',
                            resizable='v',
                            width=10,
                            padding=20)
        colorbar.padding_top = plot.padding_top
        colorbar.padding_bottom = plot.padding_bottom

        self.SmoothImage = SmoothImage

        container = HPlotContainer(padding=20,
                                   fill_padding=True,
                                   use_backbuffer=True)
        container.add(colorbar)
        container.add(plot)

        return container

    def _RegionsPlot_default(self):

        plotdata = ArrayPlotData(imagedata=self.RegionsAndLabels[0],
                                 x=self.XPositions,
                                 y=self.YPositions)
        plot = Plot(plotdata, width=500, height=500, resizable='hv')
        RegionsImage = plot.img_plot(
            'imagedata',
            colormap=gray,
            xbounds=(self.X[0], self.X[-1]),
            ybounds=(self.Y[0], self.Y[-1]),
        )[0]
        RegionsImage.x_mapper.domain_limits = (self.X[0], self.X[-1])
        RegionsImage.y_mapper.domain_limits = (self.Y[0], self.Y[-1])
        RegionsImage.overlays.append(ZoomTool(RegionsImage))

        scatterplot = plot.plot(('x', 'y'),
                                type='scatter',
                                marker='plus',
                                color='yellow')

        colormap = RegionsImage.color_mapper
        colorbar = ColorBar(index_mapper=LinearMapper(range=colormap.range),
                            color_mapper=colormap,
                            plot=plot,
                            orientation='v',
                            resizable='v',
                            width=10,
                            padding=20)
        colorbar.padding_top = plot.padding_top
        colorbar.padding_bottom = plot.padding_bottom

        self.RegionsData = plotdata
        self.RegionsImage = RegionsImage

        container = HPlotContainer(padding=20,
                                   fill_padding=True,
                                   use_backbuffer=True)
        container.add(colorbar)
        container.add(plot)

        return container

    def _Sigma_changed(self):
        self._get_Positions()

    def _Threshold_changed(self):
        self._get_Positions()

    # automatic update of plots

    def _Smooth_changed(self):
        self.SmoothImage.value.set_data(self.Smooth)

    def _RegionsAndLabels_changed(self):
        self.RegionsData.set_data('imagedata', self.RegionsAndLabels[0])

    def _Positions_changed(self):
        self.RegionsData.set_data('x', self.XPositions)
        self.RegionsData.set_data('y', self.YPositions)
        self.RawData.set_data('x', self.XPositions)
        self.RawData.set_data('y', self.YPositions)

    @on_trait_change('ImportData')
    def Import(self):
        self.Raw = self._Raw_default()
        self.X = self._X_default()
        self.Y = self._Y_default()
        self.z = self._z_default()
        self.RawImage.index.set_data(self.X, self.Y)
        self.SmoothImage.index.set_data(self.X, self.Y)
        self.RegionsImage.index.set_data(self.X, self.Y)
        self.RawData.set_data('imagedata', self.Raw)
        self._get_Positions()

    @on_trait_change('ExportTargets')
    def Export(self):
        z = self.z
        for i, pos in enumerate(self.Positions):
            y, x = pos
            if (self.X[0] + self.Margin < x < self.X[-1] - self.Margin
                    and self.Y[0] + self.Margin < y < self.Y[-1] - self.Margin):
                self.auto_focus.add_target(str(i), numpy.array((x, y, z)))
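
The Smooth/Thresh/RegionsAndLabels chain above is a standard blob-detection pipeline: Gaussian blur, binary threshold, connected-component labelling, then one centre of mass per label. A compact hedged sketch on synthetic data (all numbers below are made up for illustration):

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(1)
image = rng.poisson(5.0, (64, 64)).astype(float)
image[20:24, 30:34] += 60.0          # one bright synthetic spot

smooth = ndimage.gaussian_filter(image, sigma=2)
thresh = (smooth > 20).astype(int)

# 8-connectivity, matching the all-ones structure in the example above
structure = np.ones((3, 3), dtype=int)
labels, n = ndimage.label(thresh, structure)
centres = ndimage.center_of_mass(thresh, labels, range(1, n + 1))
print(n, centres)                    # 1 blob, centred near (21.5, 31.5)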
Example #16
class Surf(Pipeline):
    """
    Plots a surface using regularly-spaced elevation data supplied as a 2D
    array.

    **Function signatures**::

        surf(s, ...)
        surf(x, y, s, ...)
        surf(x, y, f, ...)

    s is the elevation matrix, a 2D array, where indices along the first
    array axis represent x locations, and indices along the second array
    axis represent y locations.

    x and y can be 1D or 2D arrays such as returned by numpy.ogrid or
    numpy.mgrid. Arrays returned by numpy.meshgrid require a transpose
    first to obtain correct indexing order.
    The points should be located on an orthogonal grid (possibly
    non-uniform). In other words, all the points sharing a same
    index in the s array need to have the same x or y value. For
    arbitrary-shaped position arrays (non-orthogonal grids), see the mesh
    function.

    If only 1 array s is passed, the x and y arrays are assumed to be
    made from the indices of arrays, and a uniformly-spaced data set is
    created.

    If 3 positional arguments are passed the last one must be an array s,
    or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values."""

    _source_function = Callable(array2d_source)

    _pipeline = [WarpScalarFactory, PolyDataNormalsFactory, SurfaceFactory]

    warp_scale = Any(1,
                     help="""scale of the z axis (warped from
                        the value of the scalar). By default this scale
                        is a float value.

                        If you specify 'auto', the scale is calculated to
                        give a pleasant aspect ratio to the plot,
                        whatever the bounds of the data.

                        If you specify a value for warp_scale in
                        addition to an extent, the warp scale will be
                        determined by the warp_scale, and the plot be
                        positioned along the z axis with the zero of the
                        data centered on the center of the extent. If you
                        are using explicit extents, this is the best way
                        to control the vertical scale of your plots.

                        If you want to control the extent (or range)
                        of the surface object, rather than its scale,
                        see the `extent` keyword argument.
                        """)

    mask = Array(help="""boolean mask array to suppress some data points.
                 Note: this works based on colormapping of scalars and will
                 not work if you specify a solid color using the
                 `color` keyword.""")

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to scale automatically the axis.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        # Deal with both explicit warp scale and extent, this is
        # slightly hairy. The wigner example is a good test case for
        # this.
        if 'warp_scale' not in kwargs and 'extent' not in kwargs:
            try:
                xi, xf, yi, yf, _, _ = self.source.data.bounds
                zi, zf = self.source.data.scalar_range
            except AttributeError:
                xi, xf, yi, yf, _, _ = self.source.image_data.bounds
                zi, zf = self.source.image_data.scalar_range
            aspect_ratios = [(zf - zi) / (xf - xi), (zf - zi) / (yf - yi)]
            if min(aspect_ratios) < 0.01 or max(aspect_ratios) > 100:
                print('Warning: the range of your scalar values differs by '
                      'more than a factor of 100 from the range of the grid '
                      'values and you did not specify a warp_scale. You '
                      'could try warp_scale="auto".')
        if ('warp_scale' in kwargs and kwargs['warp_scale'] != 'auto'
                and 'extent' in kwargs):
            # XXX: I should use the logging module.
            print('Warning: both warp_scale and extent keyword arguments '
                  'specified; the z bounds of the extents will be overridden.')
            xi, xf, yi, yf, zi, zf = kwargs['extent']
            zo = 0.5 * (zi + zf)
            try:
                si, sf = self.source.data.scalar_range
            except AttributeError:
                si, sf = self.source.image_data.scalar_range
            z_span = kwargs['warp_scale'] * abs(sf - si)
            zi = zo + si * kwargs['warp_scale']
            zf = zi + z_span
            kwargs['extent'] = (xi, xf, yi, yf, zi, zf)
            kwargs['warp_scale'] = 1
        elif kwargs.get('warp_scale', 1) == 'auto':
            if 'extent' in kwargs:
                if 'warp_scale' in kwargs:
                    print("Warning: extent specified, warp_scale='auto' " \
                    "ignored.")
            else:
                try:
                    xi, xf, yi, yf, _, _ = self.source.data.bounds
                    zi, zf = self.source.data.scalar_range
                except AttributeError:
                    xi, xf, yi, yf, _, _ = self.source.image_data.bounds
                    zi, zf = self.source.image_data.scalar_range
                z0 = zf - zi
                dz = 0.3 * ((xf - xi) + (yf - yi))
                zi = z0 - 0.5 * dz
                zf = z0 + 0.5 * dz
                kwargs['extent'] = (xi, xf, yi, yf, zi, zf)
            kwargs['warp_scale'] = 1.
        self.store_kwargs(kwargs)

        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        return self.build_pipeline()
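A minimal usage sketch for the class above, assuming the standard mayavi.mlab entry point and a working GUI backend:

import numpy as np
from mayavi import mlab

x, y = np.mgrid[-3:3:100j, -3:3:100j]      # orthogonal grid positions
s = np.sin(x * y)                          # elevation matrix, shape (100, 100)
mlab.surf(x, y, s, warp_scale='auto')      # 'auto' picks a pleasant aspect ratio
mlab.show()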
Example #17
class Image(HasStrictTraits):
    """ An SEM image stored in a file. """

    #: The filename of the image.
    filename = File(exists=True)

    #: The ID of the sample that is being imaged.
    sample_id = Str()

    #: The name of the operator who acquired the image.
    operator = Str("N/A")

    #: The date the image was acquired.
    date_acquired = Date()

    #: The size of the image.
    scan_size = Tuple(Float, Float)

    #: The width of the image.
    scan_width = Property(Float, depends_on='scan_size')

    #: The height of the image.
    scan_height = Property(Float, depends_on='scan_size')

    #: The image as a 2D numpy array
    image = Array(shape=(None, None), dtype='uint8')

    #: The area of each pixel.
    pixel_area = Property(Float, depends_on='scan_height,scan_width,image')

    #: The histogram of pixel intensities.
    histogram = Property(Array, depends_on='image')

    def threshold(self, low=0, high=255):
        """ Compute a threshold mask for the array. """
        return (self.image >= low) & (self.image <= high)

    # Trait observers

    @observe('filename')
    def read_image(self, event):
        pil_image = PILImage.open(self.filename).convert("L")
        self.image = np.array(pil_image)

    # Trait default methods

    def _date_acquired_default(self):
        return datetime.date.today()

    def _scan_size_default(self):
        return (1e-5, 1e-5)

    # Trait property methods

    def _get_scan_width(self):
        return self.scan_size[0]

    def _set_scan_width(self, value):
        self.scan_size = (value, self.scan_size[1])

    def _get_scan_height(self):
        return self.scan_size[1]

    def _set_scan_height(self, value):
        self.scan_size = (self.scan_size[0], value)

    def _get_pixel_area(self):
        if self.image.size > 0:
            return self.scan_height * self.scan_width / self.image.size
        else:
            return 0

    @cached_property
    def _get_histogram(self):
        hist, bins = np.histogram(
            self.image,
            bins=256,
            range=(0, 256),
            density=True,
        )
        return hist

    # TraitsUI view declaration

    view = View(
        Item('filename'),
        Item('sample_id', label='Sample ID'),
        Item('operator'),
        Item('date_acquired'),
        Item('scan_width', label='Width (m):'),
        Item('scan_height', label='Height (m):'),
        Item(
            'pixel_area',
            # pixel_area is in m^2; 1 m^2 == 1e12 µm².
            format_func=lambda area: "{:0.3f} µm²".format(area * 1e12),
            style='readonly',
        ),
    )
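A brief usage sketch; the file name sample.png is only illustrative and must point to an existing grayscale image:

image = Image(filename='sample.png', sample_id='S-001', operator='jdoe')
mask = image.threshold(low=50, high=200)    # boolean mask of mid-range pixels
print(image.scan_width, image.scan_height)  # physical scan size in metres
print(image.histogram.argmax())             # most frequent pixel intensity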
Example #18
class Mesh(Pipeline):
    """
    Plots a surface using grid-spaced data supplied as 2D arrays.

    **Function signatures**::

        mesh(x, y, z, ...)

    x, y, z are 2D arrays, all of the same shape, giving the positions of
    the vertices of the surface. The connectivity between these points is
    implied by the connectivity on the arrays.

    For simple structures (such as orthogonal grids) prefer the `surf`
    function, as it will create more efficient data structures. For meshes
    defined by triangles rather than regular implicit connectivity, see the
    `triangular_mesh` function.
    """

    scale_mode = Trait('none', {
        'none': 'data_scaling_off',
        'scalar': 'scale_by_scalar',
        'vector': 'scale_by_vector'
    },
                       help="""the scaling mode for the glyphs
                            ('vector', 'scalar', or 'none').""")

    scale_factor = CFloat(0.05,
                          desc="""scale factor of the glyphs used to represent
                        the vertices, in fancy_mesh mode. """)

    tube_radius = Trait(0.025,
                        CFloat,
                        None,
                        help="""radius of the tubes used to represent the
                        lines, in mesh mode. If None, simple lines are used.
                        """)

    scalars = Array(help="""optional scalar data.""")

    mask = Array(help="""boolean mask array to suppress some data points.
                 Note: this works based on colormapping of scalars and will
                 not work if you specify a solid color using the
                 `color` keyword.""")

    representation = Trait(
        'surface',
        'wireframe',
        'points',
        'mesh',
        'fancymesh',
        desc="""the representation type used for the surface.""")

    _source_function = Callable(grid_source)

    _pipeline = [
        ExtractEdgesFactory, GlyphFactory, TubeFactory, SurfaceFactory
    ]

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to choose whether to apply
        filters.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        self.store_kwargs(kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        if self.kwargs['representation'] not in ('mesh', 'fancymesh'):
            self.pipeline.remove(ExtractEdgesFactory)
            self.pipeline.remove(TubeFactory)
            self.pipeline.remove(GlyphFactory)
            self.pipeline = [
                PolyDataNormalsFactory,
            ] + self.pipeline
        else:
            if self.kwargs['tube_radius'] is None:
                self.pipeline.remove(TubeFactory)
            if self.kwargs['representation'] != 'fancymesh':
                self.pipeline.remove(GlyphFactory)
            self.kwargs['representation'] = 'surface'
        return self.build_pipeline()
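A minimal usage sketch, again assuming the standard mayavi.mlab entry point:

import numpy as np
from mayavi import mlab

phi, theta = np.mgrid[0:np.pi:51j, 0:2 * np.pi:51j]
x = np.sin(phi) * np.cos(theta)    # three 2D arrays of identical shape
y = np.sin(phi) * np.sin(theta)
z = np.cos(phi)
mlab.mesh(x, y, z, representation='wireframe')   # a unit sphere as a wire mesh
mlab.show()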
Example #19
class Elementary1DRule(NDimRule):
    """ Rule implementing an elementary 1D cellular automata.

    This uses Wolfram's rule numbering scheme to identify the rules and
    scipy.ndimage to handle the boundary conditions.

    Notes
    -----

    See `Wikipedia
    <https://en.wikipedia.org/wiki/Elementary_cellular_automaton>`_ for further
    information on how these automata work.
    """

    # Elementary1DRule Traits ------------------------------------------------

    #: The number of the rule.
    rule_number = Range(0, 255)

    #: The state value for "empty" cells.
    empty_state = StateValue(0)

    #: The state value for "filled" cells.
    filled_state = StateValue(1)

    #: The bit-mask corresponding to the rule.
    bit_mask = Property(Array(shape=(8,), dtype=bool),
                        depends_on='rule_number')

    #: The boundary mode to use.
    boundary = Enum('empty', 'filled', 'nearest', 'wrap', 'reflect')

    # NDimRule Traits --------------------------------------------------------

    #: These are 1-dimensional only rules.
    ndim = Constant(1)

    # ------------------------------------------------------------------------
    # Elementary1DRule interface
    # ------------------------------------------------------------------------

    def reflect(self):
        """ Reflect the cellular automaton left-to-right. """
        self.bit_mask = self.bit_mask[REVERSE_PERMUTATION]

    def complement(self):
        """ Complement the cellular automaton, swapping 1's and 0's throughout. """
        self.bit_mask = ~self.bit_mask[::-1]

    # ------------------------------------------------------------------------
    # AbstractRule interface
    # ------------------------------------------------------------------------

    def step(self, states):
        """ Apply the specified rule to the states.

        Parameters
        ----------
        states : array
            An array holding the current states of the automata.

        Returns
        -------
        states : array
            The new states of the automata after the rule has been applied.
        """
        states = super(Elementary1DRule, self).step(states)

        wrap_args = {'mode': self.boundary}
        if self.boundary == 'empty':
            wrap_args['mode'] = 'constant'
            wrap_args['cval'] = 0
        elif self.boundary == 'filled':
            wrap_args['mode'] = 'constant'
            wrap_args['cval'] = 1

        filled = (states == self.filled_state)
        filled = ndimage.generic_filter1d(
            filled, self._rule_filter, filter_size=3, **wrap_args)

        states = np.full(filled.shape, self.empty_state, dtype='uint8')
        states[filled] = self.filled_state
        return states

    # ------------------------------------------------------------------------
    # Private interface
    # ------------------------------------------------------------------------

    def _rule_filter(self, iline, oline):
        """ Kernel to compute values in generic filter """
        # Encode each (left, center, right) neighborhood as a 3-bit index 0-7
        # and look the new state up in the rule's bit mask.
        index = (iline[:-2] * 4 + iline[1:-1] * 2 + iline[2:]).astype('uint8')
        oline[...] = self.bit_mask[index]

    # Trait properties -------------------------------------------------------

    @cached_property
    def _get_bit_mask(self):
        bits = np.unpackbits(np.array([self.rule_number], dtype='uint8'))[::-1]
        return bits.astype(bool)

    def _set_bit_mask(self, bits):
        bits = np.asarray(bits, dtype=bool)
        self.rule_number = int(np.packbits(bits[::-1])[0])
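A hypothetical usage sketch for the class above; NDimRule and StateValue come from the surrounding framework and are assumed importable. Rule 30 grown from a single filled cell:

import numpy as np

rule = Elementary1DRule(rule_number=30, boundary='wrap')
states = np.zeros(11, dtype='uint8')
states[5] = 1                      # one filled cell in the middle
for _ in range(4):
    states = rule.step(states)
    print(''.join('#' if s else '.' for s in states))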
Example #20
class FiducialsPanel(HasPrivateTraits):
    """Set fiducials on an MRI surface"""
    model = Instance(MRIHeadWithFiducialsModel)

    fid_file = DelegatesTo('model')
    fid_fname = DelegatesTo('model')
    lpa = DelegatesTo('model')
    nasion = DelegatesTo('model')
    rpa = DelegatesTo('model')
    can_save = DelegatesTo('model')
    can_save_as = DelegatesTo('model')
    can_reset = DelegatesTo('model')
    fid_ok = DelegatesTo('model')
    locked = DelegatesTo('model', 'lock_fiducials')

    set = Enum('LPA', 'Nasion', 'RPA')
    current_pos = Array(float, (1, 3))  # for editing

    save_as = Button(label='Save As...')
    save = Button(label='Save')
    reset_fid = Button(label="Reset to File")

    headview = Instance(HeadViewController)
    hsp_obj = Instance(SurfaceObject)

    picker = Instance(object)

    # the layout of the dialog created
    view = View(
        VGroup(Item('fid_file', label='Fiducials File'),
               Item('fid_fname', show_label=False, style='readonly'),
               Item('set', style='custom'),
               Item('current_pos', label='Pos'),
               HGroup(Item('save',
                           enabled_when='can_save',
                           tooltip="If a filename is currently "
                           "specified, save to that file, otherwise "
                           "save to the default file name"),
                      Item('save_as', enabled_when='can_save_as'),
                      Item('reset_fid', enabled_when='can_reset'),
                      show_labels=False),
               enabled_when="locked==False"))

    def __init__(self, *args, **kwargs):
        super(FiducialsPanel, self).__init__(*args, **kwargs)
        self.sync_trait('lpa', self, 'current_pos', mutual=True)

    def _reset_fid_fired(self):
        self.model.reset = True

    def _save_fired(self):
        self.model.save()

    def _save_as_fired(self):
        if self.fid_file:
            default_path = self.fid_file
        else:
            default_path = self.model.default_fid_fname

        dlg = FileDialog(action="save as",
                         wildcard=fid_wildcard,
                         default_path=default_path)
        dlg.open()
        if dlg.return_code != OK:
            return

        path = dlg.path
        if not path.endswith('.fif'):
            path = path + '.fif'
            if os.path.exists(path):
                answer = confirm(
                    None, "The file %r already exists. Should it "
                    "be replaced?" % path, "Overwrite File?")
                if answer != YES:
                    return

        self.model.save(path)

    def _on_pick(self, picker):
        if self.locked:
            return

        self.picker = picker
        n_pos = len(picker.picked_positions)

        if n_pos == 0:
            logger.debug("GUI: picked empty location")
            return

        if picker.actor is self.hsp_obj.surf.actor.actor:
            idxs = []
            idx = None
            pt = [picker.pick_position]
        elif self.hsp_obj.surf.actor.actor in picker.actors:
            idxs = [
                i for i in range(n_pos)
                if picker.actors[i] is self.hsp_obj.surf.actor.actor
            ]
            idx = idxs[-1]
            pt = [picker.picked_positions[idx]]
        else:
            # Nothing relevant was picked; idx and pt would be undefined below.
            logger.debug("GUI: picked object other than MRI")
            return

        def round_(x):
            return round(x, 3)

        poss = [[round_(c) for c in pos] for pos in picker.picked_positions]
        pos = [round_(c) for c in picker.pick_position]
        msg = ["Pick Event: %i picked_positions:" % n_pos]

        line = str(pos)
        if idx is None:
            line += " <-pick_position"
        msg.append(line)

        for i, pos in enumerate(poss):
            line = str(pos)
            if i == idx:
                line += " <- MRI mesh"
            elif i in idxs:
                line += " (<- also MRI mesh)"
            msg.append(line)
        logger.debug(os.linesep.join(msg))

        if self.set == 'Nasion':
            self.nasion = pt
        elif self.set == 'LPA':
            self.lpa = pt
        elif self.set == 'RPA':
            self.rpa = pt
        else:
            raise ValueError("set = %r" % self.set)

    @on_trait_change('set')
    def _on_set_change(self, obj, name, old, new):
        self.sync_trait(old.lower(),
                        self,
                        'current_pos',
                        mutual=True,
                        remove=True)
        self.sync_trait(new.lower(), self, 'current_pos', mutual=True)
        if new == 'Nasion':
            self.headview.front = True
        elif new == 'LPA':
            self.headview.left = True
        elif new == 'RPA':
            self.headview.right = True
Example #21
class ImageDataProbe(Filter):
    """
    A filter that can be used to probe any dataset using a Structured
    Points dataset.  The filter also allows one to convert the scalar
    data to an unsigned short array so that the scalars can be used for
    volume visualization.
    """

    # The image data onto which the data is probed.
    probe_data = Instance(tvtk.ImageData, args=())

    # The probe filter.
    filter = Instance(tvtk.ProbeFilter, args=())

    rescale_scalars = Bool(False, desc='if the input scalars are '\
                                       'rescaled to an unsigned short '\
                                       'array')

    # Specifies if we can change the spacing/dimensions -- not allowed
    # for imagedata/structured points data.
    allow_changes = Bool(True)

    # Spacing of points in the image data.
    spacing = Array(value=(0.0, 0.0, 0.0),
                    shape=(3, ),
                    cols=1,
                    dtype=float,
                    enter_set=True,
                    auto_set=False,
                    labels=['sx', 'sy', 'sz'],
                    desc='the spacing of points')

    # Dimensions of the image data.
    dimensions = Array(value=(0, 0, 0),
                       shape=(3, ),
                       cols=1,
                       dtype=int,
                       enter_set=True,
                       auto_set=False,
                       labels=['nx', 'ny', 'nz'],
                       desc='the dimensions of the image data')

    # Reset settings to defaults.
    reset_defaults = Button(desc='if probe data is reset to defaults')

    # Name of rescaled scalar to generate.
    rescaled_scalar_name = Str('probe_us_array')

    input_info = PipelineInfo(datasets=['image_data'],
                              attribute_types=['any'],
                              attributes=['any'])

    output_info = PipelineInfo(datasets=['image_data'],
                               attribute_types=['any'],
                               attributes=['any'])

    ########################################
    # Private traits.

    # A trait to prevent static handlers from firing unnecessarily.
    _event_handled = Bool(False)

    ########################################
    # View related traits.

    view = View(Group(
        Item(name='dimensions', enabled_when='allow_changes'),
        Item(name='spacing', enabled_when='allow_changes'),
        Item(name='rescale_scalars'),
        Item(name='reset_defaults', show_label=False),
    ),
                resizable=True)

    ######################################################################
    # `Filter` interface.
    ######################################################################
    def setup_pipeline(self):
        """Creates the pipeline."""
        self.configure_input_data(self.filter, self.probe_data)

    def update_pipeline(self):
        """Connect and update the pipeline."""
        inputs = self.inputs
        if len(inputs) == 0:
            return

        fil = self.filter
        self.configure_source_data(fil, inputs[0].outputs[0])
        reset = False
        if self.dimensions.sum() == 0:
            reset = True
        self._setup_probe_data(reset)
        fil.update()
        self._rescale_scalars_changed(self.rescale_scalars)
        self._set_outputs([fil])

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _setup_probe_data(self, reset=False):
        pd = self.probe_data
        input = self.inputs[0].get_output_dataset()
        if input.is_a('vtkImageData'):
            self.allow_changes = False
            self.trait_set(spacing=input.spacing, dimensions=input.dimensions)
            pd.trait_set(origin=input.origin,
                         dimensions=input.dimensions,
                         spacing=input.spacing)
            pd.update()
        elif reset:
            self.allow_changes = True
            b = numpy.array(input.bounds)
            pd.origin = b[::2]
            l = b[1::2] - b[::2]
            tot_len = sum(l)
            npnt = pow(input.number_of_points, 1. / 3.) + 0.5
            fac = 3.0 * npnt / tot_len
            dims = (l * fac).astype(int) + 1
            extent = (0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
            pd.trait_set(extent=extent, dimensions=dims)

            max_dim = dims.max()
            dims = (dims - 1).clip(min=1, max=max_dim + 1)
            l = l.clip(min=1e-3, max=l.max() + 1.0)
            pd.spacing = l / dims
            self._event_handled = True
            self.trait_set(spacing=pd.spacing, dimensions=pd.dimensions)
            self._event_handled = False

    def _rescale_scalars_changed(self, value):
        out = self.filter.output
        pd = out.point_data
        sc = pd.scalars
        if sc is None:
            # no input scalars
            return

        if not value:
            dataset = self.inputs[0].get_output_dataset()
            orig_sc = dataset.point_data.scalars
            if sc.is_a('vtkUnsignedShortArray') and \
               sc.name == self.rescaled_scalar_name:
                pd.set_active_scalars(orig_sc.name)
                pd.update()
                self.pipeline_changed = True
                self.render()

            return

        s_min, s_max = sc.range
        # checking to see if input array is constant.
        avg = (s_max + s_min) * 0.5
        diff = 1
        if (s_max > avg) and (avg > s_min):
            diff = s_max - s_min

        arr = (sc.to_array() - s_min) * 65535.0 / diff
        uc = tvtk.UnsignedShortArray(name=self.rescaled_scalar_name)
        uc.from_array(arr)
        pd.add_array(uc)
        pd.set_active_scalars(self.rescaled_scalar_name)
        pd.update()
        self.pipeline_changed = True
        self.render()

    def _dimensions_changed(self, value):
        if not self.allow_changes or self._event_handled:
            return

        max_d = value.max()
        dims = (value - 1).clip(min=1, max=max_d)
        b = numpy.array(self.inputs[0].get_output_dataset().bounds)
        l = b[1::2] - b[::2]
        self.spacing = l / dims
        self._update_probe()

    def _spacing_changed(self, value):
        if not self.allow_changes or self._event_handled:
            return
        b = numpy.array(self.inputs[0].get_output_dataset().bounds)
        l = b[1::2] - b[::2]
        dims = (l / value + 0.5).astype(int) + 1
        # Recalculate spacing because of rounding.
        maxd = dims.max()
        dims1 = (dims - 1).clip(min=1, max=maxd)
        sp = l / dims1
        self._event_handled = True
        self.trait_set(spacing=sp, dimensions=dims)
        self._event_handled = False
        self._update_probe()

    def _update_probe(self):
        pd = self.probe_data
        dims = self.dimensions
        spacing = self.spacing
        extent = (0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
        pd.trait_set(extent=extent, dimensions=dims, spacing=spacing)
        pd.modified()
        fil = self.filter
        w = fil.global_warning_display
        fil.global_warning_display = False
        fil.remove_all_inputs()
        self.configure_input_data(fil, pd)
        fil.update_whole_extent()
        fil.update()
        self._rescale_scalars_changed(self.rescale_scalars)
        fil.global_warning_display = w
        self.data_changed = True

    def _reset_defaults_fired(self):
        self._setup_probe_data(reset=True)
        self._rescale_scalars_changed(self.rescale_scalars)
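A hypothetical pipeline sketch; mayavi exposes registered filters such as this one through mlab.pipeline, and data.vtk is only an illustrative file name:

from mayavi import mlab

src = mlab.pipeline.open('data.vtk')          # any probe-able dataset
probe = mlab.pipeline.image_data_probe(src)   # resample onto a structured grid
probe.rescale_scalars = True                  # expose an unsigned short array
mlab.pipeline.volume(probe)                   # scalars now suit volume rendering
mlab.show()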
Example #22
class MRIHeadWithFiducialsModel(HasPrivateTraits):
    """Represent an MRI head shape with fiducials

    Attributes
    ----------
    points : array (n_points, 3)
        MRI head surface points.
    tris : array (n_tris, 3)
        Triangles based on points.
    lpa : array (1, 3)
        Left peri-auricular point coordinates.
    nasion : array (1, 3)
        Nasion coordinates.
    rpa : array (1, 3)
        Right peri-auricular point coordinates.
    """
    subject_source = Instance(MRISubjectSource, ())
    bem = Instance(BemSource, ())
    fid = Instance(FiducialsSource, ())

    fid_file = DelegatesTo('fid', 'file')
    fid_fname = DelegatesTo('fid', 'fname')
    fid_points = DelegatesTo('fid', 'points')
    subjects_dir = DelegatesTo('subject_source')
    subject = DelegatesTo('subject_source')
    subject_has_bem = DelegatesTo('subject_source')
    points = DelegatesTo('bem')
    norms = DelegatesTo('bem')
    tris = DelegatesTo('bem')
    lpa = Array(float, (1, 3))
    nasion = Array(float, (1, 3))
    rpa = Array(float, (1, 3))

    reset = Event(desc="Reset fiducials to the file.")

    # info
    can_save = Property(depends_on=['fid_file', 'can_save_as'])
    can_save_as = Property(depends_on=['lpa', 'nasion', 'rpa'])
    can_reset = Property(
        depends_on=['fid_file', 'fid.points', 'lpa', 'nasion', 'rpa'])
    fid_ok = Property(depends_on=['lpa', 'nasion', 'rpa'],
                      desc="All points "
                      "are set")
    default_fid_fname = Property(depends_on=['subjects_dir', 'subject'],
                                 desc="the default file name for the "
                                 "fiducials fif file")

    # switch for the GUI (has no effect in the model)
    lock_fiducials = Bool(False,
                          desc="Used by the GUI; has no effect in the "
                          "model.")

    @on_trait_change('fid_points')
    def reset_fiducials(self):
        if self.fid_points is not None:
            self.lpa = self.fid_points[0:1]
            self.nasion = self.fid_points[1:2]
            self.rpa = self.fid_points[2:3]

    def save(self, fname=None):
        """Save the current fiducials to a file

        Parameters
        ----------
        fname : str
            Destination file path. If None, will use the current fid filename
            if available, or else use the default pattern.
        """
        if fname is None:
            fname = self.fid_file
        if not fname:
            fname = self.default_fid_fname

        dig = [{
            'kind': 1,
            'ident': 1,
            'r': np.array(self.lpa[0])
        }, {
            'kind': 1,
            'ident': 2,
            'r': np.array(self.nasion[0])
        }, {
            'kind': 1,
            'ident': 3,
            'r': np.array(self.rpa[0])
        }]
        write_fiducials(fname, dig, FIFF.FIFFV_COORD_MRI)
        self.fid_file = fname

    @cached_property
    def _get_can_reset(self):
        if not self.fid_file:
            return False
        elif np.any(self.lpa != self.fid.points[0:1]):
            return True
        elif np.any(self.nasion != self.fid.points[1:2]):
            return True
        elif np.any(self.rpa != self.fid.points[2:3]):
            return True
        return False

    @cached_property
    def _get_can_save_as(self):
        can = not (np.all(self.nasion == self.lpa)
                   or np.all(self.nasion == self.rpa)
                   or np.all(self.lpa == self.rpa))
        return can

    @cached_property
    def _get_can_save(self):
        if not self.can_save_as:
            return False
        elif self.fid_file:
            return True
        elif self.subjects_dir and self.subject:
            return True
        else:
            return False

    @cached_property
    def _get_default_fid_fname(self):
        fname = fid_fname.format(subjects_dir=self.subjects_dir,
                                 subject=self.subject)
        return fname

    @cached_property
    def _get_fid_ok(self):
        return all(np.any(pt) for pt in (self.nasion, self.lpa, self.rpa))

    def _reset_fired(self):
        self.reset_fiducials()

    # if subject changed because of a change of subjects_dir this was not
    # triggered
    @on_trait_change('subjects_dir,subject')
    def _subject_changed(self):
        subject = self.subject
        subjects_dir = self.subjects_dir
        if not subjects_dir or not subject:
            return

        # update bem head
        path = head_bem_fname.format(subjects_dir=subjects_dir,
                                     subject=subject)
        self.bem.file = path

        # find fiducials file
        path = fid_fname.format(subjects_dir=subjects_dir, subject=subject)
        if os.path.exists(path):
            self.fid_file = path
            self.lock_fiducials = True
        else:
            path = fid_fname_general.format(subjects_dir=subjects_dir,
                                            subject=subject,
                                            head='*')
            fnames = glob(path)
            if fnames:
                path = fnames[0]
                self.fid.file = path
                self.lock_fiducials = True
            else:
                self.fid.reset_traits(['file'])
                self.lock_fiducials = False

        # does not seem to happen by itself ... so hard code it:
        self.reset_fiducials()
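A hypothetical usage sketch, assuming a FreeSurfer-style subjects directory (the paths and coordinates are illustrative):

model = MRIHeadWithFiducialsModel(subjects_dir='/data/subjects',
                                  subject='sample')
if not model.fid_ok:                  # no fiducials file was found
    model.lpa = [[-0.08, 0.0, 0.0]]   # metres, MRI coordinates
    model.nasion = [[0.0, 0.1, 0.0]]
    model.rpa = [[0.08, 0.0, 0.0]]
if model.can_save:
    model.save()                      # writes the default fiducials fif file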
Example #23
class LogPlot(HasTraits):
    '''Top-level component in the GUI. Contains multiple DataPlots.'''
    data_plots = List(Instance(DataPlot))
    time_plot = Instance(Plot)
    selection = Any()

    data = Instance(ArrayPlotData)

    time = Array()
    y = Array()

    def _y_default(self):
        # Placeholder signal so 't' and 'y' have matching lengths in the plot.
        return np.sin(self.time)

    def _data_default(self):
        return ArrayPlotData(t=self.time, y=self.y)

    def _data_plots_default(self):
        dplot = DataPlot(t=self.time)
        dplot2 = DataPlot(t=self.time)
        return [dplot, dplot2]

    def _time_default(self):
        return np.linspace(0, 10, 100)

    def handle_selection_change(self):
        selection = self.selection.selection

        if selection is None:
            low = np.min(self.time)
            high = np.max(self.time)
        else:
            low, high = selection

        #Update ranges in data plots
        for dp in self.data_plots:
            dp.plot.index_range = DataRange1D(low=low, high=high)

    def _time_plot_default(self):
        plot = Plot(self.data)
        line_plot = plot.plot(('t', 'y'))[0]

        line_plot.active_tool = RangeSelection(line_plot,
                                               left_button_selects=True)
        line_plot.overlays.append(RangeSelectionOverlay(component=line_plot))
        self.selection = line_plot.active_tool

        plot.padding = 20
        plot.padding_left = 50

        self.selection.on_trait_change(self.handle_selection_change,
                                       'selection')

        return plot

    view = View(VGroup(Item('time_plot',
                            editor=ComponentEditor(),
                            show_label=False,
                            height=100,
                            width=800,
                            resizable=False,
                            padding=0),
                       Item('data_plots',
                            editor=ListEditor(editor=InstanceEditor(),
                                              style='custom'),
                            show_label=False,
                            style='custom',
                            padding=0),
                       padding=0),
                width=800,
                height=500,
                resizable=True,
                title="Log Plotter")
Example #24
class YoshimuraCPFactory(FactoryTask):
    '''Generate a Yoshimura crease pattern based
    on the specification of its parameters.
    '''

    L_x = Float(4, geometry=True)
    L_y = Float(2, geometry=True)

    n_x = Int(2, geometry=True)
    n_y = Int(2, geometry=True)

    def deliver(self):
        return CreasePatternState(X=self.X, L=self.L, F=self.F)

    new_nodes = Array(value=[], dtype=float)
    new_crease_lines = Array(value=[], dtype=int)

    X = Property

    def _get_X(self):
        return self._geometry[0]

    def _set_X(self, values):
        values = values.reshape(-1, 3)
        self.X[:, :] = values[:, :]

    L = Property

    def _get_L(self):
        return self._geometry[1]

    F = Property

    def _get_F(self):
        return self._geometry[2]

    N_h = Property

    def _get_N_h(self):
        return self._geometry[3]

    N_v = Property

    def _get_N_v(self):
        return self._geometry[4]

    N_i = Property

    def _get_N_i(self):
        return self._geometry[5]

    X_h = Property

    def _get_X_h(self):
        return self._geometry[6]

    X_v = Property

    def _get_X_v(self):
        return self._geometry[7]

    X_i = Property

    def _get_X_i(self):
        return self._geometry[8]

    interior_vertices = Property

    def _get_interior_vertices(self):
        return self._geometry[9]

    cycled_neighbors = Property

    def _get_cycled_neighbors(self):
        return self._geometry[10]

    connectivity = Property
    '''
    The connectivity represents all inner nodes [n] of 
    the crease pattern and their connected nodes [cn].

    (n,[cn1,cn2,...,cni])
    '''

    def _get_connectivity(self):
        con = [(vertex, neighbors) for vertex, neighbors in zip(
            self.interior_vertices, self.cycled_neighbors.T)]
        return con

    # deformed nodes
    XX = Property(depends_on='fx, nodes')

    def _get_XX(self):

        XX = np.zeros(self.X.shape)
        XX[:, 0] = self.fx(self.X[:, 0], self.X[:, 1])
        XX[:, 1] = self.fy(self.X[:, 0], self.X[:, 1])
        return XX

    transform = Bool(False)

    # geometric deformation
    _fx_expr = Any

    def __fx_expr_default(self):
        return x_

    _fy_expr = Any

    def __fy_expr_default(self):
        return y_

    fy = Property

    def _set_fy(self, ls_expr):
        self._fy_expr = ls_expr

    def _get_fy(self):
        return sp.lambdify([x_, y_], self._fy_expr)

    fx = Property

    def _set_fx(self, ls_expr):
        self._fx_expr = ls_expr

    def _get_fx(self):
        return sp.lambdify([x_, y_], self._fx_expr)

    geo_transform = Callable

    def _geo_transform_default(self):
        return lambda X_arr: X_arr

    _geometry = Property(depends_on='+geometry')

    @cached_property
    def _get__geometry(self):

        n_x = self.n_x
        n_y = self.n_y

        L_x = self.L_x
        L_y = self.L_y

        x_e, y_e = np.mgrid[0:L_x:complex(n_x + 1), 0:L_y:complex(n_y + 1)]

        # nodes on horizontal crease lines

        x_h = x_e[:, ::2]
        y_h = y_e[:, ::2]
        X_h = np.c_[x_h.flatten(), y_h.flatten()]

        # nodes on vertical boundaries on odd horizontal crease lines

        x_v = x_e[(0, -1), 1::2]
        y_v = y_e[(0, -1), 1::2]
        X_v = np.c_[x_v.flatten(), y_v.flatten()]

        # interior nodes on odd horizontal crease lines

        x_i = (x_e[1:, 1::2] + x_e[:-1, 1::2]) / 2.0
        y_i = (y_e[1:, 1::2] + y_e[:-1, 1::2]) / 2.0
        X_i = np.c_[x_i.flatten(), y_i.flatten()]

        # node enumeration in grid form on
        # (1) the even horizontal crease lines (n_h)
        # (2) the vertical boundaries of the odd crease lines (n_v)
        # (3) the interiors of the odd crease lines (n_i)

        n_y_2 = int(n_y / 2)
        n_h = np.arange((n_x + 1) * (n_y_2 + 1)).reshape((n_x + 1),
                                                         (n_y_2 + 1))
        n_v = np.arange((2 * n_y_2)).reshape(2, n_y_2) + n_h[-1, -1] + 1
        n_i = np.arange(n_x * n_y_2).reshape(n_x, n_y_2) + n_v[-1, -1] + 1
        n_viv = np.vstack([n_v[0, :], n_i, n_v[-1, :]])

        # connectivity of nodes defining the crease pattern

        e_h00 = np.c_[n_h[:-1, :].flatten(), n_h[1:, :].flatten()]
        e_h90 = np.c_[n_h[(0, -1), :-1].flatten(), n_v[:, :].flatten()]
        e_v90 = np.c_[n_v[:, :].flatten(), n_h[(0, -1), 1:].flatten()]
        e_h45 = np.c_[n_h[:-1, :-1].flatten(), n_i[:, :].flatten()]
        e_i45 = np.c_[n_i[:, :].flatten(), n_h[1:, 1:].flatten()]
        e_h135 = np.c_[n_h[1:, :-1].flatten(), n_i[:, :].flatten()]
        e_i135 = np.c_[n_i[:, :].flatten(), n_h[:-1, 1:].flatten()]
        e_v00 = np.c_[n_v[:, :].flatten(), n_i[(0, -1), :].flatten()]
        e_i00 = np.c_[n_i[:-1, :].flatten(), n_i[1:, :].flatten()]

        nodes = np.vstack([X_h, X_v, X_i])
        zero_z = np.zeros((nodes.shape[0], 1), dtype=np.float_)

        nodes = np.hstack([nodes, zero_z])
        crease_lines = np.vstack(
            [e_h00, e_h90, e_v90, e_h45, e_i45, e_h135, e_i135, e_v00, e_i00])

        # ======================================================================
        # Connectivity mapping - neighbours of each vertex - closed
        # ======================================================================

        c_h = n_h[1:-1, 1:-1].flatten()

        c_h235 = n_viv[1:-2, :-1].flatten()
        c_h315 = n_viv[2:-1, :-1].flatten()
        c_h000 = n_h[2:, 1:-1].flatten()
        c_h045 = n_viv[2:-1, 1:].flatten()
        c_h135 = n_viv[1:-2, 1:].flatten()
        c_h180 = n_h[:-2, 1:-1].flatten()

        conn_h = np.vstack([c_h235, c_h315, c_h000, c_h045, c_h135, c_h180])

        c_viv = n_viv[1:-1, :].flatten()
        c_viv235 = n_h[:-1, :-1].flatten()
        c_viv315 = n_h[1:, :-1].flatten()
        c_viv000 = n_viv[2:, :].flatten()
        c_viv045 = n_h[1:, 1:].flatten()
        c_viv135 = n_h[:-1, 1:].flatten()
        c_viv180 = n_viv[:-2, :].flatten()

        conn_viv = np.vstack(
            [c_viv235, c_viv315, c_viv000, c_viv045, c_viv135, c_viv180])

        interior_vertices = np.hstack([c_h, c_viv])
        cycled_neighbors = np.hstack([conn_h, conn_viv])

        # ======================================================================
        # Construct the facet mappings
        # ======================================================================
        f_h00 = np.c_[n_h[:-1, :-1].flatten(), n_h[1:, :-1].flatten(),
                      n_i[:, :].flatten()]
        f_hi90 = np.c_[n_h[1:-1, :-1].flatten(), n_i[1:, :].flatten(),
                       n_i[:-1, :].flatten()]
        f_hl90 = np.c_[n_h[0, :-1].flatten(), n_i[0, :].flatten(),
                       n_v[0, :].flatten()]
        f_hr90 = np.c_[n_h[-1, :-1].flatten(), n_i[-1, :].flatten(),
                       n_v[-1, :].flatten()]
        g_h00 = np.c_[n_h[:-1, 1:].flatten(), n_h[1:, 1:].flatten(),
                      n_i[:, :].flatten()]
        g_hi90 = np.c_[n_h[1:-1, 1:].flatten(), n_i[1:, :].flatten(),
                       n_i[:-1, :].flatten()]
        g_hl90 = np.c_[n_h[0, 1:].flatten(), n_i[0, :].flatten(),
                       n_v[0, :].flatten()]
        g_hr90 = np.c_[n_h[-1, 1:].flatten(), n_i[-1, :].flatten(),
                       n_v[-1, :].flatten()]

        facets = np.vstack(
            [f_h00, f_hi90, f_hl90, f_hr90, g_h00, g_hi90, g_hl90, g_hr90])

        return (self.geo_transform(nodes), crease_lines, facets, n_h, n_v, n_i,
                X_h, X_v, X_i, interior_vertices, cycled_neighbors)
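A hypothetical usage sketch; CreasePatternState comes from the surrounding package:

factory = YoshimuraCPFactory(L_x=4, L_y=2, n_x=2, n_y=2)
cp = factory.deliver()    # CreasePatternState(X=..., L=..., F=...)
print(factory.X.shape)    # (n_nodes, 3); all z coordinates start at zero
print(factory.L.shape)    # crease lines as pairs of node indices
print(factory.F.shape)    # triangular facets as triples of node indices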
Example #25
class MATS2D5Bond(MATS2DEval):
    '''
    Elastic Model.
    '''

    # implements( IMATSEval )

    #---------------------------------------------------------------------------
    # Parameters of the numerical algorithm (integration)
    #---------------------------------------------------------------------------

    stress_state = Enum("plane_stress", "plane_strain")

    #---------------------------------------------------------------------------
    # Material parameters 
    #---------------------------------------------------------------------------

    E_m = Float(1.,  # 34e+3,
                 label="E_m",
                 desc="Young's Modulus",
                 auto_set=False)
    nu_m = Float(0.2,
                 label='nu_m',
                 desc="Poison's ratio",
                 auto_set=False)

    E_f = Float(1.,  # 34e+3,
                 label="E_f",
                 desc="Young's Modulus",
                 auto_set=False)
    nu_f = Float(0.2,
                 label='nu_f',
                 desc="Poison's ratio",
                 auto_set=False)

    G = Float(1.,  # 34e+3,
                 label="G",
                 desc="Shear Modulus",
                 auto_set=False)

    D_el = Property(Array(float), depends_on='E_m, nu_m, E_f, nu_f, G, stress_state')

    @cached_property
    def _get_D_el(self):
        if self.stress_state == "plane_stress":
            return self._get_D_plane_stress()
        else:
            return self._get_D_plane_strain()

    # This event can be used by the clients to trigger an action upon
    # the completed reconfiguration of the material model
    #
    changed = Event

    #---------------------------------------------------------------------------------------------
    # View specification
    #---------------------------------------------------------------------------------------------

    view_traits = View(VSplit(Group(Item('E_m'),
                                      Item('nu_m'),
                                      Item('E_f'),
                                      Item('nu_f'),
                                      Item('G')
                                      ),
                                Group(Item('stress_state', style='custom'),
                                       Spring(resizable=True),
                                       label='Configuration parameters', show_border=True,
                                       ),
                                ),
                        resizable=True
                        )

    #-----------------------------------------------------------------------------------------------
    # Private initialization methods
    #-----------------------------------------------------------------------------------------------

    #-----------------------------------------------------------------------------------------------
    # Setup for computation within a supplied spatial context
    #-----------------------------------------------------------------------------------------------

    def new_cntl_var(self):
        # eight engineering components: 3 matrix, 3 fiber, 2 bond
        return zeros(8, float_)

    def new_resp_var(self):
        return zeros(8, float_)

    #-----------------------------------------------------------------------------------------------
    # Evaluation - get the corrector and predictor
    #-----------------------------------------------------------------------------------------------

    def get_corr_pred(self, sctx, eps_app_eng, d_eps, tn, tn1):
        '''
        Corrector predictor computation.
        @param eps_app_eng input variable - engineering strain
        '''

        sigma = dot(self.D_el[:], eps_app_eng)

        return sigma, self.D_el

    #---------------------------------------------------------------------------------------------
    # Subsidiary methods realizing configurable features
    #---------------------------------------------------------------------------------------------

    def _get_D_plane_stress(self):
        E_m = self.E_m
        nu_m = self.nu_m
        E_f = self.E_f
        nu_f = self.nu_f
        G = self.G
        D_stress = zeros([8, 8])
        D_stress[0, 0] = E_m / (1.0 - nu_m * nu_m)
        D_stress[0, 1] = E_m / (1.0 - nu_m * nu_m) * nu_m
        D_stress[1, 0] = E_m / (1.0 - nu_m * nu_m) * nu_m
        D_stress[1, 1] = E_m / (1.0 - nu_m * nu_m)
        D_stress[2, 2] = E_m / (1.0 - nu_m * nu_m) * (1.0 / 2.0 - nu_m / 2.0)

        D_stress[3, 3] = E_f / (1.0 - nu_f * nu_f)
        D_stress[3, 4] = E_f / (1.0 - nu_f * nu_f) * nu_f
        D_stress[4, 3] = E_f / (1.0 - nu_f * nu_f) * nu_f
        D_stress[4, 4] = E_f / (1.0 - nu_f * nu_f)
        D_stress[5, 5] = E_f / (1.0 - nu_f * nu_f) * (1.0 / 2.0 - nu_f / 2.0)

        D_stress[6, 6] = G
        D_stress[7, 7] = G
        return D_stress

    def _get_D_plane_strain(self):
        # TODO: adapt to use arbitrary 2d model following the 1d5 bond
        E_m = self.E_m
        nu_m = self.nu_m
        E_f = self.E_f
        nu_f = self.nu_f
        G = self.G
        D_strain = zeros([8, 8])
        D_strain[0, 0] = E_m * (1.0 - nu_m) / (1.0 + nu_m) / (1.0 - 2.0 * nu_m)
        D_strain[0, 1] = E_m / (1.0 + nu_m) / (1.0 - 2.0 * nu_m) * nu_m
        D_strain[1, 0] = E_m / (1.0 + nu_m) / (1.0 - 2.0 * nu_m) * nu_m
        D_strain[1, 1] = E_m * (1.0 - nu_m) / (1.0 + nu_m) / (1.0 - 2.0 * nu_m)
        D_strain[2, 2] = E_m * (1.0 - nu_m) / (1.0 + nu_m) / (2.0 - 2.0 * nu_m)

        D_strain[3, 3] = E_f * (1.0 - nu_f) / (1.0 + nu_f) / (1.0 - 2.0 * nu_f)
        D_strain[3, 4] = E_f / (1.0 + nu_f) / (1.0 - 2.0 * nu_f) * nu_f
        D_strain[4, 3] = E_f / (1.0 + nu_f) / (1.0 - 2.0 * nu_f) * nu_f
        D_strain[4, 4] = E_f * (1.0 - nu_f) / (1.0 + nu_f) / (1.0 - 2.0 * nu_f)
        D_strain[5, 5] = E_f * (1.0 - nu_f) / (1.0 + nu_f) / (2.0 - 2.0 * nu_f)

        D_strain[6, 6] = G
        D_strain[7, 7] = G
        return D_strain

    #---------------------------------------------------------------------------------------------
    # Response trace evaluators
    #---------------------------------------------------------------------------------------------

    def get_sig_norm(self, sctx, eps_app_eng):
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0)
        return array([ scalar_sqrt(sig_eng[0] ** 2 + sig_eng[1] ** 2) ])

    def get_eps_app_m(self, sctx, eps_app_eng):
        return self.map_eps_eng_to_mtx((eps_app_eng[:3]))

    def get_eps_app_f(self, sctx, eps_app_eng):
        return self.map_eps_eng_to_mtx((eps_app_eng[3:6]))

    def get_sig_app_m(self, sctx, eps_app_eng):
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0)
        return self.map_sig_eng_to_mtx((sig_eng[:3]))

    def get_sig_app_f(self, sctx, eps_app_eng):
        sig_eng, D_mtx = self.get_corr_pred(sctx, eps_app_eng, 0, 0, 0)
        return self.map_sig_eng_to_mtx((sig_eng[3:6]))

    # Declare and fill-in the rte_dict - it is used by the clients to
    # assemble all the available time-steppers.
    #
    rte_dict = Trait(Dict)

    def _rte_dict_default(self):
        return {
                'eps_app_f'  : self.get_eps_app_f,
                'eps_app_m'  : self.get_eps_app_m,
                'sig_app_f'  : self.get_sig_app_f,
                'sig_app_m'  : self.get_sig_app_m,
                'sig_norm'   : self.get_sig_norm}
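A hypothetical sanity check of the elastic law above, reusing the class in the same module context:

mats = MATS2D5Bond(E_m=30e3, nu_m=0.2, E_f=200e3, nu_f=0.3, G=10.0)
eps = mats.new_cntl_var()
eps[0] = 1e-3                        # apply a matrix strain eps_xx
sig, D = mats.get_corr_pred(None, eps, None, 0, 0)
print(sig[:3])                       # matrix stresses respond
print(sig[3:])                       # fiber and bond components stay zero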
Example #26
class FETS2D4Q8U(FETSEval):
    debug_on = True

    mats_eval = Instance(MATSEval)

    # Dimensional mapping
    dim_slice = slice(0, 2)

    n_e_dofs = Int(2 * 8)
    t = Float(1.0, label='thickness')

    # Integration parameters
    #
    ngp_r = 3
    ngp_s = 3

    dof_r = Array(value=[[-1., -1.], [1., -1.], [1., 1.], [-1., 1.], [0., -1.],
                         [1., 0.], [0., 1.], [-1., 0.]])
    geo_r = Array(value=[[-1, -1], [1, -1], [1, 1], [-1, 1]])
    #
    vtk_r = Array(value=[[-1., -1.], [0., -1.], [1., -1.], [-1., 0.], [1., 0.],
                         [-1., 1.], [0., 1.], [1., 1.]])
    vtk_cells = [[0, 2, 7, 5, 1, 4, 6, 3]]
    vtk_cell_types = 'QuadraticQuad'

    n_nodal_dofs = 2

    # Ordering of the nodes of the parent element used for the geometry
    # approximation
    _node_coord_map_geo = Array(Float, (4, 2),
                                [[-1., -1.], [1., -1.], [1., 1.], [-1., 1.]])

    #---------------------------------------------------------------------
    # Method required to represent the element geometry
    #---------------------------------------------------------------------
    def get_N_geo_mtx(self, r_pnt):
        '''
        Return the value of shape functions for the specified local coordinate r_pnt
        '''
        cx = self._node_coord_map_geo
        N_geo_mtx = np.array([[
            1 / 4. * (1 + r_pnt[0] * cx[i, 0]) * (1 + r_pnt[1] * cx[i, 1])
            for i in range(0, 4)
        ]])
        return N_geo_mtx

    def get_dNr_geo_mtx(self, r_pnt):
        '''
        Return the matrix of shape function derivatives.
        Used for the construction of the Jacobian matrix.

        @TODO - the B matrix is used
        just for uniaxial bar here with a trivial differential
        operator.
        '''
        cx = self._node_coord_map_geo
        dNr_geo_mtx = np.array([[
            1 / 4. * cx[i, 0] * (1 + r_pnt[1] * cx[i, 1]) for i in range(0, 4)
        ], [
            1 / 4. * cx[i, 1] * (1 + r_pnt[0] * cx[i, 0]) for i in range(0, 4)
        ]])
        return dNr_geo_mtx

    #-------------------------------------------------------------------------
    # Method delivering the shape functions for the field variables and their derivatives
    #-------------------------------------------------------------------------
    def get_N_mtx(self, r_pnt):
        '''
        Returns the matrix of the shape functions (derived in femple) used for
        the field approximation, including the zero entries. The number of rows
        corresponds to the number of nodal dofs. The matrix is evaluated for
        the specified local coordinate r_pnt.
        '''
        N_dof = np.zeros((1, 8))
        N_dof[0, 0] = -((-1 + r_pnt[1]) * (-1 + r_pnt[0]) *
                        (r_pnt[0] + 1 + r_pnt[1])) / 4.0
        N_dof[0, 1] = -((-1 + r_pnt[1]) * (1 + r_pnt[0]) *
                        (r_pnt[0] - 1 - r_pnt[1])) / 4.0
        N_dof[0, 2] = ((1 + r_pnt[1]) * (1 + r_pnt[0]) *
                       (r_pnt[0] - 1 + r_pnt[1])) / 4.0
        N_dof[0, 3] = ((1 + r_pnt[1]) * (-1 + r_pnt[0]) *
                       (r_pnt[0] + 1 - r_pnt[1])) / 4.0
        N_dof[0, 4] = ((-1 + r_pnt[0]) * (1 + r_pnt[0]) * (-1 + r_pnt[1])) / 2.0
        N_dof[0, 5] = -((-1 + r_pnt[1]) * (1 + r_pnt[1]) * (1 + r_pnt[0])) / 2.0
        N_dof[0, 6] = -((-1 + r_pnt[0]) * (1 + r_pnt[0]) * (1 + r_pnt[1])) / 2.0
        N_dof[0, 7] = ((-1 + r_pnt[1]) * (1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0

        I_mtx = np.identity(self.n_nodal_dofs, float)
        N_mtx_list = [I_mtx * N_dof[0, i] for i in range(0, N_dof.shape[1])]
        N_mtx = np.hstack(N_mtx_list)
        return N_mtx

    def get_dNr_mtx(self, r_pnt):
        '''
        Return the derivatives of the shape functions (derived in femple) 
        used for the field approximation
        '''
        dNr_mtx = np.zeros((2, 8), dtype='float_')
        dNr_mtx[0, 0] = -((-1 + r_pnt[1]) * (r_pnt[0] + 1 + r_pnt[1])) / 4.0 - \
            ((-1 + r_pnt[1]) * (-1 + r_pnt[0])) / 4.0
        dNr_mtx[0, 1] = -((-1 + r_pnt[1]) * (r_pnt[0] - 1 - r_pnt[1])) / 4.0 - \
            ((-1 + r_pnt[1]) * (1 + r_pnt[0])) / 4.0
        dNr_mtx[0, 2] = ((1 + r_pnt[1]) * (r_pnt[0] - 1 + r_pnt[1])) / 4.0 + \
            ((1 + r_pnt[1]) * (1 + r_pnt[0])) / 4.0
        dNr_mtx[0, 3] = ((1 + r_pnt[1]) * (r_pnt[0] + 1 - r_pnt[1])) / 4.0 + \
            ((1 + r_pnt[1]) * (-1 + r_pnt[0])) / 4.0
        dNr_mtx[0, 4] = ((-1 + r_pnt[1]) * (1 + r_pnt[0])) / 2.0 + \
            ((-1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0
        dNr_mtx[0, 5] = -((-1 + r_pnt[1]) * (1 + r_pnt[1])) / 2.0
        dNr_mtx[0, 6] = -((1 + r_pnt[1]) * (1 + r_pnt[0])) / 2.0 - \
            ((1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0
        dNr_mtx[0, 7] = ((-1 + r_pnt[1]) * (1 + r_pnt[1])) / 2.0
        dNr_mtx[1, 0] = -((-1 + r_pnt[0]) * (r_pnt[0] + 1 + r_pnt[1])) / 4.0 - \
            ((-1 + r_pnt[1]) * (-1 + r_pnt[0])) / 4.0
        dNr_mtx[1, 1] = -((1 + r_pnt[0]) * (r_pnt[0] - 1 - r_pnt[1])) / 4.0 + \
            ((-1 + r_pnt[1]) * (1 + r_pnt[0])) / 4.0
        dNr_mtx[1, 2] = ((1 + r_pnt[0]) * (r_pnt[0] - 1 + r_pnt[1])) / 4.0 + \
            ((1 + r_pnt[1]) * (1 + r_pnt[0])) / 4.0
        dNr_mtx[1, 3] = ((-1 + r_pnt[0]) * (r_pnt[0] + 1 - r_pnt[1])) / 4.0 - \
            ((1 + r_pnt[1]) * (-1 + r_pnt[0])) / 4.0
        dNr_mtx[1, 4] = ((-1 + r_pnt[0]) * (1 + r_pnt[0])) / 2.0
        dNr_mtx[1, 5] = -((1 + r_pnt[1]) * (1 + r_pnt[0])) / 2.0 - \
            ((-1 + r_pnt[1]) * (1 + r_pnt[0])) / 2.0
        dNr_mtx[1, 6] = -((-1 + r_pnt[0]) * (1 + r_pnt[0])) / 2.0
        dNr_mtx[1, 7] = ((1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0 + \
            ((-1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0
        return dNr_mtx

    def get_B_mtx(self, r_pnt, X_mtx):
        J_mtx = self.get_J_mtx(r_pnt, X_mtx)
        dNr_mtx = self.get_dNr_mtx(r_pnt)
        dNx_mtx = np.dot(inv(J_mtx), dNr_mtx)
        Bx_mtx = np.zeros((3, 16), dtype='float_')
        for i in range(0, 8):
            Bx_mtx[0, i * 2] = dNx_mtx[0, i]
            Bx_mtx[1, i * 2 + 1] = dNx_mtx[1, i]
            Bx_mtx[2, i * 2] = dNx_mtx[1, i]
            Bx_mtx[2, i * 2 + 1] = dNx_mtx[0, i]
        return Bx_mtx
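A hypothetical sanity check for the shape functions above (instantiation details depend on the surrounding FETSEval framework):

import numpy as np

fets = FETS2D4Q8U()
N = fets.get_N_mtx(np.array([0.0, 0.0]))   # evaluate at the element centre
print(N.shape)                             # (2, 16): 2 dofs per each of 8 nodes
print(N[0, ::2].sum())                     # ~1.0, shape functions sum to one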
Example #27
class CombineMarkersPanel(HasTraits):  # noqa: D401
    """Has two marker points sources and interpolates to a third one."""

    model = Instance(CombineMarkersModel, ())

    # model references for UI
    mrk1 = Instance(MarkerPointSource)
    mrk2 = Instance(MarkerPointSource)
    mrk3 = Instance(MarkerPointDest)
    distance = Str

    # Visualization
    scene = Instance(MlabSceneModel)
    scale = Float(5e-3)
    mrk1_obj = Instance(PointObject)
    mrk2_obj = Instance(PointObject)
    mrk3_obj = Instance(PointObject)
    trans = Array()

    view = View(VGroup(VGroup(Item('mrk1', style='custom'),
                              Item('mrk1_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 1", show_border=True),
                       VGroup(Item('mrk2', style='custom'),
                              Item('mrk2_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 2", show_border=True),
                       VGroup(Item('distance', style='readonly'),
                              label='Stats', show_border=True),
                       VGroup(Item('mrk3', style='custom'),
                              Item('mrk3_obj', style='custom'),
                              show_labels=False,
                              label="New Marker", show_border=True),
                       ))

    def _mrk1_default(self):
        return self.model.mrk1

    def _mrk2_default(self):
        return self.model.mrk2

    def _mrk3_default(self):
        return self.model.mrk3

    def __init__(self, *args, **kwargs):  # noqa: D102
        super(CombineMarkersPanel, self).__init__(*args, **kwargs)

        self.model.sync_trait('distance', self, 'distance', mutual=False)

        self.mrk1_obj = PointObject(scene=self.scene,
                                    color=(0.608, 0.216, 0.216),
                                    point_scale=self.scale)
        self.model.mrk1.sync_trait(
            'enabled', self.mrk1_obj, 'visible', mutual=False)

        self.mrk2_obj = PointObject(scene=self.scene,
                                    color=(0.216, 0.608, 0.216),
                                    point_scale=self.scale)
        self.model.mrk2.sync_trait(
            'enabled', self.mrk2_obj, 'visible', mutual=False)

        self.mrk3_obj = PointObject(scene=self.scene,
                                    color=(0.588, 0.784, 1.),
                                    point_scale=self.scale)
        self.model.mrk3.sync_trait(
            'enabled', self.mrk3_obj, 'visible', mutual=False)

    @on_trait_change('model:mrk1:points,trans')
    def _update_mrk1(self):
        if self.mrk1_obj is not None:
            self.mrk1_obj.points = apply_trans(self.trans,
                                               self.model.mrk1.points)

    @on_trait_change('model:mrk2:points,trans')
    def _update_mrk2(self):
        if self.mrk2_obj is not None:
            self.mrk2_obj.points = apply_trans(self.trans,
                                               self.model.mrk2.points)

    @on_trait_change('model:mrk3:points,trans')
    def _update_mrk3(self):
        if self.mrk3_obj is not None:
            self.mrk3_obj.points = apply_trans(self.trans,
                                               self.model.mrk3.points)
Example #28
class NuclearRabi(Pulsed):
    """Defines a Nuclear Rabi measurement."""

    mw_frequency = Range(low=1,
                         high=20e9,
                         value=2.7775190e9,
                         desc='microwave frequency',
                         label='MW frequency [Hz]',
                         mode='text',
                         auto_set=False,
                         enter_set=True)
    mw_power = Range(low=-100.,
                     high=25.,
                     value=-27,
                     desc='microwave power',
                     label='MW power [dBm]',
                     mode='text',
                     auto_set=False,
                     enter_set=True)
    t_pi = Range(low=1.,
                 high=100000.,
                 value=708.,
                 desc='length of pi pulse [ns]',
                 label='pi [ns]',
                 mode='text',
                 auto_set=False,
                 enter_set=True)

    rf_frequency = Range(low=1,
                         high=20e6,
                         value=2.794e6,
                         desc='RF frequency',
                         label='RF frequency [Hz]',
                         mode='text',
                         auto_set=False,
                         enter_set=True)
    rf_power = Range(low=-130.,
                     high=25.,
                     value=23,
                     desc='RF power',
                     label='RF power [dBm]',
                     mode='text',
                     auto_set=False,
                     enter_set=True)

    tau_begin = Range(low=0.,
                      high=1e8,
                      value=1.5,
                      desc='tau begin [ns]',
                      label='tau begin [ns]',
                      mode='text',
                      auto_set=False,
                      enter_set=True)
    tau_end = Range(low=1.,
                    high=1e8,
                    value=3.0e5,
                    desc='tau end [ns]',
                    label='tau end [ns]',
                    mode='text',
                    auto_set=False,
                    enter_set=True)
    tau_delta = Range(low=1.,
                      high=1e6,
                      value=1000.0,
                      desc='delta tau [ns]',
                      label='delta tau [ns]',
                      mode='text',
                      auto_set=False,
                      enter_set=True)
    laser = Range(low=1.,
                  high=1.0e7,
                  value=3000.0,
                  desc='laser [ns]',
                  label='laser [ns]',
                  mode='text',
                  auto_set=False,
                  enter_set=True)
    wait = Range(low=1.,
                 high=1.0e8,
                 value=1.0e6,
                 desc='wait [ns]',
                 label='wait [ns]',
                 mode='text',
                 auto_set=False,
                 enter_set=True)

    tau = Array(value=np.array((0., 1.)))

    get_set_items = Pulsed.get_set_items + [
        'mw_frequency', 'mw_power', 't_pi', 'rf_frequency', 'rf_power',
        'tau_begin', 'tau_end', 'tau_delta', 'laser', 'wait', 'tau'
    ]

    traits_view = View(
        VGroup(
            HGroup(
                Item('submit_button', show_label=False),
                Item('remove_button', show_label=False),
                Item('resubmit_button', show_label=False),
                Item('priority'),
                Item('state', style='readonly'),
            ),
            Tabbed(
                VGroup(HGroup(
                    Item('mw_frequency',
                         width=-80,
                         enabled_when='state == "idle"'),
                    Item('mw_power', width=-80,
                         enabled_when='state == "idle"'),
                    Item('t_pi', width=-80, enabled_when='state == "idle"'),
                ),
                       HGroup(
                           Item('rf_frequency',
                                width=-80,
                                enabled_when='state == "idle"'),
                           Item('rf_power',
                                width=-80,
                                enabled_when='state == "idle"'),
                       ),
                       HGroup(
                           Item('tau_begin',
                                width=-80,
                                enabled_when='state == "idle"'),
                           Item('tau_end',
                                width=-80,
                                enabled_when='state == "idle"'),
                           Item('tau_delta',
                                width=-80,
                                enabled_when='state == "idle"'),
                       ),
                       label='parameter'),
                VGroup(HGroup(
                    Item('laser', width=-80, enabled_when='state == "idle"'),
                    Item('wait', width=-80, enabled_when='state == "idle"'),
                    Item('record_length',
                         width=-80,
                         enabled_when='state == "idle"'),
                    Item('bin_width',
                         width=-80,
                         enabled_when='state == "idle"'),
                ),
                       label='settings'),
            ),
        ),
        title='Nuclear Rabi Measurement',
    )

    def generate_sequence(self):
        t_pi = self.t_pi
        laser = self.laser
        tau = self.tau
        wait = self.wait
        sequence = []
        for t in tau:
            sequence.append((['mw'], t_pi))
            sequence.append((['rf'], t))
            sequence.append(([], 500))
            sequence.append((['mw'], t_pi))
            sequence.append((['laser', 'aom'], laser))
            sequence.append(([], wait))
        sequence.append((['sequence'], 100))
        return sequence

    def apply_parameters(self):
        """Overwrites apply_parameters() from pulsed. Prior to generating sequence, etc., generate the tau mesh."""
        self.tau = np.arange(self.tau_begin, self.tau_end, self.tau_delta)
        Pulsed.apply_parameters(self)

    def start_up(self):
        ha.PulseGenerator().Night()
        ha.Microwave().setOutput(self.mw_power, self.mw_frequency)
        ha.RFSource().setOutput(self.rf_power, self.rf_frequency)
        ha.RFSource().setMode()

    def shut_down(self):
        ha.PulseGenerator().Light()
        ha.Microwave().setOutput(None, self.mw_frequency)
        ha.RFSource().setOutput(None, self.rf_frequency)
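
The sequence layout can be inspected without any hardware attached; the following standalone sketch mirrors the logic of generate_sequence using the trait defaults shown above (the hardware singletons in ha are deliberately not touched, and the numeric values are the defaults, not measured settings):

# Standalone sketch of the pulse pattern built per tau value.
import numpy as np

tau = np.arange(1.5, 3.0e5, 1000.0)      # the mesh built in apply_parameters
t_pi, laser, wait = 708., 3000.0, 1.0e6  # trait defaults from above

sequence = []
for t in tau:
    sequence += [(['mw'], t_pi),             # first pi pulse on the electron spin
                 (['rf'], t),                # RF pulse of variable length tau
                 ([], 500),                  # fixed 500 ns gap
                 (['mw'], t_pi),             # second pi pulse
                 (['laser', 'aom'], laser),  # optical readout / repolarization
                 ([], wait)]                 # wait before the next repetition
sequence.append((['sequence'], 100))         # end-of-sequence marker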
Example #29
class SafeTransformPolyDataFilter(tvtk.TransformPolyDataFilter):
    center = Array(shape=(3, ), value=np.zeros(3))
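
The subclass above only adds a center trait on top of TVTK's TransformPolyDataFilter; a minimal instantiation sketch, assuming numpy is imported as np and tvtk is available as in the rest of this file:

# The `center` trait is extra metadata carried on the filter; it does not
# by itself modify the underlying VTK transform.
filt = SafeTransformPolyDataFilter(center=np.array([0., 0., 0.04]))
print(filt.center)  # -> [0.   0.   0.04]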
Example #30
class MRIHeadWithFiducialsModel(HasPrivateTraits):
    """Represent an MRI head shape (high and low res) with fiducials.

    Attributes
    ----------
    points : array (n_points, 3)
        MRI head surface points.
    tris : array (n_tris, 3)
        Triangles based on points.
    lpa : array (1, 3)
        Left peri-auricular point coordinates.
    nasion : array (1, 3)
        Nasion coordinates.
    rpa : array (1, 3)
        Right peri-auricular point coordinates.
    """

    subject_source = Instance(MRISubjectSource, ())
    bem_low_res = Instance(SurfaceSource, ())
    bem_high_res = Instance(SurfaceSource, ())
    fid = Instance(FiducialsSource, ())

    fid_file = DelegatesTo('fid', 'file')
    fid_fname = DelegatesTo('fid', 'fname')
    fid_points = DelegatesTo('fid', 'points')
    subjects_dir = DelegatesTo('subject_source')
    subject = DelegatesTo('subject_source')
    subject_has_bem = DelegatesTo('subject_source')
    lpa = Array(float, (1, 3))
    nasion = Array(float, (1, 3))
    rpa = Array(float, (1, 3))

    reset = Event(desc="Reset fiducials to the file.")

    # info
    can_save = Property(depends_on=['file', 'can_save_as'])
    can_save_as = Property(depends_on=['lpa', 'nasion', 'rpa'])
    can_reset = Property(depends_on=['file', 'fid.points', 'lpa', 'nasion',
                                     'rpa'])
    fid_ok = Property(depends_on=['lpa', 'nasion', 'rpa'], desc="All points "
                      "are set")
    default_fid_fname = Property(depends_on=['subjects_dir', 'subject'],
                                 desc="the default file name for the "
                                 "fiducials fif file")

    # switch for the GUI (has no effect in the model)
    lock_fiducials = Bool(False, desc="Used by the GUI; has no effect in "
                          "the model.")

    @on_trait_change('fid_points')
    def reset_fiducials(self):  # noqa: D102
        if self.fid_points is not None:
            self.lpa = self.fid_points[0:1]
            self.nasion = self.fid_points[1:2]
            self.rpa = self.fid_points[2:3]

    def save(self, fname=None):
        """Save the current fiducials to a file.

        Parameters
        ----------
        fname : str
            Destination file path. If None, will use the current fid filename
            if available, or else use the default pattern.
        """
        if fname is None:
            fname = self.fid_file
        if not fname:
            fname = self.default_fid_fname

        dig = [{'kind': FIFF.FIFFV_POINT_CARDINAL,
                'ident': FIFF.FIFFV_POINT_LPA,
                'r': np.array(self.lpa[0])},
               {'kind': FIFF.FIFFV_POINT_CARDINAL,
                'ident': FIFF.FIFFV_POINT_NASION,
                'r': np.array(self.nasion[0])},
               {'kind': FIFF.FIFFV_POINT_CARDINAL,
                'ident': FIFF.FIFFV_POINT_RPA,
                'r': np.array(self.rpa[0])}]
        write_fiducials(fname, dig, FIFF.FIFFV_COORD_MRI)
        self.fid_file = fname

    @cached_property
    def _get_can_reset(self):
        if not self.fid_file:
            return False
        elif np.any(self.lpa != self.fid.points[0:1]):
            return True
        elif np.any(self.nasion != self.fid.points[1:2]):
            return True
        elif np.any(self.rpa != self.fid.points[2:3]):
            return True
        return False

    @cached_property
    def _get_can_save_as(self):
        can = not (np.all(self.nasion == self.lpa) or
                   np.all(self.nasion == self.rpa) or
                   np.all(self.lpa == self.rpa))
        return can

    @cached_property
    def _get_can_save(self):
        if not self.can_save_as:
            return False
        elif self.fid_file:
            return True
        elif self.subjects_dir and self.subject:
            return True
        else:
            return False

    @cached_property
    def _get_default_fid_fname(self):
        fname = fid_fname.format(subjects_dir=self.subjects_dir,
                                 subject=self.subject)
        return fname

    @cached_property
    def _get_fid_ok(self):
        return all(np.any(pt) for pt in (self.nasion, self.lpa, self.rpa))

    def _reset_fired(self):
        self.reset_fiducials()

    # if the subject changed because subjects_dir changed, this was not
    # triggered
    @on_trait_change('subjects_dir,subject')
    def _subject_changed(self):
        subject = self.subject
        subjects_dir = self.subjects_dir
        if not subjects_dir or not subject:
            return

        # find high-res head model (if possible)
        high_res_path = _find_head_bem(subject, subjects_dir, high_res=True)
        low_res_path = _find_head_bem(subject, subjects_dir, high_res=False)
        if high_res_path is None and low_res_path is None:
            msg = 'No standard head model was found for subject %s' % subject
            error(None, msg, "No head surfaces found")
            raise RuntimeError(msg)
        if high_res_path is not None:
            self.bem_high_res.file = high_res_path
        else:
            self.bem_high_res.file = low_res_path
        if low_res_path is None:
            # This should be very rare!
            warn('No low-resolution head found, decimating high resolution '
                 'mesh (%d vertices): %s' % (len(self.bem_high_res.surf.rr),
                                             high_res_path,))
            # Create one from the high res one, which we know we have
            rr, tris = decimate_surface(self.bem_high_res.surf.rr,
                                        self.bem_high_res.surf.tris,
                                        n_triangles=5120)
            surf = complete_surface_info(dict(rr=rr, tris=tris),
                                         copy=False, verbose=False)
            # directly set the attributes of bem_low_res
            self.bem_low_res.surf = Surf(tris=surf['tris'], rr=surf['rr'],
                                         nn=surf['nn'])
        else:
            self.bem_low_res.file = low_res_path

        # find fiducials file
        fid_files = _find_fiducials_files(subject, subjects_dir)
        if len(fid_files) == 0:
            self.fid.reset_traits(['file'])
            self.lock_fiducials = False
        else:
            self.fid_file = fid_files[0].format(subjects_dir=subjects_dir,
                                                subject=subject)
            self.lock_fiducials = True

        # this does not seem to happen by itself, so trigger it explicitly:
        self.reset_fiducials()
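
A hedged end-to-end sketch of the model above; the '/data/subjects' path and 'sample' subject are placeholders, the fiducial coordinates are illustrative values in meters, and save() assumes a writable standard FreeSurfer layout:

# Hypothetical values throughout; setting `subject` triggers
# _subject_changed, which loads the BEM surfaces and any fiducials file.
model = MRIHeadWithFiducialsModel()
model.subjects_dir = '/data/subjects'
model.subject = 'sample'
if not model.fid_ok:  # no fiducials on file: set them manually
    model.lpa = [[-0.07, 0.0, 0.0]]
    model.nasion = [[0.0, 0.10, 0.0]]
    model.rpa = [[0.07, 0.0, 0.0]]
if model.can_save:
    model.save()  # writes the fiducials to the default fif file name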