class config(HasTraits):
    uuid = traits.Str(desc="UUID")

    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    base_dir = Directory(
        os.path.abspath('.'),
        mandatory=True,
        desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")
    surf_dir = Directory(
        desc="FreeSurfer subjects directory. Subject IDs should be the same.")
    save_script_only = traits.Bool(False)
    # Execution
    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether and where the workflow keeps its '
        'intermediary files. True to keep intermediary files.'
    )
    # Data
    datagrabber = traits.Instance(Data, ())
    #subject_id = traits.String()
    #contrast = traits.File()
    #mask_contrast = traits.File()
    use_contrast_mask = traits.Bool(True)
    #reg_file = traits.File()
    #mean_image = traits.File()
    background_thresh = traits.Float(0.5)
    hemi = traits.List(['lh', 'rh'])
    roi = traits.List(
        ['superiortemporal', 'bankssts'],
        traits.Enum('superiortemporal', 'bankssts', 'caudalanteriorcingulate',
                    'caudalmiddlefrontal', 'corpuscallosum', 'cuneus',
                    'entorhinal', 'fusiform', 'inferiorparietal',
                    'inferiortemporal', 'isthmuscingulate', 'lateraloccipital',
                    'lateralorbitofrontal', 'lingual', 'medialorbitofrontal',
                    'middletemporal', 'parahippocampal', 'paracentral',
                    'parsopercularis', 'parsorbitalis', 'parstriangularis',
                    'pericalcarine', 'postcentral', 'posteriorcingulate',
                    'precentral', 'precuneus', 'rostralanteriorcingulate',
                    'rostralmiddlefrontal', 'superiorfrontal',
                    'superiorparietal', 'supramarginal', 'frontalpole',
                    'temporalpole', 'transversetemporal', 'insula'),
        usedefault=True)  #35 freesurfer regions,
    thresh = traits.Float(1.5)
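A minimal usage sketch (hypothetical values; assumes the surrounding BIPs/Nipype module that defines Data and the Directory trait): instantiate the config and set the directories and execution options before building the workflow.

# Hypothetical usage; paths and plugin arguments are placeholders.
c = config()
c.working_dir = '/scratch/nipype_work'
c.sink_dir = '/data/results'
c.crash_dir = '/scratch/crashdumps'
c.surf_dir = '/data/freesurfer_subjects'
c.run_using_plugin = True
c.plugin = 'MultiProc'
c.plugin_args = {'n_procs': 4}
c.hemi = ['lh']
c.roi = ['superiortemporal']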
Example 2
class MomentCurvature(tr.HasStrictTraits):
    r'''Class returning the moment curvature relationship.
    '''

    b_z = tr.Any
    get_b_z = tr.Property

    @tr.cached_property
    def _get_get_b_z(self):
        return sp.lambdify(z, self.b_z, 'numpy')

    h = tr.Float

    model_params = tr.Dict({
        E_ct: 24000, E_cc: 25000,
        eps_cr: 0.001,
        eps_cy: -0.003,
        eps_cu: -0.01,
        mu: 0.33,
        eps_tu: 0.003
    })

    # Number of material points along the height of the cross section
    n_m = tr.Int(100)

    # Reinforcement
    z_j = tr.Array(np.float_, value=[10])
    A_j = tr.Array(np.float_, value=[np.pi * (16 / 2.)**2])
    E_j = tr.Array(np.float_, value=[210000])
    eps_sy_j = tr.Array(np.float_, value=[500. / 210000.])

    z_m = tr.Property(depends_on='n_m, h')

    @tr.cached_property
    def _get_z_m(self):
        return np.linspace(0, self.h, self.n_m)

    kappa_range = tr.Tuple(-0.001, 0.001, 101)

    kappa_t = tr.Property(tr.Array(np.float_), depends_on='kappa_range')

    @tr.cached_property
    def _get_kappa_t(self):
        return np.linspace(*self.kappa_range)

    get_eps_z = tr.Property(depends_on='model_params_items')

    @tr.cached_property
    def _get_get_eps_z(self):
        return sp.lambdify(
            (kappa, eps_bot, z), eps_z.subs(subs_eps), 'numpy'
        )

    get_sig_c_z = tr.Property(depends_on='model_params_items')

    @tr.cached_property
    def _get_get_sig_c_z(self):
        return sp.lambdify(
            (kappa, eps_bot, z), sig_c_z_lin.subs(self.model_params), 'numpy'
        )

    get_sig_s_eps = tr.Property(depends_on='model_params_items')

    @tr.cached_property
    def _get_get_sig_s_eps(self):
        return sp.lambdify((eps, E_s, eps_sy), sig_s_eps, 'numpy')

    # Normal force

    def get_N_s_tj(self, kappa_t, eps_bot_t):
        eps_z_tj = self.get_eps_z(
            kappa_t[:, np.newaxis], eps_bot_t[:, np.newaxis],
            self.z_j[np.newaxis, :]
        )
        sig_s_tj = self.get_sig_s_eps(eps_z_tj, self.E_j, self.eps_sy_j)
        return np.einsum('j,tj->tj', self.A_j, sig_s_tj)

    def get_N_c_t(self, kappa_t, eps_bot_t):
        z_tm = self.z_m[np.newaxis, :]
        b_z_m = self.get_b_z(z_tm)  # self.get_b_z(self.z_m) also OK
        N_z_tm = b_z_m * self.get_sig_c_z(
            kappa_t[:, np.newaxis], eps_bot_t[:, np.newaxis], z_tm
        )
        return np.trapz(N_z_tm, x=z_tm, axis=-1)

    def get_N_t(self, kappa_t, eps_bot_t):
        N_s_t = np.sum(self.get_N_s_tj(kappa_t, eps_bot_t), axis=-1)
        return self.get_N_c_t(kappa_t, eps_bot_t) + N_s_t

    # SOLVER: Get eps_bot to render zero force

    eps_bot_t = tr.Property()
    r'''Resolve the tensile strain to get zero normal force 
    for the prescribed curvature
    '''

    def _get_eps_bot_t(self):
        res = root(lambda eps_bot_t: self.get_N_t(self.kappa_t, eps_bot_t),
                   0.0000001 + np.zeros_like(self.kappa_t), tol=1e-6)
        return res.x

    # POSTPROCESSING

    eps_cr = tr.Property()

    def _get_eps_cr(self):
        return np.array([self.model_params[eps_cr]], dtype=np.float_)

    kappa_cr = tr.Property()

    def _get_kappa_cr(self):
        res = root(lambda kappa: self.get_N_t(kappa, self.eps_cr),
                   0.0000001 + np.zeros_like(self.eps_cr), tol=1e-10)
        return res.x

    # Bending moment

    M_s_t = tr.Property()

    def _get_M_s_t(self):
        eps_z_tj = self.get_eps_z(
            self.kappa_t[:, np.newaxis], self.eps_bot_t[:, np.newaxis],
            self.z_j[np.newaxis, :]
        )
        sig_z_tj = self.get_sig_s_eps(
            eps_z_tj, self.E_j, self.eps_sy_j)
        return -np.einsum('j,tj,j->t', self.A_j, sig_z_tj, self.z_j)

    M_c_t = tr.Property()

    def _get_M_c_t(self):
        z_tm = self.z_m[np.newaxis, :]
        b_z_m = self.get_b_z(z_tm)
        N_z_tm = b_z_m * self.get_sig_c_z(
            self.kappa_t[:, np.newaxis], self.eps_bot_t[:, np.newaxis], z_tm
        )
        return -np.trapz(N_z_tm * z_tm, x=z_tm, axis=-1)

    M_t = tr.Property()

    def _get_M_t(self):
        return self.M_c_t + self.M_s_t

    N_s_tj = tr.Property()

    def _get_N_s_tj(self):
        return self.get_N_s_tj(self.kappa_t, self.eps_bot_t)

    eps_tm = tr.Property()

    def _get_eps_tm(self):
        return self.get_eps_z(
            self.kappa_t[:, np.newaxis], self.eps_bot_t[:, np.newaxis],
            self.z_m[np.newaxis, :],
        )

    sig_tm = tr.Property()

    def _get_sig_tm(self):
        return self.get_sig_c_z(
            self.kappa_t[:, np.newaxis], self.eps_bot_t[:, np.newaxis],
            self.z_m[np.newaxis, :],
        )

    idx = tr.Int(0)

    M_norm = tr.Property()

    def _get_M_norm(self):
        # Section modulus; @TODO generalize W for a variable width b_z
        # (assumes a constant-width attribute `b` on the instance)
        W = (self.b * self.h**2) / 6
        sig_cr = self.model_params[E_ct] * self.model_params[eps_cr]
        return W * sig_cr

    kappa_norm = tr.Property()

    def _get_kappa_norm(self):
        return self.kappa_cr

    def plot_norm(self, ax1, ax2):
        idx = self.idx
        ax1.plot(self.kappa_t / self.kappa_norm, self.M_t / self.M_norm)
        ax1.plot(self.kappa_t[idx] / self.kappa_norm,
                 self.M_t[idx] / self.M_norm, marker='o')
        ax2.barh(self.z_j, self.N_s_tj[idx, :],
                 height=2, color='red', align='center')
        #ax2.fill_between(eps_z_arr[idx,:], z_arr, 0, alpha=0.1);
        ax3 = ax2.twiny()
#        ax3.plot(self.eps_tm[idx, :], self.z_m, color='k', linewidth=0.8)
        ax3.plot(self.sig_tm[idx, :], self.z_m)
        ax3.axvline(0, linewidth=0.8, color='k')
        ax3.fill_betweenx(self.z_m, self.sig_tm[idx, :], 0, alpha=0.1)
        self._align_xaxis(ax2, ax3)

    def plot(self, ax1, ax2):
        idx = self.idx
        ax1.plot(self.kappa_t, self.M_t / (1e6))
        ax1.set_ylabel('Moment [kN.m]')
        ax1.set_xlabel('Curvature [$m^{-1}$]')
        ax1.plot(self.kappa_t[idx], self.M_t[idx] / (1e6), marker='o')
        ax2.barh(self.z_j, self.N_s_tj[idx, :],
                 height=6, color='red', align='center')
        #ax2.plot(self.N_s_tj[idx, :], self.z_j, color='red')
        #print('Z', self.z_j)
        #print(self.N_s_tj[idx, :])
        #ax2.fill_between(eps_z_arr[idx,:], z_arr, 0, alpha=0.1);
        ax3 = ax2.twiny()
#        ax3.plot(self.eps_tm[idx, :], self.z_m, color='k', linewidth=0.8)
        ax3.plot(self.sig_tm[idx, :], self.z_m)
        ax3.axvline(0, linewidth=0.8, color='k')
        ax3.fill_betweenx(self.z_m, self.sig_tm[idx, :], 0, alpha=0.1)
        self._align_xaxis(ax2, ax3)

    def _align_xaxis(self, ax1, ax2):
        """Align zeros of the two axes, zooming them out by same ratio"""
        axes = (ax1, ax2)
        extrema = [ax.get_xlim() for ax in axes]
        tops = [extr[1] / (extr[1] - extr[0]) for extr in extrema]
        # Ensure that plots (intervals) are ordered bottom to top:
        if tops[0] > tops[1]:
            axes, extrema, tops = [list(reversed(l))
                                   for l in (axes, extrema, tops)]

        # How much would the plot overflow if we kept current zoom levels?
        tot_span = tops[1] + 1 - tops[0]

        b_new_t = extrema[0][0] + tot_span * (extrema[0][1] - extrema[0][0])
        t_new_b = extrema[1][1] - tot_span * (extrema[1][1] - extrema[1][0])
        axes[0].set_xlim(extrema[0][0], b_new_t)
        axes[1].set_xlim(t_new_b, extrema[1][1])
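A hypothetical usage sketch for the class above. It only runs inside the module that defines the module-level sympy symbols (z, kappa, eps_bot, the constitutive expressions, etc.); the numbers are placeholders.

import sympy as sp
import matplotlib.pyplot as plt

mc = MomentCurvature(h=400, b_z=sp.Integer(250))   # 400 mm deep, constant 250 mm width
mc.idx = 50                                         # marker index used by the plot methods
fig, (ax1, ax2) = plt.subplots(1, 2)
mc.plot(ax1, ax2)                                   # moment-curvature curve + stress profile
plt.show()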
Example 3
class config(HasTraits):
    uuid = traits.Str(desc="UUID")

    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    base_dir = Directory(
        os.path.abspath('.'),
        mandatory=True,
        desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")
    surf_dir = Directory(
        desc="FreeSurfer subjects directory. Subject IDs should be the same.")
    save_script_only = traits.Bool(False)
    # Execution
    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether and where the workflow keeps its '
        'intermediary files. True to keep intermediary files.'
    )
    timeout = traits.Float(30.0)
    # DataGrabber
    datagrabber = traits.Instance(Data, ())

    # segstats
    use_reg = traits.Bool(True)
    inverse_reg = traits.Bool(True)
    use_standard_label = traits.Bool(
        False, desc="use same label file for all subjects")
    label_file = traits.File()
    use_annotation = traits.Bool(
        False,
        desc="use same annotation file for all subjects (will warp to subject space)"
    )
    use_subject_annotation = traits.Bool(
        False,
        desc="you need to change the datagrabber to have outputs "
        "lh_annotation and rh_annotation"
    )
    annot_space = traits.String("fsaverage5",
                                desc="subject space of annot file")
    lh_annotation = traits.File()
    rh_annotation = traits.File()
    color_table_file = traits.Enum("Default", "Color_Table", "GCA_color_table",
                                   "None")
    color_file = traits.File()
    proj = traits.BaseTuple(("frac", 0, 1, 0.1), traits.Enum("abs", "frac"),
                            traits.Float(), traits.Float(), traits.Float())
    statname = traits.Str('segstats1', desc="description of the segstat")
Example 4
class MATS2DScalarDamage(MATS2DEval):
    r'''Isotropic damage model.
    '''

    name = 'isotropic damage model'
    node_name = 'isotropic damage model'

    tree = ['omega_fn','strain_norm']

    omega_fn = EitherType(
        options=[('exp-slope', ExpSlopeDamageFn),
                 ('linear', LinearDamageFn),
                 ('abaqus', AbaqusDamageFn),
                 ('fracture-energy', GfDamageFn),
                 ('weibull-CDF', WeibullDamageFn),
                 ],
        MAT=True,
        on_option_change='link_omega_to_mats'
    )

    D_alg = Float(0)
    r'''Weight of the algorithmic stiffness correction:
    0 = secant stiffness, 1 = fully algorithmic (tangent) stiffness.
    '''

    eps_max = Float(0.03, ALG=True)
    # upon change of the type attribute set the link to the material model
    def link_omega_to_mats(self):
        self.omega_fn_.trait_set(mats=self,
                                 E_name='E',
                                 x_max_name='eps_max')

    #=========================================================================
    # Material model
    #=========================================================================
    strain_norm = EitherType(
        options=[('Rankine', SN2DRankine),
                 ('Masars', SN2DMasars),
                 ('Energy', SN2DEnergy)],
        on_option_change='link_strain_norm_to_mats'
    )

    def link_strain_norm_to_mats(self):
        self.strain_norm_.trait_set(mats=self)

    state_var_shapes = {'kappa': (),
                        'omega': ()}
    r'''
    Shapes of the state variables
    to be stored in the global array at the level 
    of the domain.
    '''

    def get_corr_pred(self, eps_ab_n1, tn1, kappa, omega):
        r'''
        Corrector-predictor computation.
        @param eps_ab_n1 input variable - engineering strain
        '''
        eps_eq = self.strain_norm_.get_eps_eq(eps_ab_n1, kappa)
        I = self.omega_fn_.get_f_trial(eps_eq, kappa)
        eps_eq_I = eps_eq[I]
        kappa[I] = eps_eq_I
        omega[I] = self._omega(eps_eq_I)
        phi = (1.0 - omega)
        D_abcd = np.einsum(
            '...,abcd->...abcd',
            phi, self.D_abcd
        )
        sig_ab = np.einsum(
            '...abcd,...cd->...ab',
            D_abcd, eps_ab_n1
        )
        if self.D_alg > 0:
            domega_ds_I = self._omega_derivative(eps_eq_I)
            deps_eq_I = self.strain_norm_.get_deps_eq(eps_ab_n1[I])
            D_red_I = np.einsum('...,...ef,cdef,...ef->...cdef', domega_ds_I,
                                deps_eq_I, self.D_abcd, eps_ab_n1[I]) * self.D_alg
            D_abcd[I] -= D_red_I

        return sig_ab, D_abcd

    def _omega(self, kappa):
        return self.omega_fn_(kappa)

    def _omega_derivative(self, kappa):
        return self.omega_fn_.diff(kappa)

    ipw_view = View(
        Item('E'),
        Item('nu'),
        Item('strain_norm'),
        Item('omega_fn'),
        Item('stress_state'),
        Item('D_alg', latex=r'\theta_\mathrm{alg. stiff.}',
                editor=FloatRangeEditor(low=0,high=1)),
        Item('eps_max'),
        Item('G_f', latex=r'G_\mathrm{f}^{\mathrm{estimate}}', readonly=True),
    )

    G_f = tr.Property(Float, depends_on='state_changed')
    @tr.cached_property
    def _get_G_f(self):
        eps_max = self.eps_max
        n_eps = 1000
        eps11_range = np.linspace(1e-9,eps_max,n_eps)
        eps_range = np.zeros((len(eps11_range), 2, 2))
        eps_range[:,1,1] = eps11_range
        state_vars = { var : np.zeros( (len(eps11_range),) + shape )
            for var, shape in self.state_var_shapes.items()
        }
        sig_range, D = self.get_corr_pred(eps_range, 1, **state_vars)
        sig11_range = sig_range[:,1,1]
        return np.trapz(sig11_range, eps11_range)

    def subplots(self, fig):
        ax_sig = fig.subplots(1,1)
        ax_d_sig = ax_sig.twinx()
        return ax_sig, ax_d_sig

    def update_plot(self, axes):
        ax_sig, ax_d_sig = axes
        eps_max = self.eps_max
        n_eps = 100
        eps11_range = np.linspace(1e-9,eps_max,n_eps)
        eps_range = np.zeros((n_eps, 2, 2))
        eps_range[:,0,0] = eps11_range
        state_vars = { var : np.zeros( (n_eps,) + shape )
            for var, shape in self.state_var_shapes.items()
        }
        sig_range, D_range = self.get_corr_pred(eps_range, 1, **state_vars)
        sig11_range = sig_range[:,0,0]
        ax_sig.plot(eps11_range, sig11_range,color='blue')
        d_sig1111_range = D_range[...,0,0,0,0]
        ax_d_sig.plot(eps11_range, d_sig1111_range,
                      linestyle='dashed', color='gray')
        ax_sig.set_xlabel(r'$\varepsilon_{11}$ [-]')
        ax_sig.set_ylabel(r'$\sigma_{11}$ [MPa]')
        ax_d_sig.set_ylabel(r'$\mathrm{d} \sigma_{11} / \mathrm{d} \varepsilon_{11}$ [MPa]')

        ax_d_sig.plot(eps11_range[:-1],
                    (sig11_range[:-1]-sig11_range[1:])/(eps11_range[:-1]-eps11_range[1:]),
                    color='orange', linestyle='dashed')

    def get_omega(self, eps_ab, tn1, **Eps):
        return Eps['omega']

    var_dict = tr.Property(tr.Dict(tr.Str, tr.Callable))
    '''Dictionary of response variables
    '''
    @tr.cached_property
    def _get_var_dict(self):
        var_dict = dict(omega=self.get_omega)
        var_dict.update(super()._get_var_dict())
        return var_dict
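A hypothetical usage sketch mirroring the uniaxial strain ramp used in _get_G_f above; it assumes the MATS2DEval base class provides the E, nu and D_abcd traits, and the numbers are placeholders.

import numpy as np

mats = MATS2DScalarDamage(E=28000, nu=0.2, eps_max=0.01)
eps11_range = np.linspace(1e-9, mats.eps_max, 200)
eps_range = np.zeros((len(eps11_range), 2, 2))
eps_range[:, 0, 0] = eps11_range
state_vars = {var: np.zeros((len(eps11_range),) + shape)
              for var, shape in mats.state_var_shapes.items()}
sig_range, D_range = mats.get_corr_pred(eps_range, 1, **state_vars)
print('estimated fracture energy:', np.trapz(sig_range[:, 0, 0], eps11_range))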
Example 5
class DOTSGrid(BMCSLeafNode):
    '''Domain time stepper on a grid mesh
    '''
    x_0 = tr.Tuple(0., 0., input=True)
    L_x = tr.Float(200, input=True, MESH=True)
    L_y = tr.Float(100, input=True, MESH=True)
    n_x = tr.Int(100, input=True, MESH=True)
    n_y = tr.Int(30, input=True, MESH=True)
    integ_factor = tr.Float(1.0, input=True, MESH=True)
    fets = tr.Instance(IFETSEval, input=True, MESH=True)

    D1_abcd = tr.Array(np.float_, input=True)
    '''Symmetric operator distributing the 
    derivatives of the shape functions into the 
    tensor field
    '''
    def _D1_abcd_default(self):
        delta = np.identity(2)
        # symmetrization operator
        D1_abcd = 0.5 * (np.einsum('ac,bd->abcd', delta, delta) +
                         np.einsum('ad,bc->abcd', delta, delta))
        return D1_abcd

    mesh = tr.Property(tr.Instance(FEGrid), depends_on='+input')

    @tr.cached_property
    def _get_mesh(self):
        return FEGrid(coord_min=self.x_0,
                      coord_max=(self.x_0[0] + self.L_x,
                                 self.x_0[1] + self.L_y),
                      shape=(self.n_x, self.n_y),
                      fets_eval=self.fets)

    cached_grid_values = tr.Property(tr.Tuple, depends_on='+input')

    @tr.cached_property
    def _get_cached_grid_values(self):
        x_Ia = self.mesh.X_Id
        n_I, n_a = x_Ia.shape
        dof_Ia = np.arange(n_I * n_a, dtype=np.int_).reshape(n_I, -1)
        I_Ei = self.mesh.I_Ei
        x_Eia = x_Ia[I_Ei, :]
        dof_Eia = dof_Ia[I_Ei]
        x_Ema = np.einsum('im,Eia->Ema', self.fets.N_im, x_Eia)
        J_Emar = np.einsum('imr,Eia->Emar', self.fets.dN_imr, x_Eia)
        J_Enar = np.einsum('inr,Eia->Enar', self.fets.dN_inr, x_Eia)
        det_J_Em = np.linalg.det(J_Emar)
        inv_J_Emar = np.linalg.inv(J_Emar)
        inv_J_Enar = np.linalg.inv(J_Enar)
        B_Eimabc = np.einsum('abcd,imr,Eidr->Eimabc', self.D1_abcd,
                             self.fets.dN_imr, inv_J_Emar)
        B_Einabc = np.einsum('abcd,inr,Eidr->Einabc', self.D1_abcd,
                             self.fets.dN_inr, inv_J_Enar)
        BB_Emicjdabef = np.einsum('Eimabc,Ejmefd, Em, m->Emicjdabef', B_Eimabc,
                                  B_Eimabc, det_J_Em, self.fets.w_m)
        return (BB_Emicjdabef, B_Eimabc, dof_Eia, x_Eia, dof_Ia, I_Ei,
                B_Einabc, det_J_Em)

    BB_Emicjdabef = tr.Property()
    '''Quadratic form of the kinematic mapping.
    '''

    def _get_BB_Emicjdabef(self):
        return self.cached_grid_values[0]

    B_Eimabc = tr.Property()
    '''Kinematic mapping between displacements and strains in every
    integration point.
    '''

    def _get_B_Eimabc(self):
        return self.cached_grid_values[1]

    B_Einabc = tr.Property()
    '''Kinematic mapping between displacement and strain in every
    visualization point
    '''

    def _get_B_Einabc(self):
        return self.cached_grid_values[6]

    dof_Eia = tr.Property()
    '''Mapping [element, node, direction] -> degree of freedom.
    '''

    def _get_dof_Eia(self):
        return self.cached_grid_values[2]

    x_Eia = tr.Property()
    '''Mapping [element, node, direction] -> value of coordinate.
    '''

    def _get_x_Eia(self):
        return self.cached_grid_values[3]

    dof_Ia = tr.Property()
    '''[global node, direction] -> degree of freedom
    '''

    def _get_dof_Ia(self):
        return self.cached_grid_values[4]

    I_Ei = tr.Property()
    '''[element, node] -> global node
    '''

    def _get_I_Ei(self):
        return self.cached_grid_values[5]

    det_J_Em = tr.Property()
    '''Jacobi determinant in every element and integration point.
    '''

    def _get_det_J_Em(self):
        return self.cached_grid_values[7]

    state_arrays = tr.Property(tr.Dict(tr.Str, tr.Array),
                               depends_on='fets, mats')
    '''Dictionary of state arrays.
    The entry names and shapes are defined by the material
    model.
    '''

    @tr.cached_property
    def _get_state_arrays(self):
        return {
            name: np.zeros((
                self.mesh.n_active_elems,
                self.fets.n_m,
            ) + mats_sa_shape,
                           dtype=np.float_)
            for name, mats_sa_shape in list(
                self.mats.state_array_shapes.items())
        }

    def get_corr_pred(self, U, dU, t_n, t_n1, update_state, algorithmic):
        '''Get the corrector and predictor for the given increment
        of the unknowns.
        '''
        n_c = self.fets.n_nodal_dofs
        U_Ia = U.reshape(-1, n_c)
        U_Eia = U_Ia[self.I_Ei]
        eps_Emab = np.einsum('Eimabc,Eic->Emab', self.B_Eimabc, U_Eia)
        dU_Ia = dU.reshape(-1, n_c)
        dU_Eia = dU_Ia[self.I_Ei]
        deps_Emab = np.einsum('Eimabc,Eic->Emab', self.B_Eimabc, dU_Eia)
        D_Emabef, sig_Emab = self.mats.get_corr_pred(eps_Emab, deps_Emab, t_n,
                                                     t_n1, update_state,
                                                     algorithmic,
                                                     **self.state_arrays)
        K_Eicjd = self.integ_factor * np.einsum('Emicjdabef,Emabef->Eicjd',
                                                self.BB_Emicjdabef, D_Emabef)
        n_E, n_i, n_c, n_j, n_d = K_Eicjd.shape
        K_E = K_Eicjd.reshape(-1, n_i * n_c, n_j * n_d)
        dof_E = self.dof_Eia.reshape(-1, n_i * n_c)
        K_subdomain = SysMtxArray(mtx_arr=K_E, dof_map_arr=dof_E)
        f_Eic = self.integ_factor * np.einsum(
            'm,Eimabc,Emab,Em->Eic', self.fets.w_m, self.B_Eimabc, sig_Emab,
            self.det_J_Em)
        f_Ei = f_Eic.reshape(-1, n_i * n_c)
        F_dof = np.bincount(dof_E.flatten(), weights=f_Ei.flatten())
        F_int = F_dof
        norm_F_int = np.linalg.norm(F_int)
        return K_subdomain, F_int, norm_F_int
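A small self-contained check of the symmetrization operator D1_abcd defined above (pure NumPy, no FE dependencies): contracting it with an arbitrary displacement gradient yields its symmetric part, i.e. the small-strain tensor.

import numpy as np

delta = np.identity(2)
D1_abcd = 0.5 * (np.einsum('ac,bd->abcd', delta, delta) +
                 np.einsum('ad,bc->abcd', delta, delta))

grad_u = np.array([[0.1, 0.3],
                   [0.0, 0.2]])                       # arbitrary displacement gradient
eps_ab = np.einsum('abcd,cd->ab', D1_abcd, grad_u)    # symmetrized strain
assert np.allclose(eps_ab, 0.5 * (grad_u + grad_u.T))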
Example 6
##########################################################################
# Apply monkeypatch here
_Undefined.__len__ = _length
##########################################################################

Undefined = _Undefined()


class Str(Unicode):
    """Replaces the default traits.Str based in bytes."""


# Monkeypatch Str and DictStrStr for Python 2 compatibility
traits.Str = Str
DictStrStr = traits.Dict((bytes, str), (bytes, str))
traits.DictStrStr = DictStrStr


class BasePath(TraitType):
    """Defines a trait whose value must be a valid filesystem path."""

    # A description of the type of value this trait accepts:
    exists = False
    resolve = False
    _is_file = False
    _is_dir = False

    @property
    def info_text(self):
        """Create the trait's general description."""
Example 7
class Trainer(t.HasStrictTraits):
    model: models.BaseNet = t.Instance(torch.nn.Module, transient=True)

    def _model_default(self):

        # Merge 'base config' (if requested) and any overrides in 'model_config'
        if self.base_config:
            model_config = get_ref_arch(self.base_config)
        else:
            model_config = {}
        if self.model_config:
            model_config.update(self.model_config)
        if self.data_spec:
            model_config.update(
                {
                    "input_channels": self.data_spec["input_channels"],
                    "num_output_classes": [
                        s["num_classes"] for s in self.data_spec["output_spec"]
                    ],
                }
            )
        # create model accordingly
        model_class = getattr(models, self.model_class)
        return model_class(**model_config)

    base_config: str = t.Str()
    model_config: dict = t.Dict()
    model_class: str = t.Enum("FilterNet", "DeepConvLSTM")

    lr_exp: float = t.Float(-3.0)
    batch_size: int = t.Int()
    win_len: int = t.Int(512)
    n_samples_per_batch: int = t.Int(5000)
    train_step: int = t.Int(16)
    seed: int = t.Int()
    decimation: int = t.Int(1)
    optim_type: str = t.Enum(["Adam", "SGD", "RMSprop"])
    loss_func: str = t.Enum(["cross_entropy", "binary_cross_entropy"])
    patience: int = t.Int(10)
    lr_decay: float = t.Float(0.95)
    weight_decay: float = t.Float(1e-4)
    alpha: float = t.Float(0.99)
    momentum: float = t.Float(0.25)
    validation_fold: int = t.Int()
    epoch_size: float = t.Float(2.0)
    y_cols: str = t.Str()
    sensor_subset: str = t.Str()

    has_null_class: bool = t.Bool()

    def _has_null_class_default(self):
        return self.data_spec["output_spec"][0]["classes"][0] in ("", "Null")

    predict_null_class: bool = t.Bool(True)

    _class_weights: torch.Tensor = t.Instance(torch.Tensor)

    def __class_weights_default(self):
        # No class weights for now: they didn't seem to improve things significantly and
        #   added yet another hyper-parameter. Using zero didn't seem to work well.
        if False and self.has_null_class and not self.predict_null_class:
            cw = torch.ones(self.model.num_output_classes, device=self.device)
            cw[0] = 0.01
            cw /= cw.sum()
            return cw
        return None

    dataset: str = t.Enum(
        ["opportunity", "smartphone_hapt", "har", "intention_recognition"]
    )
    name: str = t.Str()

    def _name_default(self):
        import time

        modelstr = self.model.__class__.__name__
        timestr = time.strftime("%Y%m%d-%H%M%S")
        return f"{modelstr}_{timestr}"

    model_path: str = t.Str()

    def _model_path_default(self):
        return f"saved_models/{self.name}/"

    data_spec: dict = t.Any()
    epoch_iters: int = t.Int(0)
    train_state: TrainState = t.Instance(TrainState, ())
    cp_iter: int = t.Int()

    cuda: bool = t.Bool(transient=True)

    def _cuda_default(self):
        return torch.cuda.is_available()

    device: str = t.Str(transient=True)

    def _device_default(self):
        return "cuda" if self.cuda else "cpu"

    dl_train: DataLoader = t.Instance(DataLoader, transient=True)

    def _dl_train_default(self):
        return self._get_dl("train")

    dl_val: DataLoader = t.Instance(DataLoader, transient=True)

    def _dl_val_default(self):
        return self._get_dl("val")

    dl_test: DataLoader = t.Instance(DataLoader, transient=True)

    def _dl_test_default(self):
        return self._get_dl("test")

    def _get_dl(self, s):

        if self.dataset == "opportunity":
            from filternet.datasets.opportunity import get_x_y_contig
        elif self.dataset == "smartphone_hapt":
            from filternet.datasets.smartphone_hapt import get_x_y_contig
        elif self.dataset == "har":
            from filternet.datasets.har import get_x_y_contig
        elif self.dataset == "intention_recognition":
            from filternet.datasets.intention_recognition import get_x_y_contig
        else:
            raise ValueError(f"Unknown dataset {self.dataset}")

        kwargs = {}
        if self.y_cols:
            kwargs["y_cols"] = self.y_cols
        if self.sensor_subset:
            kwargs["sensor_subset"] = self.sensor_subset

        Xc, ycs, data_spec = get_x_y_contig(s, **kwargs)

        if s == "train":
            # Training shuffles, and we set epoch size to the length of the dataset. We can set train_step as
            # small as we want to get more windows; we'll only run len(Xc)/win_len of them in each training
            # epoch.
            self.epoch_iters = int(len(Xc) / self.decimation)
            X, ys = sliding_window_x_y(
                Xc, ycs, win_len=self.win_len, step=self.train_step, shuffle=False
            )
            # Set the overall data spec using the training set,
            #  and modify later if more info is needed.
            self.data_spec = data_spec
        else:
            # Val and test data are not shuffled.
            # Each point is inferred ~twice b/c step = win_len/2
            X, ys = sliding_window_x_y(
                Xc,
                ycs,
                win_len=self.win_len,
                step=int(self.win_len / 2),
                shuffle=False,  # Cannot be true with windows
            )

        dl = DataLoader(
            TensorDataset(torch.Tensor(X), *[torch.Tensor(y).long() for y in ys]),
            batch_size=self.batch_size,
            shuffle=True if s == "train" else False,
        )
        return dl

    def _batch_size_default(self):
        batch_size = int(self.n_samples_per_batch / self.win_len)
        print(f"Batch size: {batch_size}")
        return batch_size

    optimizer = t.Any(transient=True)

    def _optimizer_default(self):
        if self.optim_type == "SGD":
            optimizer = torch.optim.SGD(
                self.model.parameters(),
                lr=10 ** (self.lr_exp),
                momentum=self.momentum,
                weight_decay=self.weight_decay,
            )
        elif self.optim_type == "Adam":
            optimizer = torch.optim.Adam(
                self.model.parameters(),
                lr=10 ** (self.lr_exp),
                weight_decay=self.weight_decay,
                amsgrad=True,
            )
        elif self.optim_type == "RMSprop":
            optimizer = torch.optim.RMSprop(
                self.model.parameters(),
                lr=10 ** (self.lr_exp),
                alpha=self.alpha,
                weight_decay=self.weight_decay,
                momentum=self.momentum,
            )
        else:
            raise NotImplementedError(self.optim_type)
        return optimizer

    iteration: int = t.Property(t.Int)

    def _get_iteration(self):
        return len(self.train_state.epoch_records) + 1

    lr_scheduler = t.Any(transient=True)

    def _lr_scheduler_default(self):
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
            self.optimizer, self.lr_decay  # , last_epoch=self._iteration
        )

        # If this is being re-instantiated in mid-training, then we must
        #  iterate scheduler forward to match the training step.
        for i in range(self.iteration):
            if self.lr_decay != 1:
                lr_scheduler.step()

        return lr_scheduler

    #####
    # Training Methods
    ##
    def _train_batch(self, data, targets):
        self.optimizer.zero_grad()
        loss, output, _targets, _ = self._run_model_on_batch(data, targets)
        loss.backward()
        self.optimizer.step()
        # if self.max_lr:
        #     self.lr_scheduler.step()

        return loss, output, _targets

    def _run_model_on_batch(self, data, targets):
        targets = torch.stack(targets)

        if self.cuda:
            data, targets = data.cuda(), targets.cuda()

        output = self.model(data)

        _targets = self.model.transform_targets(targets, one_hot=False)
        if self.loss_func == "cross_entropy":
            _losses = [
                F.cross_entropy(o, t, weight=self._class_weights)
                for o, t in zip(output, _targets)
            ]
            loss = sum(_losses)
        elif self.loss_func == "binary_cross_entropy":
            _targets_onehot = self.model.transform_targets(targets, one_hot=True)
            _losses = [
                F.binary_cross_entropy_with_logits(o, t, weight=self._class_weights)
                for o, t in zip(output, _targets_onehot)
            ]
            loss = sum(_losses)
        else:
            raise NotImplementedError(self.loss_func)

        # Assume only 1 output:

        return loss, output[0], _targets[0], _losses[0]

    def _calc_validation_loss(self):
        running_loss = 0
        self.model.eval()
        with torch.no_grad():
            for batch_idx, (data, *targets) in enumerate(self.dl_val):
                loss, _, _, _ = self._run_model_on_batch(data, targets)
                running_loss += loss.item() * data.size(0)

        return running_loss / len(self.dl_val.dataset)

    def _train_epoch(self):

        self.model.train()

        train_losses = []
        train_accs = []

        for batch_idx, (data, *targets) in enumerate(self.dl_train):
            if (
                batch_idx * data.shape[0] * data.shape[2]
                > self.epoch_iters * self.epoch_size
            ):
                # we've effectively finished one epoch worth of data; break!
                break

            batch_loss, batch_output, batch_targets = self._train_batch(data, targets)
            train_losses.append(batch_loss.detach().cpu().item())
            batch_preds = torch.argmax(batch_output, 1, False)
            train_accs.append(
                (batch_preds == batch_targets).detach().cpu().float().mean().item()
            )

        if self.lr_decay != 1:
            self.lr_scheduler.step()

        return EpochMetrics(loss=np.mean(train_losses), acc=np.mean(train_accs))

    def _val_epoch(self):
        return self._eval_epoch(self.dl_val)

    def _eval_epoch(self, data_loader):
        # Validation
        self.model.eval()

        losses = []
        outputs = []
        targets = []

        with torch.no_grad():
            for batch_idx, (data, *target) in enumerate(data_loader):
                (
                    batch_loss,
                    batch_output,
                    batch_targets,
                    train_losses,
                ) = self._run_model_on_batch(data, target)

                losses.append(batch_loss.detach().cpu().item())
                outputs.append(
                    torch.argmax(batch_output, 1, False)
                    .detach()
                    .cpu()
                    .data.numpy()
                    .flatten()
                )
                targets.append(batch_targets.detach().cpu().data.numpy().flatten())

        targets = np.hstack(targets)
        outputs = np.hstack(outputs)
        acc = sklearn.metrics.accuracy_score(targets, outputs)
        f1 = sklearn.metrics.f1_score(targets, outputs, average="weighted")

        return EpochMetrics(loss=np.mean(losses), acc=acc, f1=f1)

    def init_data(self):
        # Initiate loading of datasets, model
        _, _, _ = self.dl_train, self.dl_val, self.dl_test
        _ = self.model

    def init_train(self):

        # initialization
        if self.seed:
            torch.manual_seed(self.seed)
        if self.cuda:
            if self.seed:
                torch.cuda.manual_seed(self.seed)
        self.model.to(self.device)

    def train_one_epoch(self, verbose=True) -> EpochRecord:
        """ traing a single epoch -- method tailored to the Ray.tune methodology."""
        epoch_record = EpochRecord(epoch=len(self.train_state.epoch_records))
        self.train_state.epoch_records.append(epoch_record)

        with Timer("Train Epoch", log_output=verbose) as t:
            epoch_record.train = self._train_epoch()
        epoch_record.iter_s_cpu = t.interval_cpu
        epoch_record.iter_s_wall = t.interval_wall
        epoch_record.lr = self.optimizer.param_groups[0]["lr"]

        with Timer("Val Epoch", log_output=verbose):
            epoch_record.val = self._val_epoch()

        df = self.train_state.to_df()

        # Early stopping / checkpointing implementation
        df["raw_metric"] = df.val_loss / df.val_f1
        df["ewma_smoothed_loss"] = (
            df["raw_metric"].ewm(ignore_na=False, halflife=3).mean()
        )
        df["instability_penalty"] = (
            df["raw_metric"].rolling(5, min_periods=3).std().fillna(0.75)
        )
        stopping_metric = df["stopping_metric"] = (
            df["ewma_smoothed_loss"] + df["instability_penalty"]
        )
        epoch_record.stopping_metric = df["stopping_metric"].iloc[-1]

        idx_this_iter = stopping_metric.index.max()
        idx_best_yet = stopping_metric.idxmin()
        self.train_state.best_sm = df.loc[idx_best_yet, "stopping_metric"]
        self.train_state.best_loss = df.loc[idx_best_yet, "val_loss"]
        self.train_state.best_f1 = df.loc[idx_best_yet, "val_f1"]

        if idx_best_yet == idx_this_iter:
            # Best yet! Checkpoint.
            epoch_record.should_checkpoint = True
            self.cp_iter = epoch_record.epoch

        else:
            if self.patience is not None:
                patience_counter = idx_this_iter - idx_best_yet
                assert patience_counter >= 0
                if patience_counter > self.patience:
                    if verbose:
                        print(
                            f"Early stop! Out of patience ( {patience_counter} > {self.patience} )"
                        )
                    epoch_record.done = True

        if verbose:
            self.print_train_summary()

        return epoch_record

    def train(self, max_epochs=50, verbose=True):
        """ A pretty standard training loop, constrained to stop in `max_epochs` but may stop early if our
        custom stopping metric does not improve for `self.patience` epochs. Always checkpoints
        when a new best stopping_metric is achieved. An alternative to using
        ray.tune for training."""

        self.init_data()
        self.init_train()

        while True:
            epoch_record = self.train_one_epoch(verbose=verbose)

            if epoch_record.should_checkpoint:
                last_cp = self._save()
                if verbose:
                    print(f"<<<< Checkpointed ({last_cp}) >>>")
            if epoch_record.done:
                break
            if epoch_record.epoch >= max_epochs:
                break

        # Save trainer state, but not the model
        self._save(save_model=False)
        if verbose:
            print(self.model_path)

    def print_train_summary(self):
        df = self.train_state.to_df()

        with pd.option_context(
            "display.max_rows",
            100,
            "display.max_columns",
            100,
            "display.precision",
            3,
            "display.width",
            180,
        ):
            print(df.drop(["done"], axis=1, errors="ignore"))

    def _save(self, checkpoint_dir=None, save_model=True, save_trainer=True):
        """ Saves/checkpoints model state and training state to disk. """
        if checkpoint_dir is None:
            checkpoint_dir = self.model_path
        else:
            self.model_path = checkpoint_dir

        os.makedirs(checkpoint_dir, exist_ok=True)

        # save model params
        model_path = os.path.join(checkpoint_dir, "model.pth")
        trainer_path = os.path.join(checkpoint_dir, "trainer.pth")

        if save_model:
            torch.save(self.model.state_dict(), model_path)
        if save_trainer:
            with open(trainer_path, "wb") as f:
                pickle.dump(self, f)

        return checkpoint_dir

    def _restore(self, checkpoint_dir=None):
        """ Restores model state and training state from disk. """

        if checkpoint_dir is None:
            checkpoint_dir = self.model_path

        model_path = os.path.join(checkpoint_dir, "model.pth")
        trainer_path = os.path.join(checkpoint_dir, "trainer.pth")

        # Reconstitute old trainer and copy state to this trainer.
        with open(trainer_path, "rb") as f:
            other_trainer = pickle.load(f)

        self.__setstate__(other_trainer.__getstate__())

        # Load model (after loading state in case we need to re-initialize model from config)
        self.model.load_state_dict(torch.load(model_path, map_location=self.device))

        # Be careful to reinitialize optimizer and lr scheduler
        self.optimizer = self._optimizer_default()
        self.lr_scheduler = self._lr_scheduler_default()
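A hypothetical end-to-end usage sketch for the Trainer above; it assumes the filternet package with its datasets and models is importable, and the argument values are placeholders.

trainer = Trainer(
    dataset="har",
    model_class="FilterNet",
    win_len=512,
    lr_exp=-3.0,
    patience=10,
)
trainer.train(max_epochs=50, verbose=True)   # loads data, trains, early-stops, checkpoints
print(trainer.train_state.best_f1)           # best validation F1 seen during training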
Example 8
class DataSet(_traits.HasTraits):
    '''Data set for holding one matrix and associated metadata.

    The recommended access methods, as properties of the DataSet object:
     * id: An identity string that is unique for each object
     * display_name: A human-friendly name for the data set
     * kind: The data set type
     * missing_data: Boolean value indicating whether the data set has "holes"
     * n_vars: Number of variables
     * n_objs: Number of objects
     * var_n: List containing variable names
     * obj_n: List containing object names
     * values: The matrix values as a 2D NumPy array
     * mat: The matrix as a Pandas DataFrame
    '''
    mat = _traits.Instance(_pd.DataFrame, ())

    _id = _traits.Int()
    id = _traits.Property()
    new_id = _itr.count(start=101).next

    display_name = _traits.Unicode('Unnamed data set')

    kind = _traits.Enum(DS_TYPES)

    # FIXME: Only color
    style = _traits.Instance('VisualStyle', ())

    # Example: {'species': [SubSet, SubSet], 'location': [SubSet, SubSet]}
    # Column subsets
    subs = _traits.Dict(_traits.Unicode, _traits.List)
    # Row subsets
    rsubs = _traits.Dict(_traits.Unicode, _traits.List)

    row_factors = _traits.List()
    col_factors = _traits.List()

    # FIXME: This data set has missing data
    # do you want to do something about it?
    # * throw rows/cols with missing data
    # * do imputation
    missing_data = _traits.Property(_traits.Bool)

    n_vars = _traits.Property()
    n_objs = _traits.Property()
    var_n = _traits.Property()
    obj_n = _traits.Property()
    values = _traits.Property()

    # FIXME: This is a dubious solution
    matcat = _traits.Instance(_pd.DataFrame)
    valuescat = _traits.Property()


    def _get_values(self):
        if self.missing_data:
            return _np.ma.masked_invalid(self.mat.values)
        else:
            return self.mat.values


    def _make_matcat(self):
        matcat = self.mat.copy()
        for cn, ssl in self.subs.items():
            cs = _pd.Series(index=self.mat.index)
            for ss in ssl:
                cs[list(ss.row_selector)] = ss.id
                matcat[cn] = cs
        for cn, ssl in self.rsubs.items():
            cs = _pd.Series(index=self.mat.index)
            for ss in ssl:
                cs[list(ss.row_selector)] = ss.id
                matcat[cn] = cs
        self.matcat = matcat


    def _get_valuescat(self):
        try:
            return self.matcat.values
        except AttributeError:
            # self._make_matcat()
            return self.matcat.values


    def _get_n_vars(self):
        return self.mat.shape[1]


    def _get_n_objs(self):
        return self.mat.shape[0]


    def _get_var_n(self):
        return list(self.mat.columns)


    def _get_obj_n(self):
        return list(self.mat.index)


    def __id_default(self):
        return DataSet.new_id()


    def __eq__(self, other):
        return self.id == other


    def __ne__(self, other):
        return self.id != other


    def _get_id(self):
        return str(self._id)


    def _get_missing_data(self):
        # FIXME: I must look more into this
        try:
            return _np.any(_np.isnan(self.mat.values))
        except TypeError:
            return False


    def get_subset_groups(self):
        return self.subs.keys()


    def get_subsets(self, group):
        return self.subs[group]


    def get_subset_rows(self, subset):
        ''' Return a subset from given subset id'''
        return self.mat.loc[list(subset.row_selector)]


    def copy(self, transpose=False):
        new = self.clone_traits(traits=['display_name', 'kind', 'style', 'subs'])
        if transpose:
            tmp = self.mat.copy()
            new.mat = tmp.transpose()
        else:
            new.mat = self.mat.copy()
        return new
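A hypothetical usage sketch with a small pandas DataFrame; it assumes the surrounding module that defines DS_TYPES and VisualStyle (and, given the .next iterator attribute, a Python 2 environment).

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(4, 3),
                  index=['o1', 'o2', 'o3', 'o4'],
                  columns=['v1', 'v2', 'v3'])
ds = DataSet(mat=df)
print(ds.n_objs, ds.n_vars)        # 4 3
print(ds.var_n)                    # ['v1', 'v2', 'v3']
transposed = ds.copy(transpose=True)
print(transposed.mat.shape)        # (3, 4)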
Example 9
class ClassWardMetrics(t.HasStrictTraits):
    segment_twoset_results: dict = t.Dict()
    event_detailed_scores: dict = t.Dict()
    event_standard_scores: dict = t.Dict()
Example 10
class EvalModel(t.HasStrictTraits):
    trainer: Trainer = t.Any()
    model: mo.BaseNet = t.DelegatesTo("trainer")
    dl_test: DataLoader = t.DelegatesTo("trainer")
    data_spec: dict = t.DelegatesTo("trainer")
    cuda: bool = t.DelegatesTo("trainer")
    device: str = t.DelegatesTo("trainer")
    loss_func: str = t.DelegatesTo("trainer")
    model_path: str = t.DelegatesTo("trainer")
    has_null_class: bool = t.DelegatesTo("trainer")
    predict_null_class: bool = t.DelegatesTo("trainer")

    # 'prediction' mode employs overlap and reconstructs the signal
    #   as a contiguous timeseries w/ optional windowing.
    #   It aims for best accuracy/f1 by using overlap, and will
    #   typically outperform 'training' mode.
    # 'training' mode does not average repeated points and does
    #   not window; it should produce acc/loss/f1 similar to
    #   those seen during training.
    run_mode: str = t.Enum(["prediction", "training"])
    window: str = t.Enum(["hanning", "boxcar"])
    eval_batch_size: int = t.Int(100)

    target_names: ty.List[str] = t.ListStr()

    def _target_names_default(self):
        target_names = self.data_spec["output_spec"][0]["classes"]

        if self.has_null_class:
            assert target_names[0] in ("", "Null")

            if not self.predict_null_class:
                target_names = target_names[1:]

        return target_names

    def _run_model_on_batch(self, data, targets):
        targets = torch.stack(targets)

        if self.cuda:
            data, targets = data.cuda(), targets.cuda()

        output = self.model(data)

        _targets = self.model.transform_targets(targets, one_hot=False)
        if self.loss_func == "cross_entropy":
            _losses = [F.cross_entropy(o, t) for o, t in zip(output, _targets)]
            loss = sum(_losses)
        elif self.loss_func == "binary_cross_entropy":
            _targets_onehot = self.model.transform_targets(targets,
                                                           one_hot=True)
            _losses = [
                F.binary_cross_entropy_with_logits(o, t)
                for o, t in zip(output, _targets_onehot)
            ]
            loss = sum(_losses)
        else:
            raise NotImplementedError(self.loss_func)

        # Assume only 1 output:

        return loss, output[0], _targets[0], _losses[0]

    def run_test_set(self, dl=None):
        """ Runs `self.model` on `self.dl_test` (or a provided dl) and stores results for subsequent evaluation. """
        if dl is None:
            dl = self.dl_test

        if self.cuda:
            self.model.cuda()
        self.model.eval()
        if self.eval_batch_size:
            dl = DataLoader(dl.dataset,
                            batch_size=self.eval_batch_size,
                            shuffle=False)
        #
        #     # Xc, yc = data.get_x_y_contig('test')
        X, *ys = dl.dataset.tensors
        # X: [N, input_chans, win_len]
        step = int(X.shape[2] / 2)
        assert torch.equal(X[0, :, step], X[1, :, 0])

        losses = []
        outputsraw = []
        outputs = []
        targets = []

        with Timer("run", log_output=False) as tr:
            with Timer("infer", log_output=False) as ti:
                for batch_idx, (data, *target) in enumerate(dl):
                    (
                        batch_loss,
                        batch_output,
                        batch_targets,
                        train_losses,
                    ) = self._run_model_on_batch(data, target)

                    losses.append(batch_loss.detach().cpu().item())
                    outputsraw.append(batch_output.detach().cpu().data.numpy())
                    outputs.append(
                        torch.argmax(batch_output, 1,
                                     False).detach().cpu().data.numpy())
                    targets.append(batch_targets.detach().cpu().data.numpy())
            self.infer_time_s_cpu = ti.interval_cpu
            self.infer_time_s_wall = ti.interval_wall

            self.loss = np.mean(losses)
            targets = np.concatenate(targets, axis=0)  # [N, out_win_len]
            outputsraw = np.concatenate(
                outputsraw, axis=0)  # [N, n_out_classes, out_win_len]
            outputs = np.concatenate(outputs,
                                     axis=0)  # [N, n_out_classes, out_win_len]

            # win_len = toutputsraw[0].shape[-1]
            if (self.model.output_type == "many_to_one_takelast"
                    or self.run_mode == "training"):
                self.targets = np.concatenate(targets, axis=-1)  # [N,]
                self.outputsraw = np.concatenate(
                    outputsraw, axis=-1)  # [n_out_classes, N,]
                self.outputs = np.concatenate(outputs, axis=-1)  # [N,]

            elif self.run_mode == "prediction":
                n_segments, n_classes, out_win_len = outputsraw.shape

                output_step = int(out_win_len / 2)

                if self.window == "hanning":
                    EPS = 0.001  # prevents divide-by-zero
                    arr_window = (1 - EPS) * np.hanning(out_win_len) + EPS
                elif self.window == "boxcar":
                    arr_window = np.ones((out_win_len, ))
                else:
                    raise ValueError()

                # Allocate space for merged predictions
                if self.has_null_class and not self.predict_null_class:
                    outputsraw2 = np.zeros(
                        (n_segments + 1, n_classes - 1, output_step, 2))
                    window2 = np.zeros(
                        (n_segments + 1, n_classes - 1, output_step,
                         2))  # [N+1, out_win_len/2, 2]
                    # Drop in outputs/window vals in the two layers
                    outputsraw = outputsraw[:, 1:, :]
                else:
                    outputsraw2 = np.zeros(
                        (n_segments + 1, n_classes, output_step, 2))
                    window2 = np.zeros((n_segments + 1, n_classes, output_step,
                                        2))  # [N+1, out_win_len/2, 2]

                # Drop in outputs/window vals in the two layers
                outputsraw2[:-1, :, :, 0] = outputsraw[:, :, :output_step]
                outputsraw2[1:, :, :,
                            1] = outputsraw[:, :, output_step:output_step * 2]
                window2[:-1, :, :, 0] = arr_window[:output_step]
                window2[1:, :, :, 1] = arr_window[output_step:output_step * 2]

                merged_outputsraw = (outputsraw2 * window2).sum(
                    axis=3) / (window2).sum(axis=3)
                softmaxed_merged_outputsraw = softmax(merged_outputsraw,
                                                      axis=1)
                merged_outputs = np.argmax(softmaxed_merged_outputsraw, 1)

                self.outputsraw = np.concatenate(merged_outputsraw, axis=-1)
                self.outputs = np.concatenate(merged_outputs, axis=-1)
                self.targets = np.concatenate(
                    np.concatenate(
                        [
                            targets[:, :output_step],
                            targets[[-1], output_step:output_step * 2],
                        ],
                        axis=0,
                    ),
                    axis=-1,
                )
            else:
                raise ValueError()

        if self.has_null_class and not self.predict_null_class:
            not_null_mask = self.targets > 0
            self.outputsraw = self.outputsraw[..., not_null_mask]
            self.outputs = self.outputs[not_null_mask]
            self.targets = self.targets[not_null_mask]
            self.targets -= 1

        self.n_samples_in = np.prod(dl.dataset.tensors[1].shape)
        self.n_samples_out = len(self.outputs)
        self.infer_samples_per_s = self.n_samples_in / self.infer_time_s_wall
        self.run_time_s_cpu = tr.interval_cpu
        self.run_time_s_wall = tr.interval_wall

    loss: float = t.Float()
    targets: np.ndarray = t.Array()
    outputsraw: np.ndarray = t.Array()
    outputs: np.ndarray = t.Array()
    n_samples_in: int = t.Int()
    n_samples_out: int = t.Int()
    infer_samples_per_s: float = t.Float()

    infer_time_s_cpu: float = t.Float()
    infer_time_s_wall: float = t.Float()
    run_time_s_cpu: float = t.Float()
    run_time_s_wall: float = t.Float()

    extra: dict = t.Dict({})

    acc: float = t.Float()
    f1: float = t.Float()
    f1_mean: float = t.Float()
    event_f1: float = t.Float()
    classification_report_txt: str = t.Str()
    classification_report_dict: dict = t.Dict()
    classification_report_df: pd.DataFrame = t.Property(
        t.Instance(pd.DataFrame))
    confusion_matrix: np.ndarray = t.Array()

    nonull_acc: float = t.Float()
    nonull_f1: float = t.Float()
    nonull_f1_mean: float = t.Float()
    nonull_classification_report_txt: str = t.Str()
    nonull_classification_report_dict: dict = t.Dict()
    nonull_classification_report_df: pd.DataFrame = t.Property(
        t.Instance(pd.DataFrame))
    nonull_confusion_matrix: np.ndarray = t.Array()

    def calc_metrics(self):

        self.acc = sklearn.metrics.accuracy_score(self.targets, self.outputs)
        self.f1 = sklearn.metrics.f1_score(self.targets,
                                           self.outputs,
                                           average="weighted")
        self.f1_mean = sklearn.metrics.f1_score(self.targets,
                                                self.outputs,
                                                average="macro")

        self.classification_report_txt = sklearn.metrics.classification_report(
            self.targets,
            self.outputs,
            digits=3,
            labels=np.arange(len(self.target_names)),
            target_names=self.target_names,
        )
        self.classification_report_dict = sklearn.metrics.classification_report(
            self.targets,
            self.outputs,
            digits=3,
            output_dict=True,
            labels=np.arange(len(self.target_names)),
            target_names=self.target_names,
        )
        self.confusion_matrix = sklearn.metrics.confusion_matrix(
            self.targets, self.outputs)

        # Now, ignoring the null/none class:
        if self.has_null_class and self.predict_null_class:
            # assume null class comes first
            nonull_mask = self.targets > 0
            nonull_targets = self.targets[nonull_mask]
            # nonull_outputs = self.outputs[nonull_mask]
            nonull_outputs = self.outputsraw[1:, :].argmax(
                axis=0)[nonull_mask] + 1

            self.nonull_acc = sklearn.metrics.accuracy_score(
                nonull_targets, nonull_outputs)
            self.nonull_f1 = sklearn.metrics.f1_score(nonull_targets,
                                                      nonull_outputs,
                                                      average="weighted")
            self.nonull_f1_mean = sklearn.metrics.f1_score(nonull_targets,
                                                           nonull_outputs,
                                                           average="macro")
            self.nonull_classification_report_txt = sklearn.metrics.classification_report(
                nonull_targets,
                nonull_outputs,
                digits=3,
                labels=np.arange(len(self.target_names)),
                target_names=self.target_names,
            )
            self.nonull_classification_report_dict = sklearn.metrics.classification_report(
                nonull_targets,
                nonull_outputs,
                digits=3,
                output_dict=True,
                labels=np.arange(len(self.target_names)),
                target_names=self.target_names,
            )
            self.nonull_confusion_matrix = sklearn.metrics.confusion_matrix(
                nonull_targets, nonull_outputs)
        else:
            self.nonull_acc = self.acc
            self.nonull_f1 = self.f1
            self.nonull_f1_mean = self.f1_mean
            self.nonull_classification_report_txt = self.classification_report_txt
            self.nonull_classification_report_dict = self.classification_report_dict
            self.nonull_confusion_matrix = self.confusion_matrix

    ward_metrics: WardMetrics = t.Instance(WardMetrics)

    def calc_ward_metrics(self):
        """ Do event-wise metrics, using the `wardmetrics` package which implements metrics from:

         [1]    J. A. Ward, P. Lukowicz, and H. W. Gellersen, “Performance metrics for activity recognition,”
                    ACM Trans. Intell. Syst. Technol., vol. 2, no. 1, pp. 1–23, Jan. 2011.
        """

        import wardmetrics

        # Must be in prediction mode -- otherwise, data is not contiguous, ward metrics will be bogus
        assert self.run_mode == "prediction"

        targets = self.targets
        predictions = self.outputs

        wmetrics = WardMetrics()

        targets_events = wardmetrics.frame_results_to_events(targets)
        preds_events = wardmetrics.frame_results_to_events(predictions)

        for i, class_name in enumerate(self.target_names):
            class_wmetrics = ClassWardMetrics()

            t = targets_events.get(str(i), [])
            p = preds_events.get(str(i), [])
            # class_wmetrics['t'] = t
            # class_wmetrics['p'] = p

            try:
                assert len(t) and len(p)
                (
                    twoset_results,
                    segments_with_scores,
                    segment_counts,
                    normed_segment_counts,
                ) = wardmetrics.eval_segments(t, p)
                class_wmetrics.segment_twoset_results = twoset_results

                (
                    gt_event_scores,
                    det_event_scores,
                    detailed_scores,
                    standard_scores,
                ) = wardmetrics.eval_events(t, p)
                class_wmetrics.event_detailed_scores = detailed_scores
                class_wmetrics.event_standard_scores = standard_scores
            except (AssertionError, ZeroDivisionError) as e:
                class_wmetrics.segment_twoset_results = {}
                class_wmetrics.event_detailed_scores = {}
                class_wmetrics.event_standard_scores = {}
                # print("Empty Results or targets for a class.")
                # raise ValueError("Empty Results or targets for a class.")

            wmetrics.class_ward_metrics.append(class_wmetrics)

        tt = []
        pp = []
        for i, class_name in enumerate(self.target_names):
            # skip null class for combined eventing:
            if class_name in ("", "Null"):
                continue

            if len(tt) or len(pp):
                offset = np.max(tt + pp) + 2
            else:
                offset = 0
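            # The offset shifts this class's intervals past all previously added
            # ones, so every class lives on one shared, non-overlapping timeline.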

            t = targets_events.get(str(i), [])
            p = preds_events.get(str(i), [])

            tt += [(a + offset, b + offset) for (a, b) in t]
            pp += [(a + offset, b + offset) for (a, b) in p]

        t = tt
        p = pp

        class_wmetrics = ClassWardMetrics()
        assert len(t) and len(p)
        (
            twoset_results,
            segments_with_scores,
            segment_counts,
            normed_segment_counts,
        ) = wardmetrics.eval_segments(t, p)
        class_wmetrics.segment_twoset_results = twoset_results

        (
            gt_event_scores,
            det_event_scores,
            detailed_scores,
            standard_scores,
        ) = wardmetrics.eval_events(t, p)
        class_wmetrics.event_detailed_scores = detailed_scores
        class_wmetrics.event_standard_scores = standard_scores

        # Reformat as dataframe for easier calculations
        df = pd.DataFrame(
            [cm.event_standard_scores for cm in wmetrics.class_ward_metrics],
            index=self.target_names,
        )
        df.loc["all_nonull"] = class_wmetrics.event_standard_scores

        # Calculate F1's to summarize recall/precision for each class
        df["f1"] = (2 * (df["precision"] * df["recall"]) /
                    (df["precision"] + df["recall"]))
        df["f1 (weighted)"] = (
            2 * (df["precision (weighted)"] * df["recall (weighted)"]) /
            (df["precision (weighted)"] + df["recall (weighted)"]))

        # Load dataframes into dictionary output
        wmetrics.df_event_scores = df
        wmetrics.df_event_detailed_scores = pd.DataFrame(
            [cm.event_detailed_scores for cm in wmetrics.class_ward_metrics],
            index=self.target_names,
        )
        wmetrics.df_segment_2set_results = pd.DataFrame(
            [cm.segment_twoset_results for cm in wmetrics.class_ward_metrics],
            index=self.target_names,
        )
        wmetrics.overall_ward_metrics = class_wmetrics

        self.ward_metrics = wmetrics
        self.event_f1 = self.ward_metrics.df_event_scores.loc["all_nonull",
                                                              "f1"]

    def _get_classification_report_df(self):
        df = pd.DataFrame(self.classification_report_dict).T

        # Include Ward-metrics-derived "Event F1 (unweighted by length)"
        if self.ward_metrics:
            df["event_f1"] = self.ward_metrics.df_event_scores["f1"]
        else:
            df["event_f1"] = np.nan

        # Calculate various summary averages
        df.loc["macro avg", "event_f1"] = df["event_f1"].iloc[:-3].mean()
        df.loc["weighted avg", "event_f1"] = (
            df["event_f1"].iloc[:-3] *
            df["support"].iloc[:-3]).sum() / df["support"].iloc[:-3].sum()

        df["support"] = df["support"].astype(int)

        return df

    def _get_nonull_classification_report_df(self):
        target_names = self.target_names
        if not (target_names[0] in ("", "Null")):
            return None

        df = pd.DataFrame(self.nonull_classification_report_dict).T

        df["support"] = df["support"].astype(int)

        return df

    def _save(self, checkpoint_dir=None):
        """ Saves/checkpoints model state and training state to disk. """
        if checkpoint_dir is None:
            checkpoint_dir = self.model_path

        os.makedirs(checkpoint_dir, exist_ok=True)

        # save model params
        evalmodel_path = os.path.join(checkpoint_dir, "evalmodel.pth")

        with open(evalmodel_path, "wb") as f:
            pickle.dump(self, f)

        return checkpoint_dir

    def _restore(self, checkpoint_dir=None):
        """ Restores model state and training state from disk. """

        if checkpoint_dir is None:
            checkpoint_dir = self.model_path

        evalmodel_path = os.path.join(checkpoint_dir, "evalmodel.pth")

        # Reconstitute old trainer and copy state to this trainer.
        with open(evalmodel_path, "rb") as f:
            other_evalmodel = pickle.load(f)

        self.__setstate__(other_evalmodel.__getstate__())

        self.trainer._restore(checkpoint_dir)
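

# A minimal, standalone sketch of the `wardmetrics` calls used in
# EvalModel.calc_ward_metrics above. The frame-wise label sequences are
# made-up illustration data; only the package calls mirror the code above.
if __name__ == "__main__":
    import wardmetrics

    ground_truth_frames = [0, 1, 1, 1, 0, 0, 2, 2, 2, 0]
    predicted_frames = [0, 1, 1, 0, 0, 2, 2, 2, 2, 0]

    # Frame-wise labels -> per-class lists of (start, end) event tuples.
    gt_events = wardmetrics.frame_results_to_events(ground_truth_frames)
    det_events = wardmetrics.frame_results_to_events(predicted_frames)

    for class_label, gt in gt_events.items():
        det = det_events.get(class_label, [])
        if not (gt and det):
            continue
        try:
            # Segment-level two-set metrics and event-level scores, as above.
            twoset_results, _, _, _ = wardmetrics.eval_segments(gt, det)
            _, _, _, standard_scores = wardmetrics.eval_events(gt, det)
            print(class_label, twoset_results, standard_scores)
        except ZeroDivisionError:
            # Tiny toy sequences can hit empty denominators, as handled above.
            pass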
Example 11
class EnsembleTrainer(t.HasStrictTraits):
    def __init__(self, config=None, **kwargs):
        config = {} if config is None else config
        trainer_template = Trainer(**config)
        super().__init__(trainer_template=trainer_template,
                         config=config,
                         **kwargs)

    config: dict = t.Dict()

    trainer_template: Trainer = t.Instance(Trainer)
    trainers: ty.List[Trainer] = t.List(t.Instance(Trainer))

    n_folds = t.Int(5)

    dl_test: DataLoader = t.DelegatesTo("trainer_template")
    data_spec: dict = t.DelegatesTo("trainer_template")
    cuda: bool = t.DelegatesTo("trainer_template")
    device: str = t.DelegatesTo("trainer_template")
    loss_func: str = t.DelegatesTo("trainer_template")
    batch_size: int = t.DelegatesTo("trainer_template")
    win_len: int = t.DelegatesTo("trainer_template")
    has_null_class: bool = t.DelegatesTo("trainer_template")
    predict_null_class: bool = t.DelegatesTo("trainer_template")
    name: str = t.Str()

    def _name_default(self):
        import time

        modelstr = "Ensemble"
        timestr = time.strftime("%Y%m%d-%H%M%S")
        return f"{modelstr}_{timestr}"

    X_folds = t.Tuple(transient=True)
    ys_folds = t.Tuple(transient=True)

    def _trainers_default(self):
        # Temp trainer for grabbing datasets, etc
        tt = self.trainer_template
        tt.init_data()

        # Combine official train & val sets
        X = torch.cat(
            [tt.dl_train.dataset.tensors[0], tt.dl_val.dataset.tensors[0]])
        ys = [
            torch.cat([yt, yv]) for yt, yv in zip(
                tt.dl_train.dataset.tensors[1:], tt.dl_val.dataset.tensors[1:])
        ]
        # make folds
        fold_len = int(np.ceil(len(X) / self.n_folds))
        self.X_folds = torch.split(X, fold_len)
        self.ys_folds = [torch.split(y, fold_len) for y in ys]

        trainers = []
        for i_val_fold in range(self.n_folds):
            trainer = Trainer(
                validation_fold=i_val_fold,
                name=f"{self.name}/{i_val_fold}",
                **self.config,
            )

            trainer.dl_test = tt.dl_test

            trainers.append(trainer)

        return trainers

    model: models.BaseNet = t.Instance(torch.nn.Module, transient=True)

    def _model_default(self):
        model = models.FilterNetEnsemble()
        model.set_models([trainer.model for trainer in self.trainers])
        return model

    model_path: str = t.Str()

    def _model_path_default(self):
        return f"saved_models/{self.name}/"

    def init_data(self):
        # Initiate loading of datasets, model
        pass
        # for trainer in self.trainers:
        #     trainer.init_data()

    def init_train(self):
        pass
        # for trainer in self.trainers:
        #     trainer.init_train()

    def train(self, max_epochs=50):
        """ A pretty standard training loop, constrained to stop in `max_epochs` but may stop early if our
        custom stopping metric does not improve for `self.patience` epochs. Always checkpoints
        when a new best stopping_metric is achieved. An alternative to using
        ray.tune for training."""

        for trainer in self.trainers:
            # Add data to trainer

            X_train = torch.cat([
                arr for i, arr in enumerate(self.X_folds)
                if i != trainer.validation_fold
            ])
            ys_train = [
                torch.cat([
                    arr for i, arr in enumerate(y)
                    if i != trainer.validation_fold
                ]) for y in self.ys_folds
            ]

            X_val = torch.cat([
                arr for i, arr in enumerate(self.X_folds)
                if i == trainer.validation_fold
            ])
            ys_val = [
                torch.cat([
                    arr for i, arr in enumerate(y)
                    if i == trainer.validation_fold
                ]) for y in self.ys_folds
            ]

            trainer.dl_train = DataLoader(
                TensorDataset(torch.Tensor(X_train), *ys_train),
                batch_size=trainer.batch_size,
                shuffle=True,
            )
            trainer.data_spec = self.trainer_template.data_spec
            trainer.epoch_iters = self.trainer_template.epoch_iters
            trainer.dl_val = DataLoader(
                TensorDataset(torch.Tensor(X_val), *ys_val),
                batch_size=trainer.batch_size,
                shuffle=False,
            )

            # Now clear local vars to save RAM
            X_train = ys_train = X_val = ys_val = None

            trainer.init_data()
            trainer.init_train()
            trainer.train(max_epochs=max_epochs)

            # Clear trainer train and val datasets to save ram
            trainer.dl_train = t.Undefined
            trainer.dl_val = t.Undefined

            print(f"RESTORING TO best model")
            trainer._restore()
            trainer._save()

            trainer.print_train_summary()

            em = EvalModel(trainer=trainer)

            em.run_test_set()
            em.calc_metrics()
            em.calc_ward_metrics()
            print(em.classification_report_df.to_string(float_format="%.3f"))
            em._save()

    def print_train_summary(self):
        for trainer in self.trainers:
            trainer.print_train_summary()

    def _save(self, checkpoint_dir=None, save_model=True, save_trainer=True):
        """ Saves/checkpoints model state and training state to disk. """
        if checkpoint_dir is None:
            checkpoint_dir = self.model_path
        else:
            self.model_path = checkpoint_dir

        os.makedirs(checkpoint_dir, exist_ok=True)

        # save model params
        model_path = os.path.join(checkpoint_dir, "model.pth")
        trainer_path = os.path.join(checkpoint_dir, "trainer.pth")

        if save_model:
            torch.save(self.model.state_dict(), model_path)
        if save_trainer:
            with open(trainer_path, "wb") as f:
                pickle.dump(self, f)

        return checkpoint_dir

    def _restore(self, checkpoint_dir=None):
        """ Restores model state and training state from disk. """

        if checkpoint_dir is None:
            checkpoint_dir = self.model_path

        model_path = os.path.join(checkpoint_dir, "model.pth")
        trainer_path = os.path.join(checkpoint_dir, "trainer.pth")

        # Reconstitute old trainer and copy state to this trainer.
        with open(trainer_path, "rb") as f:
            other_trainer = pickle.load(f)

        self.__setstate__(other_trainer.__getstate__())

        # Load sub-models
        for trainer in self.trainers:
            trainer._restore()

        # Load model (after loading state in case we need to re-initialize model from config)
        self.model.load_state_dict(
            torch.load(model_path, map_location=self.device))
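

# A minimal sketch of the fold construction used by EnsembleTrainer above: the
# combined data is cut into n_folds contiguous chunks with torch.split, and each
# sub-trainer trains on every chunk except its own validation fold. The tensor
# shapes below are illustrative only.
if __name__ == "__main__":
    import numpy as np
    import torch

    n_folds = 5
    X = torch.randn(103, 64, 9)                # e.g. 103 windows of 64 samples x 9 channels
    fold_len = int(np.ceil(len(X) / n_folds))  # last fold may be shorter
    X_folds = torch.split(X, fold_len)

    i_val_fold = 2
    X_train = torch.cat([arr for i, arr in enumerate(X_folds) if i != i_val_fold])
    X_val = torch.cat([arr for i, arr in enumerate(X_folds) if i == i_val_fold])
    assert len(X_train) + len(X_val) == len(X)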
Example 12
class ExperimentSnake(traits.HasTraits):
    """Main Experiment Snake GUI that sends arbitrary actions based on the 
    experiment runner sequence and actions that have been set up."""

    #mainLog = utilities.TextDisplay()
    mainLog = outputStream.OutputStream()
    statusString = traits.String("Press Start Snake to begin...")
    isRunning = traits.Bool(False)  # true when the snake is running
    sequenceStarted = traits.Bool(
        False)  # flashes true for ~1ms when sequence starts
    queue = traits.Int(0)

    variables = traits.Dict(
        key_trait=traits.Str, value_trait=traits.Float
    )  #dictionary mapping variable names in Exp control to their values in this sequence
    timingEdges = traits.Dict(
        key_trait=traits.Str, value_trait=traits.Float
    )  #dictionary mapping timing Edge names in Exp control to their values in this sequence
    statusList = [
    ]  #eventually will contain the information gathered from experiment Runner each time we poll

    startAction = traitsui.Action(name='start',
                                  action='_startSnake',
                                  image=pyface.image_resource.ImageResource(
                                      os.path.join('icons', 'start.png')))
    stopAction = traitsui.Action(name='stop',
                                 action='_stopSnakeToolbar',
                                 image=pyface.image_resource.ImageResource(
                                     os.path.join('icons', 'stop.png')))
    reloadHWAsAction = traitsui.Action(
        name='reload',
        action='_reloadHWAsToolbar',
        image=pyface.image_resource.ImageResource(
            os.path.join('icons', 'reload.png')))

    connectionTimer = traits.Instance(
        Timer
    )  # polls the experiment runner and starts off callbacks at appropriate times
    statusStringTimer = traits.Instance(
        Timer)  # updates the status bar at regular intervals (less frequently than the connection timer)
    getCurrentTimer = traits.Instance(
        Timer
    )  #waits for get current to return which marks the beginning of a sequence

    getCurrentThread = traits.Instance(SocketThread)

    connectionPollFrequency = traits.Float(
        1000.0)  # milliseconds; sets how precisely callbacks are timed
    statusStringFrequency = traits.Float(2000.0)  # milliseconds
    getCurrentFrequency = traits.Float(
        1000.0)  # milliseconds; should be shorter than the sequence length

    timeRunning = traits.Float(0.0)  #how long the sequence has been running
    timeTotal = traits.Float(0.0)  # total length of the sequence
    runnerHalted = traits.Bool(True)  # true if runner is halted
    haltedCount = 0
    progress = traits.Float(0.0)  # % of cycle complete
    #progressBar = ProgressDialog()
    hardwareActions = traits.List(hardwareAction.hardwareAction.HardwareAction)

    examineVariablesDictionary = traits.Instance(
        variableDictionary.ExamineVariablesDictionary)
    xmlString = ""  # STRING that will contain entire XML File

    menubar = traitsui.MenuBar(
        traitsui.Menu(
            traitsui.Action(name='Start Snake', action='_startSnake'),
            traitsui.Action(name='Stop Snake', action='_stopSnake'),
            traitsui.Action(name='Reload', action='_reloadHWAs'),
            traitsui.Menu(traitsui.Action(name='DEBUG',
                                          action='_changeLoggingLevelDebug'),
                          traitsui.Action(name='INFO',
                                          action='_changeLoggingLevelInfo'),
                          traitsui.Action(name='WARNING',
                                          action='_changeLoggingLevelWarning'),
                          traitsui.Action(name='ERROR',
                                          action='_changeLoggingLevelError'),
                          name="Log Level"),
            name='Menu'))

    toolbar = traitsui.ToolBar(startAction, stopAction, reloadHWAsAction)

    mainSnakeGroup = traitsui.VGroup(
        traitsui.Item('statusString', show_label=False, style='readonly'),
        traitsui.Item('mainLog',
                      show_label=False,
                      springy=True,
                      style='custom',
                      editor=traitsui.InstanceEditor()))

    hardwareActionsGroup = traitsui.Group(traitsui.Item(
        'hardwareActions',
        show_label=False,
        style='custom',
        editor=traitsui.ListEditor(style="custom")),
                                          label="Hardware Actions",
                                          show_border=True)

    variableExaminerGroup = traitsui.Group(traitsui.Item(
        "examineVariablesDictionary",
        editor=traitsui.InstanceEditor(),
        style="custom",
        show_label=False),
                                           label="Variable Examiner")

    sidePanelGroup = traitsui.VSplit(hardwareActionsGroup,
                                     variableExaminerGroup)

    traits_view = traitsui.View(traitsui.HSplit(sidePanelGroup,
                                                mainSnakeGroup,
                                                show_labels=True),
                                resizable=True,
                                menubar=menubar,
                                toolbar=toolbar,
                                width=0.5,
                                height=0.75,
                                title="Experiment Snake",
                                icon=pyface.image_resource.ImageResource(
                                    os.path.join('icons', 'snakeIcon.ico')))

    def __init__(self, **traits):
        """ takes no  arguments to construct the snake. Everything is done through GUI.
        Snake construction makes a ExperimentSnakeConnection object and writes to the 
        main log window"""

        super(ExperimentSnake, self).__init__(**traits)
        self.connection = experimentRunnerConnection.Connection(
        )  #can override default ports and IP
        self.hardwareActions = [
            hardwareAction.sequenceLoggerHWA.SequenceLogger(
                0.0, snakeReference=self),
            hardwareAction.experimentTablesHWA.ExperimentTables(
                0.0, snakeReference=self, enabled=False),
            hardwareAction.dlicEvapHWA.EvaporationRamp(1.0,
                                                       snakeReference=self),
            #hardwareAction.dlicRFSweepHWA.DLICRFSweep(1.0, snakeReference = self,enabled=False),
            hardwareAction.dlicRFSweepLZHWA.DLICRFSweep(1.0,
                                                        snakeReference=self,
                                                        enabled=False),
            hardwareAction.dlicRFSweepLZWithPowerCtrlHWA.DLICRFSweep(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.dlicRFSweepLZWithPowerCtrl13PreparationHWA.
            DLICRFSweep(1.0, snakeReference=self, enabled=True),
            hardwareAction.dlicPiPulseHWA.DLICPiPulse(1.0,
                                                      snakeReference=self,
                                                      enabled=False),
            hardwareAction.evapAttenuationHWA.EvapAttenuation(
                1.0, snakeReference=self),
            hardwareAction.greyMollassesOffsetFreqHWA.GreyMollassesOffset(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.evapAttenuation2HWA.EvapAttenuation(
                "EvapSnakeAttenuationTimeFinal",
                snakeReference=self,
                enabled=False),
            hardwareAction.picomotorPlugHWA.PicomotorPlug(1.0,
                                                          snakeReference=self,
                                                          enabled=False),
            hardwareAction.windFreakOffsetLockHWA.WindFreak(
                0.0, snakeReference=self, enabled=False),
            hardwareAction.windFreakOffsetLockHighFieldImagingHWA.WindFreak(
                0.0, snakeReference=self, enabled=True),
            hardwareAction.windFreakOffsetLock6ImagingHWA.WindFreak(
                2.0, snakeReference=self, enabled=False),
            hardwareAction.windFreak6To1HWA.WindFreak(2.0,
                                                      snakeReference=self,
                                                      enabled=False),
            hardwareAction.windFreakOffsetLockLaser3.WindFreak(
                3.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaZSFreq(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaZSAtten(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaZSEOMFreq(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaZSEOMAtten(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaSpecFreq(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelLiImaging(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelLiImagingDetuning(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelLiPushPulseAttenuation(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelLiPushPulseDetuning(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaDarkSpotAOMFreq(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaDarkSpotAOMAtten(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaMOTFreq(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaMOTAtten(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaMOTEOMAtten(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaImagingDP(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelLiMOTRep(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelLiMOTCool(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelLiOpticalPump(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNa2to2OpticalPumpingFreq(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNa2to2OpticalPumpingAtt(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaHighFieldImagingFreq(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.AOMChannelHWAs.AOMChannelNaHighFieldImagingAtt(
                1.0, snakeReference=self, enabled=False),
            hardwareAction.digitalMultimeterCurrentMeasureHWA.
            DigitalMultimeterMeasurement(1.0,
                                         snakeReference=self,
                                         enabled=True),
            hardwareAction.MXGPiPulseHWA.PiPulse(1.0,
                                                 snakeReference=self,
                                                 enabled=False),
            hardwareAction.variableExplorerHWA.VariableExplorer(
                2.0, snakeReference=self, enabled=False),
            hardwareAction.jds6600HWA.JDS6600HWA(1.0,
                                                 snakeReference=self,
                                                 enabled=False),
            hardwareAction.watchdogHWA.WatchdogHWA(18.0,
                                                   snakeReference=self,
                                                   enabled=True)
        ]
        introString = """Welcome to experiment snake."""

        self.mainLog.addLine(introString, 1)

    def initialiseHardwareActions(self):
        for hdwAct in self.hardwareActions:
            if hdwAct.enabled:
                returnString = hdwAct.init()
                hdwAct.variablesReference = self.variables
                self.mainLog.addLine(returnString)

    def closeHardwareActions(self):
        """ this function is called when the user presses stop key. it should cleanly close or 
        shutdown all hardware. user must appropriately implement the hardware action close function"""
        for hdwAct in self.hardwareActions:
            if hdwAct.initialised:
                returnString = hdwAct.close()
                self.mainLog.addLine(returnString)

    def _startSnake(self):
        """action call back from menu or toolbar. Simply starts the timer that
        polls the runner and makes the isRunning bool true  """
        self.mainLog.addLine("Experiment Snake Started", 1)
        self.isRunning = True
        self.getCurrentBlocking()
        self.initialiseHardwareActions()
        self.startTimers()

    def newSequenceStarted(self):
        """called by GetCurrent Thread at the beginning of every sequence """
        if self.isRunning:  #otherwise we have already stopped before new sequence began again
            self.getStatusUpdate()
            self.mainLog.addLine("New cycle started: %s" % self.statusList[0],
                                 1)
            self.refreshExamineVariablesDictionary(
            )  # update the examine variables dictionary to reflect the latest values
            self.refreshVariableDependentCallbackTimes(
            )  # if a callback time is a timing edge name or variable name we must pull the value here
        else:
            self.mainLog.addLine("final connection closed")
        for hdwAct in self.hardwareActions:
            hdwAct.awaitingCallback = True

    def _stopSnakeToolbar(self):
        """if snake is stopped, addLine to main log and then run stopSnake """
        self.mainLog.addLine(
            "Experiment Snake Stopped (you should still wait till the end of this sequence before continuing)",
            1)
        self._stopSnake()

    def _reloadHWAsToolbar(self):
        """if snake is stopped, addLine to main log and then run stopSnake """
        self.mainLog.addLine(
            "Experiment Snake Stopped (you should still wait till the end of this sequence before continuing)",
            1)
        self._reloadHWAs()

    def _reloadHWAs(self):
        """if snake is stopped, addLine to main log and then run stopSnake """
        self.mainLog.addLine("Reloading hardware actions (advanced feature)",
                             3)
        reload(hardwareAction.hardwareAction)
        reload(hardwareAction.sequenceLoggerHWA)
        reload(hardwareAction.dlicEvapHWA)
        reload(hardwareAction.dlicRFSweepHWA)
        reload(hardwareAction.evapAttenuationHWA)
        reload(hardwareAction.evapAttenuation2HWA)
        reload(hardwareAction.picomotorPlugHWA)
        reload(hardwareAction.windFreakOffsetLockHWA)
        #reload( hardwareAction.AOMChannelHWAs)#CAUSES REFERENCING PROBLEMS!
        reload(hardwareAction.experimentTablesHWA)
        reload(hardwareAction.windFreakOffsetLockHighFieldImagingHWA)
        reload(hardwareAction.greyMollassesOffsetFreqHWA)
        reload(hardwareAction.dlicRFSweepLZHWA)
        reload(hardwareAction.digitalMultimeterCurrentMeasureHWA)
        self.__init__()

    def stopTimers(self):
        """stops all timers with error catching """
        try:
            #stop any previous timer, should only have 1 timer at a time
            if self.connectionTimer is not None:
                self.connectionTimer.stop()
        except Exception as e:
            logger.error(
                "couldn't stop current timer before starting new one: %s" %
                e.message)
        try:
            #stop any previous timer, should only have 1 timer at a time
            if self.statusStringTimer is not None:
                self.statusStringTimer.stop()
        except Exception as e:
            logger.error(
                "couldn't stop current timer before starting new one: %s" %
                e.message)
        try:
            #stop any previous timer, should only have 1 timer at a time
            if self.getCurrentTimer is not None:
                self.getCurrentTimer.stop()
        except Exception as e:
            logger.error(
                "couldn't stop current timer before starting new one: %s" %
                e.message)

    def _stopSnake(self):
        """Simply stops the timers, shuts down hardware and sets isRunning bool false  """
        self.stopTimers()
        self.closeHardwareActions()
        self.isRunning = False

    def startTimers(self):
        """This timer object polls the experiment runner regularly polling at any time"""
        #stop any previous timers
        self.stopTimers()
        #start timer
        self.connectionTimer = Timer(self.connectionPollFrequency,
                                     self.getStatus)
        time.sleep(0.1)
        self.statusStringTimer = Timer(self.statusStringFrequency,
                                       self.updateStatusString)
        time.sleep(0.1)
        self.getCurrentTimer = Timer(self.getCurrentFrequency, self.getCurrent)
        """Menu action function to change logger level """
        logger.info("timers started")

    def getStatus(self):
        """calls the connection objects get status function and updates the statusList """
        logger.debug("starting getStatus")
        try:
            self.getStatusUpdate()
            self.checkForCallback()
        except Exception as e:
            logger.error("error in getStatus Function")
            logger.error("error: %s " % e.message)
            self.mainLog.addLine(
                "error in getStatus Function. Error: %s" % e.message, 4)

    def getStatusUpdate(self):
        """Calls get status and updates times """
        try:
            statusString = self.connection.getStatus()
        except socket.error as e:
            logger.error(
                "failed to get status . message=%s . errno=%s . errstring=%s "
                % (e.message, e.errno, e.strerror))
            self.mainLog.addLine(
                "Failed to get status from Experiment Runner. message=%s . errno=%s . errstring=%s"
                % (e.message, e.errno, e.strerror), 3)
            self.mainLog.addLine(
                "Cannot update timeRunning - callbacks could be wrong this sequence!",
                4)
            return
        self.statusList = statusString.split("\n")
        timeFormat = '%d/%m/%Y %H:%M:%S'
        timeBegin = datetime.datetime.strptime(self.statusList[2], timeFormat)
        timeCurrent = datetime.datetime.strptime(self.statusList[3],
                                                 timeFormat)
        self.timeRunning = (timeCurrent - timeBegin).total_seconds()
        logger.debug("time Running = %s " % self.timeRunning)

    def checkForCallback(self):
        """if we've received a sequence, we check through all callback times and
        send off a callback on a hardware action if appropriate"""
        try:
            for hdwAct in [
                    hdwA for hdwA in self.hardwareActions if hdwA.enabled
            ]:  # only iterate through enabled hardware actions
                if hdwAct.awaitingCallback and self.timeRunning >= hdwAct.callbackTime:  #callback should be started!
                    try:
                        logger.debug("attempting to callback %s " %
                                     hdwAct.hardwareActionName)
                        hdwAct.setVariablesDictionary(self.variables)
                        logger.debug("vars dictionary set to %s " %
                                     self.variables)
                        callbackReturnString = hdwAct.callback()
                        self.mainLog.addLine(
                            "%s @ %s secs : %s" %
                            (hdwAct.hardwareActionName, self.timeRunning,
                             callbackReturnString), 2)
                        hdwAct.awaitingCallback = False
                        hdwAct.callbackCounter += 1
                    except Exception as e:
                        logger.error(
                            "error while performing callback on %s. see error message below"
                            % (hdwAct.hardwareActionName))
                        logger.error("error: %s " % e.message)
                        self.mainLog.addLine(
                            "error while performing callback on %s. Error: %s"
                            % (hdwAct.hardwareActionName, e.message), 4)
        except Exception as e:
            logger.error("error in checkForCallbackFunction")
            logger.error("error: %s " % e.message)
            self.mainLog.addLine(
                "error in checkForCallbackFunction. Error: %s" % e.message, 4)

    def getCurrent(self):
        """calls the connection objects get status function and updates the variables dictionary """
        if self.getCurrentThread and self.getCurrentThread.isAlive():
            #logger.debug( "getCurrent - already waiting - will not start new thread")
            #removed the above as it fills the log without any useful information
            self.sequenceStarted = False
            return
        else:
            logger.info("starting getCurrent Thread")
            self.getCurrentThread = SocketThread()
            self.getCurrentThread.snakeReference = self  # for calling functions of the snake
            self.getCurrentThread.start()

    def getCurrentBlocking(self):
        """calls getCurrent and won't return until XML parsed. unlike above threaded function
        This is useful when starting up the snake so that we don't start looking for hardware events
        until a sequence has started and we have received XML"""
        self.mainLog.addLine("Waiting for next sequence to start")
        self.xmlString = self.connection.getCurrent(
        )  # only returns at the beginning of a sequence! Experiment runner then returns the entire XML file
        logger.debug("length of xml string = %s " % len(self.xmlString))
        logger.debug("end of xml file is like [-30:]= %s" %
                     self.xmlString[-30:])
        try:
            root = ET.fromstring(self.xmlString)
            variables = root.find("variables")
            self.variables = {
                child[0].text: float(child[1].text)
                for child in variables
            }
            #timing edges dictionary : name--> value
            self.timingEdges = {
                timingEdge.find("name").text:
                float(timingEdge.find("value").text)
                for timingEdge in root.find("timing")
            }
            self.newSequenceStarted()
        except ET.ParseError as e:
            self.mainLog.addLine("Error. Could not parse XML: %s" % e.message,
                                 3)
            self.mainLog.addLine(
                "Possible cause is that buffer is full. is XML length %s>= limit %s ????"
                % (len(self.xmlString), self.connection.BUFFER_SIZE_XML), 3)
            logger.error("could not parse XML: %s " % self.xmlString)
            logger.error(e.message)

    def updateStatusString(self):
        """update the status string with first element of return of GETSTATUS. 
        similiar to experiment control and camera control. It also does the analysis
        of progress that doesn't need to be as accurate (e.g. progress bar)"""
        logger.info("starting update status string")
        self.statusString = self.statusList[
            0] + "- Time Running = %s " % self.timeRunning
        self.queue = int(self.statusList[1])
        timeFormat = '%d/%m/%Y %H:%M:%S'
        timeBegin = datetime.datetime.strptime(self.statusList[2], timeFormat)
        timeEnd = datetime.datetime.strptime(self.statusList[4], timeFormat)
        self.timeTotal = (timeEnd - timeBegin).total_seconds()
        if self.timeRunning > self.timeTotal:
            self.haltedCount += 1
            self.runnerHalted = True
            if self.haltedCount == 1:  # only report the halt once
                logger.critical("runner was stopped.")
                self.mainLog.addLine("Runner stopped!", 3)
                self.closeHardwareActions()
        else:
            if self.haltedCount > 0:
                self.initialiseHardwareActions()
            self.haltedCount = 0
            self.runnerHalted = False
        self.progress = 100.0 * self.timeRunning / self.timeTotal

    def _examineVariablesDictionary_default(self):
        if len(self.hardwareActions) > 0:
            logger.debug(
                "returning first hardware action %s for examineVariablesDictionary default"
                % self.hardwareActions[0].hardwareActionName)
            return variableDictionary.ExamineVariablesDictionary(
                hdwA=self.hardwareActions[0]
            )  #default is the first in the list
        else:
            logger.warning(
                "hardwareActions list was empty. how should I populate variable examiner...?!."
            )
            return None

    def updateExamineVariablesDictionary(self, hdwA):
        """Populates the examineVariablesDictionary Pane appropriately. It is passed the 
        hdwA so that it can find the necessary variables"""
        self.examineVariablesDictionary.hdwA = hdwA
        self.examineVariablesDictionary.hardwareActionName = hdwA.hardwareActionName
        self.examineVariablesDictionary.updateDisplayList()
        logger.critical("examineVariablesDictionary changed")

    def refreshExamineVariablesDictionary(self):
        """calls the updateDisplayList function of examineVariables Dictionary 
        this updates the values in the display list to the latest values in variables
        dictionary. useful for refereshing at the beginning of a sequence"""
        self.examineVariablesDictionary.updateDisplayList()
        logger.info("refreshed examine variables dictionary")

    def refreshVariableDependentCallbackTimes(self):
        """if a HWA is variable dependent call back time, we refresh the value 
        using this function. THis should be called in each sequence"""
        [
            hdwA.parseCallbackTime() for hdwA in self.hardwareActions
            if hdwA.callbackTimeVariableDependent
        ]

    def _changeLoggingLevelDebug(self):
        """Menu action function to change logger level """
        logger.setLevel(logging.DEBUG)

    def _changeLoggingLevelInfo(self):
        """Menu action function to change logger level """
        logger.setLevel(logging.INFO)

    def _changeLoggingLevelWarning(self):
        """Menu action function to change logger level """
        logger.setLevel(logging.WARNING)

    def _changeLoggingLevelError(self):
        """Menu action function to change logger level """
        logger.setLevel(logging.ERROR)
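

# A minimal sketch of the XML parsing performed in getCurrentBlocking above:
# the runner's XML is turned into name -> value dictionaries for the variables
# and the timing edges. The XML snippet below is invented for illustration; the
# real string comes from connection.getCurrent().
if __name__ == "__main__":
    import xml.etree.ElementTree as ET

    xmlString = """
    <sequence>
      <variables>
        <variable><name>MOTLoadTime</name><value>5.0</value></variable>
        <variable><name>EvapTime</name><value>12.5</value></variable>
      </variables>
      <timing>
        <edge><name>ImagePulse</name><value>17.3</value></edge>
      </timing>
    </sequence>
    """
    root = ET.fromstring(xmlString)
    variables = {child[0].text: float(child[1].text)
                 for child in root.find("variables")}
    timingEdges = {edge.find("name").text: float(edge.find("value").text)
                   for edge in root.find("timing")}
    print(variables, timingEdges)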
Example 13
class MATSXDMicroplaneDamageODF(MATSXDEval):

    epsilon_0 = Float(59e-6,
                      label="epsilon_0",
                      desc="strain at the onset of damage (threshold)",
                      enter_set=True,
                      auto_set=False)

    epsilon_f = Float(250e-6,
                      label="epsilon_f",
                      desc="strain controlling the slope of the damage evolution (softening)",
                      enter_set=True,
                      auto_set=False)

    c_T = Float(0.00,
                label="c_T",
                desc="factor weighting the tangential strain in the equivalent strain",
                enter_set=True,
                auto_set=False)

    zeta_G = Float(1.0,
                   label="zeta_G",
                   desc="anisotropy parameter",
                   enter_set=True,
                   auto_set=False)

    state_var_shapes = tr.Property(tr.Dict(), depends_on='n_mp')
    '''Dictionary of state variable entries with their array shapes.
    '''
    @cached_property
    def _get_state_var_shapes(self):
        return {'kappa': (self.n_mp, ), 'omega': (self.n_mp, )}

    #-------------------------------------------------------------------------
    # MICROPLANE-Kinematic constraints
    #-------------------------------------------------------------------------

    # get the dyadic product of the microplane normals
    _MPNN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPNN(self):
        # dyadic product of the microplane normals

        MPNN_nij = einsum('ni,nj->nij', self._MPN, self._MPN)
        return MPNN_nij

    # get the third order tangential tensor (operator) for each microplane
    _MPTT = Property(depends_on='n_mp')

    @cached_property
    def _get__MPTT(self):
        # Third order tangential tensor for each microplane
        delta = identity(2)
        MPTT_nijr = 0.5 * (
            einsum('ni,jr -> nijr', self._MPN, delta) +
            einsum('nj,ir -> njir', self._MPN, delta) -
            2.0 * einsum('ni,nj,nr -> nijr', self._MPN, self._MPN, self._MPN))
        return MPTT_nijr

    def _get_e_Emna(self, eps_Emab):
        # Projection of apparent strain onto the individual microplanes
        e_ni = einsum('nb,Emba->Emna', self._MPN, eps_Emab)
        return e_ni

    def _get_e_N_Emn(self, e_Emna):
        # get the normal strain array for each microplane
        e_N_Emn = einsum('Emna, na->Emn', e_Emna, self._MPN)
        return e_N_Emn

    def _get_e_equiv_Emn(self, e_Emna):
        '''
        Returns a list of the microplane equivalent strains
        based on the list of microplane strain vectors
        '''
        # magnitude of the normal strain vector for each microplane
        e_N_Emn = self._get_e_N_Emn(e_Emna)
        # positive part of the normal strain magnitude for each microplane
        e_N_pos_Emn = (np.abs(e_N_Emn) + e_N_Emn) / 2
        # normal strain vector for each microplane
        e_N_Emna = einsum('Emn,ni -> Emni', e_N_Emn, self._MPN)
        # tangent strain ratio
        c_T = self.c_T
        # tangential strain vector for each microplane
        e_T_Emna = e_Emna - e_N_Emna
        # squared tangential strain vector for each microplane
        e_TT_Emn = einsum('Emni,Emni -> Emn', e_T_Emna, e_T_Emna)
        # equivalent strain for each microplane
        e_equiv_Emn = sqrt(e_N_pos_Emn * e_N_pos_Emn + c_T * e_TT_Emn)
        return e_equiv_Emn

    def update_state_variables(self, eps_Emab, kappa, omega):

        e_Emna = self._get_e_Emna(eps_Emab)
        eps_eq_Emn = self._get_e_equiv_Emn(e_Emna)
        f_trial_Emn = eps_eq_Emn - self.epsilon_0
        I = np.where(f_trial_Emn > 0)
        kappa[I] = eps_eq_Emn[I]
        omega[I] = self._get_omega(eps_eq_Emn[I])
        return I

    def _get_omega(self, kappa_Emn):
        '''
        Return the new value of the damage parameter
        omega = 1 - (epsilon_0 / kappa) * exp(-(kappa - epsilon_0) / (epsilon_f - epsilon_0))
        for kappa >= epsilon_0 (zero below the threshold).
        @param kappa_Emn: maximum equivalent strain reached so far on each microplane
        '''
        omega_Emn = np.zeros_like(kappa_Emn)
        epsilon_0 = self.epsilon_0
        epsilon_f = self.epsilon_f
        kappa_idx = np.where(kappa_Emn >= epsilon_0)
        omega_Emn[kappa_idx] = (1.0 -
                                (epsilon_0 / kappa_Emn[kappa_idx] *
                                 np.exp(-1.0 *
                                        (kappa_Emn[kappa_idx] - epsilon_0) /
                                        (epsilon_f - epsilon_0))))
        return omega_Emn

    def _get_phi_Emab(self, kappa_Emn):
        # Returns the 2nd order damage tensor 'phi_mtx'
        # scalar integrity factor for each microplane
        phi_Emn = 1.0 - self._get_omega(kappa_Emn)
        # integration terms for each microplanes
        phi_Emab = einsum('Emn,n,nab->Emab', phi_Emn, self._MPW, self._MPNN)
        return phi_Emab

#     def _get_beta_Emabcd(self, phi_Emab):
#         '''
#         Returns the 4th order damage tensor 'beta4' using sum-type symmetrization
#         (cf. [Jir99], Eq.(21))
#         '''
#         delta = identity(2)
#         beta_Emijkl = 0.25 * (einsum('Emik,jl->Emijkl', phi_Emab, delta) +
#                               einsum('Emil,jk->Emijkl', phi_Emab, delta) +
#                               einsum('Emjk,il->Emijkl', phi_Emab, delta) +
#                               einsum('Emjl,ik->Emijkl', phi_Emab, delta))
#
#         return beta_Emijkl

#----------------------------------------------------------------
#  the fourth order volumetric-identity tensor
#----------------------------------------------------------------

    def _get_I_vol_abcd(self):

        delta = identity(2)
        I_vol_abcd = (1.0 / 3.0) * einsum('ab,cd -> abcd', delta, delta)
        return I_vol_abcd

    #----------------------------------------------------------------
    # Returns the fourth order deviatoric-identity tensor
    #----------------------------------------------------------------
    def _get_I_dev_abcd(self):

        delta = identity(2)
        I_dev_abcd = 0.5 * (einsum('ac,bd -> abcd', delta, delta) +
                            einsum('ad,bc -> abcd', delta, delta)) \
            - (1. / 3.0) * einsum('ab,cd -> abcd', delta, delta)

        return I_dev_abcd

    #----------------------------------------------------------------
    # Returns the fourth order tensor P_vol [Wu.2009]
    #----------------------------------------------------------------
    def _get_P_vol_ab(self):

        delta = identity(2)
        P_vol_ab = (1.0 / 3.0) * delta
        return P_vol_ab

    #----------------------------------------------------------------
    # Returns the fourth order tensor P_dev [Wu.2009]
    #----------------------------------------------------------------
    def _get_P_dev_nabc(self):

        delta = identity(2)
        P_dev_nabc = 0.5 * einsum('nd,da,bc -> nabc', self._MPN, delta, delta)
        return P_dev_nabc

    #----------------------------------------------------------------
    # Returns the outer product of P_vol [Wu.2009]
    #----------------------------------------------------------------
    def _get_PP_vol_abcd(self):

        delta = identity(2)
        PP_vol_abcd = (1.0 / 9.0) * einsum('ab,cd -> abcd', delta, delta)
        return PP_vol_abcd

    #----------------------------------------------------------------
    # Returns the inner product of P_dev
    #----------------------------------------------------------------
    def _get_PP_dev_nabcd(self):

        delta = identity(2)
        PP_dev_nabcd = 0.5 * (0.5 * (einsum('na,nc,bd -> nabcd', self._MPN, self._MPN, delta) +
                                     einsum('na,nd,bc -> nabcd', self._MPN, self._MPN, delta)) +
                              0.5 * (einsum('ac,nb,nd -> nabcd', delta, self._MPN, self._MPN) +
                                     einsum('ad,nb,nc -> nabcd', delta, self._MPN, self._MPN))) - \
            (1.0 / 3.0) * (einsum('na,nb,cd -> nabcd', self._MPN, self._MPN, delta) +
                           einsum('ab,nc,nd -> nabcd', delta, self._MPN, self._MPN)) + \
            (1.0 / 9.0) * einsum('ab,cd -> abcd', delta, delta)

        return PP_dev_nabcd

    #--------------------------------------------------------------------------
    # Returns the fourth order secant stiffness tensor (cf. [Wu.2009], Eq.(29))
    #--------------------------------------------------------------------------
    def _get_S_1_Emabcd(self, eps_Emab, kappa, omega):

        K0 = self.E / (1.0 - 2.0 * self.nu)
        G0 = self.E / (1.0 + self.nu)

        phi_Emn = 1.0 - self._get_omega(kappa)

        PP_vol_abcd = self._get_PP_vol_abcd()
        PP_dev_nabcd = self._get_PP_dev_nabcd()
        I_dev_abcd = self._get_I_dev_abcd()

        S_1_Emabcd = K0 * einsum('Emn, n, abcd-> Emabcd', phi_Emn, self._MPW, PP_vol_abcd) + \
            G0 * 2.0 * self.zeta_G * einsum('Emn, n, nabcd-> Emabcd',
                                            phi_Emn, self._MPW, PP_dev_nabcd) - (1.0 / 3.0) * (
                2.0 * self.zeta_G - 1.0) * G0 * einsum('Emn, n, abcd-> Emabcd',
                                                       phi_Emn, self._MPW, I_dev_abcd)

        return S_1_Emabcd

#     #------------------------------------------
#     # scalar damage factor for each microplane
#     #------------------------------------------
#     def _get_d_Em(self, s_Emng, eps_Emab):
#
#         d_Emn = 1.0 - self.get_state_variables(s_Emng, eps_Emab)[0]
#
#         d_Em = (1.0 / 3.0) * einsum('Emn,n-> Em',  d_Emn, self._MPW)
#
#         return d_Em
#
#     #------------------------------------------
#     # The 4th order volumetric damage tensor
#     #------------------------------------------
#     def _get_M_vol_abcd(self, sctx, eps_app_eng, sigma_kk):
#
#         d = self._get_Em( s_Emng, eps_Emab)
#         delta = identity(2)
#
#         I_4th_abcd = 0.5 * (einsum('ac,bd -> ijkl', delta, delta) +
#                             einsum('il,jk -> ijkl', delta, delta))
#
#         # print 'M_vol', (1 - d) * I_4th_ijkl
#
#         return (1 - d) * I_4th_ijkl
#
#     #------------------------------------------
#     # The 4th order deviatoric damage tensor
#     #------------------------------------------
#     def _get_M_dev_tns(self, phi_mtx):
#
#         delta = identity(3)
#         I_4th_ijkl = 0.5 * (einsum('ik,jl -> ijkl', delta, delta) +
#                             einsum('il,jk -> ijkl', delta, delta))
#         tr_phi_mtx = np.trace(phi_mtx)
#
#         M_dev_ijkl = self.zeta_G * (0.5 * (einsum('ik,jl->ijkl', delta, phi_mtx) +
#                                            einsum('il,jk->ijkl', delta, phi_mtx)) +
#                                     0.5 * (einsum('ik,jl->ijkl', phi_mtx, delta) +
#                                            einsum('il,jk->ijkl', phi_mtx, delta))) \
#             - (2. * self.zeta_G - 1.) * (tr_phi_mtx / 3.) * I_4th_ijkl
#
#         return M_dev_ijkl
#
#     #--------------------------------------------------------------------------
#     # Returns the fourth order secant stiffness tensor (cf. [Wu.2009], Eq.(31))
#     #--------------------------------------------------------------------------
#     def _get_S_2_Emabcd(self, sctx, eps_app_eng, sigma_kk):
#
#         K0 = self.E / (1. - 2. * self.nu)
#         G0 = self.E / (1. + self.nu)
#
#         I_vol_ijkl = self._get_I_vol_4()
#         I_dev_ijkl = self._get_I_dev_4()
#         phi_mtx = self._get_phi_mtx(sctx, eps_app_eng, sigma_kk)
#         M_vol_ijkl = self._get_M_vol_tns(sctx, eps_app_eng, sigma_kk)
#         M_dev_ijkl = self._get_M_dev_tns(phi_mtx)
#
#         S_2_ijkl = K0 * einsum('ijmn,mnrs,rskl -> ijkl', I_vol_ijkl, M_vol_ijkl, I_vol_ijkl ) \
#             + G0 * einsum('ijmn,mnrs,rskl -> ijkl', I_dev_ijkl, M_dev_ijkl, I_dev_ijkl)\
#
#         return S_2_ijkl
#
#     #--------------------------------------------------------------------------
#     # Returns the fourth order secant stiffness tensor (cf. [Wu.2009], Eq.(34))
#     #--------------------------------------------------------------------------
#     def _get_S_3_Emabcd(self, sctx, eps_app_eng, sigma_kk):
#
#         K0 = self.E / (1. - 2. * self.nu)
#         G0 = self.E / (1. + self.nu)
#
#         I_vol_ijkl = self._get_I_vol_4()
#         I_dev_ijkl = self._get_I_dev_4()
#
#         # The fourth order elastic stiffness tensor
#         S_0_ijkl = K0 * I_vol_ijkl + G0 * I_dev_ijkl
#
#         d_n = self._get_state_variables(sctx, eps_app_eng, sigma_kk)[:, 5]
#
#         PP_vol_4 = self._get_PP_vol_4()
#         PP_dev_4 = self._get_PP_dev_4()
#
#         delta = identity(3)
#         I_4th_ijkl = einsum('ik,jl -> ijkl', delta, delta)
#
#         D_ijkl = einsum('n,n,ijkl->ijkl', d_n, self._MPW, PP_vol_4) + \
#             2 * self.zeta_G * einsum('n,n,nijkl->ijkl', d_n, self._MPW, PP_dev_4) - (
#                 1 / 3.) * (2 * self.zeta_G - 1) * einsum('n,n,ijkl->ijkl', d_n, self._MPW, I_dev_ijkl)
#
#         phi_ijkl = (I_4th_ijkl - D_ijkl)
#
#         S_ijkl = einsum('ijmn,mnkl', phi_ijkl, S_0_ijkl)
#
#         return S_ijkl
#
#-------------------------------------------------------------------------
# Returns the fourth order secant stiffness tensor using the (double orthotropic) assumption
#-------------------------------------------------------------------------

    def _get_S_4_Emabcd(self, eps_Emab, kappa, omega):

        K0 = self.E / (1.0 - 2.0 * self.nu)
        G0 = self.E / (1.0 + self.nu)

        I_vol_abcd = self._get_I_vol_abcd()
        I_dev_abcd = self._get_I_dev_abcd()
        delta = identity(2)

        phi_Emab = self._get_phi_Emab(kappa)
        D_Emab = delta - phi_Emab
        d_Em = (1.0 / 3.0) * np.trace(D_Emab)
        D_bar_Emab = self.zeta_G * (D_Emab - d_Em * delta)

        S_4_Emabcd = (1.0 - d_Em) * K0 * I_vol_abcd + (1.0 - d_Em) * G0 * I_dev_abcd + (2.0 / 3.0) * (G0 - K0) * \
            (einsum('ij,Emkl -> Emijkl', delta, D_bar_Emab) +
             einsum('Emij,kl -> Emijkl', D_bar_Emab, delta)) + 0.5 * (-K0 + 2.0 * G0) * \
            (0.5 * (einsum('ik,Emjl -> Emijkl', delta, D_bar_Emab) + einsum('Emil,jk -> Emijkl', D_bar_Emab, delta)) +
             0.5 * (einsum('Emil,jk -> Emijkl', D_bar_Emab, delta) + einsum('ik,Emjl -> Emijkl', delta, D_bar_Emab)))

        return S_4_Emabcd
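    # Hedged sanity check for _get_S_4_Emabcd above (assuming _get_phi_Emab
    # returns the identity for an undamaged state, as in the commented-out 3D
    # variants above): then D_Emab = 0, d_Em = 0 and D_bar_Emab = 0, and the
    # expression collapses to
    #     S_4_Emabcd = K0 * I_vol_abcd + G0 * I_dev_abcd
    # i.e. the elastic stiffness in its volumetric/deviatoric split.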

    def _get_S_5_Emabcd(self, eps_Emab, kappa, omega):
        #----------------------------------------------------------------------
        # Returns the fourth order secant stiffness tensor (restrictive orthotropic)
        #----------------------------------------------------------------------

        K0 = self.E / (1. - 2. * self.nu)
        G0 = self.E / (1. + self.nu)

        delta = identity(2)
        phi_Emab = self._get_phi_Emab(kappa)
        # damaged stiffness without simplification
        S_5_Emabcd = (1.0 / 3.0) * (K0 - G0) * 0.5 * ((einsum('ij,Emkl -> Emijkl', delta, phi_Emab) +
                                                       einsum('Emij,kl -> Emijkl', phi_Emab, delta))) + \
            G0 * 0.5 * ((0.5 * (einsum('ik,Emjl -> Emijkl', delta, phi_Emab) + einsum('Emil,jk -> Emijkl', phi_Emab, delta)) +
                         0.5 * (einsum('Emik,jl -> Emijkl', phi_Emab, delta) + einsum('il,Emjk -> Emijkl', delta, phi_Emab))))

        # print 'S_5_Emabcd', S_5_Emabcd
        return S_5_Emabcd
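    # Hedged sanity check for _get_S_5_Emabcd above: with phi_Emab equal to the
    # identity (no damage) the expression reduces to
    #     (1/3) * (K0 - G0) * delta x delta  +  G0 * I_sym
    # which, with K0 = E/(1-2*nu) and G0 = E/(1+nu), is exactly
    # lambda * delta x delta + 2*mu * I_sym, the isotropic elastic stiffness.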


#
#     #-------------------------------------------------------------------------
#     # Returns the fourth order secant stiffness tensor (double orthotropic N-T split)
#     #-------------------------------------------------------------------------
#     def _get_S_5_Emabcd(self, sctx, eps_app_eng, sigma_kk):
#
#         E_N = self.E / (3.0 - 2.0 * (1.0 + self.nu))
#         E_T = self.E / (1. + self.nu)
#
#         I_vol_ijkl = self._get_I_vol_4()
#         I_dev_ijkl = self._get_I_dev_4()
#         delta = identity(3)
#         phi_mtx = self._get_phi_mtx(sctx, eps_app_eng, sigma_kk)
#         D_ij = delta - phi_mtx
#         d = (1. / 3.) * np.trace(D_ij)
#         D_bar_ij = self.zeta_G * (D_ij - d * delta)
#
#         S_5_ijkl = (1 - d) * E_N * I_vol_ijkl + (1 - d) * E_T * I_dev_ijkl + (2 / 3.) * (E_T - E_N) * \
#             (einsum('ij,kl -> ijkl', delta, D_bar_ij) +
#              einsum('ij,kl -> ijkl', D_bar_ij, delta)) + 0.5 * (2 * E_T - E_N) *\
#             (0.5 * (einsum('ik,jl -> ijkl', delta, D_bar_ij) + einsum('il,jk -> ijkl', D_bar_ij, delta)) +
#              0.5 * (einsum('il,jk -> ijkl', D_bar_ij, delta) + einsum('ik,jl -> ijkl', delta, D_bar_ij)))
#
#         return S_5_ijkl

#-------------------------------------------------------------------------
# Evaluation - get the corrector and predictor
#-------------------------------------------------------------------------

    def get_corr_pred(self, eps_Emab_n1, tn1, kappa, omega):

        I = self.update_variables(eps_Emab_n1, kappa, omega)

        #----------------------------------------------------------------------
        # if the regularization using the crack-band concept is on, calculate the
        # effective element length in the direction of principal strains
        #----------------------------------------------------------------------
        # if self.regularization:
        #    h = self.get_regularizing_length(sctx, eps_app_eng)
        #    self.phi_fn.h = h

        #         #------------------------------------------------------------------
        #         # Damage tensor (2th order):
        #         #------------------------------------------------------------------
        #         phi_Emab = self._get_phi_Emab(kappa_Emn)
        #
        #         #------------------------------------------------------------------
        #         # Damage tensor (4th order) using product- or sum-type symmetrization:
        #         #------------------------------------------------------------------
        #         beta_Emabcd = self._get_beta_Emabcd(phi_Emab)
        #
        #         #------------------------------------------------------------------
        #         # Damaged stiffness tensor calculated based on the damage tensor beta4:
        #         #------------------------------------------------------------------
        #         D_Emabcd, = einsum(
        #             'Emijab, abef, Emabef -> Emijab', beta_Emabcd, self.D_abef, beta_Emabcd)

        D_Emabcd = self._get_S_1_Emabcd(eps_Emab_n1, kappa, omega)

        sig_Emab = einsum('Emabcd,Emcd -> Emab', D_Emabcd, eps_Emab_n1)

        return D_Emabcd, sig_Emab
Esempio n. 14
0
class PipelineConfiguration(traits.HasTraits):

    # project settings
    project_dir = traits.Directory(
        exists=False, desc="data path to where the project is stored")

    # project metadata (for connectome file)
    project_metadata = traits.Dict(
        desc="project metadata to be stored in the connectome file")
    # DEPRECATED: this field is deprecated after version >1.0.2
    generator = traits.Str()

    # parcellation scheme
    parcellation_scheme = traits.Enum("NativeFreesurfer",
                                      ["Lausanne2008", "NativeFreesurfer"],
                                      desc="used parcellation scheme")

    # choose between 'L' (linear) and 'N' (non-linear) and 'B' (bbregister)
    registration_mode = traits.Enum(
        "Linear", ["Linear", "Nonlinear", "BBregister"],
        desc="registration mode: linear or non-linear or bbregister")

    diffusion_imaging_model = traits.Enum("DSI", ["DSI", "DTI", "QBALL"])

    # DSI
    nr_of_gradient_directions = traits.Str('515')
    nr_of_sampling_directions = traits.Str('181')
    odf_recon_param = traits.Str('-b0 1 -dsi -p 4 -sn 0')
    hardi_recon_param = traits.Str('-b0 1 -p 3 -sn 0')

    # DTI
    gradient_table_file = traits.File(exists=False)
    gradient_table = traits.Enum('siemens_64', [
        'custom', 'mgh_dti_006', 'mgh_dti_018', 'mgh_dti_030', 'mgh_dti_042',
        'mgh_dti_060', 'mgh_dti_072', 'mgh_dti_090', 'mgh_dti_120',
        'mgh_dti_144', 'siemens_06', 'siemens_12', 'siemens_20', 'siemens_256',
        'siemens_30', 'siemens_64'
    ])
    nr_of_b0 = traits.Str('1')
    max_b0_val = traits.Str('1000')
    dti_recon_param = traits.Str('')
    dtb_dtk2dir_param = traits.Str('')

    # tractography
    streamline_param = traits.Str('--angle 60  --seeds 32')

    # registration
    lin_reg_param = traits.Str('-usesqform -nosearch -dof 6 -cost mutualinfo')
    nlin_reg_bet_T2_param = traits.Str('-f 0.35 -g 0.15')
    nlin_reg_bet_b0_param = traits.Str('-f 0.2 -g 0.2')
    nlin_reg_fnirt_param = traits.Str(
        '--subsamp=8,4,2,2 --miter==5,5,5,5 --lambda=240,120,90,30 --splineorder=3 --applyinmask=0,0,1,1 --applyrefmask=0,0,1,1'
    )
    bb_reg_param = traits.Str('--init-header --dti')

    # dicom converter
    do_convert_diffusion = traits.Bool(True)
    do_convert_T1 = traits.Bool(True)
    do_convert_T2 = traits.Bool(False)
    do_convert_fMRI = traits.Bool(False)

    # rsfmri
    # choose between 'L' (linear) and 'B' (bbregister)
    rsfmri_registration_mode = traits.Enum(
        "Linear", ["Linear", "BBregister"],
        desc="registration mode: linear or bbregister")
    rsfmri_lin_reg_param = traits.Str(
        '-usesqform -nosearch -dof 6 -cost mutualinfo')
    rsfmri_bb_reg_param = traits.Str('--init-header --dti')
    do_save_mat = traits.Bool(True)

    # rsfmri PREPROCESSING STEPS
    rsfmri_slice_timing = traits.Enum("none", [
        "none", "bottom-top interleaved", "top-bottom interleaved",
        "bottom-top", "top-bottom"
    ],
                                      desc="time slicing mode")
    rsfmri_smoothing = traits.Str('0')
    rsfmri_discard = traits.Str('5')
    rsfmri_nuisance_global = traits.Bool(False)
    rsfmri_nuisance_WM = traits.Bool(True)
    rsfmri_nuisance_CSF = traits.Bool(True)
    rsfmri_nuisance_motion = traits.Bool(True)
    rsfmri_detrending = traits.Bool(True)
    rsfmri_lowpass = traits.Str('1')
    rsfmri_scrubbing_parameters = traits.Bool(True)
    rsfmri_scrubbing_apply = traits.Bool(True)
    rsfmri_scrubbing_FD = traits.Str('0.5')
    rsfmri_scrubbing_DVARS = traits.Str('5')

    # DEPRECATED:
    subject_raw_glob_diffusion = traits.Str("*.*")
    subject_raw_glob_T1 = traits.Str("*.*")
    subject_raw_glob_T2 = traits.Str("*.*")
    extract_diffusion_metadata = traits.Bool(False)

    # subject
    subject_name = traits.Str()
    subject_timepoint = traits.Str()
    subject_workingdir = traits.Directory()
    subject_logger = None
    subject_metadata = [
        KeyValue(key='description', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
    ]

    active_createfolder = traits.Bool(True)
    active_dicomconverter = traits.Bool(False)
    active_registration = traits.Bool(False)
    active_segmentation = traits.Bool(False)
    active_parcellation = traits.Bool(False)
    active_applyregistration = traits.Bool(False)
    active_reconstruction = traits.Bool(False)
    active_tractography = traits.Bool(False)
    active_fiberfilter = traits.Bool(False)
    active_connectome = traits.Bool(False)
    active_statistics = traits.Bool(False)
    active_cffconverter = traits.Bool(False)
    active_rsfmri_registration = traits.Bool(False)
    active_rsfmri_preprocessing = traits.Bool(False)
    active_rsfmri_connectionmatrix = traits.Bool(False)
    skip_completed_stages = traits.Bool(False)

    # metadata
    creator = traits.Str()
    email = traits.Str()
    publisher = traits.Str()
    created = traits.Date()
    modified = traits.Date()
    license = traits.Str()
    #    rights = traits.Str()
    reference = traits.Str()
    #    relation =  traits.Str()
    species = traits.Str('Homo sapiens')
    description = traits.Str()

    # segmentation
    recon_all_param = traits.Str('-all -no-isrunning')

    # parcellation
    custompar_nrroi = traits.Int()
    custompar_nodeinfo = traits.File()
    custompar_volumeparcell = traits.File()

    # fiber filtering
    apply_splinefilter = traits.Bool(
        True, desc='apply the spline filtering from diffusion toolkit')
    apply_fiberlength = traits.Bool(True, desc='apply cutoff to fiber lengths')
    fiber_cutoff_lower = traits.Float(
        20.0,
        desc='cut fibers that are shorter in length than given length in mm')
    fiber_cutoff_upper = traits.Float(
        500.0,
        desc='cut fibers that are longer in length than given length in mm')

    # measures
    connection_P0 = traits.Bool(False)
    connection_gfa = traits.Bool(False)
    connection_kurtosis = traits.Bool(False)
    connection_skewness = traits.Bool(False)
    connection_adc = traits.Bool(False)
    connection_fa = traits.Bool(False)

    # cff converter
    cff_fullnetworkpickle = traits.Bool(
        True,
        desc='stores the full network pickle generated by connectome creation')
    cff_cmatpickle = traits.Bool(True)
    cff_originalfibers = traits.Bool(True, desc='stores original fibers')
    cff_filteredfibers = traits.Bool(True, desc='stores filtered fibers')
    cff_finalfiberlabels = traits.Bool(
        True, desc='stores final fibers and their labelarrays')
    cff_fiberarr = traits.Bool(True)
    cff_rawdiffusion = traits.Bool(True)
    cff_scalars = traits.Bool(True)
    cff_rawT1 = traits.Bool(True)
    cff_rawT2 = traits.Bool(True)
    cff_roisegmentation = traits.Bool(
        True, desc='stores multi-resolution parcellation volumes')
    cff_surfaces = traits.Bool(True,
                               desc='stores individually generated surfaces')
    cff_surfacelabels = traits.Bool(
        True, desc='stores individually generated surface labels')

    # do you want to do manual white matter mask correction?
    wm_handling = traits.Enum(
        1, [1, 2, 3],
        desc="in what state should the freesurfer step be processed")

    # custom parcellation
    parcellation = traits.Dict(
        desc="provide the dictionary with your parcellation.")

    # start up fslview
    inspect_registration = traits.Bool(
        False, desc='start fslview to inspect the registration results')
    fsloutputtype = traits.Enum('NIFTI', ['NIFTI'])

    # connectome creation
    compute_curvature = traits.Bool(False)

    # email notification, needs a local smtp server
    # sudo apt-get install postfix
    emailnotify = traits.ListStr(
        [], desc='the email address to send stage completion status message')

    freesurfer_home = traits.Directory(exists=False, desc="path to Freesurfer")
    fsl_home = traits.Directory(exists=False, desc="path to FSL")
    dtk_home = traits.Directory(exists=False, desc="path to diffusion toolkit")

    # This file stores descriptions of the inputs/outputs to each stage of the
    # CMP pipeline.  It can be queried using the PipelineStatus python object
    pipeline_status_file = traits.Str("cmp.status")

    # Pipeline status object
    pipeline_status = pipeline_status.PipelineStatus()

    def _get_lausanne_parcellation(self, parcel="NativeFreesurfer"):

        if parcel == "Lausanne2008":
            return {
                'scale33': {
                    'number_of_regions':
                    83,
                    # contains name, url, color, freesurfer_label, etc. used for connection matrix
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution83'),
                        'resolution83.graphml'),
                    # scalar node values on fsaverage? or atlas?
                    'surface_parcellation':
                    None,
                    # scalar node values in fsaverage volume?
                    'volume_parcellation':
                    None,
                    # the subdirectory name from where to copy parcellations, with hemispheric wildcard
                    'fs_label_subdir_name':
                    'regenerated_%s_36',
                    # should we subtract the cortical rois for the white matter mask?
                    'subtract_from_wm_mask':
                    1,
                },
                'scale60': {
                    'number_of_regions':
                    129,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution150'),
                        'resolution150.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_60',
                    'subtract_from_wm_mask':
                    1,
                },
                'scale125': {
                    'number_of_regions':
                    234,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution258'),
                        'resolution258.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_125',
                    'subtract_from_wm_mask':
                    1,
                },
                'scale250': {
                    'number_of_regions':
                    463,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution500'),
                        'resolution500.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_250',
                    'subtract_from_wm_mask':
                    1,
                },
                'scale500': {
                    'number_of_regions':
                    1015,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution1015'),
                        'resolution1015.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_500',
                    'subtract_from_wm_mask':
                    1,
                },
            }
        else:
            return {
                'freesurferaparc': {
                    'number_of_regions':
                    83,
                    # contains name, url, color, freesurfer_label, etc. used for connection matrix
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('freesurferaparc'),
                        'resolution83.graphml'),
                    # scalar node values on fsaverage? or atlas?
                    'surface_parcellation':
                    None,
                    # scalar node values in fsaverage volume?
                    'volume_parcellation':
                    None,
                }
            }
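    # The dictionary returned above is stored as `self.parcellation` in __init__
    # below; downstream code can read entries such as
    #     conf.parcellation['freesurferaparc']['number_of_regions']
    #     conf.parcellation['freesurferaparc']['node_information_graphml']
    # (for the Lausanne2008 scheme the keys are 'scale33' ... 'scale500').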

    def __init__(self, **kwargs):
        # NOTE: In python 2.6, object.__init__ no longer accepts input
        # arguments.  HasTraits does not define an __init__ and
        # therefore these args were being ignored.
        super(PipelineConfiguration, self).__init__(**kwargs)

        # the default parcellation provided
        self.parcellation = self._get_lausanne_parcellation(
            parcel="NativeFreesurfer")

        self.can_use_dipy = dipy_here

        # no email notify
        self.emailnotify = []

        # default gradient table for DTI
        self.gradient_table_file = self.get_cmp_gradient_table('siemens_64')

        # try to discover paths from environment variables
        try:
            self.freesurfer_home = op.join(os.environ['FREESURFER_HOME'])
            self.fsl_home = op.join(os.environ['FSLDIR'])
            self.dtk_home = os.environ['DTDIR']
            self.dtk_matrices = op.join(self.dtk_home, 'matrices')
        except KeyError:
            pass

        self.fsloutputtype = 'NIFTI'
        os.environ['FSLOUTPUTTYPE'] = self.fsloutputtype
        os.environ['FSLOUTPUTTYPE'] = 'NIFTI'

    def consistency_check(self):
        """ Provides a checking facility for configuration objects """

        # project name not empty
        if not op.exists(self.project_dir):
            msg = 'Your project directory does not exist!'
            raise Exception(msg)

        # check metadata
        if self.creator == '':
            raise Exception('You need to enter creator metadata!')
        if self.publisher == '':
            raise Exception('You need to enter publisher metadata!')
        if self.email == '':
            raise Exception('You need to enter email of a contact person!')

        # check if software paths exists
        pas = {
            'configuration.freesurfer_home': self.freesurfer_home,
            'configuration.fsl_home': self.fsl_home,
            'configuration.dtk_home': self.dtk_home,
            'configuration.dtk_matrices': self.dtk_matrices
        }
        for k, p in pas.items():
            if not op.exists(p):
                msg = 'Required software path for %s does not exist: %s' % (k, p)
                raise Exception(msg)

        if self.subject_workingdir == '':
            msg = 'No working directory defined for subject'
            raise Exception(msg)
#        else:
#            wdir = self.get_subj_dir()
#            if not op.exists(wdir):
#                msg = 'Working directory %s does not exists for subject' % (wdir)
#                raise Exception(msg)
#            else:
#                wdiff = op.join(self.get_raw_diffusion())
#                print wdiff
#                if not op.exists(wdiff):
#                    msg = 'Diffusion MRI subdirectory %s does not exists for the subject' % wdiff
#                    raise Exception(msg)
#                wt1 = op.join(self.get_rawt1())
#                if not op.exists(wt1):
#                    msg = 'Structural MRI subdirectory %s T1 does not exist in RAWDATA' % wt1
#                    raise Exception(msg)

    def get_cmp_home(self):
        """ Return the cmp home path """
        return op.dirname(__file__)

    def get_rawdata(self):
        """ Return raw data path for the subject """
        return op.join(self.get_subj_dir(), 'RAWDATA')

    def get_log(self):
        """ Get subject log dir """
        return op.join(self.get_subj_dir(), 'LOG')

    def get_logname(self, suffix='.log'):
        """ Get a generic name for the log and pickle files """
        a = dt.datetime.now()
        return 'pipeline-%s-%02i%02i-%s-%s%s' % (
            a.date().isoformat(), a.time().hour, a.time().minute,
            self.subject_name, self.subject_timepoint, suffix)

    def get_logger(self):
        """ Get the logger instance created """
        if self.subject_logger is None:
            # setup logger for the subject
            self.subject_logger = \
                getLog(os.path.join(self.get_log(), self.get_logname()))
            return self.subject_logger
        else:
            return self.subject_logger

    def get_rawglob(self, modality):
        """ DEPRECATED: Get the file name endings for modality """

        if modality == 'diffusion':
            if not self.subject_raw_glob_diffusion == '':
                return self.subject_raw_glob_diffusion
            else:
                raise Exception('No raw_glob_diffusion defined for subject')

        elif modality == 'T1':
            if not self.subject_raw_glob_T1 == '':
                return self.subject_raw_glob_T1
            else:
                raise Exception('No raw_glob_T1 defined for subject')

        elif modality == 'T2':
            if not self.subject_raw_glob_T2 == '':
                return self.subject_raw_glob_T2
            else:
                raise Exception('No raw_glob_T2 defined for subject')

    def get_dicomfiles(self, modality):
        """ Get a list of dicom files for the requested modality. Tries to
        discover them automatically
        """
        from glob import glob

        if modality == 'diffusion':
            pat = self.get_raw_diffusion()
        elif modality == 'T1':
            pat = self.get_rawt1()
        elif modality == 'T2':
            pat = self.get_rawt2()
        elif modality == 'fMRI':
            pat = self.get_rawrsfmri()

        # discover files with *.* and *
        difiles = sorted(glob(op.join(pat, '*.*')) + glob(op.join(pat, '*')))

        # exclude potential .nii and .nii.gz files
        difiles = [
            e for e in difiles
            if not e.endswith('.nii') and not e.endswith('.nii.gz')
        ]

        # check if no files and throw exception
        if len(difiles) == 0:
            raise Exception('Could not find any DICOM files in folder %s' %
                            pat)

        return difiles

    def get_rawrsfmri(self):
        """ Get raw functional MRI path for subject """
        return op.join(self.get_rawdata(), 'fMRI')

    def get_rawt1(self):
        """ Get raw structural MRI T1 path for subject """
        return op.join(self.get_rawdata(), 'T1')

    def get_rawt2(self):
        """ Get raw structural MRI T2 path for subject """
        return op.join(self.get_rawdata(), 'T2')

    def get_subj_dir(self):
        return self.subject_workingdir

    def get_raw_diffusion(self):
        """ Get the raw diffusion path for subject """
        if self.diffusion_imaging_model == 'DSI':
            return op.join(self.get_subj_dir(), 'RAWDATA', 'DSI')
        elif self.diffusion_imaging_model == 'DTI':
            return op.join(self.get_subj_dir(), 'RAWDATA', 'DTI')
        elif self.diffusion_imaging_model == 'QBALL':
            return op.join(self.get_subj_dir(), 'RAWDATA', 'QBALL')

    def get_fs(self):
        """ Returns the subject root folder path for freesurfer files """
        return op.join(self.get_subj_dir(), 'FREESURFER')

    def get_stats(self):
        """ Return statistic output path """
        return op.join(self.get_subj_dir(), 'STATS')

    def get_cffdir(self):
        """ Returns path to store connectome file """
        return op.join(self.get_cmp(), 'cff')

    def get_nifti(self):
        """ Returns the subject root folder path for nifti files """
        return op.join(self.get_subj_dir(), 'NIFTI')

    def get_nifti_trafo(self):
        """ Returns the path to the subjects transformation / registration matrices """
        return op.join(self.get_nifti(), 'transformations')

    def get_nifti_bbregister(self):
        """ Returns the path to the subjects transformation / registration matrices, bbregister mode """
        return op.join(self.get_nifti(), 'bbregister')

    def get_diffusion_metadata(self):
        """ Diffusion metadata, i.e. where gradient_table.txt is stored """
        return op.join(self.get_nifti(), 'diffusion_metadata')

    def get_nifti_wm_correction(self):
        """ Returns the path to the subjects wm_correction path """
        return op.join(self.get_nifti(), 'wm_correction')

    def get_cmp(self):
        return op.join(self.get_subj_dir(), 'CMP')

    def get_cmp_rawdiff(self, ):
        return op.join(self.get_cmp(), 'raw_diffusion')

    def get_cmp_rawdiff_reconout(self):
        """ Returns the output path for diffusion reconstruction without prefix"""
        if self.diffusion_imaging_model == 'DSI':
            return op.join(self.get_cmp(), 'raw_diffusion', 'odf_0')
        elif self.diffusion_imaging_model == 'DTI':
            return op.join(self.get_cmp(), 'raw_diffusion', 'dti_0')
        elif self.diffusion_imaging_model == 'QBALL':
            return op.join(self.get_cmp(), 'raw_diffusion', 'qball_0')

    def get_cmp_rawdiff_resampled(self):
        return op.join(self.get_cmp_rawdiff(), '2x2x2')

    def get_cmp_fsout(self):
        return op.join(self.get_cmp(), 'fs_output')

    def get_cmp_fibers(self):
        return op.join(self.get_cmp(), 'fibers')

    def get_cmp_scalars(self):
        return op.join(self.get_cmp(), 'scalars')

    def get_cmp_matrices(self):
        return op.join(self.get_cmp_fibers(), 'matrices')

    def get_cmp_fmri(self):
        return op.join(self.get_cmp(), 'fMRI')

    def get_cmp_fmri_preproc(self):
        return op.join(self.get_cmp_fmri(), 'preprocessing')

    def get_cmp_fmri_matrices(self):
        return op.join(self.get_cmp_fmri(), 'matrices')

    def get_cmp_fmri_timeseries(self):
        return op.join(self.get_cmp_fmri(), 'timeseries')

    def get_cmp_tracto_mask(self):
        return op.join(self.get_cmp_fsout(), 'HR')

    def get_cmp_tracto_mask_tob0(self):
        return op.join(self.get_cmp_fsout(), 'HR__registered-TO-b0')

    def get_custom_gradient_table(self):
        """ Returns the absolute path to the custom gradient table
        with optional b-values in the 4th row """
        return self.gradient_table_file

    def get_cmp_gradient_table(self, name):
        """ Return default gradient tables shipped with CMP. These are mainly derived from
        Diffusion Toolkit """
        cmp_path = op.dirname(__file__)
        return op.join(cmp_path, 'data', 'diffusion', 'gradient_tables',
                       name + '.txt')

    def get_dtb_streamline_vecs_file(self, as_text=False):
        """ Returns the odf directions file used for DTB_streamline """
        cmp_path = op.dirname(__file__)
        if as_text:
            return op.join(cmp_path, 'data', 'diffusion', 'odf_directions',
                           '181_vecs.txt')
        else:
            return op.join(cmp_path, 'data', 'diffusion', 'odf_directions',
                           '181_vecs.dat')

    # XXX
    def get_cmp_scalarfields(self):
        """ Returns a list with tuples with the scalar field name and the
        absolute path to its nifti file """

        ret = []

        if self.diffusion_imaging_model == 'DSI':
            # add gfa per default
            ret.append(('gfa', op.join(self.get_cmp_scalars(),
                                       'dsi_gfa.nii.gz')))
            # XXX: add adc per default

        elif self.diffusion_imaging_model == 'DTI':
            # nothing to add yet for DTI
            pass

        return ret

    def get_dtk_dsi_matrix(self):
        """ Returns the DSI matrix from Diffusion Toolkit
        
        The parameters have to be set in the configuration object with keys:
        1. number of gradient directions : 'nr_of_gradient_directions'
        2. number of sampling directions : 'nr_of_sampling_directions'
        
        Example
        -------
        
        confobj.nr_of_gradient_directions = 515
        confobj.nr_of_sampling_directions = 181
        
        Returns matrix including absolute path to DSI_matrix_515x181.dat
        
        """

        grad = self.nr_of_gradient_directions
        samp = self.nr_of_sampling_directions
        fpath = op.join(self.dtk_matrices,
                        "DSI_matrix_%sx%s.dat" % (grad, samp))
        if not op.exists(fpath):
            msg = "DSI matrix does not exists: %s" % fpath
            raise Exception(msg)
        return fpath

    def get_lausanne_atlas(self, name=None):
        """ Return the absolute path to the lausanne parcellation atlas
        for the resolution name """

        cmp_path = op.dirname(__file__)

        provided_atlases = [
            'myatlas_36_rh.gcs', 'myatlasP1_16_rh.gcs', 'myatlasP17_28_rh.gcs',
            'myatlasP29_36_rh.gcs', 'myatlas_60_rh.gcs', 'myatlas_125_rh.gcs',
            'myatlas_250_rh.gcs', 'myatlas_36_lh.gcs', 'myatlasP1_16_lh.gcs',
            'myatlasP17_28_lh.gcs', 'myatlasP29_36_lh.gcs',
            'myatlas_60_lh.gcs', 'myatlas_125_lh.gcs', 'myatlas_250_lh.gcs'
        ]

        if name in provided_atlases:
            return op.join(cmp_path, 'data', 'colortable_and_gcs',
                           'my_atlas_gcs', name)
        else:
            msg = "Atlas %s does not exists" % name
            raise Exception(msg)

    def get_freeview_lut(self, name):
        """ Returns the Look-Up-Table as text file for a given parcellation scheme
        in  a dictionary """

        cmp_path = op.dirname(__file__)
        if name == "NativeFreesurfer":
            return {
                'freesurferaparc':
                op.join(cmp_path, 'data', 'parcellation', 'nativefreesurfer',
                        'freesurferaparc', 'FreeSurferColorLUT_adapted.txt')
            }
        else:
            return ""

    def get_lausanne_parcellation_path(self, parcellationname):

        cmp_path = op.dirname(__file__)

        if self.parcellation_scheme == "Lausanne2008":
            allowed_default_parcel = [
                'resolution83', 'resolution150', 'resolution258',
                'resolution500', 'resolution1015'
            ]
            if parcellationname in allowed_default_parcel:
                return op.join(cmp_path, 'data', 'parcellation',
                               'lausanne2008', parcellationname)
            else:
                msg = "Not a valid default parcellation name for the lausanne2008 parcellation scheme"
                raise Exception(msg)

        else:
            allowed_default_parcel = ['freesurferaparc']
            if parcellationname in allowed_default_parcel:
                return op.join(cmp_path, 'data', 'parcellation',
                               'nativefreesurfer', parcellationname)
            else:
                msg = "Not a valid default parcellation name for the NativeFreesurfer parcellation scheme"
                raise Exception(msg)

    def get_cmp_binary_path(self):
        """ Returns the path to the binary files for the current platform
        and architecture """

        if sys.platform == 'linux2':

            import platform as pf
            if '32' in pf.architecture()[0]:
                return op.join(op.dirname(__file__), "binary", "linux2",
                               "bit32")
            elif '64' in pf.architecture()[0]:
                return op.join(op.dirname(__file__), "binary", "linux2",
                               "bit64")
        else:
            raise Exception('No binary files compiled for your platform!')

    def get_pipeline_status_file(self):
        """Returns the absolute path of the pipeline status file"""
        return op.join(self.get_subj_dir(), self.pipeline_status_file)

    def init_pipeline_status(self):
        """Create the 'cmp.status'.  The 'cmp.status' file contains information
        about the inputs/outputs of each pipeline stage"""
        status_file = op.join(self.get_subj_dir(), self.pipeline_status_file)
        self.pipeline_status.Pipeline.name = "cmp"
        self.pipeline_status.SaveToFile(status_file)

    def update_pipeline_status(self):
        """Update the pipeline status on disk with the current status in memory"""
        status_file = op.join(self.get_subj_dir(), self.pipeline_status_file)
        self.pipeline_status.SaveToFile(status_file)
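# Hedged usage sketch (not part of the original class): the path helpers above are
# all derived from the subject working directory, so a minimal setup looks like the
# following. The literal paths are placeholders.
if __name__ == '__main__':
    conf = PipelineConfiguration()
    conf.subject_workingdir = '/data/myproject/sub01_tp1'
    conf.diffusion_imaging_model = 'DTI'
    print(conf.get_raw_diffusion())          # .../RAWDATA/DTI
    print(conf.get_cmp_rawdiff_reconout())   # .../CMP/raw_diffusion/dti_0
    print(conf.get_nifti_trafo())            # .../NIFTI/transformations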
Esempio n. 15
0
class config(HasTraits):
    uuid = traits.Str(desc="UUID")

    # Directories
    working_dir = Directory(mandatory=True, desc="Location of the Nipype working directory")
    base_dir = Directory(os.path.abspath('.'),exists=True, desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(mandatory=True, desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False, desc="Location to store crash files")
    surf_dir = Directory(os.path.abspath('.'),mandatory=True, desc="Freesurfer subjects directory")

    # Execution
    run_using_plugin = Bool(False, usedefault=True, desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS", "MultiProc", "SGE", "Condor",
        usedefault=True,
        desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
        usedefault=True, desc='Plugin arguments.')
    test_mode = Bool(False, mandatory=False, usedefault=True,
        desc='Affects whether where and if the workflow keeps its \
                            intermediary files. True to keep intermediary files. ')
    # Subjects
    datagrabber = traits.Instance(Data, ())
    subjects = traits.List(traits.Str, mandatory=True, usedefault=True,
        desc="Subject id's. Note: These MUST match the subject id's in the \
                                Freesurfer directory. For simplicity, the subject id's should \
                                also match with the location of individual functional files.")
    func_template = traits.String('%s/cleaned_resting.nii.gz')
    reg_template = traits.String('%s/cleaned_resting_reg.dat')
    ref_template = traits.String('%s/cleaned_resting_ref.nii.gz')
    combine_surfaces = traits.Bool() 

    # Target surface
    target_surf = traits.Enum('fsaverage4', 'fsaverage3', 'fsaverage5',
                              'fsaverage6', 'fsaverage', 'subject',
                              desc='which average surface to map to')
    surface_fwhm = traits.List([5], traits.Float(), mandatory=True,
        usedefault=True,
        desc="How much to smooth on target surface")
    projection_stem = traits.Str('-projfrac-avg 0 1 0.1',
                                 desc='how to project data onto the surface')
    combine_surfaces = traits.Bool(desc=('compute correlation matrix across'
                                         'both left and right surfaces'))

    # Saving output
    out_type = traits.Enum('mat', 'hdf5', desc='mat or hdf5')
    hdf5_package = traits.Enum('h5py', 'pytables',
        desc='which hdf5 package to use')
    # Advanced Options
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()
    save_script_only = traits.Bool(False)

    # Atlas mapping
    surface_atlas = traits.Str('None',
                               desc='Name of parcellation atlas')

    # Buttons
    check_func_datagrabber = Button("Check")

    def _check_func_datagrabber_fired(self):
        subs = self.subjects
        for s in subs:
            for template in [self.func_template, self.ref_template,
                             self.reg_template]:
                check_path(os.path.join(self.base_dir, template % s))
            check_path(os.path.join(self.surf_dir, s))
Esempio n. 16
0
class Factor(_traits.HasTraits):
    '''Represent different factors as independent variables

    FIXME: is the order of levels important, the uniqueness?
    looks more like a mapping type than a sequence type
    '''
    name = _traits.Unicode()
    levels = _traits.Dict(_traits.Unicode, Level)
    # Can be used to check if this can be used for a given dataset
    size = _traits.Property()
    _size = _traits.Int()
    default_ds_axis = _traits.Enum(("row", "col"))


    def __init__(self, name, size, *levels, **kwargs):
        '''Create a factor object

        name: The factor name
        levels: none, one or several level objects
        check_idx: check index strategy; no, toss_overlaping
        '''
        check_idx = kwargs.pop('check_idx', "no")
        super(Factor, self).__init__(name=name, size=size, **kwargs)
        for level in levels:
            self.add_level(level, check_idx)


    def add_level(self, level, check_idx="no"):
        '''Add level

        level: a level object
        check_idx: "no", "toss_overlaping", "excpet_overlapping", "return_overlapping"
        '''
        self.name_check(level.name)
        self.bound_check(level.selector)
        if check_idx == "no":
            self.levels[level.name] = level
        elif check_idx == "toss_overlaping":
            self.toss_overlaping(level)
            self.levels[level.name] = level
        else:
            msg = "Not valid value for check_idx: {0}".format(check_idx)
            raise ValueError(msg)


    def name_check(self, new_name):
        if new_name in self.levels:
            msg = "Level name collision. Name: {0} already exist in factor: {1}".format(
                new_name, self.name)
            raise ValueError(msg)


    def bound_check(self, selector):
        if self.size <= max(selector):
            msg = "Index in level out of bounds"
            raise IndexError(msg)


    def toss_overlaping(self, level):
        '''
        FIXME: This will manipulate an existing object. Is that unproblematic?
        '''
        existing_idx = []
        for lv in self.levels.itervalues():
            existing_idx.extend(lv.selector)
        exist = set(existing_idx)
        unique  = set(level.selector).difference(exist)
        level.selector = list(unique)


    def get_values(self, dataset, name, axis=None):
        '''Return numpy subarray
        '''
        idx = self.levels[name].selector
        if axis is not None:
            if axis == 0:
                return dataset.mat.values[idx,:]
            elif axis == 1:
                return dataset.mat.values[:,idx]
            raise ValueError('Illegal value for axis')
        else:
            if self.default_ds_axis == "row":
                return dataset.mat.values[idx,:]
            else:
                return dataset.mat.values[:,idx]


    def get_labels(self, dataset, name, axis=None):
        '''Return list of selected labels
        '''
        idx = self.levels[name].selector
        if axis is not None:
            if axis == 0:
                return dataset.mat.index[idx]
            elif axis == 1:
                return dataset.mat.columns[idx]
            raise ValueError('Illegal value for axis')
        else:
            if self.default_ds_axis == "row":
                return dataset.mat.index[idx]
            else:
                return dataset.mat.columns[idx]


    def _get_all_leveled(self):
        '''The index combined from all the levels
        '''
        return list(_itr.chain.from_iterable([lv.selector for lv in self.levels.itervalues()]))


    def get_combined_levels_subset(self, dataset, axis=None):
        '''Return numpy subarray
        '''
        sub_ds = dataset.copy()
        idx = self._get_all_leveled()
        if axis is not None:
            if axis == 0:
                sub_ds.mat = dataset.mat.iloc[idx,:]
                return sub_ds
            elif axis == 1:
                sub_ds.mat = dataset.mat.iloc[:,idx]
                return sub_ds
            raise ValueError('Illegal value for axis')
        else:
            if self.default_ds_axis == "row":
                sub_ds.mat = dataset.mat.iloc[idx,:]
                return sub_ds
            else:
                sub_ds.mat = dataset.mat.iloc[:,idx]
                return sub_ds


    def get_combined_levels_values(self, dataset, axis=None):
        '''Return numpy subarray
        '''
        idx = self._get_all_leveled()
        if axis is not None:
            if axis == 0:
                return dataset.mat.values[idx,:]
            elif axis == 1:
                return dataset.mat.values[:,idx]
            raise ValueError('Illegal value for axis')
        else:
            if self.default_ds_axis == "row":
                return dataset.mat.values[idx,:]
            else:
                return dataset.mat.values[:,idx]


    def get_combined_levels_labels(self, dataset, axis=None):
        '''Return list of all selected labels
        '''
        idx = self._get_all_leveled()
        if axis is not None:
            if axis == 0:
                return dataset.mat.index[idx]
            elif axis == 1:
                return dataset.mat.columns[idx]
            raise ValueError('Illegal value for axis')
        else:
            if self.default_ds_axis == "row":
                return dataset.mat.index[idx]
            else:
                return dataset.mat.columns[idx]


    def _get_nonleveled(self, dataset, axis):
        '''The indexes for a dataset that is not in a level
        '''
        lvs = self._get_all_leveled()
        return list(set(range(dataset.mat.shape[axis])).difference(set(lvs)))


    def get_rest_values(self, dataset, axis=None):
        '''Return numpy subarray
        '''
        if axis is not None:
            if axis == 0:
                idx = self._get_nonleveled(dataset, 0)
                return dataset.mat.values[idx,:]
            elif axis == 1:
                idx = self._get_nonleveled(dataset, 1)
                return dataset.mat.values[:,idx]
            raise ValueError('Illegal value for axis')
        else:
            if self.default_ds_axis == "row":
                idx = self._get_nonleveled(dataset, 0)
                return dataset.mat.values[idx,:]
            else:
                idx = self._get_nonleveled(dataset, 1)
                return dataset.mat.values[:,idx]


    def get_rest_labels(self, dataset, axis=None):
        '''Return list of not selected labels
        '''
        if axis is not None:
            if axis == 0:
                idx = self._get_nonleveled(dataset, 0)
                return dataset.mat.index[idx]
            elif axis == 1:
                idx = self._get_nonleveled(dataset, 1)
                return dataset.mat.columns[idx]
            raise ValueError('Illegal value for axis')
        else:
            if self.default_ds_axis == "row":
                idx = self._get_nonleveled(dataset, 0)
                return dataset.mat.index[idx]
            else:
                idx = self._get_nonleveled(dataset, 1)
                return dataset.mat.columns[idx]


    def __len__(self):
        return len(self.levels)


    # def __getitem__(self, key):
    #     pass


    # def __setitem__(self, key, value):
    #     pass


    # def __iter__(self):
    #     pass


    def _set_size(self, sz):
        self._size = sz


    def _get_size(self):
        return self._size
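# Hedged usage sketch for Factor (not part of the original class). The Level
# constructor shown here is hypothetical, as is the `ds` dataset object (anything
# exposing a pandas DataFrame as .mat); Factor only relies on the .name and
# .selector attributes of each level:
#
#     male = Level(name='male', selector=[0, 2, 4, 6])
#     female = Level(name='female', selector=[1, 3, 5, 7])
#     f = Factor('gender', 10, male, female)
#     f.get_values(ds, 'male', axis=0)        # rows 0, 2, 4, 6 of ds.mat as an array
#     f.get_rest_labels(ds, axis=0)           # labels of rows not in any level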
Esempio n. 17
0
class config(HasTraits):
    uuid = traits.Str(desc="UUID")

    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    base_dir = Directory(
        os.path.abspath('.'),
        mandatory=True,
        desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")
    save_script_only = traits.Bool(False)
    # Execution
    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether where and if the workflow keeps its \
                            intermediary files. True to keep intermediary files. '
    )
    timeout = traits.Float(14.0)
    # Subjects
    #subjects = traits.List(traits.Str, mandatory=True, usedefault=True,
    #    desc="Subject id's. Note: These MUST match the subject id's in the \
    #                            Freesurfer directory. For simplicity, the subject id's should \
    #                            also match with the location of individual functional files.")
    #fwhm=traits.List(traits.Float())
    #copes_template = traits.String('%s/preproc/output/fwhm_%s/cope*.nii.gz')
    #varcopes_template = traits.String('%s/preproc/output/fwhm_%s/varcope*.nii.gz')
    #contrasts = traits.List(traits.Str,desc="contrasts")

    datagrabber = traits.Instance(Data, ())

    # Regression
    design_csv = traits.File(desc="design .csv file")
    reg_contrasts = traits.Code(
        desc=
        "function named reg_contrasts which takes in 0 args and returns contrasts"
    )
    run_mode = traits.Enum("flame1", "ols", "flame12")
    #Normalization
    norm_template = traits.File(desc='Template of files')
    use_mask = traits.Bool(False)
    mask_file = traits.File()
    #Correction:
    run_correction = traits.Bool(False)
    p_threshold = traits.Float(0.05)
    z_threshold = traits.Float(2.3)
    connectivity = traits.Int(26)
    do_randomize = traits.Bool(False)
    num_iterations = traits.Int(5000)
    # Advanced Options
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()

    # Buttons
    check_func_datagrabber = Button("Check")
Esempio n. 18
0
class MATS3DMplDamageEEQ(MATS3DEval):
    # To use the model directly in the simulator specify the
    # time stepping classes

    epsilon_0 = Float(59.0e-6,
                      label="epsilon_0",
                      desc="Equivalent strain at the onset of damage",
                      enter_set=True,
                      auto_set=False)

    epsilon_f = Float(250.0e-6,
                      label="epsilon_f",
                      desc="Strain controlling the slope of the exponential softening",
                      enter_set=True,
                      auto_set=False)

    c_T = Float(0.01,
                label="c_T",
                desc="Coupling factor weighting the tangential strain in the equivalent strain",
                enter_set=True,
                auto_set=False)

    #=========================================================================
    # Configurational parameters
    #=========================================================================
    state_var_shapes = tr.Property(tr.Dict(), depends_on='n_mp')
    r'''
    Shapes of the state variables
    to be stored in the global array at the level 
    of the domain.
    '''
    @cached_property
    def _get_state_var_shapes(self):
        return {'kappa_n': (self.n_mp, ), 'omega_n': (self.n_mp, )}

    U_var_shape = (6, )
    '''Shape of the primary variable required by the TStepState.
    '''

    node_name = 'Desmorat model'

    tree_node_list = tr.List([])

    #=========================================================================
    # Evaluation - get the corrector and predictor
    #=========================================================================

    def get_corr_pred(self, eps_ab, tn1, kappa_n, omega_n):

        self._update_state_variables(eps_ab, kappa_n, omega_n)
        #----------------------------------------------------------------------
        # if the regularization using the crack-band concept is on, calculate the
        # effective element length in the direction of principal strains
        #----------------------------------------------------------------------
        # if self.regularization:
        #    h = self.get_regularizing_length(sctx, eps_app_eng)
        #    self.phi_fn.h = h

        #------------------------------------------------------------------
        # Damage tensor (2nd order):
        #------------------------------------------------------------------
        phi_ab = self._get_phi_ab(kappa_n)

        #------------------------------------------------------------------
        # Damage tensor (4th order) using product- or sum-type symmetrization:
        #------------------------------------------------------------------
        beta_abcd = self._get_beta_abcd(phi_ab)

        #------------------------------------------------------------------
        # Damaged stiffness tensor calculated based on the damage tensor beta4:
        #------------------------------------------------------------------
        D_ijab = einsum('...ijab, abef, ...cdef -> ...ijcd', beta_abcd,
                        self.D_abef, beta_abcd)

        sig_ab = einsum('...abef,...ef -> ...ab', D_ijab, eps_ab)

        return sig_ab, D_ijab

    #=========================================================================
    # MICROPLANE-Kinematic constraints
    #=========================================================================

    _MPNN = Property(depends_on='n_mp')
    r'''Get the dyadic product of the microplane normals
    '''

    @cached_property
    def _get__MPNN(self):
        # dyadic product of the microplane normals

        MPNN_nij = einsum('ni,nj->nij', self._MPN, self._MPN)
        return MPNN_nij

    _MPTT = Property(depends_on='n_mp')
    r'''Get the third order tangential tensor (operator) for each microplane
    '''

    @cached_property
    def _get__MPTT(self):
        # Third order tangential tensor for each microplane
        delta = identity(3)
        MPTT_nijr = 0.5 * (
            einsum('ni,jr -> nijr', self._MPN, delta) +
            einsum('nj,ir -> njir', self._MPN, delta) -
            2.0 * einsum('ni,nj,nr -> nijr', self._MPN, self._MPN, self._MPN))
        return MPTT_nijr

    def _get_e_na(self, eps_ab):
        r'''
        Projection of apparent strain onto the individual microplanes
        '''
        e_ni = einsum('nb,...ba->...na', self._MPN, eps_ab)
        return e_ni

    def _get_e_N_n(self, e_na):
        r'''
        Get the normal strain array for each microplane
        '''
        e_N_n = einsum('...na, na->...n', e_na, self._MPN)
        return e_N_n

    def _get_e_equiv_n(self, e_na):
        r'''
        Returns a list of the microplane equivalent strains
        based on the list of microplane strain vectors
        '''
        # magnitude of the normal strain vector for each microplane
        e_N_n = self._get_e_N_n(e_na)
        # positive part of the normal strain magnitude for each microplane
        e_N_pos_n = (np.abs(e_N_n) + e_N_n) / 2.0
        # normal strain vector for each microplane
        e_N_na = einsum('...n,ni -> ...ni', e_N_n, self._MPN)
        # tangent strain ratio
        c_T = self.c_T
        # tangential strain vector for each microplane
        e_T_na = e_na - e_N_na
        # squared tangential strain vector for each microplane
        e_TT_n = einsum('...ni,...ni -> ...n', e_T_na, e_T_na)
        # equivalent strain for each microplane
        e_equiv_n = sqrt(e_N_pos_n * e_N_pos_n + c_T * e_TT_n)
        return e_equiv_n
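    # In short, the equivalent strain per microplane computed above is
    #     e_equiv = sqrt( <e_N>_+**2 + c_T * (e_T . e_T) )
    # i.e. the positive part of the normal strain combined with the tangential
    # strain magnitude, weighted by the coupling factor c_T.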

    def _update_state_variables(self, eps_ab, kappa_n, omega_n):
        e_na = self._get_e_na(eps_ab)
        eps_eq_n = self._get_e_equiv_n(e_na)
        f_trial_n = eps_eq_n - self.epsilon_0
        I = np.where(f_trial_n > 0)
        k_n = np.max(np.array([kappa_n[I], eps_eq_n[I]]), axis=0)
        kappa_n[I] = k_n
        omega_n[I] = self._get_omega(k_n)

    def _get_omega(self, kappa_n):
        '''
        Return new value of damage parameter
        @param kappa:
        '''
        omega_n = np.zeros_like(kappa_n)
        epsilon_0 = self.epsilon_0
        epsilon_f = self.epsilon_f
        I = np.where(kappa_n >= epsilon_0)
        omega_n[I] = (
            1.0 - (epsilon_0 / kappa_n[I] * np.exp(-1.0 *
                                                   (kappa_n[I] - epsilon_0) /
                                                   (epsilon_f - epsilon_0))))
        return omega_n
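    # Hedged numeric illustration of the damage law above, using the class
    # defaults epsilon_0 = 59e-6 and epsilon_f = 250e-6 (standalone, numpy only):
    #
    #     kappa = np.array([30e-6, 59e-6, 150e-6, 500e-6])
    #     eps_0, eps_f = 59.0e-6, 250.0e-6
    #     omega = np.zeros_like(kappa)
    #     I = kappa >= eps_0
    #     omega[I] = 1.0 - (eps_0 / kappa[I]) * np.exp(
    #         -(kappa[I] - eps_0) / (eps_f - eps_0))
    #     # omega stays 0 up to eps_0 and then grows monotonically towards 1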

    def _get_phi_ab(self, kappa_n):
        # Returns the 2nd order damage tensor 'phi_mtx'
        # scalar integrity factor for each microplane
        phi_n = np.sqrt(1.0 - self._get_omega(kappa_n))
        # print 'phi_Emn', phi_Emn[:, -1, :]
        # integration terms for each microplanes
        phi_ab = einsum('...n,n,nab->...ab', phi_n, self._MPW, self._MPNN)
        return phi_ab

    def _get_beta_abcd(self, phi_ab):
        '''
        Returns the 4th order damage tensor 'beta4' using sum-type symmetrization
        (cf. [Jir99], Eq.(21))
        '''
        delta = identity(3)
        beta_ijkl = 0.25 * (einsum('...ik,jl->...ijkl', phi_ab, delta) +
                            einsum('...il,jk->...ijkl', phi_ab, delta) +
                            einsum('...jk,il->...ijkl', phi_ab, delta) +
                            einsum('...jl,ik->...ijkl', phi_ab, delta))
        return beta_ijkl
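    # Hedged sanity check for _get_beta_abcd above: for an undamaged state,
    # phi_ab = identity(3), so beta_ijkl = 0.5 * (d_ik d_jl + d_il d_jk), the
    # symmetric fourth-order identity; the damaged stiffness assembled in
    # get_corr_pred then reduces to the elastic tensor D_abef.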

    #-----------------------------------------------
    # number of microplanes - currently fixed for 3D
    #-----------------------------------------------
    n_mp = Constant(28)

    #-----------------------------------------------
    # get the normal vectors of the microplanes
    #-----------------------------------------------
    _MPN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPN(self):
        return array([[.577350259, .577350259, .577350259],
                      [.577350259, .577350259, -.577350259],
                      [.577350259, -.577350259, .577350259],
                      [.577350259, -.577350259, -.577350259],
                      [.935113132, .250562787, .250562787],
                      [.935113132, .250562787, -.250562787],
                      [.935113132, -.250562787, .250562787],
                      [.935113132, -.250562787, -.250562787],
                      [.250562787, .935113132, .250562787],
                      [.250562787, .935113132, -.250562787],
                      [.250562787, -.935113132, .250562787],
                      [.250562787, -.935113132, -.250562787],
                      [.250562787, .250562787, .935113132],
                      [.250562787, .250562787, -.935113132],
                      [.250562787, -.250562787, .935113132],
                      [.250562787, -.250562787, -.935113132],
                      [.186156720, .694746614, .694746614],
                      [.186156720, .694746614, -.694746614],
                      [.186156720, -.694746614, .694746614],
                      [.186156720, -.694746614, -.694746614],
                      [.694746614, .186156720, .694746614],
                      [.694746614, .186156720, -.694746614],
                      [.694746614, -.186156720, .694746614],
                      [.694746614, -.186156720, -.694746614],
                      [.694746614, .694746614, .186156720],
                      [.694746614, .694746614, -.186156720],
                      [.694746614, -.694746614, .186156720],
                      [.694746614, -.694746614, -.186156720]])

    #-------------------------------------
    # get the weights of the microplanes
    #-------------------------------------
    _MPW = Property(depends_on='n_mp')

    @cached_property
    def _get__MPW(self):
        return array([
            .0160714276, .0160714276, .0160714276, .0160714276, .0204744730,
            .0204744730, .0204744730, .0204744730, .0204744730, .0204744730,
            .0204744730, .0204744730, .0204744730, .0204744730, .0204744730,
            .0204744730, .0158350505, .0158350505, .0158350505, .0158350505,
            .0158350505, .0158350505, .0158350505, .0158350505, .0158350505,
            .0158350505, .0158350505, .0158350505
        ]) * 6.0

    def _get_lame_params(self):
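        # first Lame parameter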
        la = self.E * self.nu / ((1. + self.nu) * (1. - 2. * self.nu))
        # second Lame parameter (shear modulus)
        mu = self.E / (2. + 2. * self.nu)
        return la, mu

    D_abef = tr.Property(tr.Array, depends_on='+input')

    @tr.cached_property
    def _get_D_abef(self):
        la, mu = self._get_lame_params()
        delta = identity(3)
        D_abef = (einsum(',ij,kl->ijkl', la, delta, delta) +
                  einsum(',ik,jl->ijkl', mu, delta, delta) +
                  einsum(',il,jk->ijkl', mu, delta, delta))

        return D_abef

    #-----------------------------------------------------------
    # Response variables
    #-----------------------------------------------------------

    def get_phi_ab(self, eps_ab, tn1, kappa_n, omega_n):
        return self._get_phi_ab(kappa_n)

    def get_omega_ab(self, eps_ab, tn1, kappa_n, omega_n):
        return np.identity(3) - self._get_phi_ab(kappa_n)

    def get_max_omega(self, eps_Emab, t_n1, kappa_n, omega_n):
        return np.max(omega_n, axis=-1)

    def _get_var_dict(self):
        var_dict = super(MATS3DMplDamageEEQ, self)._get_var_dict()
        var_dict.update(max_omega=self.get_max_omega,
                        phi_ab=self.get_phi_ab,
                        omega_ab=self.get_omega_ab)
        return var_dict
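
# A minimal sketch, not part of the original class: in this family of microplane
# damage models the 4th-order tensor from _get_beta_abcd is typically used to
# degrade the elastic stiffness via the double contraction beta : D : beta
# (cf. [Jir99]). The helper name below is hypothetical.
import numpy as np

def damaged_stiffness(beta_ijkl, D_abef):
    # D_damaged_ijkl = beta_ijmn * D_mnpq * beta_klpq (batched over material points)
    return np.einsum('...ijmn,mnpq,...klpq->...ijkl', beta_ijkl, D_abef, beta_ijkl)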
Example 19
class config(HasTraits):
    uuid = traits.Str(desc="UUID")
    desc = traits.Str(desc='Workflow description')
    # Directories
    working_dir = Directory(mandatory=True, desc="Location of the Nipype working directory")
    base_dir = Directory(os.path.abspath('.'),mandatory=True, desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(os.path.abspath('.'),mandatory=True, desc="Location where the BIP will store the results")
    field_dir = Directory(desc="Base directory of field-map data (Should be subject-independent) \
                                                 Set this value to None if you don't want fieldmap distortion correction")
    crash_dir = Directory(mandatory=False, desc="Location to store crash files")
    surf_dir = Directory(mandatory=True, desc= "Freesurfer subjects directory")

    # Execution

    run_using_plugin = Bool(False, usedefault=True, desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS", "PBSGraph","MultiProc", "SGE", "Condor",
        usedefault=True,
        desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
        usedefault=True, desc='Plugin arguments.')
    test_mode = Bool(False, mandatory=False, usedefault=True,
        desc='Affects whether and where the workflow keeps its \
                            intermediary files. True to keep intermediary files. ')
    # Subjects

    subjects= traits.List(traits.Str, mandatory=True, usedefault=True,
        desc="Subject id's. Note: These MUST match the subject id's in the \
                                Freesurfer directory. For simplicity, the subject id's should \
                                also match with the location of individual functional files.")
    dwi_template = traits.String('%s/functional.nii.gz')
    bval_template = traits.String('%s/bval')
    bvec_template = traits.String('%s/fbvec')
    run_datagrabber_without_submitting = traits.Bool(desc="Run the datagrabber without \
    submitting to the cluster")
    timepoints_to_remove = traits.Int(0,usedefault=True)

    # Fieldmap

    use_fieldmap = Bool(False, mandatory=False, usedefault=True,
        desc='True to include fieldmap distortion correction. Note: field_dir \
                                     must be specified')
    magnitude_template = traits.String('%s/magnitude.nii.gz')
    phase_template = traits.String('%s/phase.nii.gz')
    TE_diff = traits.Float(desc='difference in B0 field map TEs')
    sigma = traits.Int(2, desc='2D spatial gaussian smoothing stdev (default = 2mm)')
    echospacing = traits.Float(desc="EPI echo spacing")

    # Bvecs
    do_rotate_bvecs = traits.Bool(True, usedefault=True)

    # Advanced Options
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()

    # Buttons
    check_func_datagrabber = Button("Check")
    check_field_datagrabber = Button("Check")

    def _check_func_datagrabber_fired(self):
        subs = self.subjects

        for s in subs:
            if not os.path.exists(os.path.join(self.base_dir,self.dwi_template % s)):
                print("ERROR:", os.path.join(self.base_dir,self.dwi_template % s), "does NOT exist!")
                break
            else:
                print(os.path.join(self.base_dir,self.dwi_template % s), "exists!")

    def _check_field_datagrabber_fired(self):
        subs = self.subjects

        for s in subs:
            if not os.path.exists(os.path.join(self.field_dir,self.magnitude_template % s)):
                print("ERROR:", os.path.join(self.field_dir,self.magnitude_template % s), "does NOT exist!")
                break
            else:
                print(os.path.join(self.field_dir,self.magnitude_template % s), "exists!")
            if not os.path.exists(os.path.join(self.field_dir,self.phase_template % s)):
                print("ERROR:", os.path.join(self.field_dir,self.phase_template % s), "does NOT exist!")
                break
            else:
                print(os.path.join(self.field_dir,self.phase_template % s), "exists!")
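
# Hypothetical usage sketch (paths and subject ids are illustrative, not from the
# original project): populate the config and trigger the existence check without
# the GUI button.
c = config()
c.base_dir = '/data/dwi_study'
c.subjects = ['sub001', 'sub002']
c.dwi_template = '%s/functional.nii.gz'
c._check_func_datagrabber_fired()  # prints which expected files exist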
Example 20
class Data(HasTraits):
    fields = traits.List(traits.Instance(DataBase, ()))
    base_directory = Directory(os.path.abspath('.'))
    template = traits.Str('*')
    template_args = traits.Dict({"a": "b"}, usedefault=True)
    field_template = traits.Dict({"key": ["hi"]}, usedefault=True)
    sort = traits.Bool(True)

    if use_view:
        check = traits.Button("Check")
        view = get_view()

    def __init__(self, outfields=None):
        if outfields:
            d_ft = {}
            d_ta = {}
            for out in outfields:
                d_ft[out] = '%s'
                d_ta[out] = [['name']]
            self.field_template = d_ft
            self.template_args = d_ta
            self.outfields = outfields

    def _get_infields(self):
        infields = []
        for f in self.fields:
            infields.append(f.name)
        return infields

    def _add_iterable(self, field):
        import nipype.interfaces.utility as niu
        import nipype.pipeline.engine as pe
        it = pe.Node(niu.IdentityInterface(fields=[field.name]),
                     name=field.name + "_iterable")
        it.iterables = (field.name, field.values)
        return it

    def _set_inputs(self):
        self._node_added = False
        set_dict = {}
        for f in self.fields:
            if not f.iterable:
                set_dict[f.name] = f.values
            else:
                it = self._add_iterable(f)
                self._node_added = True
                self._wk.connect(it, f.name, self._dg, f.name)
        self._dg.inputs.trait_set(**set_dict)

    def create_dataflow(self):
        import nipype.interfaces.io as nio
        import nipype.pipeline.engine as pe
        self._wk = pe.Workflow(name='custom_datagrabber')
        self._dg = pe.Node(nio.DataGrabber(outfields=self.outfields,
                                           infields=self._get_infields(),
                                           sort_filelist=self.sort),
                           name='datagrabber')
        self._set_inputs()
        self._dg.inputs.base_directory = self.base_directory
        self._dg.inputs.field_template = self.field_template
        self._dg.inputs.template_args = self.template_args
        self._dg.inputs.template = self.template
        if not self._node_added:
            self._wk.add_nodes([self._dg])
        return self._wk

    def get_fields(self):
        foo = self.get()
        d = {}
        for key, item in foo.items():
            if not key.startswith('_'):
                if isinstance(item, list):
                    d[key] = []
                    for it in item:
                        if isinstance(it, DataBase):
                            d[key].append(it.get())
                        else:
                            d[key].append(it)
                else:
                    d[key] = item
        return d

    def set_fields(self, d):
        for key, item in d.items():
            if not key == "fields":
                self.set(**{key: item})
            else:
                foo = []
                for f in item:
                    tmp = DataBase()
                    tmp.set(**f)
                    foo.append(tmp)
                self.set(**{key: foo})

    def _check_fired(self):
        dg = self.create_dataflow()
        dg.run()
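
# Assumed usage sketch of the Data grabber above; field names and paths are
# illustrative. Infields normally come from DataBase entries appended to `fields`.
dg = Data(outfields=['dwi'])
dg.base_directory = '/data/study'
dg.field_template = {'dwi': '%s/dwi.nii.gz'}
dg.template_args = {'dwi': [['name']]}  # '%s' filled from an infield called 'name'
wf = dg.create_dataflow()  # nipype workflow wrapping a DataGrabber node
# wf.run()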
class config(HasTraits):
    uuid = traits.Str(desc="UUID")
    desc = traits.Str(desc="Workflow Description")
    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    sink_dir = Directory(os.path.abspath('.'),
                         mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")
    json_sink = Directory(mandatory=False, desc="Location to store json_files")
    surf_dir = Directory(mandatory=True, desc="Freesurfer subjects directory")
    save_script_only = traits.Bool(False)
    # Execution

    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether and where the workflow keeps its \
                            intermediary files. True to keep intermediary files. '
    )
    timeout = traits.Float(14.0)
    # Subjects

    #subjects= traits.List(traits.Str, mandatory=True, usedefault=True,
    #    desc="Subject id's. Note: These MUST match the subject id's in the \
    #                            Freesurfer directory. For simplicity, the subject id's should \
    #                            also match with the location of individual functional files.")

    datagrabber = traits.Instance(Data, ())
    # First Level

    interscan_interval = traits.Float()
    film_threshold = traits.Float()
    input_units = traits.Enum('scans', 'secs')
    is_sparse = traits.Bool(False)
    model_hrf = traits.Bool(True)
    stimuli_as_impulses = traits.Bool(True)
    use_temporal_deriv = traits.Bool(True)
    volumes_in_cluster = traits.Int(1)
    ta = traits.Float()
    tr = traits.Float()
    hpcutoff = traits.Float()
    scan_onset = traits.Int(0)
    scale_regressors = traits.Bool(True)
    #bases = traits.Dict({'dgamma':{'derivs': False}},use_default=True)
    bases = traits.Dict(
        {'dgamma': {
            'derivs': False
        }}, usedefault=True
    )  #traits.Enum('dgamma','gamma','none'), traits.Enum(traits.Dict(traits.Enum('derivs',None), traits.Bool),None), desc="name of basis function and options e.g., {'dgamma': {'derivs': True}}")

    # preprocessing info
    preproc_config = traits.File(desc="preproc config file")
    use_compcor = traits.Bool(desc="use noise components from CompCor")
    #advanced_options
    use_advanced_options = Bool(False)
    advanced_options = traits.Code()
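
# Assumed illustration (not from the original workflow) of how a `bases` dict of
# this shape is handed to nipype's FSL first-level interface.
from nipype.interfaces.fsl import Level1Design

l1 = Level1Design()
l1.inputs.bases = {'dgamma': {'derivs': False}}  # same structure as config.bases
l1.inputs.interscan_interval = 2.0               # would come from config.tr
l1.inputs.model_serial_correlations = True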
class config(HasTraits):
    uuid = traits.Str(desc="UUID")

    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    base_dir = Directory(
        os.path.abspath('.'),
        mandatory=True,
        desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")
    save_script_only = traits.Bool(False)

    # Execution
    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether and where the workflow keeps its \
                            intermediary files. True to keep intermediary files. '
    )
    timeout = traits.Float(14.0)
    datagrabber = traits.Instance(Data, ())

    # Regression
    run_one_sample_T_test = traits.Bool(True)
    run_regression = traits.Bool()
    design_csv = traits.File(desc="design .csv file")
    reg_contrasts = traits.Code(
        desc=
        "function named reg_contrasts which takes in 0 args and returns contrasts"
    )
    use_regressors = traits.Bool()
    estimation_method = traits.Enum('Classical', 'Bayesian', 'Bayesian2')
    include_intercept = traits.Bool(True)
    #Normalization

    norm_template = traits.File(desc='Template of files')
    use_mask = traits.Bool(False)
    mask_file = traits.File(desc='already binarized mask file to use')

    #Correction:
    p_threshold = traits.Float(0.05)
    height_threshold = traits.Float(0.05)
    min_cluster_size = traits.Int(25)
    # Advanced Options
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()

    # Buttons
    check_func_datagrabber = Button("Check")
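
# Hypothetical example of the zero-argument function expected by reg_contrasts;
# regressor names are illustrative. Tuples follow nipype's (name, type,
# [regressors], [weights]) contrast convention.
def reg_contrasts():
    c1 = ('group_mean', 'T', ['mean'], [1])
    c2 = ('age_effect', 'T', ['age'], [1])
    return [c1, c2]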
Example 23
class config(HasTraits):
    uuid = traits.Str(desc="UUID")

    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    base_dir = Directory(
        os.path.abspath('.'),
        mandatory=True,
        desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")

    # Execution
    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether and where the workflow keeps its \
                            intermediary files. True to keep intermediary files. '
    )
    # Subjects
    subjects = traits.List(
        traits.Str,
        mandatory=True,
        usedefault=True,
        desc="Subject id's. Note: These MUST match the subject id's in the \
                                Freesurfer directory. For simplicity, the subject id's should \
                                also match with the location of individual functional files."
    )
    fwhm = traits.List(traits.Float())
    inputs_template = traits.String('%s/preproc/output/fwhm_%s/*.nii.gz')
    meanfunc_template = traits.String('%s/preproc/mean/*_mean.nii.gz')
    fsl_mat_template = traits.String('%s/preproc/bbreg/*.mat')
    unwarped_brain_template = traits.String('%s/smri/unwarped_brain/*.nii*')
    affine_transformation_template = traits.String(
        '%s/smri/affine_transformation/*.nii*')
    warp_field_template = traits.String('%s/smri/warped_field/*.nii*')

    #Normalization
    standard_transform_template = traits.File(
        mandatory=True, desc='Standard template to warp to')
    standard_norm_template = traits.File()
    standard_warp_field_template = traits.File()
    standard_affine_transformation_template = traits.File()
    # Advanced Options
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()

    # Buttons
    check_func_datagrabber = Button("Check")
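
# Assumed illustration of how the %-style templates above are typically expanded
# per subject (and smoothing level) and globbed; paths are hypothetical.
import glob
import os

base_dir = '/data/study'
subject, fwhm = 'sub001', 6.0
pattern = '%s/preproc/output/fwhm_%s/*.nii.gz' % (subject, fwhm)
files = glob.glob(os.path.join(base_dir, pattern))  # smoothed runs for this subject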